Dataset columns:

file_name : large_string (lengths 4-140)
prefix    : large_string (lengths 0-39k)
suffix    : large_string (lengths 0-36.1k)
middle    : large_string (lengths 0-29.4k)
fim_type  : large_string (4 classes)
nc_read_functions.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ''' Set of auxliary functions for the WRF_read script author: Roberto Chavez <[email protected]> march/2020 ''' import numpy as np import pandas as pd import datetime from scipy.spatial import cKDTree import utm from wrf import extract_dim, ll_to_xy, xy_to_ll def getVarEff(ncfid, varName, iTimes, iBT, iSN, iWE): ''' Memmory efficient method of getting the nc data and destagered (if that is the case ) into a centered grid. NOTE: it assumes that the variables stored in the netcdf have as first dimension of the Time dimension Parameters ---------- ncfid : file identifier class netcdf file identifier. varName : str name of the variable to extract from the netcdf. Should match exactly the name of the variable as stored in the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. iSN : int CENTERED (i.e. unstaggered) indexes of desired south-north coordinates. iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- out : ndarray numpy array with the extracted 3D or 4D data for the given time and coords dimensions ''' if not varName in ncfid.variables.keys(): raise Exception("ERROR: " + varName + " variable does not exist in the netcdf file") varObj = ncfid.variables.get(varName) ncDims = varObj.dimensions # it is assuming that first dimension is Time! logicSlices = [iTimes] stageredDim = -1 for ii, iStr in enumerate(ncDims[1:]): if iStr.find('stag') > 0: stageredDim = ii + 1 if iStr.startswith('bottom'): logicSlices.append(np.append(iBT, iBT[-1] + 1)) elif iStr.startswith('south'): logicSlices.append(np.append(iSN, iSN[-1] + 1)) elif iStr.startswith('west'): logicSlices.append(np.append(iWE, iWE[-1] + 1)) else: if iStr.startswith('bottom'): logicSlices.append(iBT) elif iStr.startswith('south'): logicSlices.append(iSN) elif iStr.startswith('west'): logicSlices.append(iWE) # Extract and unstager the data varData = varObj[logicSlices] if (len(ncDims) == 3) and (stageredDim > 0): if stageredDim == 1: varData = (varData[:, 0:-1, :] + varData[:, 1:, :]) * 0.5 elif stageredDim == 2: varData = (varData[:, :, 0:-1] + varData[:, :, 1:]) * 0.5 elif (len(ncDims) == 4) and (stageredDim > 0): if stageredDim == 1: varData = (varData[:, 0:-1, :, :] + varData[:, 1:, :, :]) * 0.5 elif stageredDim == 2: varData = (varData[:, :, 0:-1, :] + varData[:, :, 1:, :]) * 0.5 elif stageredDim == 3: varData = (varData[:, :, :, 0:-1] + varData[:, :, :, 1:]) * 0.5 return varData.data def readAllvars(ncfid, varsWRF, iTimes, iBT, iSN, iWE): ''' Function to loop over the same netcdf to extract several variables for a given set of times and subdomain. It is more efficient as it doesn't need to open and close the nc file several times Parameters ---------- ncfid : file identifier class netcdf file identifier.. varsWRF : dict dictionary with the list of string of the variables aimed to extract from the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. iSN : int CENTERED (i.e. unstaggered) indexes of desired south-north coordinates. iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- dOut : dict dictionary with the variableName:nparray of the subset of data from the netcdf for each variable. 
''' # create output dictionary dOut = {} # loop over all variables for v2extract in varsWRF: # print(' extracting from netcdf: '+v2extract) if v2extract == 'L': # RMOL stands for the 1/ Obukhov length, thus we directly save it as L dOut['L'] = 1. / getVarEff(ncfid, 'RMOL', iTimes, iBT, iSN, iWE) dOut['L'][np.isinf(dOut['L'])] = np.nan elif v2extract == 'TENDENCIES': # coriolis as it is specified in nc file fc = getVarEff(ncfid, 'F', iTimes, iBT, iSN, iWE) fc = fc.mean() dOut['Vg'] = -(1 / fc) * getVarEff(ncfid, 'RU_TEND_PGF', iTimes, iBT, iSN, iWE) dOut['Ug'] = (1 / fc) * getVarEff(ncfid, 'RV_TEND_PGF', iTimes, iBT, iSN, iWE) dOut['UADV'] = (1 / fc) * getVarEff(ncfid, 'RU_TEND_ADV', iTimes, iBT, iSN, iWE) dOut['VADV'] = (1 / fc) * getVarEff(ncfid, 'RV_TEND_ADV', iTimes, iBT, iSN, iWE) dOut['POT_ADV'] = getVarEff(ncfid, 'T_TEND_ADV', iTimes, iBT, iSN, iWE) elif v2extract == 'Th': # The Potential temperature is extracted as the perturbation potential temperature + base state # temperature (t00 T00 = ncfid.variables.get('T00')[iTimes] dOut['Th'] = getVarEff(ncfid, 'T', iTimes, iBT, iSN, iWE) for iT in range(len(T00)): dOut['Th'][iT, :, :, :] = dOut['Th'][iT, :, :, :] + T00[iT] elif v2extract == 'TKE': dOut['TKE'] = getVarEff(ncfid, 'TKE_PBL', iTimes, iBT, iSN, iWE) else: dOut[v2extract] = getVarEff(ncfid, v2extract, iTimes, iBT, iSN, iWE) # dummy matrix with zeros, to be added as surface values to 4D data zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE))) # add a layer at the surface for some variables for vN in dOut: if vN == 'Th': # for better consistency the skin temperature is added as sfc level tsk = getVarEff(ncfid, 'TSK', iTimes, iBT, iSN, iWE) dOut['Th'] = np.concatenate((np.reshape(tsk, zSfc.shape), dOut['Th']), axis=1) elif len(dOut[vN].shape) == 4: dOut[vN] = np.concatenate((zSfc, dOut[vN]), axis=1) return dOut def
(ncfid, iTimes, iBT, iSN, iWE): ''' Memory efficient function to extract the height (averaged in time) of WRF output The height is provided in 3D (i.e. bottom-top, north-south, west-east) Parameters ---------- ncfid : file id file of the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. iSN : int CENTERED (i.e. unstaggered) indexes of desired south-north coordinates. iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- out : ndarray numpy array with the height above sea level ''' nz = len(iBT) + 1 ltmp = getVarEff(ncfid, 'XLAT', iTimes, iBT, iSN, iWE).mean(axis=0) LAT = np.tile(ltmp, (nz, 1, 1)) ltmp = getVarEff(ncfid, 'XLONG', iTimes, iBT, iSN, iWE).mean(axis=0) LON = np.tile(ltmp, (nz, 1, 1)) PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81 HGT = getVarEff(ncfid, 'HGT', iTimes, iBT, iSN, iWE) # time-average height zT = PHT.mean(axis=0) # time-average surface height hSfc = np.zeros((1, len(iSN), len(iWE))) hSfc[0, :, :] = HGT.mean(axis=0) Z = np.concatenate((hSfc, zT), axis=0) return LON, LAT, Z def get_zagl(ncfid, iTimes, iBT, iSN, iWE): ''' Memory efficient function to extract the height above ground of WRF output The height is provided in 4D (i.e. with time) dimensions and cell-centered Parameters ---------- ncfid : file id file of the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. iSN : int CENTERED (i.e. unstaggered) indexes of desired south-north coordinates. iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- out : ndarray numpy array with the height above ground ''' # dummy matrix with zeros, to be added as value at the surface zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE))) PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81 HGT = np.reshape(getVarEff(ncfid, 'HGT', iTimes, iBT, iSN, iWE), zSfc.shape) zaglT = PHT - np.repeat(HGT, PHT.shape[1], axis=1) return np.concatenate((zSfc, zaglT), axis=1) def coriolis(lat): ''' Compute the Coriolis frequency based on the latitude parameter Parameters ---------- lat : float latitude . Returns ------- fc : float Coriolis frequency. 
''' # angular speed of the Earth [rad/s] omega = 7.2921159e-5 return 2 * omega * np.sin(lat * np.pi / 180) def getneighbours(Nxy, inear, jnear): ''' Function to obtain the indexes of the neighours to a given central index from WRF in a given spatial box INPUS: Nxy = number of points to include in the spatial averaging box inear = given index in the west-east direction jnear = given index in the south-north direction ''' if Nxy == 1: # nearest grid point ixav = np.array([inear]).astype(int) iyav = np.array([jnear]).astype(int) elif Nxy == 2: # four nearest grid points ixav = np.array([inear, inear + 1]).astype(int) iyav = np.array([jnear, jnear + 1]).astype(int) else: if Nxy % 2 == 1: # Nxy (odd) nearest points ixav = np.arange(inear - 0.5 * (Nxy - 1), inear + 0.5 * (Nxy - 1) + 1).astype(int) iyav = np.arange(jnear - 0.5 * (Nxy - 1), jnear + 0.5 * (Nxy - 1) + 1).astype(int) else: ixav = np.arange(inear - 0.5 * Nxy + 1, inear + 0.5 * Nxy + 1).astype(int) iyav = np.arange(jnear - 0.5 * Nxy + 1, jnear + 0.5 * Nxy + 1).astype(int) return ixav, iyav def get_index_of_subset_domain(ncfid, lat_s, lon_s, L=None): lat = ncfid.variables.get('XLAT')[0, :, 0] lon = ncfid.variables.get('XLONG')[0, 0, :] # makes sure central box coordinates lie inside wrf domain box if (min(abs(lat - lat_s)) > max(np.diff(lat))) | (min(abs(lon - lon_s)) > max(np.diff(lon))): raise Exception("ERROR: lat | lon chosen is outside wrf domain box") # get the grid-spacing assuming dx=dy dxy = ncfid.getncattr('DX') # Extract only a box of LxL of all wrf domain to make the process more memory efficient # the selection is made by the grid-spacing of wrf output if L is None: if dxy >= 9e3: L = 55e3 elif dxy >= 1e3: L = 10e3 else: L = 1e3 print(' Only data in a box of ' + str(L) + 'x' + str(L) + ' is extracted as subset') # number of points to include in the spatial sampling box. 
Nxy = int(L / dxy) + 1 # number of grid centered points in the bottom-top (vertical) dimension nz = extract_dim(ncfid, 'bottom_top') # get indexes of the bottom-top levels to extract (so far is all levels) iBT = np.arange(0, nz) # get indexes of nearest coordinate to the given point inear_WE, inear_SN = ll_to_xy(ncfid, lat_s, lon_s) # get indexes of nearest + Nxy points to the given point iWE, iSN = getneighbours(Nxy, inear_WE, inear_SN) return iBT, iSN, iWE class nc_results: ''' Class (lean version) to query any given point or set of points in from a previously created WRF dictionary ''' def __init__(self, timesSim, zagl): ''' Initialize the class ''' self.times = timesSim # Reference Time for the netcdf files self.referenceDate = np.datetime64(datetime.datetime(1970, 1, 1, 0, 0, 0)) # convert time stamp to "seconds since <dateRef>" self.seconds = pd.Series(self.time - self.referenceDate).dt.total_seconds().values self.heights = np.nanmean(zagl, axis=(0, 2, 3)) self.nt, self.nz, self.ny, self.nx = zagl.shape def to_timeseries(self, vDict, X, Y, Z, qCoords): ''' Function that writes the variables extracted and procesed from WRF into a new nc file of time series stlye Parameters ---------- vDict : dictionary Python dictionary with the variables extracted from WRF Returns ------- out : ndarray numpy array with the height above ground ''' index = self._find_nearest_index(X, Y, Z, qCoords) self.timeseries = {} for iVar in vDict: print(' variable: ' + iVar) vtmp = vDict[iVar] if len(vDict[iVar].shape) == 3: vtmp = np.reshape(vtmp, (self.nt, 1, self.ny, self.nx)) vtmp = np.tile(vtmp, (1, self.nz, 1, 1)) vtmp = np.reshape(vtmp, (self.nt, self.nz * self.ny * self.nx)) self.timeseries[iVar] = pd.DataFrame(vtmp[:, index], index=self.time) self.timeseries[iVar].name = iVar def _find_nearest_index(self, X, Y, Z, qCoords): # avoid creating the tree every time the function is called if not hasattr(self, 'tree'): coords = np.column_stack((X.flatten(), Y.flatten(), Z.flatten())) self.tree = cKDTree(coords) dist, index = self.tree.query(qCoords) return index
get_nc_coordinates
identifier_name
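The getVarEff prefix in the row above turns on one detail: any dimension whose name contains "stag" is staggered, so one extra index is read along it and adjacent slices are averaged back onto cell centers. Below is a minimal, self-contained sketch of just that averaging step on synthetic arrays; the destagger helper, shapes, and values are illustrative and not part of the source file.

import numpy as np

def destagger(var, axis):
    """Average adjacent slices along `axis`, collapsing a staggered dimension."""
    lo = [slice(None)] * var.ndim
    hi = [slice(None)] * var.ndim
    lo[axis] = slice(0, -1)
    hi[axis] = slice(1, None)
    return 0.5 * (var[tuple(lo)] + var[tuple(hi)])

# e.g. a vertically staggered field: (Time, bottom_top_stag, south_north, west_east)
w_stag = np.random.rand(2, 5, 3, 3)
w_cent = destagger(w_stag, axis=1)
assert w_cent.shape == (2, 4, 3, 3)   # one fewer level, now cell-centered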
nc_read_functions.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ''' Set of auxliary functions for the WRF_read script author: Roberto Chavez <[email protected]> march/2020 ''' import numpy as np import pandas as pd import datetime from scipy.spatial import cKDTree import utm from wrf import extract_dim, ll_to_xy, xy_to_ll def getVarEff(ncfid, varName, iTimes, iBT, iSN, iWE): ''' Memmory efficient method of getting the nc data and destagered (if that is the case ) into a centered grid. NOTE: it assumes that the variables stored in the netcdf have as first dimension of the Time dimension Parameters ---------- ncfid : file identifier class netcdf file identifier. varName : str name of the variable to extract from the netcdf. Should match exactly the name of the variable as stored in the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. iSN : int CENTERED (i.e. unstaggered) indexes of desired south-north coordinates. iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- out : ndarray numpy array with the extracted 3D or 4D data for the given time and coords dimensions ''' if not varName in ncfid.variables.keys(): raise Exception("ERROR: " + varName + " variable does not exist in the netcdf file") varObj = ncfid.variables.get(varName) ncDims = varObj.dimensions # it is assuming that first dimension is Time! logicSlices = [iTimes] stageredDim = -1 for ii, iStr in enumerate(ncDims[1:]): if iStr.find('stag') > 0: stageredDim = ii + 1 if iStr.startswith('bottom'): logicSlices.append(np.append(iBT, iBT[-1] + 1)) elif iStr.startswith('south'): logicSlices.append(np.append(iSN, iSN[-1] + 1)) elif iStr.startswith('west'): logicSlices.append(np.append(iWE, iWE[-1] + 1)) else: if iStr.startswith('bottom'): logicSlices.append(iBT) elif iStr.startswith('south'): logicSlices.append(iSN) elif iStr.startswith('west'): logicSlices.append(iWE) # Extract and unstager the data varData = varObj[logicSlices] if (len(ncDims) == 3) and (stageredDim > 0): if stageredDim == 1: varData = (varData[:, 0:-1, :] + varData[:, 1:, :]) * 0.5 elif stageredDim == 2: varData = (varData[:, :, 0:-1] + varData[:, :, 1:]) * 0.5 elif (len(ncDims) == 4) and (stageredDim > 0): if stageredDim == 1: varData = (varData[:, 0:-1, :, :] + varData[:, 1:, :, :]) * 0.5 elif stageredDim == 2: varData = (varData[:, :, 0:-1, :] + varData[:, :, 1:, :]) * 0.5 elif stageredDim == 3: varData = (varData[:, :, :, 0:-1] + varData[:, :, :, 1:]) * 0.5 return varData.data def readAllvars(ncfid, varsWRF, iTimes, iBT, iSN, iWE): ''' Function to loop over the same netcdf to extract several variables for a given set of times and subdomain. It is more efficient as it doesn't need to open and close the nc file several times Parameters ---------- ncfid : file identifier class netcdf file identifier.. varsWRF : dict dictionary with the list of string of the variables aimed to extract from the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. iSN : int CENTERED (i.e. unstaggered) indexes of desired south-north coordinates. iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- dOut : dict dictionary with the variableName:nparray of the subset of data from the netcdf for each variable. 
''' # create output dictionary dOut = {} # loop over all variables for v2extract in varsWRF: # print(' extracting from netcdf: '+v2extract) if v2extract == 'L': # RMOL stands for the 1/ Obukhov length, thus we directly save it as L dOut['L'] = 1. / getVarEff(ncfid, 'RMOL', iTimes, iBT, iSN, iWE) dOut['L'][np.isinf(dOut['L'])] = np.nan elif v2extract == 'TENDENCIES': # coriolis as it is specified in nc file fc = getVarEff(ncfid, 'F', iTimes, iBT, iSN, iWE) fc = fc.mean() dOut['Vg'] = -(1 / fc) * getVarEff(ncfid, 'RU_TEND_PGF', iTimes, iBT, iSN, iWE) dOut['Ug'] = (1 / fc) * getVarEff(ncfid, 'RV_TEND_PGF', iTimes, iBT, iSN, iWE) dOut['UADV'] = (1 / fc) * getVarEff(ncfid, 'RU_TEND_ADV', iTimes, iBT, iSN, iWE) dOut['VADV'] = (1 / fc) * getVarEff(ncfid, 'RV_TEND_ADV', iTimes, iBT, iSN, iWE) dOut['POT_ADV'] = getVarEff(ncfid, 'T_TEND_ADV', iTimes, iBT, iSN, iWE) elif v2extract == 'Th': # The Potential temperature is extracted as the perturbation potential temperature + base state # temperature (t00 T00 = ncfid.variables.get('T00')[iTimes] dOut['Th'] = getVarEff(ncfid, 'T', iTimes, iBT, iSN, iWE) for iT in range(len(T00)): dOut['Th'][iT, :, :, :] = dOut['Th'][iT, :, :, :] + T00[iT] elif v2extract == 'TKE': dOut['TKE'] = getVarEff(ncfid, 'TKE_PBL', iTimes, iBT, iSN, iWE) else: dOut[v2extract] = getVarEff(ncfid, v2extract, iTimes, iBT, iSN, iWE) # dummy matrix with zeros, to be added as surface values to 4D data zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE))) # add a layer at the surface for some variables for vN in dOut: if vN == 'Th': # for better consistency the skin temperature is added as sfc level tsk = getVarEff(ncfid, 'TSK', iTimes, iBT, iSN, iWE) dOut['Th'] = np.concatenate((np.reshape(tsk, zSfc.shape), dOut['Th']), axis=1) elif len(dOut[vN].shape) == 4: dOut[vN] = np.concatenate((zSfc, dOut[vN]), axis=1) return dOut def get_nc_coordinates(ncfid, iTimes, iBT, iSN, iWE): ''' Memory efficient function to extract the height (averaged in time) of WRF output The height is provided in 3D (i.e. bottom-top, north-south, west-east) Parameters ---------- ncfid : file id file of the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. iSN : int CENTERED (i.e. unstaggered) indexes of desired south-north coordinates. iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- out : ndarray numpy array with the height above sea level ''' nz = len(iBT) + 1 ltmp = getVarEff(ncfid, 'XLAT', iTimes, iBT, iSN, iWE).mean(axis=0) LAT = np.tile(ltmp, (nz, 1, 1)) ltmp = getVarEff(ncfid, 'XLONG', iTimes, iBT, iSN, iWE).mean(axis=0) LON = np.tile(ltmp, (nz, 1, 1)) PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81 HGT = getVarEff(ncfid, 'HGT', iTimes, iBT, iSN, iWE) # time-average height zT = PHT.mean(axis=0) # time-average surface height hSfc = np.zeros((1, len(iSN), len(iWE))) hSfc[0, :, :] = HGT.mean(axis=0) Z = np.concatenate((hSfc, zT), axis=0) return LON, LAT, Z def get_zagl(ncfid, iTimes, iBT, iSN, iWE): ''' Memory efficient function to extract the height above ground of WRF output The height is provided in 4D (i.e. with time) dimensions and cell-centered Parameters ---------- ncfid : file id file of the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. iSN : int CENTERED (i.e. 
unstaggered) indexes of desired south-north coordinates. iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- out : ndarray numpy array with the height above ground ''' # dummy matrix with zeros, to be added as value at the surface zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE))) PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81 HGT = np.reshape(getVarEff(ncfid, 'HGT', iTimes, iBT, iSN, iWE), zSfc.shape) zaglT = PHT - np.repeat(HGT, PHT.shape[1], axis=1) return np.concatenate((zSfc, zaglT), axis=1) def coriolis(lat): ''' Compute the Coriolis frequency based on the latitude parameter Parameters ---------- lat : float latitude . Returns ------- fc : float Coriolis frequency. ''' # angular speed of the Earth [rad/s] omega = 7.2921159e-5 return 2 * omega * np.sin(lat * np.pi / 180) def getneighbours(Nxy, inear, jnear): ''' Function to obtain the indexes of the neighours to a given central index from WRF in a given spatial box INPUS: Nxy = number of points to include in the spatial averaging box inear = given index in the west-east direction jnear = given index in the south-north direction ''' if Nxy == 1: # nearest grid point ixav = np.array([inear]).astype(int) iyav = np.array([jnear]).astype(int) elif Nxy == 2: # four nearest grid points ixav = np.array([inear, inear + 1]).astype(int) iyav = np.array([jnear, jnear + 1]).astype(int) else: if Nxy % 2 == 1: # Nxy (odd) nearest points ixav = np.arange(inear - 0.5 * (Nxy - 1), inear + 0.5 * (Nxy - 1) + 1).astype(int) iyav = np.arange(jnear - 0.5 * (Nxy - 1), jnear + 0.5 * (Nxy - 1) + 1).astype(int) else: ixav = np.arange(inear - 0.5 * Nxy + 1, inear + 0.5 * Nxy + 1).astype(int) iyav = np.arange(jnear - 0.5 * Nxy + 1, jnear + 0.5 * Nxy + 1).astype(int) return ixav, iyav def get_index_of_subset_domain(ncfid, lat_s, lon_s, L=None):
class nc_results: ''' Class (lean version) to query any given point or set of points in from a previously created WRF dictionary ''' def __init__(self, timesSim, zagl): ''' Initialize the class ''' self.times = timesSim # Reference Time for the netcdf files self.referenceDate = np.datetime64(datetime.datetime(1970, 1, 1, 0, 0, 0)) # convert time stamp to "seconds since <dateRef>" self.seconds = pd.Series(self.time - self.referenceDate).dt.total_seconds().values self.heights = np.nanmean(zagl, axis=(0, 2, 3)) self.nt, self.nz, self.ny, self.nx = zagl.shape def to_timeseries(self, vDict, X, Y, Z, qCoords): ''' Function that writes the variables extracted and procesed from WRF into a new nc file of time series stlye Parameters ---------- vDict : dictionary Python dictionary with the variables extracted from WRF Returns ------- out : ndarray numpy array with the height above ground ''' index = self._find_nearest_index(X, Y, Z, qCoords) self.timeseries = {} for iVar in vDict: print(' variable: ' + iVar) vtmp = vDict[iVar] if len(vDict[iVar].shape) == 3: vtmp = np.reshape(vtmp, (self.nt, 1, self.ny, self.nx)) vtmp = np.tile(vtmp, (1, self.nz, 1, 1)) vtmp = np.reshape(vtmp, (self.nt, self.nz * self.ny * self.nx)) self.timeseries[iVar] = pd.DataFrame(vtmp[:, index], index=self.time) self.timeseries[iVar].name = iVar def _find_nearest_index(self, X, Y, Z, qCoords): # avoid creating the tree every time the function is called if not hasattr(self, 'tree'): coords = np.column_stack((X.flatten(), Y.flatten(), Z.flatten())) self.tree = cKDTree(coords) dist, index = self.tree.query(qCoords) return index
lat = ncfid.variables.get('XLAT')[0, :, 0] lon = ncfid.variables.get('XLONG')[0, 0, :] # makes sure central box coordinates lie inside wrf domain box if (min(abs(lat - lat_s)) > max(np.diff(lat))) | (min(abs(lon - lon_s)) > max(np.diff(lon))): raise Exception("ERROR: lat | lon chosen is outside wrf domain box") # get the grid-spacing assuming dx=dy dxy = ncfid.getncattr('DX') # Extract only a box of LxL of all wrf domain to make the process more memory efficient # the selection is made by the grid-spacing of wrf output if L is None: if dxy >= 9e3: L = 55e3 elif dxy >= 1e3: L = 10e3 else: L = 1e3 print(' Only data in a box of ' + str(L) + 'x' + str(L) + ' is extracted as subset') # number of points to include in the spatial sampling box. Nxy = int(L / dxy) + 1 # number of grid centered points in the bottom-top (vertical) dimension nz = extract_dim(ncfid, 'bottom_top') # get indexes of the bottom-top levels to extract (so far is all levels) iBT = np.arange(0, nz) # get indexes of nearest coordinate to the given point inear_WE, inear_SN = ll_to_xy(ncfid, lat_s, lon_s) # get indexes of nearest + Nxy points to the given point iWE, iSN = getneighbours(Nxy, inear_WE, inear_SN) return iBT, iSN, iWE
identifier_body
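The identifier_body row above fills in get_index_of_subset_domain, whose core is turning a box length L and the grid spacing DX into a window of neighbour indexes around the nearest grid point. A small standalone sketch of that index arithmetic follows; the neighbours helper and the 3 km / 10 km numbers are made up for illustration and mirror, not reproduce, the source's getneighbours.

import numpy as np

def neighbours(nxy, inear):
    # same windowing as getneighbours() for one horizontal direction
    if nxy == 1:
        return np.array([inear], dtype=int)
    if nxy == 2:
        return np.array([inear, inear + 1], dtype=int)
    if nxy % 2 == 1:
        return np.arange(inear - (nxy - 1) // 2, inear + (nxy - 1) // 2 + 1, dtype=int)
    return np.arange(inear - nxy // 2 + 1, inear + nxy // 2 + 1, dtype=int)

dxy, L = 3e3, 10e3            # assumed 3 km grid spacing, 10 km subset box
nxy = int(L / dxy) + 1        # -> 4 points per side
print(neighbours(nxy, inear=50))   # [49 50 51 52]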
nc_read_functions.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ''' Set of auxliary functions for the WRF_read script author: Roberto Chavez <[email protected]> march/2020 ''' import numpy as np import pandas as pd import datetime from scipy.spatial import cKDTree import utm from wrf import extract_dim, ll_to_xy, xy_to_ll def getVarEff(ncfid, varName, iTimes, iBT, iSN, iWE): ''' Memmory efficient method of getting the nc data and destagered (if that is the case ) into a centered grid. NOTE: it assumes that the variables stored in the netcdf have as first dimension of the Time dimension Parameters ---------- ncfid : file identifier class netcdf file identifier. varName : str name of the variable to extract from the netcdf. Should match exactly the name of the variable as stored in the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. iSN : int CENTERED (i.e. unstaggered) indexes of desired south-north coordinates. iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- out : ndarray numpy array with the extracted 3D or 4D data for the given time and coords dimensions ''' if not varName in ncfid.variables.keys(): raise Exception("ERROR: " + varName + " variable does not exist in the netcdf file") varObj = ncfid.variables.get(varName) ncDims = varObj.dimensions # it is assuming that first dimension is Time! logicSlices = [iTimes] stageredDim = -1 for ii, iStr in enumerate(ncDims[1:]): if iStr.find('stag') > 0: stageredDim = ii + 1 if iStr.startswith('bottom'): logicSlices.append(np.append(iBT, iBT[-1] + 1)) elif iStr.startswith('south'): logicSlices.append(np.append(iSN, iSN[-1] + 1)) elif iStr.startswith('west'): logicSlices.append(np.append(iWE, iWE[-1] + 1)) else: if iStr.startswith('bottom'): logicSlices.append(iBT) elif iStr.startswith('south'):
elif iStr.startswith('west'): logicSlices.append(iWE) # Extract and unstager the data varData = varObj[logicSlices] if (len(ncDims) == 3) and (stageredDim > 0): if stageredDim == 1: varData = (varData[:, 0:-1, :] + varData[:, 1:, :]) * 0.5 elif stageredDim == 2: varData = (varData[:, :, 0:-1] + varData[:, :, 1:]) * 0.5 elif (len(ncDims) == 4) and (stageredDim > 0): if stageredDim == 1: varData = (varData[:, 0:-1, :, :] + varData[:, 1:, :, :]) * 0.5 elif stageredDim == 2: varData = (varData[:, :, 0:-1, :] + varData[:, :, 1:, :]) * 0.5 elif stageredDim == 3: varData = (varData[:, :, :, 0:-1] + varData[:, :, :, 1:]) * 0.5 return varData.data def readAllvars(ncfid, varsWRF, iTimes, iBT, iSN, iWE): ''' Function to loop over the same netcdf to extract several variables for a given set of times and subdomain. It is more efficient as it doesn't need to open and close the nc file several times Parameters ---------- ncfid : file identifier class netcdf file identifier.. varsWRF : dict dictionary with the list of string of the variables aimed to extract from the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. iSN : int CENTERED (i.e. unstaggered) indexes of desired south-north coordinates. iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- dOut : dict dictionary with the variableName:nparray of the subset of data from the netcdf for each variable. ''' # create output dictionary dOut = {} # loop over all variables for v2extract in varsWRF: # print(' extracting from netcdf: '+v2extract) if v2extract == 'L': # RMOL stands for the 1/ Obukhov length, thus we directly save it as L dOut['L'] = 1. / getVarEff(ncfid, 'RMOL', iTimes, iBT, iSN, iWE) dOut['L'][np.isinf(dOut['L'])] = np.nan elif v2extract == 'TENDENCIES': # coriolis as it is specified in nc file fc = getVarEff(ncfid, 'F', iTimes, iBT, iSN, iWE) fc = fc.mean() dOut['Vg'] = -(1 / fc) * getVarEff(ncfid, 'RU_TEND_PGF', iTimes, iBT, iSN, iWE) dOut['Ug'] = (1 / fc) * getVarEff(ncfid, 'RV_TEND_PGF', iTimes, iBT, iSN, iWE) dOut['UADV'] = (1 / fc) * getVarEff(ncfid, 'RU_TEND_ADV', iTimes, iBT, iSN, iWE) dOut['VADV'] = (1 / fc) * getVarEff(ncfid, 'RV_TEND_ADV', iTimes, iBT, iSN, iWE) dOut['POT_ADV'] = getVarEff(ncfid, 'T_TEND_ADV', iTimes, iBT, iSN, iWE) elif v2extract == 'Th': # The Potential temperature is extracted as the perturbation potential temperature + base state # temperature (t00 T00 = ncfid.variables.get('T00')[iTimes] dOut['Th'] = getVarEff(ncfid, 'T', iTimes, iBT, iSN, iWE) for iT in range(len(T00)): dOut['Th'][iT, :, :, :] = dOut['Th'][iT, :, :, :] + T00[iT] elif v2extract == 'TKE': dOut['TKE'] = getVarEff(ncfid, 'TKE_PBL', iTimes, iBT, iSN, iWE) else: dOut[v2extract] = getVarEff(ncfid, v2extract, iTimes, iBT, iSN, iWE) # dummy matrix with zeros, to be added as surface values to 4D data zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE))) # add a layer at the surface for some variables for vN in dOut: if vN == 'Th': # for better consistency the skin temperature is added as sfc level tsk = getVarEff(ncfid, 'TSK', iTimes, iBT, iSN, iWE) dOut['Th'] = np.concatenate((np.reshape(tsk, zSfc.shape), dOut['Th']), axis=1) elif len(dOut[vN].shape) == 4: dOut[vN] = np.concatenate((zSfc, dOut[vN]), axis=1) return dOut def get_nc_coordinates(ncfid, iTimes, iBT, iSN, iWE): ''' Memory efficient function to extract the height (averaged in time) of WRF output The height is provided in 3D (i.e. 
bottom-top, north-south, west-east) Parameters ---------- ncfid : file id file of the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. iSN : int CENTERED (i.e. unstaggered) indexes of desired south-north coordinates. iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- out : ndarray numpy array with the height above sea level ''' nz = len(iBT) + 1 ltmp = getVarEff(ncfid, 'XLAT', iTimes, iBT, iSN, iWE).mean(axis=0) LAT = np.tile(ltmp, (nz, 1, 1)) ltmp = getVarEff(ncfid, 'XLONG', iTimes, iBT, iSN, iWE).mean(axis=0) LON = np.tile(ltmp, (nz, 1, 1)) PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81 HGT = getVarEff(ncfid, 'HGT', iTimes, iBT, iSN, iWE) # time-average height zT = PHT.mean(axis=0) # time-average surface height hSfc = np.zeros((1, len(iSN), len(iWE))) hSfc[0, :, :] = HGT.mean(axis=0) Z = np.concatenate((hSfc, zT), axis=0) return LON, LAT, Z def get_zagl(ncfid, iTimes, iBT, iSN, iWE): ''' Memory efficient function to extract the height above ground of WRF output The height is provided in 4D (i.e. with time) dimensions and cell-centered Parameters ---------- ncfid : file id file of the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. iSN : int CENTERED (i.e. unstaggered) indexes of desired south-north coordinates. iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- out : ndarray numpy array with the height above ground ''' # dummy matrix with zeros, to be added as value at the surface zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE))) PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81 HGT = np.reshape(getVarEff(ncfid, 'HGT', iTimes, iBT, iSN, iWE), zSfc.shape) zaglT = PHT - np.repeat(HGT, PHT.shape[1], axis=1) return np.concatenate((zSfc, zaglT), axis=1) def coriolis(lat): ''' Compute the Coriolis frequency based on the latitude parameter Parameters ---------- lat : float latitude . Returns ------- fc : float Coriolis frequency. 
''' # angular speed of the Earth [rad/s] omega = 7.2921159e-5 return 2 * omega * np.sin(lat * np.pi / 180) def getneighbours(Nxy, inear, jnear): ''' Function to obtain the indexes of the neighours to a given central index from WRF in a given spatial box INPUS: Nxy = number of points to include in the spatial averaging box inear = given index in the west-east direction jnear = given index in the south-north direction ''' if Nxy == 1: # nearest grid point ixav = np.array([inear]).astype(int) iyav = np.array([jnear]).astype(int) elif Nxy == 2: # four nearest grid points ixav = np.array([inear, inear + 1]).astype(int) iyav = np.array([jnear, jnear + 1]).astype(int) else: if Nxy % 2 == 1: # Nxy (odd) nearest points ixav = np.arange(inear - 0.5 * (Nxy - 1), inear + 0.5 * (Nxy - 1) + 1).astype(int) iyav = np.arange(jnear - 0.5 * (Nxy - 1), jnear + 0.5 * (Nxy - 1) + 1).astype(int) else: ixav = np.arange(inear - 0.5 * Nxy + 1, inear + 0.5 * Nxy + 1).astype(int) iyav = np.arange(jnear - 0.5 * Nxy + 1, jnear + 0.5 * Nxy + 1).astype(int) return ixav, iyav def get_index_of_subset_domain(ncfid, lat_s, lon_s, L=None): lat = ncfid.variables.get('XLAT')[0, :, 0] lon = ncfid.variables.get('XLONG')[0, 0, :] # makes sure central box coordinates lie inside wrf domain box if (min(abs(lat - lat_s)) > max(np.diff(lat))) | (min(abs(lon - lon_s)) > max(np.diff(lon))): raise Exception("ERROR: lat | lon chosen is outside wrf domain box") # get the grid-spacing assuming dx=dy dxy = ncfid.getncattr('DX') # Extract only a box of LxL of all wrf domain to make the process more memory efficient # the selection is made by the grid-spacing of wrf output if L is None: if dxy >= 9e3: L = 55e3 elif dxy >= 1e3: L = 10e3 else: L = 1e3 print(' Only data in a box of ' + str(L) + 'x' + str(L) + ' is extracted as subset') # number of points to include in the spatial sampling box. 
Nxy = int(L / dxy) + 1 # number of grid centered points in the bottom-top (vertical) dimension nz = extract_dim(ncfid, 'bottom_top') # get indexes of the bottom-top levels to extract (so far is all levels) iBT = np.arange(0, nz) # get indexes of nearest coordinate to the given point inear_WE, inear_SN = ll_to_xy(ncfid, lat_s, lon_s) # get indexes of nearest + Nxy points to the given point iWE, iSN = getneighbours(Nxy, inear_WE, inear_SN) return iBT, iSN, iWE class nc_results: ''' Class (lean version) to query any given point or set of points in from a previously created WRF dictionary ''' def __init__(self, timesSim, zagl): ''' Initialize the class ''' self.times = timesSim # Reference Time for the netcdf files self.referenceDate = np.datetime64(datetime.datetime(1970, 1, 1, 0, 0, 0)) # convert time stamp to "seconds since <dateRef>" self.seconds = pd.Series(self.time - self.referenceDate).dt.total_seconds().values self.heights = np.nanmean(zagl, axis=(0, 2, 3)) self.nt, self.nz, self.ny, self.nx = zagl.shape def to_timeseries(self, vDict, X, Y, Z, qCoords): ''' Function that writes the variables extracted and procesed from WRF into a new nc file of time series stlye Parameters ---------- vDict : dictionary Python dictionary with the variables extracted from WRF Returns ------- out : ndarray numpy array with the height above ground ''' index = self._find_nearest_index(X, Y, Z, qCoords) self.timeseries = {} for iVar in vDict: print(' variable: ' + iVar) vtmp = vDict[iVar] if len(vDict[iVar].shape) == 3: vtmp = np.reshape(vtmp, (self.nt, 1, self.ny, self.nx)) vtmp = np.tile(vtmp, (1, self.nz, 1, 1)) vtmp = np.reshape(vtmp, (self.nt, self.nz * self.ny * self.nx)) self.timeseries[iVar] = pd.DataFrame(vtmp[:, index], index=self.time) self.timeseries[iVar].name = iVar def _find_nearest_index(self, X, Y, Z, qCoords): # avoid creating the tree every time the function is called if not hasattr(self, 'tree'): coords = np.column_stack((X.flatten(), Y.flatten(), Z.flatten())) self.tree = cKDTree(coords) dist, index = self.tree.query(qCoords) return index
logicSlices.append(iSN)
conditional_block
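The nc_results class that recurs in these rows resolves query coordinates to grid points by flattening the (X, Y, Z) coordinate arrays into one cKDTree and querying it once per point set, then reusing the flat index for every 4D field. A hedged, self-contained sketch of that lookup on a synthetic grid (grid sizes and the query point are invented):

import numpy as np
from scipy.spatial import cKDTree

nz, ny, nx = 3, 4, 5
X, Y = np.meshgrid(np.arange(nx) * 100.0, np.arange(ny) * 100.0)   # metres
X = np.broadcast_to(X, (nz, ny, nx))
Y = np.broadcast_to(Y, (nz, ny, nx))
Z = np.arange(nz)[:, None, None] * np.ones((nz, ny, nx)) * 50.0    # levels at 0/50/100 m

tree = cKDTree(np.column_stack((X.ravel(), Y.ravel(), Z.ravel())))
dist, index = tree.query([[210.0, 310.0, 60.0]])                   # one query point
print(index)   # flat index into the (nz*ny*nx,) grid, valid for any flattened 4D field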
nc_read_functions.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ''' Set of auxliary functions for the WRF_read script author: Roberto Chavez <[email protected]> march/2020 ''' import numpy as np import pandas as pd import datetime from scipy.spatial import cKDTree import utm from wrf import extract_dim, ll_to_xy, xy_to_ll def getVarEff(ncfid, varName, iTimes, iBT, iSN, iWE): ''' Memmory efficient method of getting the nc data and destagered (if that is the case ) into a centered grid. NOTE: it assumes that the variables stored in the netcdf have as first dimension of the Time dimension Parameters ---------- ncfid : file identifier class netcdf file identifier. varName : str name of the variable to extract from the netcdf. Should match exactly the name of the variable as stored in the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. iSN : int CENTERED (i.e. unstaggered) indexes of desired south-north coordinates. iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- out : ndarray numpy array with the extracted 3D or 4D data for the given time and coords dimensions ''' if not varName in ncfid.variables.keys(): raise Exception("ERROR: " + varName + " variable does not exist in the netcdf file") varObj = ncfid.variables.get(varName) ncDims = varObj.dimensions # it is assuming that first dimension is Time! logicSlices = [iTimes] stageredDim = -1 for ii, iStr in enumerate(ncDims[1:]): if iStr.find('stag') > 0: stageredDim = ii + 1 if iStr.startswith('bottom'): logicSlices.append(np.append(iBT, iBT[-1] + 1)) elif iStr.startswith('south'): logicSlices.append(np.append(iSN, iSN[-1] + 1)) elif iStr.startswith('west'): logicSlices.append(np.append(iWE, iWE[-1] + 1)) else: if iStr.startswith('bottom'): logicSlices.append(iBT) elif iStr.startswith('south'): logicSlices.append(iSN) elif iStr.startswith('west'): logicSlices.append(iWE) # Extract and unstager the data varData = varObj[logicSlices] if (len(ncDims) == 3) and (stageredDim > 0): if stageredDim == 1: varData = (varData[:, 0:-1, :] + varData[:, 1:, :]) * 0.5 elif stageredDim == 2: varData = (varData[:, :, 0:-1] + varData[:, :, 1:]) * 0.5 elif (len(ncDims) == 4) and (stageredDim > 0): if stageredDim == 1: varData = (varData[:, 0:-1, :, :] + varData[:, 1:, :, :]) * 0.5 elif stageredDim == 2: varData = (varData[:, :, 0:-1, :] + varData[:, :, 1:, :]) * 0.5 elif stageredDim == 3: varData = (varData[:, :, :, 0:-1] + varData[:, :, :, 1:]) * 0.5 return varData.data def readAllvars(ncfid, varsWRF, iTimes, iBT, iSN, iWE): ''' Function to loop over the same netcdf to extract several variables for a given set of times and subdomain. It is more efficient as it doesn't need to open and close the nc file several times Parameters ---------- ncfid : file identifier class netcdf file identifier.. varsWRF : dict dictionary with the list of string of the variables aimed to extract from the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. iSN : int CENTERED (i.e. unstaggered) indexes of desired south-north coordinates. iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- dOut : dict dictionary with the variableName:nparray of the subset of data from the netcdf for each variable. 
''' # create output dictionary dOut = {} # loop over all variables for v2extract in varsWRF: # print(' extracting from netcdf: '+v2extract) if v2extract == 'L': # RMOL stands for the 1/ Obukhov length, thus we directly save it as L dOut['L'] = 1. / getVarEff(ncfid, 'RMOL', iTimes, iBT, iSN, iWE) dOut['L'][np.isinf(dOut['L'])] = np.nan elif v2extract == 'TENDENCIES': # coriolis as it is specified in nc file fc = getVarEff(ncfid, 'F', iTimes, iBT, iSN, iWE) fc = fc.mean() dOut['Vg'] = -(1 / fc) * getVarEff(ncfid, 'RU_TEND_PGF', iTimes, iBT, iSN, iWE) dOut['Ug'] = (1 / fc) * getVarEff(ncfid, 'RV_TEND_PGF', iTimes, iBT, iSN, iWE) dOut['UADV'] = (1 / fc) * getVarEff(ncfid, 'RU_TEND_ADV', iTimes, iBT, iSN, iWE) dOut['VADV'] = (1 / fc) * getVarEff(ncfid, 'RV_TEND_ADV', iTimes, iBT, iSN, iWE) dOut['POT_ADV'] = getVarEff(ncfid, 'T_TEND_ADV', iTimes, iBT, iSN, iWE) elif v2extract == 'Th': # The Potential temperature is extracted as the perturbation potential temperature + base state # temperature (t00 T00 = ncfid.variables.get('T00')[iTimes] dOut['Th'] = getVarEff(ncfid, 'T', iTimes, iBT, iSN, iWE) for iT in range(len(T00)): dOut['Th'][iT, :, :, :] = dOut['Th'][iT, :, :, :] + T00[iT] elif v2extract == 'TKE': dOut['TKE'] = getVarEff(ncfid, 'TKE_PBL', iTimes, iBT, iSN, iWE) else: dOut[v2extract] = getVarEff(ncfid, v2extract, iTimes, iBT, iSN, iWE) # dummy matrix with zeros, to be added as surface values to 4D data zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE))) # add a layer at the surface for some variables for vN in dOut: if vN == 'Th': # for better consistency the skin temperature is added as sfc level tsk = getVarEff(ncfid, 'TSK', iTimes, iBT, iSN, iWE) dOut['Th'] = np.concatenate((np.reshape(tsk, zSfc.shape), dOut['Th']), axis=1) elif len(dOut[vN].shape) == 4: dOut[vN] = np.concatenate((zSfc, dOut[vN]), axis=1) return dOut def get_nc_coordinates(ncfid, iTimes, iBT, iSN, iWE): ''' Memory efficient function to extract the height (averaged in time) of WRF output The height is provided in 3D (i.e. bottom-top, north-south, west-east) Parameters ---------- ncfid : file id file of the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels.
iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- out : ndarray numpy array with the height above sea level ''' nz = len(iBT) + 1 ltmp = getVarEff(ncfid, 'XLAT', iTimes, iBT, iSN, iWE).mean(axis=0) LAT = np.tile(ltmp, (nz, 1, 1)) ltmp = getVarEff(ncfid, 'XLONG', iTimes, iBT, iSN, iWE).mean(axis=0) LON = np.tile(ltmp, (nz, 1, 1)) PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81 HGT = getVarEff(ncfid, 'HGT', iTimes, iBT, iSN, iWE) # time-average height zT = PHT.mean(axis=0) # time-average surface height hSfc = np.zeros((1, len(iSN), len(iWE))) hSfc[0, :, :] = HGT.mean(axis=0) Z = np.concatenate((hSfc, zT), axis=0) return LON, LAT, Z def get_zagl(ncfid, iTimes, iBT, iSN, iWE): ''' Memory efficient function to extract the height above ground of WRF output The height is provided in 4D (i.e. with time) dimensions and cell-centered Parameters ---------- ncfid : file id file of the netcdf. iTimes : int, logic index of the times to extract form the netcdf data. iBT : int CENTERED (i.e. unstaggered) indexes of desired bottom-top levels. iSN : int CENTERED (i.e. unstaggered) indexes of desired south-north coordinates. iWE : int CENTERED (i.e. unstaggered) indexes of desired weast-east coordinates. Returns ------- out : ndarray numpy array with the height above ground ''' # dummy matrix with zeros, to be added as value at the surface zSfc = np.zeros((sum(iTimes), 1, len(iSN), len(iWE))) PHT = (getVarEff(ncfid, 'PH', iTimes, iBT, iSN, iWE) + getVarEff(ncfid, 'PHB', iTimes, iBT, iSN, iWE)) / 9.81 HGT = np.reshape(getVarEff(ncfid, 'HGT', iTimes, iBT, iSN, iWE), zSfc.shape) zaglT = PHT - np.repeat(HGT, PHT.shape[1], axis=1) return np.concatenate((zSfc, zaglT), axis=1) def coriolis(lat): ''' Compute the Coriolis frequency based on the latitude parameter Parameters ---------- lat : float latitude . Returns ------- fc : float Coriolis frequency. 
''' # angular speed of the Earth [rad/s] omega = 7.2921159e-5 return 2 * omega * np.sin(lat * np.pi / 180) def getneighbours(Nxy, inear, jnear): ''' Function to obtain the indexes of the neighours to a given central index from WRF in a given spatial box INPUS: Nxy = number of points to include in the spatial averaging box inear = given index in the west-east direction jnear = given index in the south-north direction ''' if Nxy == 1: # nearest grid point ixav = np.array([inear]).astype(int) iyav = np.array([jnear]).astype(int) elif Nxy == 2: # four nearest grid points ixav = np.array([inear, inear + 1]).astype(int) iyav = np.array([jnear, jnear + 1]).astype(int) else: if Nxy % 2 == 1: # Nxy (odd) nearest points ixav = np.arange(inear - 0.5 * (Nxy - 1), inear + 0.5 * (Nxy - 1) + 1).astype(int) iyav = np.arange(jnear - 0.5 * (Nxy - 1), jnear + 0.5 * (Nxy - 1) + 1).astype(int) else: ixav = np.arange(inear - 0.5 * Nxy + 1, inear + 0.5 * Nxy + 1).astype(int) iyav = np.arange(jnear - 0.5 * Nxy + 1, jnear + 0.5 * Nxy + 1).astype(int) return ixav, iyav def get_index_of_subset_domain(ncfid, lat_s, lon_s, L=None): lat = ncfid.variables.get('XLAT')[0, :, 0] lon = ncfid.variables.get('XLONG')[0, 0, :] # makes sure central box coordinates lie inside wrf domain box if (min(abs(lat - lat_s)) > max(np.diff(lat))) | (min(abs(lon - lon_s)) > max(np.diff(lon))): raise Exception("ERROR: lat | lon chosen is outside wrf domain box") # get the grid-spacing assuming dx=dy dxy = ncfid.getncattr('DX') # Extract only a box of LxL of all wrf domain to make the process more memory efficient # the selection is made by the grid-spacing of wrf output if L is None: if dxy >= 9e3: L = 55e3 elif dxy >= 1e3: L = 10e3 else: L = 1e3 print(' Only data in a box of ' + str(L) + 'x' + str(L) + ' is extracted as subset') # number of points to include in the spatial sampling box. 
Nxy = int(L / dxy) + 1 # number of grid centered points in the bottom-top (vertical) dimension nz = extract_dim(ncfid, 'bottom_top') # get indexes of the bottom-top levels to extract (so far is all levels) iBT = np.arange(0, nz) # get indexes of nearest coordinate to the given point inear_WE, inear_SN = ll_to_xy(ncfid, lat_s, lon_s) # get indexes of nearest + Nxy points to the given point iWE, iSN = getneighbours(Nxy, inear_WE, inear_SN) return iBT, iSN, iWE class nc_results: ''' Class (lean version) to query any given point or set of points in from a previously created WRF dictionary ''' def __init__(self, timesSim, zagl): ''' Initialize the class ''' self.times = timesSim # Reference Time for the netcdf files self.referenceDate = np.datetime64(datetime.datetime(1970, 1, 1, 0, 0, 0)) # convert time stamp to "seconds since <dateRef>" self.seconds = pd.Series(self.time - self.referenceDate).dt.total_seconds().values self.heights = np.nanmean(zagl, axis=(0, 2, 3)) self.nt, self.nz, self.ny, self.nx = zagl.shape def to_timeseries(self, vDict, X, Y, Z, qCoords): ''' Function that writes the variables extracted and procesed from WRF into a new nc file of time series stlye Parameters ---------- vDict : dictionary Python dictionary with the variables extracted from WRF Returns ------- out : ndarray numpy array with the height above ground ''' index = self._find_nearest_index(X, Y, Z, qCoords) self.timeseries = {} for iVar in vDict: print(' variable: ' + iVar) vtmp = vDict[iVar] if len(vDict[iVar].shape) == 3: vtmp = np.reshape(vtmp, (self.nt, 1, self.ny, self.nx)) vtmp = np.tile(vtmp, (1, self.nz, 1, 1)) vtmp = np.reshape(vtmp, (self.nt, self.nz * self.ny * self.nx)) self.timeseries[iVar] = pd.DataFrame(vtmp[:, index], index=self.time) self.timeseries[iVar].name = iVar def _find_nearest_index(self, X, Y, Z, qCoords): # avoid creating the tree every time the function is called if not hasattr(self, 'tree'): coords = np.column_stack((X.flatten(), Y.flatten(), Z.flatten())) self.tree = cKDTree(coords) dist, index = self.tree.query(qCoords) return index
iSN : int CENTERED (i.e. unstaggered) indexes of desired south-north coordinates.
random_line_split
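get_zagl, shown in the prefixes above, converts geopotential to height above ground as (PH + PHB) / g minus the terrain height HGT, then prepends a zero layer for the surface. A sketch of just that arithmetic on synthetic arrays (all shapes and values are illustrative, and PH is assumed already destaggered):

import numpy as np

g = 9.81
nt, nz, ny, nx = 2, 4, 3, 3
PH  = np.random.rand(nt, nz, ny, nx) * 100.0                                     # perturbation geopotential
PHB = np.linspace(0, 3000, nz)[None, :, None, None] * np.ones((nt, nz, ny, nx)) * g  # base-state geopotential
HGT = np.full((nt, 1, ny, nx), 120.0)                                            # terrain height [m]

zagl = (PH + PHB) / g - np.repeat(HGT, nz, axis=1)
zagl = np.concatenate((np.zeros((nt, 1, ny, nx)), zagl), axis=1)
print(zagl.shape)   # (2, 5, 3, 3): surface layer plus nz model levels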
types.rs
use crate::{encode_section, Encode, Section, SectionId}; /// Represents a subtype of possible other types in a WebAssembly module. #[derive(Debug, Clone)] pub struct SubType { /// Is the subtype final. pub is_final: bool,
pub structural_type: StructuralType, } /// Represents a structural type in a WebAssembly module. #[derive(Debug, Clone)] pub enum StructuralType { /// The type is for a function. Func(FuncType), /// The type is for an array. Array(ArrayType), /// The type is for a struct. Struct(StructType), } /// Represents a type of a function in a WebAssembly module. #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct FuncType { /// The combined parameters and result types. params_results: Box<[ValType]>, /// The number of parameter types. len_params: usize, } /// Represents a type of an array in a WebAssembly module. #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct ArrayType(pub FieldType); /// Represents a type of a struct in a WebAssembly module. #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct StructType { /// Struct fields. pub fields: Box<[FieldType]>, } /// Field type in structural types (structs, arrays). #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] pub struct FieldType { /// Storage type of the field. pub element_type: StorageType, /// Is the field mutable. pub mutable: bool, } /// Storage type for structural type fields. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] pub enum StorageType { /// The `i8` type. I8, /// The `i16` type. I16, /// A value type. Val(ValType), } /// The type of a core WebAssembly value. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] pub enum ValType { /// The `i32` type. I32, /// The `i64` type. I64, /// The `f32` type. F32, /// The `f64` type. F64, /// The `v128` type. /// /// Part of the SIMD proposal. V128, /// A reference type. /// /// The `funcref` and `externref` type fall into this category and the full /// generalization here is due to the implementation of the /// function-references proposal. Ref(RefType), } impl FuncType { /// Creates a new [`FuncType`] from the given `params` and `results`. pub fn new<P, R>(params: P, results: R) -> Self where P: IntoIterator<Item = ValType>, R: IntoIterator<Item = ValType>, { let mut buffer = params.into_iter().collect::<Vec<_>>(); let len_params = buffer.len(); buffer.extend(results); Self { params_results: buffer.into(), len_params, } } /// Returns a shared slice to the parameter types of the [`FuncType`]. #[inline] pub fn params(&self) -> &[ValType] { &self.params_results[..self.len_params] } /// Returns a shared slice to the result types of the [`FuncType`]. #[inline] pub fn results(&self) -> &[ValType] { &self.params_results[self.len_params..] } } impl ValType { /// Alias for the `funcref` type in WebAssembly pub const FUNCREF: ValType = ValType::Ref(RefType::FUNCREF); /// Alias for the `externref` type in WebAssembly pub const EXTERNREF: ValType = ValType::Ref(RefType::EXTERNREF); } impl Encode for StorageType { fn encode(&self, sink: &mut Vec<u8>) { match self { StorageType::I8 => sink.push(0x7A), StorageType::I16 => sink.push(0x79), StorageType::Val(vt) => vt.encode(sink), } } } impl Encode for ValType { fn encode(&self, sink: &mut Vec<u8>) { match self { ValType::I32 => sink.push(0x7F), ValType::I64 => sink.push(0x7E), ValType::F32 => sink.push(0x7D), ValType::F64 => sink.push(0x7C), ValType::V128 => sink.push(0x7B), ValType::Ref(rt) => rt.encode(sink), } } } /// A reference type. /// /// This is largely part of the function references proposal for WebAssembly but /// additionally is used by the `funcref` and `externref` types. The full /// generality of this type is only exercised with function-references. 
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] #[allow(missing_docs)] pub struct RefType { pub nullable: bool, pub heap_type: HeapType, } impl RefType { /// Alias for the `funcref` type in WebAssembly pub const FUNCREF: RefType = RefType { nullable: true, heap_type: HeapType::Func, }; /// Alias for the `externref` type in WebAssembly pub const EXTERNREF: RefType = RefType { nullable: true, heap_type: HeapType::Extern, }; } impl Encode for RefType { fn encode(&self, sink: &mut Vec<u8>) { if self.nullable { // Favor the original encodings of `funcref` and `externref` where // possible match self.heap_type { HeapType::Func => return sink.push(0x70), HeapType::Extern => return sink.push(0x6f), _ => {} } } if self.nullable { sink.push(0x6C); } else { sink.push(0x6B); } self.heap_type.encode(sink); } } impl From<RefType> for ValType { fn from(ty: RefType) -> ValType { ValType::Ref(ty) } } /// Part of the function references proposal. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] pub enum HeapType { /// Untyped (any) function. Func, /// External heap type. Extern, /// The `any` heap type. The common supertype (a.k.a. top) of all internal types. Any, /// The `none` heap type. The common subtype (a.k.a. bottom) of all internal types. None, /// The `noextern` heap type. The common subtype (a.k.a. bottom) of all external types. NoExtern, /// The `nofunc` heap type. The common subtype (a.k.a. bottom) of all function types. NoFunc, /// The `eq` heap type. The common supertype of all referenceable types on which comparison /// (ref.eq) is allowed. Eq, /// The `struct` heap type. The common supertype of all struct types. Struct, /// The `array` heap type. The common supertype of all array types. Array, /// The i31 heap type. I31, /// User defined type at the given index. Indexed(u32), } impl Encode for HeapType { fn encode(&self, sink: &mut Vec<u8>) { match self { HeapType::Func => sink.push(0x70), HeapType::Extern => sink.push(0x6F), HeapType::Any => sink.push(0x6E), HeapType::None => sink.push(0x65), HeapType::NoExtern => sink.push(0x69), HeapType::NoFunc => sink.push(0x68), HeapType::Eq => sink.push(0x6D), HeapType::Struct => sink.push(0x67), HeapType::Array => sink.push(0x66), HeapType::I31 => sink.push(0x6A), // Note that this is encoded as a signed type rather than unsigned // as it's decoded as an s33 HeapType::Indexed(i) => i64::from(*i).encode(sink), } } } /// An encoder for the type section of WebAssembly modules. /// /// # Example /// /// ```rust /// use wasm_encoder::{Module, TypeSection, ValType}; /// /// let mut types = TypeSection::new(); /// /// types.function([ValType::I32, ValType::I32], [ValType::I64]); /// /// let mut module = Module::new(); /// module.section(&types); /// /// let bytes = module.finish(); /// ``` #[derive(Clone, Debug, Default)] pub struct TypeSection { bytes: Vec<u8>, num_added: u32, } impl TypeSection { /// Create a new module type section encoder. pub fn new() -> Self { Self::default() } /// The number of types in the section. pub fn len(&self) -> u32 { self.num_added } /// Determines if the section is empty. pub fn is_empty(&self) -> bool { self.num_added == 0 } /// Define a function type in this type section. 
pub fn function<P, R>(&mut self, params: P, results: R) -> &mut Self where P: IntoIterator<Item = ValType>, P::IntoIter: ExactSizeIterator, R: IntoIterator<Item = ValType>, R::IntoIter: ExactSizeIterator, { let params = params.into_iter(); let results = results.into_iter(); self.bytes.push(0x60); params.len().encode(&mut self.bytes); params.for_each(|p| p.encode(&mut self.bytes)); results.len().encode(&mut self.bytes); results.for_each(|p| p.encode(&mut self.bytes)); self.num_added += 1; self } /// Define an array type in this type section. pub fn array(&mut self, ty: &StorageType, mutable: bool) -> &mut Self { self.bytes.push(0x5e); self.field(ty, mutable); self.num_added += 1; self } fn field(&mut self, ty: &StorageType, mutable: bool) -> &mut Self { ty.encode(&mut self.bytes); self.bytes.push(mutable as u8); self } /// Define a struct type in this type section. pub fn struct_(&mut self, fields: Vec<FieldType>) -> &mut Self { self.bytes.push(0x5f); fields.len().encode(&mut self.bytes); for f in fields.iter() { self.field(&f.element_type, f.mutable); } self.num_added += 1; self } /// Define an explicit subtype in this type section. pub fn subtype(&mut self, ty: &SubType) -> &mut Self { // In the GC spec, supertypes is a vector, not an option. let st = match ty.supertype_idx { Some(idx) => vec![idx], None => vec![], }; if ty.is_final { self.bytes.push(0x4e); st.encode(&mut self.bytes); } else if !st.is_empty() { self.bytes.push(0x50); st.encode(&mut self.bytes); } match &ty.structural_type { StructuralType::Func(ty) => { self.function(ty.params().iter().copied(), ty.results().iter().copied()); } StructuralType::Array(ArrayType(ty)) => { self.array(&ty.element_type, ty.mutable); } StructuralType::Struct(ty) => { self.struct_(ty.fields.to_vec()); } } self } } impl Encode for TypeSection { fn encode(&self, sink: &mut Vec<u8>) { encode_section(sink, self.num_added, &self.bytes); } } impl Section for TypeSection { fn id(&self) -> u8 { SectionId::Type.into() } }
/// The list of supertype indexes. As of GC MVP, there can be at most one supertype. pub supertype_idx: Option<u32>, /// The structural type of the subtype.
random_line_split
types.rs
use crate::{encode_section, Encode, Section, SectionId}; /// Represents a subtype of possible other types in a WebAssembly module. #[derive(Debug, Clone)] pub struct SubType { /// Is the subtype final. pub is_final: bool, /// The list of supertype indexes. As of GC MVP, there can be at most one supertype. pub supertype_idx: Option<u32>, /// The structural type of the subtype. pub structural_type: StructuralType, } /// Represents a structural type in a WebAssembly module. #[derive(Debug, Clone)] pub enum StructuralType { /// The type is for a function. Func(FuncType), /// The type is for an array. Array(ArrayType), /// The type is for a struct. Struct(StructType), } /// Represents a type of a function in a WebAssembly module. #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct FuncType { /// The combined parameters and result types. params_results: Box<[ValType]>, /// The number of parameter types. len_params: usize, } /// Represents a type of an array in a WebAssembly module. #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct ArrayType(pub FieldType); /// Represents a type of a struct in a WebAssembly module. #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct StructType { /// Struct fields. pub fields: Box<[FieldType]>, } /// Field type in structural types (structs, arrays). #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] pub struct FieldType { /// Storage type of the field. pub element_type: StorageType, /// Is the field mutable. pub mutable: bool, } /// Storage type for structural type fields. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] pub enum
{ /// The `i8` type. I8, /// The `i16` type. I16, /// A value type. Val(ValType), } /// The type of a core WebAssembly value. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] pub enum ValType { /// The `i32` type. I32, /// The `i64` type. I64, /// The `f32` type. F32, /// The `f64` type. F64, /// The `v128` type. /// /// Part of the SIMD proposal. V128, /// A reference type. /// /// The `funcref` and `externref` type fall into this category and the full /// generalization here is due to the implementation of the /// function-references proposal. Ref(RefType), } impl FuncType { /// Creates a new [`FuncType`] from the given `params` and `results`. pub fn new<P, R>(params: P, results: R) -> Self where P: IntoIterator<Item = ValType>, R: IntoIterator<Item = ValType>, { let mut buffer = params.into_iter().collect::<Vec<_>>(); let len_params = buffer.len(); buffer.extend(results); Self { params_results: buffer.into(), len_params, } } /// Returns a shared slice to the parameter types of the [`FuncType`]. #[inline] pub fn params(&self) -> &[ValType] { &self.params_results[..self.len_params] } /// Returns a shared slice to the result types of the [`FuncType`]. #[inline] pub fn results(&self) -> &[ValType] { &self.params_results[self.len_params..] } } impl ValType { /// Alias for the `funcref` type in WebAssembly pub const FUNCREF: ValType = ValType::Ref(RefType::FUNCREF); /// Alias for the `externref` type in WebAssembly pub const EXTERNREF: ValType = ValType::Ref(RefType::EXTERNREF); } impl Encode for StorageType { fn encode(&self, sink: &mut Vec<u8>) { match self { StorageType::I8 => sink.push(0x7A), StorageType::I16 => sink.push(0x79), StorageType::Val(vt) => vt.encode(sink), } } } impl Encode for ValType { fn encode(&self, sink: &mut Vec<u8>) { match self { ValType::I32 => sink.push(0x7F), ValType::I64 => sink.push(0x7E), ValType::F32 => sink.push(0x7D), ValType::F64 => sink.push(0x7C), ValType::V128 => sink.push(0x7B), ValType::Ref(rt) => rt.encode(sink), } } } /// A reference type. /// /// This is largely part of the function references proposal for WebAssembly but /// additionally is used by the `funcref` and `externref` types. The full /// generality of this type is only exercised with function-references. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] #[allow(missing_docs)] pub struct RefType { pub nullable: bool, pub heap_type: HeapType, } impl RefType { /// Alias for the `funcref` type in WebAssembly pub const FUNCREF: RefType = RefType { nullable: true, heap_type: HeapType::Func, }; /// Alias for the `externref` type in WebAssembly pub const EXTERNREF: RefType = RefType { nullable: true, heap_type: HeapType::Extern, }; } impl Encode for RefType { fn encode(&self, sink: &mut Vec<u8>) { if self.nullable { // Favor the original encodings of `funcref` and `externref` where // possible match self.heap_type { HeapType::Func => return sink.push(0x70), HeapType::Extern => return sink.push(0x6f), _ => {} } } if self.nullable { sink.push(0x6C); } else { sink.push(0x6B); } self.heap_type.encode(sink); } } impl From<RefType> for ValType { fn from(ty: RefType) -> ValType { ValType::Ref(ty) } } /// Part of the function references proposal. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] pub enum HeapType { /// Untyped (any) function. Func, /// External heap type. Extern, /// The `any` heap type. The common supertype (a.k.a. top) of all internal types. Any, /// The `none` heap type. The common subtype (a.k.a. bottom) of all internal types. 
None, /// The `noextern` heap type. The common subtype (a.k.a. bottom) of all external types. NoExtern, /// The `nofunc` heap type. The common subtype (a.k.a. bottom) of all function types. NoFunc, /// The `eq` heap type. The common supertype of all referenceable types on which comparison /// (ref.eq) is allowed. Eq, /// The `struct` heap type. The common supertype of all struct types. Struct, /// The `array` heap type. The common supertype of all array types. Array, /// The i31 heap type. I31, /// User defined type at the given index. Indexed(u32), } impl Encode for HeapType { fn encode(&self, sink: &mut Vec<u8>) { match self { HeapType::Func => sink.push(0x70), HeapType::Extern => sink.push(0x6F), HeapType::Any => sink.push(0x6E), HeapType::None => sink.push(0x65), HeapType::NoExtern => sink.push(0x69), HeapType::NoFunc => sink.push(0x68), HeapType::Eq => sink.push(0x6D), HeapType::Struct => sink.push(0x67), HeapType::Array => sink.push(0x66), HeapType::I31 => sink.push(0x6A), // Note that this is encoded as a signed type rather than unsigned // as it's decoded as an s33 HeapType::Indexed(i) => i64::from(*i).encode(sink), } } } /// An encoder for the type section of WebAssembly modules. /// /// # Example /// /// ```rust /// use wasm_encoder::{Module, TypeSection, ValType}; /// /// let mut types = TypeSection::new(); /// /// types.function([ValType::I32, ValType::I32], [ValType::I64]); /// /// let mut module = Module::new(); /// module.section(&types); /// /// let bytes = module.finish(); /// ``` #[derive(Clone, Debug, Default)] pub struct TypeSection { bytes: Vec<u8>, num_added: u32, } impl TypeSection { /// Create a new module type section encoder. pub fn new() -> Self { Self::default() } /// The number of types in the section. pub fn len(&self) -> u32 { self.num_added } /// Determines if the section is empty. pub fn is_empty(&self) -> bool { self.num_added == 0 } /// Define a function type in this type section. pub fn function<P, R>(&mut self, params: P, results: R) -> &mut Self where P: IntoIterator<Item = ValType>, P::IntoIter: ExactSizeIterator, R: IntoIterator<Item = ValType>, R::IntoIter: ExactSizeIterator, { let params = params.into_iter(); let results = results.into_iter(); self.bytes.push(0x60); params.len().encode(&mut self.bytes); params.for_each(|p| p.encode(&mut self.bytes)); results.len().encode(&mut self.bytes); results.for_each(|p| p.encode(&mut self.bytes)); self.num_added += 1; self } /// Define an array type in this type section. pub fn array(&mut self, ty: &StorageType, mutable: bool) -> &mut Self { self.bytes.push(0x5e); self.field(ty, mutable); self.num_added += 1; self } fn field(&mut self, ty: &StorageType, mutable: bool) -> &mut Self { ty.encode(&mut self.bytes); self.bytes.push(mutable as u8); self } /// Define a struct type in this type section. pub fn struct_(&mut self, fields: Vec<FieldType>) -> &mut Self { self.bytes.push(0x5f); fields.len().encode(&mut self.bytes); for f in fields.iter() { self.field(&f.element_type, f.mutable); } self.num_added += 1; self } /// Define an explicit subtype in this type section. pub fn subtype(&mut self, ty: &SubType) -> &mut Self { // In the GC spec, supertypes is a vector, not an option. 
let st = match ty.supertype_idx { Some(idx) => vec![idx], None => vec![], }; if ty.is_final { self.bytes.push(0x4e); st.encode(&mut self.bytes); } else if !st.is_empty() { self.bytes.push(0x50); st.encode(&mut self.bytes); } match &ty.structural_type { StructuralType::Func(ty) => { self.function(ty.params().iter().copied(), ty.results().iter().copied()); } StructuralType::Array(ArrayType(ty)) => { self.array(&ty.element_type, ty.mutable); } StructuralType::Struct(ty) => { self.struct_(ty.fields.to_vec()); } } self } } impl Encode for TypeSection { fn encode(&self, sink: &mut Vec<u8>) { encode_section(sink, self.num_added, &self.bytes); } } impl Section for TypeSection { fn id(&self) -> u8 { SectionId::Type.into() } }
StorageType
identifier_name
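The byte values in the Encode impls above can be checked directly. A small sketch, assuming the crate's Encode trait is publicly exported (the excerpt imports it via use crate::{... Encode ...}):

use wasm_encoder::{Encode, ValType};

fn main() {
    let mut sink = Vec::new();
    ValType::I32.encode(&mut sink); // 0x7F, per the match arm above
    ValType::F64.encode(&mut sink); // 0x7C
    ValType::FUNCREF.encode(&mut sink); // nullable funcref keeps its classic 0x70 encoding
    assert_eq!(sink, [0x7F, 0x7C, 0x70]);
}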
types.rs
use crate::{encode_section, Encode, Section, SectionId}; /// Represents a subtype of possible other types in a WebAssembly module. #[derive(Debug, Clone)] pub struct SubType { /// Is the subtype final. pub is_final: bool, /// The list of supertype indexes. As of GC MVP, there can be at most one supertype. pub supertype_idx: Option<u32>, /// The structural type of the subtype. pub structural_type: StructuralType, } /// Represents a structural type in a WebAssembly module. #[derive(Debug, Clone)] pub enum StructuralType { /// The type is for a function. Func(FuncType), /// The type is for an array. Array(ArrayType), /// The type is for a struct. Struct(StructType), } /// Represents a type of a function in a WebAssembly module. #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct FuncType { /// The combined parameters and result types. params_results: Box<[ValType]>, /// The number of parameter types. len_params: usize, } /// Represents a type of an array in a WebAssembly module. #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct ArrayType(pub FieldType); /// Represents a type of a struct in a WebAssembly module. #[derive(Debug, Clone, Eq, PartialEq, Hash)] pub struct StructType { /// Struct fields. pub fields: Box<[FieldType]>, } /// Field type in structural types (structs, arrays). #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] pub struct FieldType { /// Storage type of the field. pub element_type: StorageType, /// Is the field mutable. pub mutable: bool, } /// Storage type for structural type fields. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] pub enum StorageType { /// The `i8` type. I8, /// The `i16` type. I16, /// A value type. Val(ValType), } /// The type of a core WebAssembly value. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] pub enum ValType { /// The `i32` type. I32, /// The `i64` type. I64, /// The `f32` type. F32, /// The `f64` type. F64, /// The `v128` type. /// /// Part of the SIMD proposal. V128, /// A reference type. /// /// The `funcref` and `externref` type fall into this category and the full /// generalization here is due to the implementation of the /// function-references proposal. Ref(RefType), } impl FuncType { /// Creates a new [`FuncType`] from the given `params` and `results`. pub fn new<P, R>(params: P, results: R) -> Self where P: IntoIterator<Item = ValType>, R: IntoIterator<Item = ValType>, { let mut buffer = params.into_iter().collect::<Vec<_>>(); let len_params = buffer.len(); buffer.extend(results); Self { params_results: buffer.into(), len_params, } } /// Returns a shared slice to the parameter types of the [`FuncType`]. #[inline] pub fn params(&self) -> &[ValType] { &self.params_results[..self.len_params] } /// Returns a shared slice to the result types of the [`FuncType`]. #[inline] pub fn results(&self) -> &[ValType] { &self.params_results[self.len_params..] 
} } impl ValType { /// Alias for the `funcref` type in WebAssembly pub const FUNCREF: ValType = ValType::Ref(RefType::FUNCREF); /// Alias for the `externref` type in WebAssembly pub const EXTERNREF: ValType = ValType::Ref(RefType::EXTERNREF); } impl Encode for StorageType { fn encode(&self, sink: &mut Vec<u8>) { match self { StorageType::I8 => sink.push(0x7A), StorageType::I16 => sink.push(0x79), StorageType::Val(vt) => vt.encode(sink), } } } impl Encode for ValType { fn encode(&self, sink: &mut Vec<u8>) { match self { ValType::I32 => sink.push(0x7F), ValType::I64 => sink.push(0x7E), ValType::F32 => sink.push(0x7D), ValType::F64 => sink.push(0x7C), ValType::V128 => sink.push(0x7B), ValType::Ref(rt) => rt.encode(sink), } } } /// A reference type. /// /// This is largely part of the function references proposal for WebAssembly but /// additionally is used by the `funcref` and `externref` types. The full /// generality of this type is only exercised with function-references. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] #[allow(missing_docs)] pub struct RefType { pub nullable: bool, pub heap_type: HeapType, } impl RefType { /// Alias for the `funcref` type in WebAssembly pub const FUNCREF: RefType = RefType { nullable: true, heap_type: HeapType::Func, }; /// Alias for the `externref` type in WebAssembly pub const EXTERNREF: RefType = RefType { nullable: true, heap_type: HeapType::Extern, }; } impl Encode for RefType { fn encode(&self, sink: &mut Vec<u8>) { if self.nullable { // Favor the original encodings of `funcref` and `externref` where // possible match self.heap_type { HeapType::Func => return sink.push(0x70), HeapType::Extern => return sink.push(0x6f), _ => {} } } if self.nullable { sink.push(0x6C); } else { sink.push(0x6B); } self.heap_type.encode(sink); } } impl From<RefType> for ValType { fn from(ty: RefType) -> ValType { ValType::Ref(ty) } } /// Part of the function references proposal. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Ord, PartialOrd)] pub enum HeapType { /// Untyped (any) function. Func, /// External heap type. Extern, /// The `any` heap type. The common supertype (a.k.a. top) of all internal types. Any, /// The `none` heap type. The common subtype (a.k.a. bottom) of all internal types. None, /// The `noextern` heap type. The common subtype (a.k.a. bottom) of all external types. NoExtern, /// The `nofunc` heap type. The common subtype (a.k.a. bottom) of all function types. NoFunc, /// The `eq` heap type. The common supertype of all referenceable types on which comparison /// (ref.eq) is allowed. Eq, /// The `struct` heap type. The common supertype of all struct types. Struct, /// The `array` heap type. The common supertype of all array types. Array, /// The i31 heap type. I31, /// User defined type at the given index. Indexed(u32), } impl Encode for HeapType { fn encode(&self, sink: &mut Vec<u8>) { match self { HeapType::Func => sink.push(0x70), HeapType::Extern => sink.push(0x6F), HeapType::Any => sink.push(0x6E), HeapType::None => sink.push(0x65), HeapType::NoExtern => sink.push(0x69), HeapType::NoFunc => sink.push(0x68), HeapType::Eq => sink.push(0x6D), HeapType::Struct => sink.push(0x67), HeapType::Array => sink.push(0x66), HeapType::I31 => sink.push(0x6A), // Note that this is encoded as a signed type rather than unsigned // as it's decoded as an s33 HeapType::Indexed(i) => i64::from(*i).encode(sink), } } } /// An encoder for the type section of WebAssembly modules. 
/// /// # Example /// /// ```rust /// use wasm_encoder::{Module, TypeSection, ValType}; /// /// let mut types = TypeSection::new(); /// /// types.function([ValType::I32, ValType::I32], [ValType::I64]); /// /// let mut module = Module::new(); /// module.section(&types); /// /// let bytes = module.finish(); /// ``` #[derive(Clone, Debug, Default)] pub struct TypeSection { bytes: Vec<u8>, num_added: u32, } impl TypeSection { /// Create a new module type section encoder. pub fn new() -> Self { Self::default() } /// The number of types in the section. pub fn len(&self) -> u32 { self.num_added } /// Determines if the section is empty. pub fn is_empty(&self) -> bool { self.num_added == 0 } /// Define a function type in this type section. pub fn function<P, R>(&mut self, params: P, results: R) -> &mut Self where P: IntoIterator<Item = ValType>, P::IntoIter: ExactSizeIterator, R: IntoIterator<Item = ValType>, R::IntoIter: ExactSizeIterator, { let params = params.into_iter(); let results = results.into_iter(); self.bytes.push(0x60); params.len().encode(&mut self.bytes); params.for_each(|p| p.encode(&mut self.bytes)); results.len().encode(&mut self.bytes); results.for_each(|p| p.encode(&mut self.bytes)); self.num_added += 1; self } /// Define an array type in this type section. pub fn array(&mut self, ty: &StorageType, mutable: bool) -> &mut Self { self.bytes.push(0x5e); self.field(ty, mutable); self.num_added += 1; self } fn field(&mut self, ty: &StorageType, mutable: bool) -> &mut Self { ty.encode(&mut self.bytes); self.bytes.push(mutable as u8); self } /// Define a struct type in this type section. pub fn struct_(&mut self, fields: Vec<FieldType>) -> &mut Self
/// Define an explicit subtype in this type section. pub fn subtype(&mut self, ty: &SubType) -> &mut Self { // In the GC spec, supertypes is a vector, not an option. let st = match ty.supertype_idx { Some(idx) => vec![idx], None => vec![], }; if ty.is_final { self.bytes.push(0x4e); st.encode(&mut self.bytes); } else if !st.is_empty() { self.bytes.push(0x50); st.encode(&mut self.bytes); } match &ty.structural_type { StructuralType::Func(ty) => { self.function(ty.params().iter().copied(), ty.results().iter().copied()); } StructuralType::Array(ArrayType(ty)) => { self.array(&ty.element_type, ty.mutable); } StructuralType::Struct(ty) => { self.struct_(ty.fields.to_vec()); } } self } } impl Encode for TypeSection { fn encode(&self, sink: &mut Vec<u8>) { encode_section(sink, self.num_added, &self.bytes); } } impl Section for TypeSection { fn id(&self) -> u8 { SectionId::Type.into() } }
{ self.bytes.push(0x5f); fields.len().encode(&mut self.bytes); for f in fields.iter() { self.field(&f.element_type, f.mutable); } self.num_added += 1; self }
identifier_body
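A short sketch of how FuncType splits its combined storage and how array/struct entries are added to the section, assuming the same crate-root re-exports as above:

use wasm_encoder::{FieldType, FuncType, StorageType, TypeSection, ValType};

fn main() {
    // Params and results share one boxed slice; the accessors split it at len_params.
    let ty = FuncType::new([ValType::I32, ValType::F32], [ValType::I64]);
    assert_eq!(ty.params(), &[ValType::I32, ValType::F32]);
    assert_eq!(ty.results(), &[ValType::I64]);

    // One array type (mutable i8 storage) and one struct type with a single immutable i32 field.
    let mut types = TypeSection::new();
    types.array(&StorageType::I8, true);
    types.struct_(vec![FieldType {
        element_type: StorageType::Val(ValType::I32),
        mutable: false,
    }]);
    assert_eq!(types.len(), 2);
}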
bid.rs
//! Auctions and bidding during the first phase of the deal. use std::fmt; use std::str::FromStr; use serde::{Deserialize, Serialize}; use strum_macros::EnumIter; use super::cards; use super::deal; use super::pos; /// Goal set by a contract. /// /// Determines the winning conditions and the score on success. #[derive(EnumIter, PartialEq, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)] pub enum Target { Prise, Garde, GardeSans, GardeContre, } impl Target { /// Returns the score this target would give on success. pub fn multiplier(self) -> i32 { match self { Target::Prise => 1, Target::Garde => 2, Target::GardeSans => 4, Target::GardeContre => 6, } } pub fn to_str(self) -> &'static str { match self { Target::Prise => "prise", Target::Garde => "garde", Target::GardeSans => "garde sans", Target::GardeContre => "garde contre", } } } impl FromStr for Target { type Err = String; fn from_str(s: &str) -> Result<Self, String> { match s { "prise" => Ok(Target::Prise), "garde" => Ok(Target::Garde), "garde sans" => Ok(Target::GardeSans), "garde contre" => Ok(Target::GardeContre), _ => Err(format!("invalid target: {}", s)), } } } impl ToString for Target { fn to_string(&self) -> String { self.to_str().to_owned() } } /// Contract taken #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct Contract { /// Initial author of the contract. pub author: pos::PlayerPos, /// Target for the contract. pub target: Target, /// Slam asked ? pub slam: bool, } impl Contract { fn new(author: pos::PlayerPos, target: Target, slam: bool) -> Self { Contract { author, target, slam, } } } impl ToString for Contract { fn to_string(&self) -> String { let str_slam = if self.slam { " SLAM" } else { "" }; format!("{}{}", self.target.to_str(), str_slam) } } /// Current state of an auction #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] pub enum AuctionState { /// Players are still bidding for the highest contract Bidding, /// Auction is over, deal will begin Over, /// No contract was taken, a new deal will start Cancelled, } /// Bidding status for a player #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] pub enum BidStatus { Todo, Passed, Bid, } /// Represents the entire auction process. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Auction { contract: Option<Contract>, players_status: Vec<BidStatus>, first: pos::PlayerPos, state: AuctionState, players: Vec<cards::Hand>, dog: cards::Hand, } /// Possible error occuring during an Auction. #[derive(PartialEq, Debug)] pub enum BidError { /// The auction was closed and does not accept more contracts. AuctionClosed, /// A player tried bidding before his turn. TurnError, /// The given bid was not higher than the previous one. NonRaisedTarget, /// Cannot complete the auction when it is still running. AuctionRunning, /// No contract was offered during the auction, it cannot complete. NoContract, } impl fmt::Display for BidError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { BidError::AuctionClosed => write!(f, "auctions are closed"), BidError::TurnError => write!(f, "invalid turn order"), BidError::NonRaisedTarget => write!(f, "bid must be higher than current contract"), BidError::AuctionRunning => write!(f, "the auction are still running"), BidError::NoContract => write!(f, "no contract was offered"), } } } impl Auction { /// Starts a new auction, starting with the player `first`. 
pub fn new(first: pos::PlayerPos) -> Self { let count = first.count as usize; let (hands, dog) = super::deal_hands(count); Auction { contract: None, players_status: vec![BidStatus::Todo; count], state: AuctionState::Bidding, first, players: hands, dog } } /// Override Auction hands (for tests) pub fn set_hands(&mut self, hands: Vec<cards::Hand>, dog: cards::Hand) { self.players = hands; self.dog = dog; } /// Returns the current state of the auctions. pub fn get_state(&self) -> AuctionState { self.state } fn can_bid(&self, target: Target) -> Result<(), BidError> { if self.state != AuctionState::Bidding { return Err(BidError::AuctionClosed); } if let Some(contract) = self.contract.clone() { if target.multiplier() <= contract.target.multiplier() { return Err(BidError::NonRaisedTarget); } } Ok(()) } fn get_player_status(&self, pos: pos::PlayerPos) -> BidStatus { self.players_status[pos.to_n()] } fn set_player_status(&mut self, pos: pos::PlayerPos, status: BidStatus) { self.players_status[pos.to_n()] = status; } /// Returns the player that is expected to bid next. pub fn next_player(&self) -> pos::PlayerPos { let pos_init = if let Some(contract) = self.contract.clone() { contract.author.next() } else { self.first }; let mut next_pos = pos_init; while self.get_player_status(next_pos) != BidStatus::Todo { next_pos = next_pos.next(); if next_pos == pos_init { panic!("all players have talked") } } next_pos } /// Check if there are still players waiting for bidding fn no_player_left(&self) -> bool { !self.players_status.contains(&BidStatus::Todo) } /// Bid a new, higher contract. pub fn bid( &mut self, pos: pos::PlayerPos, target: Target, slam: bool, ) -> Result<AuctionState, BidError> { if pos != self.next_player() { return Err(BidError::TurnError); } self.can_bid(target)?; // Reset previous bidder status if let Some(contract) = self.contract.clone() { self.set_player_status(contract.author, BidStatus::Todo); } let contract = Contract::new(pos, target, slam); self.contract = Some(contract); self.set_player_status(pos, BidStatus::Bid); // If we're all the way to the top, there's nowhere else to go if self.no_player_left() || target == Target::GardeContre { self.state = AuctionState::Over; } Ok(self.state) } /// Look at the last offered contract. /// /// Returns `None` if no contract was offered yet. pub fn current_contract(&self) -> Option<&Contract> { self.contract.as_ref() } /// Returns the players cards. pub fn
(&self) -> &Vec<cards::Hand> { &self.players } /// The current player passes his turn. /// /// Returns the new auction state : /// /// * `AuctionState::Cancelled` if all players passed /// * `AuctionState::Over` if 5 players passed in a row /// * The previous state otherwise pub fn pass(&mut self, pos: pos::PlayerPos) -> Result<AuctionState, BidError> { if pos != self.next_player() { return Err(BidError::TurnError); } self.set_player_status(pos, BidStatus::Passed); if self.no_player_left() { self.state = if self.contract.is_some() { AuctionState::Over } else { AuctionState::Cancelled } } Ok(self.state) } /// Consumes a complete auction to enter the second deal phase. /// /// If the auction was ready, returns `Ok<DealState>` pub fn complete(&self) -> Result<deal::DealState, BidError> { if self.state != AuctionState::Over { Err(BidError::AuctionRunning) // } else if self.contract.is_none() { } else { if let Some(contract) = self.contract.clone() { Ok(deal::DealState::new( self.first, self.players.clone(), self.dog, contract, pos::PlayerPos::from_n(0,5), //XXX placeholder )) } else { Err(BidError::NoContract) } } } } #[cfg(test)] mod tests { use super::*; use crate::pos; #[test] fn test_auction() { let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5)); assert!(auction.state == AuctionState::Bidding); assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Ok(AuctionState::Bidding)); assert_eq!(auction.pass(pos::PlayerPos::from_n(1, 5)), Ok(AuctionState::Bidding)); assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Err(BidError::TurnError)); assert_eq!(auction.pass(pos::PlayerPos::from_n(2, 5)), Ok(AuctionState::Bidding)); // Someone bids. assert_eq!( auction.bid(pos::PlayerPos::from_n(3, 5), Target::Garde, false), Ok(AuctionState::Bidding) ); assert_eq!( auction.bid(pos::PlayerPos::from_n(4, 5), Target::Garde, false).err(), Some(BidError::NonRaisedTarget) ); // Surbid assert_eq!( auction.bid(pos::PlayerPos::from_n(4, 5), Target::GardeSans, false), Ok(AuctionState::Bidding) ); // Allready passed assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Err(BidError::TurnError)); // Last to pass assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Ok(AuctionState::Over)); assert!(auction.state == AuctionState::Over); match auction.complete() { Err(_) => assert!(false), _ => {} } } }
hands
identifier_name
bid.rs
//! Auctions and bidding during the first phase of the deal. use std::fmt; use std::str::FromStr; use serde::{Deserialize, Serialize}; use strum_macros::EnumIter; use super::cards; use super::deal; use super::pos; /// Goal set by a contract. /// /// Determines the winning conditions and the score on success. #[derive(EnumIter, PartialEq, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)] pub enum Target { Prise, Garde, GardeSans, GardeContre, } impl Target { /// Returns the score this target would give on success. pub fn multiplier(self) -> i32 { match self { Target::Prise => 1, Target::Garde => 2, Target::GardeSans => 4, Target::GardeContre => 6, } } pub fn to_str(self) -> &'static str { match self { Target::Prise => "prise", Target::Garde => "garde", Target::GardeSans => "garde sans", Target::GardeContre => "garde contre", } } } impl FromStr for Target { type Err = String; fn from_str(s: &str) -> Result<Self, String> { match s { "prise" => Ok(Target::Prise), "garde" => Ok(Target::Garde), "garde sans" => Ok(Target::GardeSans), "garde contre" => Ok(Target::GardeContre), _ => Err(format!("invalid target: {}", s)), } } } impl ToString for Target { fn to_string(&self) -> String { self.to_str().to_owned() } } /// Contract taken #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct Contract { /// Initial author of the contract. pub author: pos::PlayerPos, /// Target for the contract. pub target: Target, /// Slam asked ? pub slam: bool, } impl Contract { fn new(author: pos::PlayerPos, target: Target, slam: bool) -> Self { Contract { author, target, slam, } } } impl ToString for Contract { fn to_string(&self) -> String { let str_slam = if self.slam { " SLAM" } else { "" }; format!("{}{}", self.target.to_str(), str_slam) } } /// Current state of an auction #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] pub enum AuctionState { /// Players are still bidding for the highest contract Bidding, /// Auction is over, deal will begin Over, /// No contract was taken, a new deal will start Cancelled, } /// Bidding status for a player #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] pub enum BidStatus { Todo, Passed, Bid, } /// Represents the entire auction process. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Auction { contract: Option<Contract>, players_status: Vec<BidStatus>, first: pos::PlayerPos, state: AuctionState, players: Vec<cards::Hand>, dog: cards::Hand, } /// Possible error occuring during an Auction. #[derive(PartialEq, Debug)] pub enum BidError { /// The auction was closed and does not accept more contracts. AuctionClosed, /// A player tried bidding before his turn. TurnError, /// The given bid was not higher than the previous one. NonRaisedTarget, /// Cannot complete the auction when it is still running. AuctionRunning, /// No contract was offered during the auction, it cannot complete. NoContract, } impl fmt::Display for BidError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { BidError::AuctionClosed => write!(f, "auctions are closed"), BidError::TurnError => write!(f, "invalid turn order"), BidError::NonRaisedTarget => write!(f, "bid must be higher than current contract"), BidError::AuctionRunning => write!(f, "the auction are still running"), BidError::NoContract => write!(f, "no contract was offered"), } } } impl Auction { /// Starts a new auction, starting with the player `first`. 
pub fn new(first: pos::PlayerPos) -> Self { let count = first.count as usize; let (hands, dog) = super::deal_hands(count); Auction { contract: None, players_status: vec![BidStatus::Todo; count], state: AuctionState::Bidding, first, players: hands, dog } } /// Override Auction hands (for tests) pub fn set_hands(&mut self, hands: Vec<cards::Hand>, dog: cards::Hand) { self.players = hands; self.dog = dog; } /// Returns the current state of the auctions. pub fn get_state(&self) -> AuctionState { self.state } fn can_bid(&self, target: Target) -> Result<(), BidError> { if self.state != AuctionState::Bidding { return Err(BidError::AuctionClosed); } if let Some(contract) = self.contract.clone()
Ok(()) } fn get_player_status(&self, pos: pos::PlayerPos) -> BidStatus { self.players_status[pos.to_n()] } fn set_player_status(&mut self, pos: pos::PlayerPos, status: BidStatus) { self.players_status[pos.to_n()] = status; } /// Returns the player that is expected to bid next. pub fn next_player(&self) -> pos::PlayerPos { let pos_init = if let Some(contract) = self.contract.clone() { contract.author.next() } else { self.first }; let mut next_pos = pos_init; while self.get_player_status(next_pos) != BidStatus::Todo { next_pos = next_pos.next(); if next_pos == pos_init { panic!("all players have talked") } } next_pos } /// Check if there are still players waiting for bidding fn no_player_left(&self) -> bool { !self.players_status.contains(&BidStatus::Todo) } /// Bid a new, higher contract. pub fn bid( &mut self, pos: pos::PlayerPos, target: Target, slam: bool, ) -> Result<AuctionState, BidError> { if pos != self.next_player() { return Err(BidError::TurnError); } self.can_bid(target)?; // Reset previous bidder status if let Some(contract) = self.contract.clone() { self.set_player_status(contract.author, BidStatus::Todo); } let contract = Contract::new(pos, target, slam); self.contract = Some(contract); self.set_player_status(pos, BidStatus::Bid); // If we're all the way to the top, there's nowhere else to go if self.no_player_left() || target == Target::GardeContre { self.state = AuctionState::Over; } Ok(self.state) } /// Look at the last offered contract. /// /// Returns `None` if no contract was offered yet. pub fn current_contract(&self) -> Option<&Contract> { self.contract.as_ref() } /// Returns the players cards. pub fn hands(&self) -> &Vec<cards::Hand> { &self.players } /// The current player passes his turn. /// /// Returns the new auction state : /// /// * `AuctionState::Cancelled` if all players passed /// * `AuctionState::Over` if 5 players passed in a row /// * The previous state otherwise pub fn pass(&mut self, pos: pos::PlayerPos) -> Result<AuctionState, BidError> { if pos != self.next_player() { return Err(BidError::TurnError); } self.set_player_status(pos, BidStatus::Passed); if self.no_player_left() { self.state = if self.contract.is_some() { AuctionState::Over } else { AuctionState::Cancelled } } Ok(self.state) } /// Consumes a complete auction to enter the second deal phase. /// /// If the auction was ready, returns `Ok<DealState>` pub fn complete(&self) -> Result<deal::DealState, BidError> { if self.state != AuctionState::Over { Err(BidError::AuctionRunning) // } else if self.contract.is_none() { } else { if let Some(contract) = self.contract.clone() { Ok(deal::DealState::new( self.first, self.players.clone(), self.dog, contract, pos::PlayerPos::from_n(0,5), //XXX placeholder )) } else { Err(BidError::NoContract) } } } } #[cfg(test)] mod tests { use super::*; use crate::pos; #[test] fn test_auction() { let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5)); assert!(auction.state == AuctionState::Bidding); assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Ok(AuctionState::Bidding)); assert_eq!(auction.pass(pos::PlayerPos::from_n(1, 5)), Ok(AuctionState::Bidding)); assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Err(BidError::TurnError)); assert_eq!(auction.pass(pos::PlayerPos::from_n(2, 5)), Ok(AuctionState::Bidding)); // Someone bids. 
assert_eq!( auction.bid(pos::PlayerPos::from_n(3, 5), Target::Garde, false), Ok(AuctionState::Bidding) ); assert_eq!( auction.bid(pos::PlayerPos::from_n(4, 5), Target::Garde, false).err(), Some(BidError::NonRaisedTarget) ); // Surbid assert_eq!( auction.bid(pos::PlayerPos::from_n(4, 5), Target::GardeSans, false), Ok(AuctionState::Bidding) ); // Allready passed assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Err(BidError::TurnError)); // Last to pass assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Ok(AuctionState::Over)); assert!(auction.state == AuctionState::Over); match auction.complete() { Err(_) => assert!(false), _ => {} } } }
{ if target.multiplier() <= contract.target.multiplier() { return Err(BidError::NonRaisedTarget); } }
conditional_block
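The conditional above is what forces each new bid to strictly raise the target multiplier. A sketch of the resulting flow, written as it would sit in this file's tests module (it reuses pos::PlayerPos::from_n exactly as the existing test does; the test name is illustrative):

#[test]
fn bids_must_raise_the_target() {
    let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5));
    auction.pass(pos::PlayerPos::from_n(0, 5)).unwrap();
    auction.pass(pos::PlayerPos::from_n(1, 5)).unwrap();
    auction.pass(pos::PlayerPos::from_n(2, 5)).unwrap();
    auction.bid(pos::PlayerPos::from_n(3, 5), Target::Garde, false).unwrap();
    // Same multiplier as the current contract -> rejected by can_bid().
    assert_eq!(
        auction.bid(pos::PlayerPos::from_n(4, 5), Target::Garde, false),
        Err(BidError::NonRaisedTarget)
    );
    // A strictly higher target is accepted.
    auction.bid(pos::PlayerPos::from_n(4, 5), Target::GardeSans, false).unwrap();
    // The only remaining bidder passes, so the auction is over.
    assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Ok(AuctionState::Over));
}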
bid.rs
//! Auctions and bidding during the first phase of the deal. use std::fmt; use std::str::FromStr; use serde::{Deserialize, Serialize}; use strum_macros::EnumIter; use super::cards; use super::deal; use super::pos; /// Goal set by a contract. /// /// Determines the winning conditions and the score on success. #[derive(EnumIter, PartialEq, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)] pub enum Target { Prise, Garde, GardeSans, GardeContre, } impl Target { /// Returns the score this target would give on success. pub fn multiplier(self) -> i32 { match self { Target::Prise => 1, Target::Garde => 2, Target::GardeSans => 4, Target::GardeContre => 6, } } pub fn to_str(self) -> &'static str { match self { Target::Prise => "prise", Target::Garde => "garde", Target::GardeSans => "garde sans", Target::GardeContre => "garde contre", } } } impl FromStr for Target { type Err = String; fn from_str(s: &str) -> Result<Self, String> { match s { "prise" => Ok(Target::Prise), "garde" => Ok(Target::Garde), "garde sans" => Ok(Target::GardeSans), "garde contre" => Ok(Target::GardeContre), _ => Err(format!("invalid target: {}", s)), } } } impl ToString for Target { fn to_string(&self) -> String { self.to_str().to_owned() } } /// Contract taken #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct Contract { /// Initial author of the contract. pub author: pos::PlayerPos, /// Target for the contract. pub target: Target, /// Slam asked ? pub slam: bool, } impl Contract { fn new(author: pos::PlayerPos, target: Target, slam: bool) -> Self { Contract { author, target, slam, } } } impl ToString for Contract { fn to_string(&self) -> String { let str_slam = if self.slam { " SLAM" } else { "" }; format!("{}{}", self.target.to_str(), str_slam) } } /// Current state of an auction #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] pub enum AuctionState { /// Players are still bidding for the highest contract Bidding, /// Auction is over, deal will begin Over, /// No contract was taken, a new deal will start Cancelled, } /// Bidding status for a player #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] pub enum BidStatus { Todo, Passed, Bid, } /// Represents the entire auction process. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Auction { contract: Option<Contract>, players_status: Vec<BidStatus>, first: pos::PlayerPos, state: AuctionState, players: Vec<cards::Hand>, dog: cards::Hand, } /// Possible error occuring during an Auction. #[derive(PartialEq, Debug)] pub enum BidError { /// The auction was closed and does not accept more contracts. AuctionClosed, /// A player tried bidding before his turn. TurnError, /// The given bid was not higher than the previous one. NonRaisedTarget, /// Cannot complete the auction when it is still running. AuctionRunning, /// No contract was offered during the auction, it cannot complete. NoContract, } impl fmt::Display for BidError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
} impl Auction { /// Starts a new auction, starting with the player `first`. pub fn new(first: pos::PlayerPos) -> Self { let count = first.count as usize; let (hands, dog) = super::deal_hands(count); Auction { contract: None, players_status: vec![BidStatus::Todo; count], state: AuctionState::Bidding, first, players: hands, dog } } /// Override Auction hands (for tests) pub fn set_hands(&mut self, hands: Vec<cards::Hand>, dog: cards::Hand) { self.players = hands; self.dog = dog; } /// Returns the current state of the auctions. pub fn get_state(&self) -> AuctionState { self.state } fn can_bid(&self, target: Target) -> Result<(), BidError> { if self.state != AuctionState::Bidding { return Err(BidError::AuctionClosed); } if let Some(contract) = self.contract.clone() { if target.multiplier() <= contract.target.multiplier() { return Err(BidError::NonRaisedTarget); } } Ok(()) } fn get_player_status(&self, pos: pos::PlayerPos) -> BidStatus { self.players_status[pos.to_n()] } fn set_player_status(&mut self, pos: pos::PlayerPos, status: BidStatus) { self.players_status[pos.to_n()] = status; } /// Returns the player that is expected to bid next. pub fn next_player(&self) -> pos::PlayerPos { let pos_init = if let Some(contract) = self.contract.clone() { contract.author.next() } else { self.first }; let mut next_pos = pos_init; while self.get_player_status(next_pos) != BidStatus::Todo { next_pos = next_pos.next(); if next_pos == pos_init { panic!("all players have talked") } } next_pos } /// Check if there are still players waiting for bidding fn no_player_left(&self) -> bool { !self.players_status.contains(&BidStatus::Todo) } /// Bid a new, higher contract. pub fn bid( &mut self, pos: pos::PlayerPos, target: Target, slam: bool, ) -> Result<AuctionState, BidError> { if pos != self.next_player() { return Err(BidError::TurnError); } self.can_bid(target)?; // Reset previous bidder status if let Some(contract) = self.contract.clone() { self.set_player_status(contract.author, BidStatus::Todo); } let contract = Contract::new(pos, target, slam); self.contract = Some(contract); self.set_player_status(pos, BidStatus::Bid); // If we're all the way to the top, there's nowhere else to go if self.no_player_left() || target == Target::GardeContre { self.state = AuctionState::Over; } Ok(self.state) } /// Look at the last offered contract. /// /// Returns `None` if no contract was offered yet. pub fn current_contract(&self) -> Option<&Contract> { self.contract.as_ref() } /// Returns the players cards. pub fn hands(&self) -> &Vec<cards::Hand> { &self.players } /// The current player passes his turn. /// /// Returns the new auction state : /// /// * `AuctionState::Cancelled` if all players passed /// * `AuctionState::Over` if 5 players passed in a row /// * The previous state otherwise pub fn pass(&mut self, pos: pos::PlayerPos) -> Result<AuctionState, BidError> { if pos != self.next_player() { return Err(BidError::TurnError); } self.set_player_status(pos, BidStatus::Passed); if self.no_player_left() { self.state = if self.contract.is_some() { AuctionState::Over } else { AuctionState::Cancelled } } Ok(self.state) } /// Consumes a complete auction to enter the second deal phase. 
/// /// If the auction was ready, returns `Ok<DealState>` pub fn complete(&self) -> Result<deal::DealState, BidError> { if self.state != AuctionState::Over { Err(BidError::AuctionRunning) // } else if self.contract.is_none() { } else { if let Some(contract) = self.contract.clone() { Ok(deal::DealState::new( self.first, self.players.clone(), self.dog, contract, pos::PlayerPos::from_n(0,5), //XXX placeholder )) } else { Err(BidError::NoContract) } } } } #[cfg(test)] mod tests { use super::*; use crate::pos; #[test] fn test_auction() { let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5)); assert!(auction.state == AuctionState::Bidding); assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Ok(AuctionState::Bidding)); assert_eq!(auction.pass(pos::PlayerPos::from_n(1, 5)), Ok(AuctionState::Bidding)); assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Err(BidError::TurnError)); assert_eq!(auction.pass(pos::PlayerPos::from_n(2, 5)), Ok(AuctionState::Bidding)); // Someone bids. assert_eq!( auction.bid(pos::PlayerPos::from_n(3, 5), Target::Garde, false), Ok(AuctionState::Bidding) ); assert_eq!( auction.bid(pos::PlayerPos::from_n(4, 5), Target::Garde, false).err(), Some(BidError::NonRaisedTarget) ); // Surbid assert_eq!( auction.bid(pos::PlayerPos::from_n(4, 5), Target::GardeSans, false), Ok(AuctionState::Bidding) ); // Allready passed assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Err(BidError::TurnError)); // Last to pass assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Ok(AuctionState::Over)); assert!(auction.state == AuctionState::Over); match auction.complete() { Err(_) => assert!(false), _ => {} } } }
{ match *self { BidError::AuctionClosed => write!(f, "auctions are closed"), BidError::TurnError => write!(f, "invalid turn order"), BidError::NonRaisedTarget => write!(f, "bid must be higher than current contract"), BidError::AuctionRunning => write!(f, "the auction is still running"), BidError::NoContract => write!(f, "no contract was offered"), } }
identifier_body
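Because BidError implements Display, the messages in the fmt body above are what callers see when formatting an error, for example:

assert_eq!(BidError::AuctionClosed.to_string(), "auctions are closed");
assert_eq!(BidError::NonRaisedTarget.to_string(), "bid must be higher than current contract");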
bid.rs
//! Auctions and bidding during the first phase of the deal. use std::fmt; use std::str::FromStr; use serde::{Deserialize, Serialize}; use strum_macros::EnumIter; use super::cards; use super::deal; use super::pos; /// Goal set by a contract. /// /// Determines the winning conditions and the score on success. #[derive(EnumIter, PartialEq, PartialOrd, Clone, Copy, Debug, Serialize, Deserialize)] pub enum Target { Prise, Garde, GardeSans, GardeContre, } impl Target { /// Returns the score this target would give on success. pub fn multiplier(self) -> i32 { match self { Target::Prise => 1, Target::Garde => 2, Target::GardeSans => 4, Target::GardeContre => 6, } } pub fn to_str(self) -> &'static str { match self { Target::Prise => "prise", Target::Garde => "garde", Target::GardeSans => "garde sans", Target::GardeContre => "garde contre", } } } impl FromStr for Target { type Err = String; fn from_str(s: &str) -> Result<Self, String> { match s { "prise" => Ok(Target::Prise), "garde" => Ok(Target::Garde), "garde sans" => Ok(Target::GardeSans), "garde contre" => Ok(Target::GardeContre), _ => Err(format!("invalid target: {}", s)), } } } impl ToString for Target { fn to_string(&self) -> String { self.to_str().to_owned() } } /// Contract taken #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] pub struct Contract { /// Initial author of the contract. pub author: pos::PlayerPos, /// Target for the contract. pub target: Target, /// Slam asked ? pub slam: bool, } impl Contract { fn new(author: pos::PlayerPos, target: Target, slam: bool) -> Self { Contract { author, target, slam, } } } impl ToString for Contract { fn to_string(&self) -> String { let str_slam = if self.slam { " SLAM" } else { "" }; format!("{}{}", self.target.to_str(), str_slam) } } /// Current state of an auction #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] pub enum AuctionState { /// Players are still bidding for the highest contract Bidding, /// Auction is over, deal will begin Over, /// No contract was taken, a new deal will start Cancelled, } /// Bidding status for a player #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)] pub enum BidStatus { Todo, Passed, Bid, } /// Represents the entire auction process. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct Auction { contract: Option<Contract>, players_status: Vec<BidStatus>, first: pos::PlayerPos, state: AuctionState, players: Vec<cards::Hand>, dog: cards::Hand, } /// Possible error occuring during an Auction. #[derive(PartialEq, Debug)] pub enum BidError { /// The auction was closed and does not accept more contracts. AuctionClosed, /// A player tried bidding before his turn. TurnError, /// The given bid was not higher than the previous one. NonRaisedTarget, /// Cannot complete the auction when it is still running. AuctionRunning, /// No contract was offered during the auction, it cannot complete. NoContract, } impl fmt::Display for BidError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { BidError::AuctionClosed => write!(f, "auctions are closed"), BidError::TurnError => write!(f, "invalid turn order"), BidError::NonRaisedTarget => write!(f, "bid must be higher than current contract"), BidError::AuctionRunning => write!(f, "the auction are still running"), BidError::NoContract => write!(f, "no contract was offered"), } } } impl Auction { /// Starts a new auction, starting with the player `first`. 
pub fn new(first: pos::PlayerPos) -> Self { let count = first.count as usize; let (hands, dog) = super::deal_hands(count); Auction { contract: None, players_status: vec![BidStatus::Todo; count], state: AuctionState::Bidding, first, players: hands, dog } } /// Override Auction hands (for tests) pub fn set_hands(&mut self, hands: Vec<cards::Hand>, dog: cards::Hand) { self.players = hands; self.dog = dog; } /// Returns the current state of the auctions. pub fn get_state(&self) -> AuctionState { self.state } fn can_bid(&self, target: Target) -> Result<(), BidError> { if self.state != AuctionState::Bidding { return Err(BidError::AuctionClosed); } if let Some(contract) = self.contract.clone() { if target.multiplier() <= contract.target.multiplier() { return Err(BidError::NonRaisedTarget); } } Ok(()) } fn get_player_status(&self, pos: pos::PlayerPos) -> BidStatus { self.players_status[pos.to_n()] } fn set_player_status(&mut self, pos: pos::PlayerPos, status: BidStatus) { self.players_status[pos.to_n()] = status; } /// Returns the player that is expected to bid next. pub fn next_player(&self) -> pos::PlayerPos { let pos_init = if let Some(contract) = self.contract.clone() { contract.author.next() } else { self.first }; let mut next_pos = pos_init; while self.get_player_status(next_pos) != BidStatus::Todo { next_pos = next_pos.next(); if next_pos == pos_init { panic!("all players have talked") } } next_pos } /// Check if there are still players waiting for bidding fn no_player_left(&self) -> bool { !self.players_status.contains(&BidStatus::Todo) } /// Bid a new, higher contract. pub fn bid( &mut self, pos: pos::PlayerPos, target: Target, slam: bool, ) -> Result<AuctionState, BidError> { if pos != self.next_player() { return Err(BidError::TurnError); } self.can_bid(target)?;
let contract = Contract::new(pos, target, slam); self.contract = Some(contract); self.set_player_status(pos, BidStatus::Bid); // If we're all the way to the top, there's nowhere else to go if self.no_player_left() || target == Target::GardeContre { self.state = AuctionState::Over; } Ok(self.state) } /// Look at the last offered contract. /// /// Returns `None` if no contract was offered yet. pub fn current_contract(&self) -> Option<&Contract> { self.contract.as_ref() } /// Returns the players cards. pub fn hands(&self) -> &Vec<cards::Hand> { &self.players } /// The current player passes his turn. /// /// Returns the new auction state : /// /// * `AuctionState::Cancelled` if all players passed /// * `AuctionState::Over` if 5 players passed in a row /// * The previous state otherwise pub fn pass(&mut self, pos: pos::PlayerPos) -> Result<AuctionState, BidError> { if pos != self.next_player() { return Err(BidError::TurnError); } self.set_player_status(pos, BidStatus::Passed); if self.no_player_left() { self.state = if self.contract.is_some() { AuctionState::Over } else { AuctionState::Cancelled } } Ok(self.state) } /// Consumes a complete auction to enter the second deal phase. /// /// If the auction was ready, returns `Ok<DealState>` pub fn complete(&self) -> Result<deal::DealState, BidError> { if self.state != AuctionState::Over { Err(BidError::AuctionRunning) // } else if self.contract.is_none() { } else { if let Some(contract) = self.contract.clone() { Ok(deal::DealState::new( self.first, self.players.clone(), self.dog, contract, pos::PlayerPos::from_n(0,5), //XXX placeholder )) } else { Err(BidError::NoContract) } } } } #[cfg(test)] mod tests { use super::*; use crate::pos; #[test] fn test_auction() { let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5)); assert!(auction.state == AuctionState::Bidding); assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Ok(AuctionState::Bidding)); assert_eq!(auction.pass(pos::PlayerPos::from_n(1, 5)), Ok(AuctionState::Bidding)); assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Err(BidError::TurnError)); assert_eq!(auction.pass(pos::PlayerPos::from_n(2, 5)), Ok(AuctionState::Bidding)); // Someone bids. assert_eq!( auction.bid(pos::PlayerPos::from_n(3, 5), Target::Garde, false), Ok(AuctionState::Bidding) ); assert_eq!( auction.bid(pos::PlayerPos::from_n(4, 5), Target::Garde, false).err(), Some(BidError::NonRaisedTarget) ); // Surbid assert_eq!( auction.bid(pos::PlayerPos::from_n(4, 5), Target::GardeSans, false), Ok(AuctionState::Bidding) ); // Allready passed assert_eq!(auction.pass(pos::PlayerPos::from_n(0, 5)), Err(BidError::TurnError)); // Last to pass assert_eq!(auction.pass(pos::PlayerPos::from_n(3, 5)), Ok(AuctionState::Over)); assert!(auction.state == AuctionState::Over); match auction.complete() { Err(_) => assert!(false), _ => {} } } }
// Reset previous bidder status if let Some(contract) = self.contract.clone() { self.set_player_status(contract.author, BidStatus::Todo); }
random_line_split
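One detail of the bid() path shown above: a GardeContre bid closes the auction immediately, without waiting for the remaining players. A sketch in the style of the existing tests (the test name is illustrative):

#[test]
fn garde_contre_ends_bidding_immediately() {
    let mut auction = Auction::new(pos::PlayerPos::from_n(0, 5));
    auction
        .bid(pos::PlayerPos::from_n(0, 5), Target::GardeContre, false)
        .unwrap();
    assert_eq!(auction.get_state(), AuctionState::Over);
}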
bot.py
import discord from discord.ext import commands import datetime import time import sys import asyncio import os from cogs.utils import launcher import json import logging import random from cogs.utils.paginator import Pages import io import textwrap import traceback from contextlib import redirect_stdout logger = logging.getLogger('discord') logger.setLevel(logging.INFO) handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w') handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s')) logger.addHandler(handler) ##launcher.check() if 'TOKEN' in os.environ: heroku = True TOKEN = os.environ['TOKEN'] info = launcher.bot() owner = '300396755193954306' startup_extensions = [ 'cogs.mod', 'cogs.misc', 'cogs.util', 'cogs.info', 'cogs.fun' ] Client = discord.Client() description = ('Bot made with discord.py by -= shadeyg56 =-#1702. ' 'Made by shadeyg56 \n') async def get_pre(bot, message): with open('cogs/utils/t_config.json') as f: config = json.loads(f.read()) try: if message.server.id not in config: return 'd.' except: pass else: return config[message.server.id]['prefix'] bot = commands.Bot(description=description, command_prefix='d.', pm_help=None) bot.remove_command('help') @bot.event async def on_ready(): print('------------------------------------') print('THE BOT IS ONLINE') print('------------------------------------') print("Name: {}".format(bot.user.name)) print('Author: shadeyg56') print("ID: {}".format(bot.user.id)) print('DV: {}'.format(discord.__version__)) bot.uptime = datetime.datetime.now() embed=discord.Embed(title='Good Morning', description='Up and at em', color=0xed) embed.set_footer(text='Darkness ready for use') server = len(bot.servers) channel = bot.get_channel('356599668739670049') users = sum(1 for _ in bot.get_all_members()) await bot.send_message(channel, embed=embed) while 1 == 1: await bot.change_presence(game=discord.Game(name='with {} servers'.format(server))) await asyncio.sleep(10) await bot.change_presence(game=discord.Game(name='with {} users'.format(users))) await asyncio.sleep(10) await bot.change_presence(game=discord.Game(name='PREFIX = d.')) await asyncio.sleep(10) await bot.change_presence(game=discord.Game(name='Currently WIP | Darkness')) await asyncio.sleep(10) await bot.change_presence(game=discord.Game(name='d.support | d.invite')) await asyncio.sleep(25) @bot.command(pass_context=True) async def help(ctx): await bot.delete_message(ctx.message) msg = open('cogs/utils/help.txt').read().replace('\\u200b','\u200b').splitlines() for i, line in enumerate(msg): if line.strip().startswith('.'): x = line.strip().strip('.') x = ctx.prefix + x msg[i] = '`' + x + '`' try: p = Pages(bot, message=ctx.message, entries=msg) p.embed.set_author(name='Help - Darkness Commands', icon_url=bot.user.avatar_url) p.embed.color = 0x00FFFF await p.paginate() except: embed = discord.Embed(title='Darkness Commands', color=0xed) embed.add_field(name='Moderation:', value='kick, ban, unban, softban, warn, purge') embed.add_field(name='Information:', value='info, serverinfo, userinfo, avatar') embed.add_field(name='Miscellaneous:', value='ping, suggest, invite, support') embed.add_field(name='Utilities:', value='calc, remind, addrole, removerole') embed.add_field(name='Fun:', value='8ball, cat') embed.set_footer(text='Bot Dev: -= shadeyg56 =-#1702') await bot.say(embed=embed) def owner_only(): return commands.check(lambda ctx: ctx.message.author == ctx.message.server.owner) def is_owner(): return commands.check(lambda ctx: 
ctx.message.author.id == owner) @bot.event async def on_member_join(member): darkness = bot.get_channel('356599668739670049') if member.server.id == '356599668739670048': await bot.send_message(darkness, 'Welcome {0.mention} to {}. Please read #info-and-rules and enjoy your stay. Do d.help to check out the bot'.format(member, server)) kats = bot.get_channel('313863292126756864') if member.server.id == '294262760752152576': await bot.send_message(kats, '{0.mention} Welcome to **Dragons and Kats**! Have a great time here and enjoy yourselves!!!:wink: !'.format(member)) else: print('Member joined {}, but message not sent'.format(member.server)) @bot.event async def on_command(command, ctx): if str(command) == 'eval': return print('------------------------------------') print('Command > {}{} < invoked with > {} <\nServer: {} | {}\nUser: {} | {}' .format(ctx.prefix, command, ctx.invoked_with, ctx.message.server.name, ctx.message.server.id, ctx.message.author.name, ctx.message.author.id)) @bot.event async def on_member_remove(member): server = member.server with open('cogs/utils/t_config.json') as f: data = json.loads(f.read()) status = data[server.id]["leave"]["status"] if status: msg = data[server.id]["leave"]["msg"] channel = data[server.id]['leave']['channel'] if channel == 'default': channel = server else: channel = discord.utils.get(server.channels, id=channel) await bot.send_message(channel, msg.format(member, server)) @bot.event async def on_server_join(server): embed = discord.Embed(title='Darkness Info', color=0xed) owner = server.owner servers = len(bot.servers) embed.add_field(name='Author', value='<@300396755193954306>') embed.add_field(name='Servers', value=servers) embed.add_field(name='Prefix', value='d.') embed.set_footer(text='Powered by discord.py') embed.set_thumbnail(url='http://data.whicdn.com/images/150102219/large.gif') embed.add_field(name='Invite', value='https://discordapp.com/oauth2/authorize?client_id=355189919410421760&scope=bot&permissions=66186303') embed.add_field(name='Support', value='https://discord.gg/Jjdp8hf') embed.add_field(name='GitHub', value='https://github.com/shadeyg56/darkness') await bot.send_message(owner, embed=embed) def fmt_help(page): cmd = '' for line in page.splitlines(): if line.startswith('.'): cmd = line.strip('.') break em = discord.Embed(color=0x00FFFF) em.set_author(name='Help - {}'.format(cmd)) async def send_cmd_help(ctx): if ctx.invoked_subcommand: pages = bot.formatter.format_help_for(ctx, ctx.invoked_subcommand) for page in pages: # page = page.strip('```css').strip('```') await bot.send_message(ctx.message.channel, page) print('Sent command help') else: pages = bot.formatter.format_help_for(ctx, ctx.command) for page in pages: await bot.send_message(ctx.message.channel, page) print('Sent command help') @bot.event async def on_command_error(error, ctx): print(error) channel = ctx.message.channel if isinstance(error, commands.MissingRequiredArgument): await send_cmd_help(ctx) print('Sent command help') elif isinstance(error, commands.BadArgument): await send_cmd_help(ctx) print('Sent command help') elif isinstance(error, commands.DisabledCommand): await bot.send_message(channel, "That command is disabled.") print('Command disabled.') elif isinstance(error, commands.CommandInvokeError): # A bit hacky, couldn't find a better way no_dms = "Cannot send messages to this user" is_help_cmd = ctx.command.qualified_name == "help" is_forbidden = isinstance(error.original, discord.Forbidden) if is_help_cmd and is_forbidden and 
error.original.text == no_dms: msg = ("I couldn't send the help message to you in DM. Either" " you blocked me or you disabled DMs in this server.") await bot.send_message(channel, msg) return @bot.command(pass_context=True,name='cog') @owner_only() async def _reload(ctx,*, module : str): """Reloads a module.""" channel = ctx.message.channel module = 'cogs.'+module try: bot.unload_extension(module) x = await bot.send_message(channel,'Successfully Unloaded.') bot.load_extension(module) x = await bot.edit_message(x,'Successfully Reloaded.') except Exception as e: x = await bot.edit_message(x,'\N{PISTOL}') await bot.say('{}: {}'.format(type(e).__name__, e)) else: x = await bot.edit_message(x,'Done. \N{OK HAND SIGN}') @bot.command(name='presence') async def _set(Type=None,*,thing=None): """Change the bot's discord game/stream!""" server = len(bot.servers) if Type is None: await bot.say('Usage: `.presence [game/stream] [message]`') else: if Type.lower() == 'stream': await bot.change_presence(game=discord.Game(name=thing,type=1,url='https://www.twitch.tv/a'),status='online') await bot.say('Set presence to. `Streaming {}`'.format(thing)) elif Type.lower() == 'game': await bot.change_presence(game=discord.Game(name=thing)) await bot.say('Set presence to `Playing {}`'.format(thing)) elif Type.lower() == 'clear': await bot.change_presence(game=None) await bot.say('Cleared Presence') elif Type.lower() == 'servers': await bot.change_presence(game=discord.Game(name='with {} servers'.format(server))) await bot.say('**Im now playing with {} servers.**'.format(server)) else: await bot.say('Usage: `.presence [game/stream] [message]`') @bot.command(pass_context=True) @is_owner() async def _leave_all_servers_(ctx): for server in bot.servers: await bot.leave_server(server) await bot.say('I left `{}`'.format(server.name)) @bot.command(pass_context=True) async def servers(ctx): servers = ', '.join([i.name for i in bot.servers]).strip(', ') await bot.say('**Current list of servers:**\n ```bf\n{}```'.format(servers)) @bot.command(pass_context=True) @is_owner() async def _leave_server(ctx, server): to_leave = discord.utils.get(bot.servers, id=str(server)) try: await bot.leave_server(to_leave) except: await self.bot.say('Failed.') else: await self.bot.say('Successfully left {}'.format(to_leave.name)) @bot.command(pass_context=True) async def register(ctx): server = ctx.message.server channel = discord.utils.get(server.channels, name='server-event') user = ctx.message.author with open('cogs/utils/registrations.txt') as f: data = f.read() print(data ) if ctx.message.channel != channel: await bot.say('You can only register in {}'.format(channel.mention)) return if str(user) in data: await bot.delete_message(ctx.message) await bot.send_message(user, "You can't register more than once.") return with open('cogs/utils/registrations.txt','a') as f: f.write(str(user)+'\n') role = discord.utils.get(server.roles, name='4row') await bot.add_roles(user, role) await bot.add_reaction(ctx.message, '\u2705') @bot.command(pass_context = True) @is_owner() async def shutdown(ctx): timestamp = ctx.message.timestamp embed=discord.Embed(title='Good Night', description='See you tomorrow', color=0xed, timestamp=timestamp) embed.set_footer(text='Darkness no longer online') await bot.say(embed=embed) await bot.logout() if __name__ == "__main__": for extension in startup_extensions: try: bot.load_extension(extension) print('Loaded: {}'.format(extension)) except Exception as e: exc = '{}: {}'.format(type(e).__name__, e) print('Error on load: 
{}\n{}'.format(extension, exc)) def cleanup_code( content): """Automatically removes code blocks from the code.""" # remove ```py\n``` if content.startswith('```') and content.endswith('```'): return '\n'.join(content.split('\n')[1:-1]) # remove `foo` return content.strip('` \n') def get_syntax_error(e): if e.text is None: return '```py\n{0.__class__.__name__}: {0}\n```'.format(e) return '```py\n{0.text}{1:>{0.offset}}\n{2}: {0}```'.format(e, '^', type(e).__name__) async def
(ctx, body): if body.startswith('```') and body.endswith('```'): content = '\n'.join(body.split('\n')[1:-1]) else: content = body.strip('`') await bot.edit_message(ctx.message, '```py\n'+content+'```') @bot.command(pass_context=True, name='eval') @is_owner() async def _eval(ctx, *, body: str): '''Run python scripts on discord!''' env = { 'bot': bot, 'ctx': ctx, 'channel': ctx.message.channel, 'author': ctx.message.author, 'server': ctx.message.server, 'message': ctx.message, } env.update(globals()) body = cleanup_code(content=body) stdout = io.StringIO() to_compile = 'async def func():\n%s' % textwrap.indent(body, ' ') try: exec(to_compile, env) except SyntaxError as e: return await bot.say(get_syntax_error(e)) func = env['func'] try: with redirect_stdout(stdout): ret = await func() except Exception as e: value = stdout.getvalue() x = await bot.say('```py\n{}{}\n```'.format(value, traceback.format_exc())) try: await bot.add_reaction(x, '\U0001f534') except: pass else: value = stdout.getvalue() if TOKEN in value: value = value.replace(TOKEN,"[EXPUNGED]") if ret is None: if value: try: x = await bot.say('```py\n%s\n```' % value) except: x = await bot.say('```py\n\'Result was too long.\'```') try: await bot.add_reaction(x, '\U0001f535') except: pass else: try: await bot.add_reaction(ctx.message, '\U0001f535') except: pass else: try: x = await bot.say('```py\n%s%s\n```' % (value, ret)) except: x = await bot.say('```py\n\'Result was too long.\'```') try: await bot.add_reaction(x, '\U0001f535') except: pass @bot.command(pass_context = True) async def devcontact(ctx, *, msg: str): dev = '@-= shadeyg56™ =-#1702' user = ctx.message.author await bot.send_message(dev, '{} sent the following message: {}'.format(user, msg)) await bot.say('Your message has been sent. It will be checked by the dev asap. If your message was a troll or you keep resending/spamming a message you will be blacklisted from the command') await bot.delete_message(ctx.message) @bot.command(pass_context = True) async def dm(ctx, user: discord.Member, *, msg: str): if ctx.message.author.id == '300396755193954306': await bot.send_message(user, '{}'.format(msg)) await bot.delete_message(ctx.message) else: message = await bot.say('Only the bot dev can use this command') await asyncio.sleep(5) await bot.delete_message(message) await bot.delete_message(ctx.message) @bot.command(pass_context=True) @is_owner() async def blacklist(ctx, user_id: str): with open('cogs/utils/blacklists.json') as f: data = json.loads(f.read()) data = data[user_id] = "blacklisted" data = json.dumps(data, indent=4, sort_keys=True) with open('cogs/utils/blacklists.json', 'w') as f: f.write(data) await bot.say('Succesfully blacklisted id {}'.format(user_id)) @asyncio.coroutine async def on_message(message): channel = bot.get_channel('356602525740433408') if message.channel.id == '356602525740433408': await bot.send_message(channel, 'test') await bot.process_commands(message) bot.run(TOKEN)
to_code_block
identifier_name
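The `_eval` command in the bot.py code above follows a common pattern: wrap the submitted source in a temporary `async def func():` body, run it with `exec`, await the resulting coroutine, and capture anything printed through `redirect_stdout`. Below is a minimal standalone sketch of that pattern, assuming only the Python standard library; the helper name `run_async_snippet` and the example body are illustrative and not part of the bot.

import asyncio
import io
import textwrap
import traceback
from contextlib import redirect_stdout

async def run_async_snippet(body, env=None):
    # Wrap the snippet so that `await` and `return` are legal inside it.
    env = dict(env or {})
    stdout = io.StringIO()
    to_compile = 'async def func():\n' + textwrap.indent(body, '    ')
    try:
        exec(to_compile, env)          # defines env['func']
    except SyntaxError:
        return traceback.format_exc()
    try:
        with redirect_stdout(stdout):  # capture print() output from the snippet
            ret = await env['func']()
    except Exception:
        return stdout.getvalue() + traceback.format_exc()
    return stdout.getvalue() + ('' if ret is None else repr(ret))

# Example (prints 'hello' then 2):
# print(asyncio.run(run_async_snippet("print('hello')\nreturn 1 + 1")))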
trigger.go
/* Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package trigger import ( "context" "net/url" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/reconciler/names" "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker" brokerresources "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker/resources" "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/path" "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/resources" "github.com/knative/eventing/pkg/utils/resolve" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) const ( // controllerAgentName is the string used by this controller to identify // itself when creating events. controllerAgentName = "trigger-controller" // Name of the corev1.Events emitted from the reconciliation process. triggerReconciled = "TriggerReconciled" triggerReconcileFailed = "TriggerReconcileFailed" triggerUpdateStatusFailed = "TriggerUpdateStatusFailed" subscriptionDeleteFailed = "SubscriptionDeleteFailed" subscriptionCreateFailed = "SubscriptionCreateFailed" ) type reconciler struct { client client.Client dynamicClient dynamic.Interface recorder record.EventRecorder logger *zap.Logger } // Verify the struct implements reconcile.Reconciler. var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a function that returns a Trigger controller. func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Triggers. r := &reconciler{ recorder: mgr.GetRecorder(controllerAgentName), logger: logger, } c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: r, }) if err != nil { return nil, err } // Watch Triggers. if err = c.Watch(&source.Kind{Type: &v1alpha1.Trigger{}}, &handler.EnqueueRequestForObject{}); err != nil { return nil, err } // Watch all the resources that the Trigger reconciles. 
for _, t := range []runtime.Object{&corev1.Service{}, &istiov1alpha3.VirtualService{}, &v1alpha1.Subscription{}} { err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{OwnerType: &v1alpha1.Trigger{}, IsController: true}) if err != nil { return nil, err } } // Watch for Broker changes. E.g. if the Broker is deleted and recreated, we need to reconcile // the Trigger again. if err = c.Watch(&source.Kind{Type: &v1alpha1.Broker{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: &mapBrokerToTriggers{r: r}}); err != nil { return nil, err } // TODO reconcile after a change to the subscriber. I'm not sure how this is possible, but we should do it if we // can find a way. return c, nil } // mapBrokerToTriggers maps Broker changes to all the Triggers that correspond to that Broker. type mapBrokerToTriggers struct { r *reconciler } // Map implements handler.Mapper.Map. func (b *mapBrokerToTriggers) Map(o handler.MapObject) []reconcile.Request { ctx := context.Background() triggers := make([]reconcile.Request, 0) opts := &client.ListOptions{ Namespace: o.Meta.GetNamespace(), // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, } for { tl := &v1alpha1.TriggerList{} if err := b.r.client.List(ctx, opts, tl); err != nil { b.r.logger.Error("Error listing Triggers when Broker changed. Some Triggers may not be reconciled.", zap.Error(err), zap.Any("broker", o)) return triggers } for _, t := range tl.Items { if t.Spec.Broker == o.Meta.GetName() { triggers = append(triggers, reconcile.Request{ NamespacedName: types.NamespacedName{ Namespace: t.Namespace, Name: t.Name, }, }) } } if tl.Continue != "" { opts.Raw.Continue = tl.Continue } else { return triggers } } } // InjectClient implements controller runtime's inject.Client. func (r *reconciler) InjectClient(c client.Client) error { r.client = c return nil } // InjectConfig implements controller runtime's inject.Config. func (r *reconciler) InjectConfig(c *rest.Config) error { var err error r.dynamicClient, err = dynamic.NewForConfig(c) return err } // Reconcile compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the Trigger resource // with the current status of the resource. func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) { ctx := context.TODO() ctx = logging.WithLogger(ctx, r.logger.With(zap.Any("request", request))) trigger := &v1alpha1.Trigger{} err := r.client.Get(ctx, request.NamespacedName, trigger) if errors.IsNotFound(err) { logging.FromContext(ctx).Info("Could not find Trigger") return reconcile.Result{}, nil } if err != nil { logging.FromContext(ctx).Error("Could not get Trigger", zap.Error(err)) return reconcile.Result{}, err } // Reconcile this copy of the Trigger and then write back any status updates regardless of // whether the reconcile error out. 
reconcileErr := r.reconcile(ctx, trigger) if reconcileErr != nil { logging.FromContext(ctx).Error("Error reconciling Trigger", zap.Error(reconcileErr)) r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerReconcileFailed, "Trigger reconciliation failed: %v", reconcileErr) } else { logging.FromContext(ctx).Debug("Trigger reconciled") r.recorder.Event(trigger, corev1.EventTypeNormal, triggerReconciled, "Trigger reconciled") } if _, err = r.updateStatus(trigger); err != nil { logging.FromContext(ctx).Error("Failed to update Trigger status", zap.Error(err)) r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerUpdateStatusFailed, "Failed to update Trigger's status: %v", err) return reconcile.Result{}, err } // Requeue if the resource is not ready return reconcile.Result{}, reconcileErr } func (r *reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error { t.Status.InitializeConditions() // 1. Verify the Broker exists. // 2. Get the Broker's: // - Filter Channel // - Ingress Channel // - Filter Service // 3. Find the Subscriber's URI. // 4. Creates a Subscription from the Broker's Filter Channel to this Trigger via the Broker's // Filter Service with a specific path, and reply set to the Broker's Ingress Channel. if t.DeletionTimestamp != nil { // Everything is cleaned up by the garbage collector. return nil } b, err := r.getBroker(ctx, t) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker", zap.Error(err)) t.Status.MarkBrokerDoesNotExist() return err } t.Status.MarkBrokerExists() brokerTrigger, err := r.getBrokerTriggerChannel(ctx, b) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker's Trigger Channel", zap.Error(err)) return err } brokerIngress, err := r.getBrokerIngressChannel(ctx, b) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker's Ingress Channel", zap.Error(err)) return err } // Get Broker filter service. filterSvc, err := r.getBrokerFilterService(ctx, b) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker's filter Service", zap.Error(err)) return err } subscriberURI, err := resolve.SubscriberSpec(ctx, r.dynamicClient, t.Namespace, t.Spec.Subscriber) if err != nil { logging.FromContext(ctx).Error("Unable to get the Subscriber's URI", zap.Error(err)) return err } t.Status.SubscriberURI = subscriberURI _, err = r.subscribeToBrokerChannel(ctx, t, brokerTrigger, brokerIngress, filterSvc) if err != nil { logging.FromContext(ctx).Error("Unable to Subscribe", zap.Error(err)) t.Status.MarkNotSubscribed("notSubscribed", "%v", err) return err } t.Status.MarkSubscribed() return nil } // updateStatus may in fact update the trigger's finalizers in addition to the status. func (r *reconciler)
(trigger *v1alpha1.Trigger) (*v1alpha1.Trigger, error) { ctx := context.TODO() objectKey := client.ObjectKey{Namespace: trigger.Namespace, Name: trigger.Name} latestTrigger := &v1alpha1.Trigger{} if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil { return nil, err } triggerChanged := false if !equality.Semantic.DeepEqual(latestTrigger.Finalizers, trigger.Finalizers) { latestTrigger.SetFinalizers(trigger.ObjectMeta.Finalizers) if err := r.client.Update(ctx, latestTrigger); err != nil { return nil, err } triggerChanged = true } if equality.Semantic.DeepEqual(latestTrigger.Status, trigger.Status) { return latestTrigger, nil } if triggerChanged { // Refetch latestTrigger = &v1alpha1.Trigger{} if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil { return nil, err } } latestTrigger.Status = trigger.Status if err := r.client.Status().Update(ctx, latestTrigger); err != nil { return nil, err } return latestTrigger, nil } // getBroker returns the Broker for Trigger 't' if exists, otherwise it returns an error. func (r *reconciler) getBroker(ctx context.Context, t *v1alpha1.Trigger) (*v1alpha1.Broker, error) { b := &v1alpha1.Broker{} name := types.NamespacedName{ Namespace: t.Namespace, Name: t.Spec.Broker, } err := r.client.Get(ctx, name, b) return b, err } // getBrokerTriggerChannel return the Broker's Trigger Channel if it exists, otherwise it returns an // error. func (r *reconciler) getBrokerTriggerChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) { return r.getChannel(ctx, b, labels.SelectorFromSet(broker.TriggerChannelLabels(b))) } // getBrokerIngressChannel return the Broker's Ingress Channel if it exists, otherwise it returns an // error. func (r *reconciler) getBrokerIngressChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) { return r.getChannel(ctx, b, labels.SelectorFromSet(broker.IngressChannelLabels(b))) } // getChannel returns the Broker's channel if it exists, otherwise it returns an error. func (r *reconciler) getChannel(ctx context.Context, b *v1alpha1.Broker, ls labels.Selector) (*v1alpha1.Channel, error) { list := &v1alpha1.ChannelList{} opts := &runtimeclient.ListOptions{ Namespace: b.Namespace, LabelSelector: ls, // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, } err := r.client.List(ctx, opts, list) if err != nil { return nil, err } for _, c := range list.Items { if metav1.IsControlledBy(&c, b) { return &c, nil } } return nil, k8serrors.NewNotFound(schema.GroupResource{}, "") } // getService returns the K8s service for trigger 't' if exists, // otherwise it returns an error. func (r *reconciler) getBrokerFilterService(ctx context.Context, b *v1alpha1.Broker) (*corev1.Service, error) { list := &corev1.ServiceList{} opts := &runtimeclient.ListOptions{ Namespace: b.Namespace, LabelSelector: labels.SelectorFromSet(brokerresources.FilterLabels(b)), // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, } err := r.client.List(ctx, opts, list) if err != nil { return nil, err } for _, svc := range list.Items { if metav1.IsControlledBy(&svc, b) { return &svc, nil } } return nil, k8serrors.NewNotFound(schema.GroupResource{}, "") } // subscribeToBrokerChannel subscribes service 'svc' to the Broker's channels. 
func (r *reconciler) subscribeToBrokerChannel(ctx context.Context, t *v1alpha1.Trigger, brokerTrigger, brokerIngress *v1alpha1.Channel, svc *corev1.Service) (*v1alpha1.Subscription, error) { uri := &url.URL{ Scheme: "http", Host: names.ServiceHostName(svc.Name, svc.Namespace), Path: path.Generate(t), } expected := resources.NewSubscription(t, brokerTrigger, brokerIngress, uri) sub, err := r.getSubscription(ctx, t) // If the resource doesn't exist, we'll create it if k8serrors.IsNotFound(err) { sub = expected err = r.client.Create(ctx, sub) if err != nil { return nil, err } return sub, nil } else if err != nil { return nil, err } // Update Subscription if it has changed. Ignore the generation. expected.Spec.DeprecatedGeneration = sub.Spec.DeprecatedGeneration if !equality.Semantic.DeepDerivative(expected.Spec, sub.Spec) { // Given that spec.channel is immutable, we cannot just update the Subscription. We delete // it and re-create it instead. err = r.client.Delete(ctx, sub) if err != nil { logging.FromContext(ctx).Info("Cannot delete subscription", zap.Error(err)) r.recorder.Eventf(t, corev1.EventTypeWarning, subscriptionDeleteFailed, "Delete Trigger's subscription failed: %v", err) return nil, err } sub = expected err = r.client.Create(ctx, sub) if err != nil { logging.FromContext(ctx).Info("Cannot create subscription", zap.Error(err)) r.recorder.Eventf(t, corev1.EventTypeWarning, subscriptionCreateFailed, "Create Trigger's subscription failed: %v", err) return nil, err } } return sub, nil } // getSubscription returns the subscription of trigger 't' if exists, // otherwise it returns an error. func (r *reconciler) getSubscription(ctx context.Context, t *v1alpha1.Trigger) (*v1alpha1.Subscription, error) { list := &v1alpha1.SubscriptionList{} opts := &runtimeclient.ListOptions{ Namespace: t.Namespace, LabelSelector: labels.SelectorFromSet(resources.SubscriptionLabels(t)), // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, } err := r.client.List(ctx, opts, list) if err != nil { return nil, err } for _, s := range list.Items { if metav1.IsControlledBy(&s, t) { return &s, nil } } return nil, k8serrors.NewNotFound(schema.GroupResource{}, "") }
updateStatus
identifier_name
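The record above masks the name of the reconciler's status-update helper. As a rough illustration of the pattern that helper follows — refetch the latest object, write finalizers and status through separate calls, and skip the status write when nothing changed — here is a minimal, self-contained Go sketch. The Trigger and fakeClient types and reflect.DeepEqual are stand-ins for the real v1alpha1.Trigger, the controller-runtime client, and equality.Semantic.DeepEqual; they are not the actual Knative code.

package main

import (
	"fmt"
	"reflect"
)

// Hypothetical, trimmed-down stand-ins for the real Trigger type and client.
type TriggerStatus struct{ Ready bool }

type Trigger struct {
	Finalizers []string
	Status     TriggerStatus
}

type fakeClient struct{ stored Trigger }

func (c *fakeClient) Get() Trigger           { return c.stored }
func (c *fakeClient) Update(t Trigger)       { c.stored.Finalizers = t.Finalizers }
func (c *fakeClient) StatusUpdate(t Trigger) { c.stored.Status = t.Status }

// updateStatus mirrors the helper's shape: refetch the latest object,
// write finalizers and status through separate update calls, and skip the
// status write entirely when nothing changed.
func updateStatus(c *fakeClient, desired Trigger) Trigger {
	latest := c.Get()
	if !reflect.DeepEqual(latest.Finalizers, desired.Finalizers) {
		latest.Finalizers = desired.Finalizers
		c.Update(latest)
	}
	if reflect.DeepEqual(latest.Status, desired.Status) {
		return latest // no-op: avoid a needless status update
	}
	latest.Status = desired.Status
	c.StatusUpdate(latest)
	return latest
}

func main() {
	c := &fakeClient{}
	out := updateStatus(c, Trigger{Finalizers: []string{"keep"}, Status: TriggerStatus{Ready: true}})
	fmt.Printf("%+v\n", out)
}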
trigger.go
/* Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package trigger import ( "context" "net/url" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/reconciler/names" "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker" brokerresources "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker/resources" "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/path" "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/resources" "github.com/knative/eventing/pkg/utils/resolve" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) const ( // controllerAgentName is the string used by this controller to identify // itself when creating events. controllerAgentName = "trigger-controller" // Name of the corev1.Events emitted from the reconciliation process. triggerReconciled = "TriggerReconciled" triggerReconcileFailed = "TriggerReconcileFailed" triggerUpdateStatusFailed = "TriggerUpdateStatusFailed" subscriptionDeleteFailed = "SubscriptionDeleteFailed" subscriptionCreateFailed = "SubscriptionCreateFailed" ) type reconciler struct { client client.Client dynamicClient dynamic.Interface recorder record.EventRecorder logger *zap.Logger } // Verify the struct implements reconcile.Reconciler. var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a function that returns a Trigger controller. func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Triggers. r := &reconciler{ recorder: mgr.GetRecorder(controllerAgentName), logger: logger, } c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: r, }) if err != nil { return nil, err } // Watch Triggers. if err = c.Watch(&source.Kind{Type: &v1alpha1.Trigger{}}, &handler.EnqueueRequestForObject{}); err != nil { return nil, err } // Watch all the resources that the Trigger reconciles. 
for _, t := range []runtime.Object{&corev1.Service{}, &istiov1alpha3.VirtualService{}, &v1alpha1.Subscription{}} { err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{OwnerType: &v1alpha1.Trigger{}, IsController: true}) if err != nil { return nil, err } } // Watch for Broker changes. E.g. if the Broker is deleted and recreated, we need to reconcile // the Trigger again. if err = c.Watch(&source.Kind{Type: &v1alpha1.Broker{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: &mapBrokerToTriggers{r: r}}); err != nil { return nil, err } // TODO reconcile after a change to the subscriber. I'm not sure how this is possible, but we should do it if we // can find a way. return c, nil } // mapBrokerToTriggers maps Broker changes to all the Triggers that correspond to that Broker. type mapBrokerToTriggers struct { r *reconciler } // Map implements handler.Mapper.Map. func (b *mapBrokerToTriggers) Map(o handler.MapObject) []reconcile.Request { ctx := context.Background() triggers := make([]reconcile.Request, 0) opts := &client.ListOptions{ Namespace: o.Meta.GetNamespace(), // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, } for { tl := &v1alpha1.TriggerList{} if err := b.r.client.List(ctx, opts, tl); err != nil { b.r.logger.Error("Error listing Triggers when Broker changed. Some Triggers may not be reconciled.", zap.Error(err), zap.Any("broker", o)) return triggers } for _, t := range tl.Items { if t.Spec.Broker == o.Meta.GetName() { triggers = append(triggers, reconcile.Request{ NamespacedName: types.NamespacedName{ Namespace: t.Namespace, Name: t.Name, }, }) } } if tl.Continue != "" { opts.Raw.Continue = tl.Continue } else { return triggers } } } // InjectClient implements controller runtime's inject.Client. func (r *reconciler) InjectClient(c client.Client) error { r.client = c return nil } // InjectConfig implements controller runtime's inject.Config. func (r *reconciler) InjectConfig(c *rest.Config) error { var err error r.dynamicClient, err = dynamic.NewForConfig(c) return err } // Reconcile compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the Trigger resource // with the current status of the resource. func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) { ctx := context.TODO() ctx = logging.WithLogger(ctx, r.logger.With(zap.Any("request", request))) trigger := &v1alpha1.Trigger{} err := r.client.Get(ctx, request.NamespacedName, trigger) if errors.IsNotFound(err) { logging.FromContext(ctx).Info("Could not find Trigger") return reconcile.Result{}, nil } if err != nil { logging.FromContext(ctx).Error("Could not get Trigger", zap.Error(err)) return reconcile.Result{}, err } // Reconcile this copy of the Trigger and then write back any status updates regardless of // whether the reconcile error out. 
reconcileErr := r.reconcile(ctx, trigger) if reconcileErr != nil { logging.FromContext(ctx).Error("Error reconciling Trigger", zap.Error(reconcileErr)) r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerReconcileFailed, "Trigger reconciliation failed: %v", reconcileErr) } else { logging.FromContext(ctx).Debug("Trigger reconciled") r.recorder.Event(trigger, corev1.EventTypeNormal, triggerReconciled, "Trigger reconciled") } if _, err = r.updateStatus(trigger); err != nil { logging.FromContext(ctx).Error("Failed to update Trigger status", zap.Error(err)) r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerUpdateStatusFailed, "Failed to update Trigger's status: %v", err) return reconcile.Result{}, err } // Requeue if the resource is not ready return reconcile.Result{}, reconcileErr } func (r *reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error { t.Status.InitializeConditions() // 1. Verify the Broker exists. // 2. Get the Broker's: // - Filter Channel // - Ingress Channel // - Filter Service // 3. Find the Subscriber's URI. // 4. Creates a Subscription from the Broker's Filter Channel to this Trigger via the Broker's // Filter Service with a specific path, and reply set to the Broker's Ingress Channel. if t.DeletionTimestamp != nil { // Everything is cleaned up by the garbage collector. return nil } b, err := r.getBroker(ctx, t) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker", zap.Error(err)) t.Status.MarkBrokerDoesNotExist() return err } t.Status.MarkBrokerExists() brokerTrigger, err := r.getBrokerTriggerChannel(ctx, b) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker's Trigger Channel", zap.Error(err)) return err } brokerIngress, err := r.getBrokerIngressChannel(ctx, b) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker's Ingress Channel", zap.Error(err)) return err } // Get Broker filter service. filterSvc, err := r.getBrokerFilterService(ctx, b) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker's filter Service", zap.Error(err)) return err } subscriberURI, err := resolve.SubscriberSpec(ctx, r.dynamicClient, t.Namespace, t.Spec.Subscriber) if err != nil { logging.FromContext(ctx).Error("Unable to get the Subscriber's URI", zap.Error(err)) return err } t.Status.SubscriberURI = subscriberURI _, err = r.subscribeToBrokerChannel(ctx, t, brokerTrigger, brokerIngress, filterSvc) if err != nil { logging.FromContext(ctx).Error("Unable to Subscribe", zap.Error(err)) t.Status.MarkNotSubscribed("notSubscribed", "%v", err) return err } t.Status.MarkSubscribed() return nil } // updateStatus may in fact update the trigger's finalizers in addition to the status. 
func (r *reconciler) updateStatus(trigger *v1alpha1.Trigger) (*v1alpha1.Trigger, error) { ctx := context.TODO() objectKey := client.ObjectKey{Namespace: trigger.Namespace, Name: trigger.Name} latestTrigger := &v1alpha1.Trigger{} if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil { return nil, err } triggerChanged := false if !equality.Semantic.DeepEqual(latestTrigger.Finalizers, trigger.Finalizers) { latestTrigger.SetFinalizers(trigger.ObjectMeta.Finalizers) if err := r.client.Update(ctx, latestTrigger); err != nil { return nil, err } triggerChanged = true } if equality.Semantic.DeepEqual(latestTrigger.Status, trigger.Status) { return latestTrigger, nil } if triggerChanged { // Refetch latestTrigger = &v1alpha1.Trigger{} if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil { return nil, err } } latestTrigger.Status = trigger.Status if err := r.client.Status().Update(ctx, latestTrigger); err != nil { return nil, err } return latestTrigger, nil } // getBroker returns the Broker for Trigger 't' if exists, otherwise it returns an error. func (r *reconciler) getBroker(ctx context.Context, t *v1alpha1.Trigger) (*v1alpha1.Broker, error) { b := &v1alpha1.Broker{} name := types.NamespacedName{ Namespace: t.Namespace, Name: t.Spec.Broker, } err := r.client.Get(ctx, name, b) return b, err } // getBrokerTriggerChannel return the Broker's Trigger Channel if it exists, otherwise it returns an // error. func (r *reconciler) getBrokerTriggerChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) { return r.getChannel(ctx, b, labels.SelectorFromSet(broker.TriggerChannelLabels(b))) } // getBrokerIngressChannel return the Broker's Ingress Channel if it exists, otherwise it returns an // error. func (r *reconciler) getBrokerIngressChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error)
// getChannel returns the Broker's channel if it exists, otherwise it returns an error. func (r *reconciler) getChannel(ctx context.Context, b *v1alpha1.Broker, ls labels.Selector) (*v1alpha1.Channel, error) { list := &v1alpha1.ChannelList{} opts := &runtimeclient.ListOptions{ Namespace: b.Namespace, LabelSelector: ls, // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, } err := r.client.List(ctx, opts, list) if err != nil { return nil, err } for _, c := range list.Items { if metav1.IsControlledBy(&c, b) { return &c, nil } } return nil, k8serrors.NewNotFound(schema.GroupResource{}, "") } // getService returns the K8s service for trigger 't' if exists, // otherwise it returns an error. func (r *reconciler) getBrokerFilterService(ctx context.Context, b *v1alpha1.Broker) (*corev1.Service, error) { list := &corev1.ServiceList{} opts := &runtimeclient.ListOptions{ Namespace: b.Namespace, LabelSelector: labels.SelectorFromSet(brokerresources.FilterLabels(b)), // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, } err := r.client.List(ctx, opts, list) if err != nil { return nil, err } for _, svc := range list.Items { if metav1.IsControlledBy(&svc, b) { return &svc, nil } } return nil, k8serrors.NewNotFound(schema.GroupResource{}, "") } // subscribeToBrokerChannel subscribes service 'svc' to the Broker's channels. func (r *reconciler) subscribeToBrokerChannel(ctx context.Context, t *v1alpha1.Trigger, brokerTrigger, brokerIngress *v1alpha1.Channel, svc *corev1.Service) (*v1alpha1.Subscription, error) { uri := &url.URL{ Scheme: "http", Host: names.ServiceHostName(svc.Name, svc.Namespace), Path: path.Generate(t), } expected := resources.NewSubscription(t, brokerTrigger, brokerIngress, uri) sub, err := r.getSubscription(ctx, t) // If the resource doesn't exist, we'll create it if k8serrors.IsNotFound(err) { sub = expected err = r.client.Create(ctx, sub) if err != nil { return nil, err } return sub, nil } else if err != nil { return nil, err } // Update Subscription if it has changed. Ignore the generation. expected.Spec.DeprecatedGeneration = sub.Spec.DeprecatedGeneration if !equality.Semantic.DeepDerivative(expected.Spec, sub.Spec) { // Given that spec.channel is immutable, we cannot just update the Subscription. We delete // it and re-create it instead. err = r.client.Delete(ctx, sub) if err != nil { logging.FromContext(ctx).Info("Cannot delete subscription", zap.Error(err)) r.recorder.Eventf(t, corev1.EventTypeWarning, subscriptionDeleteFailed, "Delete Trigger's subscription failed: %v", err) return nil, err } sub = expected err = r.client.Create(ctx, sub) if err != nil { logging.FromContext(ctx).Info("Cannot create subscription", zap.Error(err)) r.recorder.Eventf(t, corev1.EventTypeWarning, subscriptionCreateFailed, "Create Trigger's subscription failed: %v", err) return nil, err } } return sub, nil } // getSubscription returns the subscription of trigger 't' if exists, // otherwise it returns an error. func (r *reconciler) getSubscription(ctx context.Context, t *v1alpha1.Trigger) (*v1alpha1.Subscription, error) { list := &v1alpha1.SubscriptionList{} opts := &runtimeclient.ListOptions{ Namespace: t.Namespace, LabelSelector: labels.SelectorFromSet(resources.SubscriptionLabels(t)), // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. 
Raw: &metav1.ListOptions{}, } err := r.client.List(ctx, opts, list) if err != nil { return nil, err } for _, s := range list.Items { if metav1.IsControlledBy(&s, t) { return &s, nil } } return nil, k8serrors.NewNotFound(schema.GroupResource{}, "") }
{ return r.getChannel(ctx, b, labels.SelectorFromSet(broker.IngressChannelLabels(b))) }
identifier_body
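The masked body in the record above simply delegates to getChannel with the Broker's ingress-channel label selector. The sketch below illustrates, under simplified assumptions, the two-step filter getChannel applies: match a label selector, then keep only the object controlled by the Broker. Plain maps stand in for labels.Selector and a string Owner field stands in for metav1.IsControlledBy; none of these names come from the real API.

package main

import "fmt"

// Hypothetical stand-ins: the real code lists v1alpha1.Channels with a
// labels.Selector and keeps the one controlled by the Broker.
type Channel struct {
	Name   string
	Labels map[string]string
	Owner  string
}

// matches reports whether every key/value in the selector is present on the item.
func matches(selector, labels map[string]string) bool {
	for k, v := range selector {
		if labels[k] != v {
			return false
		}
	}
	return true
}

// getChannel mimics the reconciler helper: filter by label selector, then
// require that the candidate is owned by the expected Broker.
func getChannel(channels []Channel, selector map[string]string, brokerName string) (*Channel, error) {
	for i := range channels {
		c := &channels[i]
		if matches(selector, c.Labels) && c.Owner == brokerName {
			return c, nil
		}
	}
	return nil, fmt.Errorf("channel not found")
}

func main() {
	chans := []Channel{
		{Name: "ingress", Labels: map[string]string{"role": "ingress"}, Owner: "default"},
		{Name: "trigger", Labels: map[string]string{"role": "trigger"}, Owner: "default"},
	}
	c, err := getChannel(chans, map[string]string{"role": "ingress"}, "default")
	fmt.Println(c, err)
}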
trigger.go
/* Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package trigger import ( "context" "net/url" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/reconciler/names" "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker" brokerresources "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker/resources" "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/path" "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/resources" "github.com/knative/eventing/pkg/utils/resolve" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) const ( // controllerAgentName is the string used by this controller to identify // itself when creating events. controllerAgentName = "trigger-controller" // Name of the corev1.Events emitted from the reconciliation process. triggerReconciled = "TriggerReconciled" triggerReconcileFailed = "TriggerReconcileFailed" triggerUpdateStatusFailed = "TriggerUpdateStatusFailed" subscriptionDeleteFailed = "SubscriptionDeleteFailed" subscriptionCreateFailed = "SubscriptionCreateFailed" ) type reconciler struct { client client.Client dynamicClient dynamic.Interface recorder record.EventRecorder logger *zap.Logger } // Verify the struct implements reconcile.Reconciler. var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a function that returns a Trigger controller. func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Triggers. r := &reconciler{ recorder: mgr.GetRecorder(controllerAgentName), logger: logger, } c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: r, }) if err != nil
// Watch Triggers. if err = c.Watch(&source.Kind{Type: &v1alpha1.Trigger{}}, &handler.EnqueueRequestForObject{}); err != nil { return nil, err } // Watch all the resources that the Trigger reconciles. for _, t := range []runtime.Object{&corev1.Service{}, &istiov1alpha3.VirtualService{}, &v1alpha1.Subscription{}} { err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{OwnerType: &v1alpha1.Trigger{}, IsController: true}) if err != nil { return nil, err } } // Watch for Broker changes. E.g. if the Broker is deleted and recreated, we need to reconcile // the Trigger again. if err = c.Watch(&source.Kind{Type: &v1alpha1.Broker{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: &mapBrokerToTriggers{r: r}}); err != nil { return nil, err } // TODO reconcile after a change to the subscriber. I'm not sure how this is possible, but we should do it if we // can find a way. return c, nil } // mapBrokerToTriggers maps Broker changes to all the Triggers that correspond to that Broker. type mapBrokerToTriggers struct { r *reconciler } // Map implements handler.Mapper.Map. func (b *mapBrokerToTriggers) Map(o handler.MapObject) []reconcile.Request { ctx := context.Background() triggers := make([]reconcile.Request, 0) opts := &client.ListOptions{ Namespace: o.Meta.GetNamespace(), // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, } for { tl := &v1alpha1.TriggerList{} if err := b.r.client.List(ctx, opts, tl); err != nil { b.r.logger.Error("Error listing Triggers when Broker changed. Some Triggers may not be reconciled.", zap.Error(err), zap.Any("broker", o)) return triggers } for _, t := range tl.Items { if t.Spec.Broker == o.Meta.GetName() { triggers = append(triggers, reconcile.Request{ NamespacedName: types.NamespacedName{ Namespace: t.Namespace, Name: t.Name, }, }) } } if tl.Continue != "" { opts.Raw.Continue = tl.Continue } else { return triggers } } } // InjectClient implements controller runtime's inject.Client. func (r *reconciler) InjectClient(c client.Client) error { r.client = c return nil } // InjectConfig implements controller runtime's inject.Config. func (r *reconciler) InjectConfig(c *rest.Config) error { var err error r.dynamicClient, err = dynamic.NewForConfig(c) return err } // Reconcile compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the Trigger resource // with the current status of the resource. func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) { ctx := context.TODO() ctx = logging.WithLogger(ctx, r.logger.With(zap.Any("request", request))) trigger := &v1alpha1.Trigger{} err := r.client.Get(ctx, request.NamespacedName, trigger) if errors.IsNotFound(err) { logging.FromContext(ctx).Info("Could not find Trigger") return reconcile.Result{}, nil } if err != nil { logging.FromContext(ctx).Error("Could not get Trigger", zap.Error(err)) return reconcile.Result{}, err } // Reconcile this copy of the Trigger and then write back any status updates regardless of // whether the reconcile error out. 
reconcileErr := r.reconcile(ctx, trigger) if reconcileErr != nil { logging.FromContext(ctx).Error("Error reconciling Trigger", zap.Error(reconcileErr)) r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerReconcileFailed, "Trigger reconciliation failed: %v", reconcileErr) } else { logging.FromContext(ctx).Debug("Trigger reconciled") r.recorder.Event(trigger, corev1.EventTypeNormal, triggerReconciled, "Trigger reconciled") } if _, err = r.updateStatus(trigger); err != nil { logging.FromContext(ctx).Error("Failed to update Trigger status", zap.Error(err)) r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerUpdateStatusFailed, "Failed to update Trigger's status: %v", err) return reconcile.Result{}, err } // Requeue if the resource is not ready return reconcile.Result{}, reconcileErr } func (r *reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error { t.Status.InitializeConditions() // 1. Verify the Broker exists. // 2. Get the Broker's: // - Filter Channel // - Ingress Channel // - Filter Service // 3. Find the Subscriber's URI. // 4. Creates a Subscription from the Broker's Filter Channel to this Trigger via the Broker's // Filter Service with a specific path, and reply set to the Broker's Ingress Channel. if t.DeletionTimestamp != nil { // Everything is cleaned up by the garbage collector. return nil } b, err := r.getBroker(ctx, t) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker", zap.Error(err)) t.Status.MarkBrokerDoesNotExist() return err } t.Status.MarkBrokerExists() brokerTrigger, err := r.getBrokerTriggerChannel(ctx, b) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker's Trigger Channel", zap.Error(err)) return err } brokerIngress, err := r.getBrokerIngressChannel(ctx, b) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker's Ingress Channel", zap.Error(err)) return err } // Get Broker filter service. filterSvc, err := r.getBrokerFilterService(ctx, b) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker's filter Service", zap.Error(err)) return err } subscriberURI, err := resolve.SubscriberSpec(ctx, r.dynamicClient, t.Namespace, t.Spec.Subscriber) if err != nil { logging.FromContext(ctx).Error("Unable to get the Subscriber's URI", zap.Error(err)) return err } t.Status.SubscriberURI = subscriberURI _, err = r.subscribeToBrokerChannel(ctx, t, brokerTrigger, brokerIngress, filterSvc) if err != nil { logging.FromContext(ctx).Error("Unable to Subscribe", zap.Error(err)) t.Status.MarkNotSubscribed("notSubscribed", "%v", err) return err } t.Status.MarkSubscribed() return nil } // updateStatus may in fact update the trigger's finalizers in addition to the status. 
func (r *reconciler) updateStatus(trigger *v1alpha1.Trigger) (*v1alpha1.Trigger, error) { ctx := context.TODO() objectKey := client.ObjectKey{Namespace: trigger.Namespace, Name: trigger.Name} latestTrigger := &v1alpha1.Trigger{} if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil { return nil, err } triggerChanged := false if !equality.Semantic.DeepEqual(latestTrigger.Finalizers, trigger.Finalizers) { latestTrigger.SetFinalizers(trigger.ObjectMeta.Finalizers) if err := r.client.Update(ctx, latestTrigger); err != nil { return nil, err } triggerChanged = true } if equality.Semantic.DeepEqual(latestTrigger.Status, trigger.Status) { return latestTrigger, nil } if triggerChanged { // Refetch latestTrigger = &v1alpha1.Trigger{} if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil { return nil, err } } latestTrigger.Status = trigger.Status if err := r.client.Status().Update(ctx, latestTrigger); err != nil { return nil, err } return latestTrigger, nil } // getBroker returns the Broker for Trigger 't' if exists, otherwise it returns an error. func (r *reconciler) getBroker(ctx context.Context, t *v1alpha1.Trigger) (*v1alpha1.Broker, error) { b := &v1alpha1.Broker{} name := types.NamespacedName{ Namespace: t.Namespace, Name: t.Spec.Broker, } err := r.client.Get(ctx, name, b) return b, err } // getBrokerTriggerChannel return the Broker's Trigger Channel if it exists, otherwise it returns an // error. func (r *reconciler) getBrokerTriggerChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) { return r.getChannel(ctx, b, labels.SelectorFromSet(broker.TriggerChannelLabels(b))) } // getBrokerIngressChannel return the Broker's Ingress Channel if it exists, otherwise it returns an // error. func (r *reconciler) getBrokerIngressChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) { return r.getChannel(ctx, b, labels.SelectorFromSet(broker.IngressChannelLabels(b))) } // getChannel returns the Broker's channel if it exists, otherwise it returns an error. func (r *reconciler) getChannel(ctx context.Context, b *v1alpha1.Broker, ls labels.Selector) (*v1alpha1.Channel, error) { list := &v1alpha1.ChannelList{} opts := &runtimeclient.ListOptions{ Namespace: b.Namespace, LabelSelector: ls, // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, } err := r.client.List(ctx, opts, list) if err != nil { return nil, err } for _, c := range list.Items { if metav1.IsControlledBy(&c, b) { return &c, nil } } return nil, k8serrors.NewNotFound(schema.GroupResource{}, "") } // getService returns the K8s service for trigger 't' if exists, // otherwise it returns an error. func (r *reconciler) getBrokerFilterService(ctx context.Context, b *v1alpha1.Broker) (*corev1.Service, error) { list := &corev1.ServiceList{} opts := &runtimeclient.ListOptions{ Namespace: b.Namespace, LabelSelector: labels.SelectorFromSet(brokerresources.FilterLabels(b)), // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, } err := r.client.List(ctx, opts, list) if err != nil { return nil, err } for _, svc := range list.Items { if metav1.IsControlledBy(&svc, b) { return &svc, nil } } return nil, k8serrors.NewNotFound(schema.GroupResource{}, "") } // subscribeToBrokerChannel subscribes service 'svc' to the Broker's channels. 
func (r *reconciler) subscribeToBrokerChannel(ctx context.Context, t *v1alpha1.Trigger, brokerTrigger, brokerIngress *v1alpha1.Channel, svc *corev1.Service) (*v1alpha1.Subscription, error) { uri := &url.URL{ Scheme: "http", Host: names.ServiceHostName(svc.Name, svc.Namespace), Path: path.Generate(t), } expected := resources.NewSubscription(t, brokerTrigger, brokerIngress, uri) sub, err := r.getSubscription(ctx, t) // If the resource doesn't exist, we'll create it if k8serrors.IsNotFound(err) { sub = expected err = r.client.Create(ctx, sub) if err != nil { return nil, err } return sub, nil } else if err != nil { return nil, err } // Update Subscription if it has changed. Ignore the generation. expected.Spec.DeprecatedGeneration = sub.Spec.DeprecatedGeneration if !equality.Semantic.DeepDerivative(expected.Spec, sub.Spec) { // Given that spec.channel is immutable, we cannot just update the Subscription. We delete // it and re-create it instead. err = r.client.Delete(ctx, sub) if err != nil { logging.FromContext(ctx).Info("Cannot delete subscription", zap.Error(err)) r.recorder.Eventf(t, corev1.EventTypeWarning, subscriptionDeleteFailed, "Delete Trigger's subscription failed: %v", err) return nil, err } sub = expected err = r.client.Create(ctx, sub) if err != nil { logging.FromContext(ctx).Info("Cannot create subscription", zap.Error(err)) r.recorder.Eventf(t, corev1.EventTypeWarning, subscriptionCreateFailed, "Create Trigger's subscription failed: %v", err) return nil, err } } return sub, nil } // getSubscription returns the subscription of trigger 't' if exists, // otherwise it returns an error. func (r *reconciler) getSubscription(ctx context.Context, t *v1alpha1.Trigger) (*v1alpha1.Subscription, error) { list := &v1alpha1.SubscriptionList{} opts := &runtimeclient.ListOptions{ Namespace: t.Namespace, LabelSelector: labels.SelectorFromSet(resources.SubscriptionLabels(t)), // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, } err := r.client.List(ctx, opts, list) if err != nil { return nil, err } for _, s := range list.Items { if metav1.IsControlledBy(&s, t) { return &s, nil } } return nil, k8serrors.NewNotFound(schema.GroupResource{}, "") }
{ return nil, err }
conditional_block
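One detail worth calling out from subscribeToBrokerChannel in the records above: because spec.channel on a Subscription is immutable, a drifted spec is handled by deleting and recreating the object rather than updating it in place. Below is a hedged, dependency-free Go sketch of that decision; the SubscriptionSpec fields and the in-memory store are illustrative stand-ins for the real API objects and the Kubernetes client.

package main

import "fmt"

// Hypothetical stand-in for a Subscription whose Channel field is immutable.
type SubscriptionSpec struct {
	Channel    string // immutable in the real API
	Subscriber string
	Reply      string
}

type store map[string]SubscriptionSpec

// reconcileSubscription mirrors the create / delete-and-recreate choice:
// create when missing, recreate when the observed spec drifts from the
// desired one, otherwise leave it alone.
func reconcileSubscription(s store, name string, desired SubscriptionSpec) SubscriptionSpec {
	observed, ok := s[name]
	if !ok {
		s[name] = desired // create when missing
		return desired
	}
	if observed != desired {
		delete(s, name)   // spec.channel is immutable: delete ...
		s[name] = desired // ... and recreate with the desired spec
	}
	return s[name]
}

func main() {
	s := store{}
	fmt.Println(reconcileSubscription(s, "t1", SubscriptionSpec{Channel: "broker-trigger", Subscriber: "svc-a"}))
	// A later reconcile with a changed (immutable) channel triggers delete + recreate.
	fmt.Println(reconcileSubscription(s, "t1", SubscriptionSpec{Channel: "broker-trigger-v2", Subscriber: "svc-a"}))
}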
trigger.go
/* Copyright 2018 The Knative Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package trigger import ( "context" "net/url" "github.com/knative/eventing/pkg/apis/eventing/v1alpha1" "github.com/knative/eventing/pkg/logging" "github.com/knative/eventing/pkg/reconciler/names" "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker" brokerresources "github.com/knative/eventing/pkg/reconciler/v1alpha1/broker/resources" "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/path" "github.com/knative/eventing/pkg/reconciler/v1alpha1/trigger/resources" "github.com/knative/eventing/pkg/utils/resolve" istiov1alpha3 "github.com/knative/pkg/apis/istio/v1alpha3" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" ) const ( // controllerAgentName is the string used by this controller to identify // itself when creating events. controllerAgentName = "trigger-controller" // Name of the corev1.Events emitted from the reconciliation process. triggerReconciled = "TriggerReconciled" triggerReconcileFailed = "TriggerReconcileFailed" triggerUpdateStatusFailed = "TriggerUpdateStatusFailed" subscriptionDeleteFailed = "SubscriptionDeleteFailed" subscriptionCreateFailed = "SubscriptionCreateFailed" ) type reconciler struct { client client.Client dynamicClient dynamic.Interface recorder record.EventRecorder logger *zap.Logger } // Verify the struct implements reconcile.Reconciler. var _ reconcile.Reconciler = &reconciler{} // ProvideController returns a function that returns a Trigger controller. func ProvideController(mgr manager.Manager, logger *zap.Logger) (controller.Controller, error) { // Setup a new controller to Reconcile Triggers. r := &reconciler{ recorder: mgr.GetRecorder(controllerAgentName), logger: logger, } c, err := controller.New(controllerAgentName, mgr, controller.Options{ Reconciler: r, }) if err != nil { return nil, err } // Watch Triggers. if err = c.Watch(&source.Kind{Type: &v1alpha1.Trigger{}}, &handler.EnqueueRequestForObject{}); err != nil { return nil, err } // Watch all the resources that the Trigger reconciles. 
for _, t := range []runtime.Object{&corev1.Service{}, &istiov1alpha3.VirtualService{}, &v1alpha1.Subscription{}} { err = c.Watch(&source.Kind{Type: t}, &handler.EnqueueRequestForOwner{OwnerType: &v1alpha1.Trigger{}, IsController: true}) if err != nil { return nil, err } } // Watch for Broker changes. E.g. if the Broker is deleted and recreated, we need to reconcile // the Trigger again. if err = c.Watch(&source.Kind{Type: &v1alpha1.Broker{}}, &handler.EnqueueRequestsFromMapFunc{ToRequests: &mapBrokerToTriggers{r: r}}); err != nil { return nil, err } // TODO reconcile after a change to the subscriber. I'm not sure how this is possible, but we should do it if we // can find a way. return c, nil } // mapBrokerToTriggers maps Broker changes to all the Triggers that correspond to that Broker. type mapBrokerToTriggers struct { r *reconciler } // Map implements handler.Mapper.Map. func (b *mapBrokerToTriggers) Map(o handler.MapObject) []reconcile.Request { ctx := context.Background() triggers := make([]reconcile.Request, 0) opts := &client.ListOptions{ Namespace: o.Meta.GetNamespace(), // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, } for { tl := &v1alpha1.TriggerList{} if err := b.r.client.List(ctx, opts, tl); err != nil { b.r.logger.Error("Error listing Triggers when Broker changed. Some Triggers may not be reconciled.", zap.Error(err), zap.Any("broker", o)) return triggers } for _, t := range tl.Items { if t.Spec.Broker == o.Meta.GetName() { triggers = append(triggers, reconcile.Request{ NamespacedName: types.NamespacedName{ Namespace: t.Namespace, Name: t.Name, }, }) } } if tl.Continue != "" { opts.Raw.Continue = tl.Continue } else { return triggers } } } // InjectClient implements controller runtime's inject.Client. func (r *reconciler) InjectClient(c client.Client) error { r.client = c return nil } // InjectConfig implements controller runtime's inject.Config. func (r *reconciler) InjectConfig(c *rest.Config) error { var err error r.dynamicClient, err = dynamic.NewForConfig(c) return err } // Reconcile compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the Trigger resource // with the current status of the resource. func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, error) { ctx := context.TODO() ctx = logging.WithLogger(ctx, r.logger.With(zap.Any("request", request))) trigger := &v1alpha1.Trigger{} err := r.client.Get(ctx, request.NamespacedName, trigger) if errors.IsNotFound(err) { logging.FromContext(ctx).Info("Could not find Trigger") return reconcile.Result{}, nil } if err != nil { logging.FromContext(ctx).Error("Could not get Trigger", zap.Error(err)) return reconcile.Result{}, err } // Reconcile this copy of the Trigger and then write back any status updates regardless of // whether the reconcile error out. reconcileErr := r.reconcile(ctx, trigger)
logging.FromContext(ctx).Error("Error reconciling Trigger", zap.Error(reconcileErr)) r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerReconcileFailed, "Trigger reconciliation failed: %v", reconcileErr) } else { logging.FromContext(ctx).Debug("Trigger reconciled") r.recorder.Event(trigger, corev1.EventTypeNormal, triggerReconciled, "Trigger reconciled") } if _, err = r.updateStatus(trigger); err != nil { logging.FromContext(ctx).Error("Failed to update Trigger status", zap.Error(err)) r.recorder.Eventf(trigger, corev1.EventTypeWarning, triggerUpdateStatusFailed, "Failed to update Trigger's status: %v", err) return reconcile.Result{}, err } // Requeue if the resource is not ready return reconcile.Result{}, reconcileErr } func (r *reconciler) reconcile(ctx context.Context, t *v1alpha1.Trigger) error { t.Status.InitializeConditions() // 1. Verify the Broker exists. // 2. Get the Broker's: // - Filter Channel // - Ingress Channel // - Filter Service // 3. Find the Subscriber's URI. // 4. Creates a Subscription from the Broker's Filter Channel to this Trigger via the Broker's // Filter Service with a specific path, and reply set to the Broker's Ingress Channel. if t.DeletionTimestamp != nil { // Everything is cleaned up by the garbage collector. return nil } b, err := r.getBroker(ctx, t) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker", zap.Error(err)) t.Status.MarkBrokerDoesNotExist() return err } t.Status.MarkBrokerExists() brokerTrigger, err := r.getBrokerTriggerChannel(ctx, b) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker's Trigger Channel", zap.Error(err)) return err } brokerIngress, err := r.getBrokerIngressChannel(ctx, b) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker's Ingress Channel", zap.Error(err)) return err } // Get Broker filter service. filterSvc, err := r.getBrokerFilterService(ctx, b) if err != nil { logging.FromContext(ctx).Error("Unable to get the Broker's filter Service", zap.Error(err)) return err } subscriberURI, err := resolve.SubscriberSpec(ctx, r.dynamicClient, t.Namespace, t.Spec.Subscriber) if err != nil { logging.FromContext(ctx).Error("Unable to get the Subscriber's URI", zap.Error(err)) return err } t.Status.SubscriberURI = subscriberURI _, err = r.subscribeToBrokerChannel(ctx, t, brokerTrigger, brokerIngress, filterSvc) if err != nil { logging.FromContext(ctx).Error("Unable to Subscribe", zap.Error(err)) t.Status.MarkNotSubscribed("notSubscribed", "%v", err) return err } t.Status.MarkSubscribed() return nil } // updateStatus may in fact update the trigger's finalizers in addition to the status. 
func (r *reconciler) updateStatus(trigger *v1alpha1.Trigger) (*v1alpha1.Trigger, error) { ctx := context.TODO() objectKey := client.ObjectKey{Namespace: trigger.Namespace, Name: trigger.Name} latestTrigger := &v1alpha1.Trigger{} if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil { return nil, err } triggerChanged := false if !equality.Semantic.DeepEqual(latestTrigger.Finalizers, trigger.Finalizers) { latestTrigger.SetFinalizers(trigger.ObjectMeta.Finalizers) if err := r.client.Update(ctx, latestTrigger); err != nil { return nil, err } triggerChanged = true } if equality.Semantic.DeepEqual(latestTrigger.Status, trigger.Status) { return latestTrigger, nil } if triggerChanged { // Refetch latestTrigger = &v1alpha1.Trigger{} if err := r.client.Get(ctx, objectKey, latestTrigger); err != nil { return nil, err } } latestTrigger.Status = trigger.Status if err := r.client.Status().Update(ctx, latestTrigger); err != nil { return nil, err } return latestTrigger, nil } // getBroker returns the Broker for Trigger 't' if exists, otherwise it returns an error. func (r *reconciler) getBroker(ctx context.Context, t *v1alpha1.Trigger) (*v1alpha1.Broker, error) { b := &v1alpha1.Broker{} name := types.NamespacedName{ Namespace: t.Namespace, Name: t.Spec.Broker, } err := r.client.Get(ctx, name, b) return b, err } // getBrokerTriggerChannel return the Broker's Trigger Channel if it exists, otherwise it returns an // error. func (r *reconciler) getBrokerTriggerChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) { return r.getChannel(ctx, b, labels.SelectorFromSet(broker.TriggerChannelLabels(b))) } // getBrokerIngressChannel return the Broker's Ingress Channel if it exists, otherwise it returns an // error. func (r *reconciler) getBrokerIngressChannel(ctx context.Context, b *v1alpha1.Broker) (*v1alpha1.Channel, error) { return r.getChannel(ctx, b, labels.SelectorFromSet(broker.IngressChannelLabels(b))) } // getChannel returns the Broker's channel if it exists, otherwise it returns an error. func (r *reconciler) getChannel(ctx context.Context, b *v1alpha1.Broker, ls labels.Selector) (*v1alpha1.Channel, error) { list := &v1alpha1.ChannelList{} opts := &runtimeclient.ListOptions{ Namespace: b.Namespace, LabelSelector: ls, // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, } err := r.client.List(ctx, opts, list) if err != nil { return nil, err } for _, c := range list.Items { if metav1.IsControlledBy(&c, b) { return &c, nil } } return nil, k8serrors.NewNotFound(schema.GroupResource{}, "") } // getService returns the K8s service for trigger 't' if exists, // otherwise it returns an error. func (r *reconciler) getBrokerFilterService(ctx context.Context, b *v1alpha1.Broker) (*corev1.Service, error) { list := &corev1.ServiceList{} opts := &runtimeclient.ListOptions{ Namespace: b.Namespace, LabelSelector: labels.SelectorFromSet(brokerresources.FilterLabels(b)), // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, } err := r.client.List(ctx, opts, list) if err != nil { return nil, err } for _, svc := range list.Items { if metav1.IsControlledBy(&svc, b) { return &svc, nil } } return nil, k8serrors.NewNotFound(schema.GroupResource{}, "") } // subscribeToBrokerChannel subscribes service 'svc' to the Broker's channels. 
func (r *reconciler) subscribeToBrokerChannel(ctx context.Context, t *v1alpha1.Trigger, brokerTrigger, brokerIngress *v1alpha1.Channel, svc *corev1.Service) (*v1alpha1.Subscription, error) { uri := &url.URL{ Scheme: "http", Host: names.ServiceHostName(svc.Name, svc.Namespace), Path: path.Generate(t), } expected := resources.NewSubscription(t, brokerTrigger, brokerIngress, uri) sub, err := r.getSubscription(ctx, t) // If the resource doesn't exist, we'll create it if k8serrors.IsNotFound(err) { sub = expected err = r.client.Create(ctx, sub) if err != nil { return nil, err } return sub, nil } else if err != nil { return nil, err } // Update Subscription if it has changed. Ignore the generation. expected.Spec.DeprecatedGeneration = sub.Spec.DeprecatedGeneration if !equality.Semantic.DeepDerivative(expected.Spec, sub.Spec) { // Given that spec.channel is immutable, we cannot just update the Subscription. We delete // it and re-create it instead. err = r.client.Delete(ctx, sub) if err != nil { logging.FromContext(ctx).Info("Cannot delete subscription", zap.Error(err)) r.recorder.Eventf(t, corev1.EventTypeWarning, subscriptionDeleteFailed, "Delete Trigger's subscription failed: %v", err) return nil, err } sub = expected err = r.client.Create(ctx, sub) if err != nil { logging.FromContext(ctx).Info("Cannot create subscription", zap.Error(err)) r.recorder.Eventf(t, corev1.EventTypeWarning, subscriptionCreateFailed, "Create Trigger's subscription failed: %v", err) return nil, err } } return sub, nil } // getSubscription returns the subscription of trigger 't' if exists, // otherwise it returns an error. func (r *reconciler) getSubscription(ctx context.Context, t *v1alpha1.Trigger) (*v1alpha1.Subscription, error) { list := &v1alpha1.SubscriptionList{} opts := &runtimeclient.ListOptions{ Namespace: t.Namespace, LabelSelector: labels.SelectorFromSet(resources.SubscriptionLabels(t)), // Set Raw because if we need to get more than one page, then we will put the continue token // into opts.Raw.Continue. Raw: &metav1.ListOptions{}, } err := r.client.List(ctx, opts, list) if err != nil { return nil, err } for _, s := range list.Items { if metav1.IsControlledBy(&s, t) { return &s, nil } } return nil, k8serrors.NewNotFound(schema.GroupResource{}, "") }
if reconcileErr != nil {
random_line_split
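The masked line above opens the error branch at the tail of Reconcile. The control flow there is easy to miss: the reconcile error is logged and recorded but not returned immediately, so the status update still runs, and returning the non-nil error at the end is what makes controller-runtime requeue the Trigger. A small stand-alone Go sketch of that flow follows; the result type and callback signatures are illustrative, not the controller-runtime API.

package main

import (
	"errors"
	"fmt"
)

type result struct{ Requeue bool }

// reconcileOnce mirrors the tail of Reconcile: record the outcome of the
// reconcile, always attempt the status update, and only then surface the
// reconcile error so the work item is requeued until the resource is ready.
func reconcileOnce(reconcile func() error, updateStatus func() error) (result, error) {
	reconcileErr := reconcile()
	if reconcileErr != nil {
		fmt.Println("warning: reconcile failed:", reconcileErr)
	} else {
		fmt.Println("reconciled")
	}
	if err := updateStatus(); err != nil {
		return result{}, err // a failed status update also requeues
	}
	return result{}, reconcileErr // non-nil => requeue until ready
}

func main() {
	_, err := reconcileOnce(
		func() error { return errors.New("broker not found") },
		func() error { return nil },
	)
	fmt.Println("requeue:", err != nil)
}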
mod.rs
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! Parsing and serialization of Internet Control Message Protocol (ICMP) packets. #[macro_use] mod macros; mod common; mod icmpv4; mod icmpv6; pub mod mld; pub mod ndp; #[cfg(test)] mod testdata; pub use self::common::*; pub use self::icmpv4::*; pub use self::icmpv6::*; use core::cmp; use core::convert::{TryFrom, TryInto}; use core::fmt::Debug; use core::marker::PhantomData; use core::mem; use core::ops::Deref; use byteorder::{ByteOrder, NetworkEndian}; use internet_checksum::Checksum; use net_types::ip::{Ip, IpAddress, Ipv4, Ipv6}; use never::Never; use packet::records::options::{Options, OptionsImpl}; use packet::{ AsFragmentedByteSlice, BufferView, FragmentedByteSlice, FromRaw, PacketBuilder, PacketConstraints, ParsablePacket, ParseMetadata, SerializeBuffer, }; use zerocopy::{AsBytes, ByteSlice, FromBytes, LayoutVerified, Unaligned}; use crate::error::{ParseError, ParseResult}; use crate::ip::IpProto; use crate::ipv4::{self, Ipv4PacketRaw}; use crate::ipv6::Ipv6PacketRaw; use crate::U16; #[derive(Copy, Clone, Default, Debug, FromBytes, AsBytes, Unaligned)] #[repr(C)] struct HeaderPrefix { msg_type: u8, code: u8, checksum: [u8; 2], /* NOTE: The "Rest of Header" field is stored in message types rather than * in the HeaderPrefix. This helps consolidate how callers access data about the * packet, and is consistent with ICMPv6, which treats the field as part of * messages rather than the header. */ } impl HeaderPrefix { fn set_msg_type<T: Into<u8>>(&mut self, msg_type: T) { self.msg_type = msg_type.into(); } } /// Peek at an ICMP header to see what message type is present. /// /// Since `IcmpPacket` is statically typed with the message type expected, this /// type must be known ahead of time before calling `parse`. If multiple /// different types are valid in a given parsing context, and so the caller /// cannot know ahead of time which type to use, `peek_message_type` can be used /// to peek at the header first to figure out which static type should be used /// in a subsequent call to `parse`. /// /// Note that `peek_message_type` only inspects certain fields in the header, /// and so `peek_message_type` succeeding does not guarantee that a subsequent /// call to `parse` will also succeed. pub fn peek_message_type<MessageType: TryFrom<u8>>(bytes: &[u8]) -> ParseResult<MessageType> { let (hdr_pfx, _) = LayoutVerified::<_, HeaderPrefix>::new_unaligned_from_prefix(bytes) .ok_or_else(debug_err_fn!(ParseError::Format, "too few bytes for header"))?; MessageType::try_from(hdr_pfx.msg_type).map_err(|_| { debug_err!(ParseError::NotSupported, "unrecognized message type: {:x}", hdr_pfx.msg_type,) }) } /// An extension trait adding ICMP-related associated types to `Ipv4` and `Ipv6`. /// /// This trait is kept separate from `IcmpIpExt` to not require a type parameter /// that implements `ByteSlice`. pub trait IcmpIpTypes: Ip { /// The type of an ICMP parameter problem code. /// /// For `Ipv4`, this is `Icmpv4ParameterProblemCode`, and for `Ipv6` this /// is `Icmpv6ParameterProblemCode`. type ParameterProblemCode: PartialEq + Send + Sync + Debug; /// The type of an ICMP parameter problem pointer. /// /// For `Ipv4`, this is `u8`, and for `Ipv6` this is `u32`. type ParameterProblemPointer: PartialEq + Send + Sync + Debug; /// The type of an ICMP parameter header length. /// /// For `Ipv4`, this is `usize`, and for `Ipv6` this is `()`. 
type HeaderLen: PartialEq + Send + Sync + Debug; } // A default implementation for any I: Ip. This is to convince the Rust compiler // that, given an I: Ip, it's guaranteed to implement IcmpIpTypes. We humans know // that Ipv4 and Ipv6 are the only types implementing Ip and so, since we // implement IcmpIpTypes for both of these types, this is fine. The compiler isn't // so smart. This implementation should never actually be used. impl<I: Ip> IcmpIpTypes for I { default type ParameterProblemCode = Never; default type ParameterProblemPointer = Never; default type HeaderLen = Never; } impl IcmpIpTypes for Ipv4 { type ParameterProblemCode = Icmpv4ParameterProblemCode; type ParameterProblemPointer = u8; type HeaderLen = usize; } impl IcmpIpTypes for Ipv6 { type ParameterProblemCode = Icmpv6ParameterProblemCode; type ParameterProblemPointer = u32; type HeaderLen = (); } /// An extension trait adding ICMP-related functionality to `Ipv4` and `Ipv6`. pub trait IcmpIpExt: Ip { /// The type of ICMP messages. /// /// For `Ipv4`, this is `Icmpv4MessageType`, and for `Ipv6`, this is /// `Icmpv6MessageType`. type IcmpMessageType: IcmpMessageType; /// The identifier for this ICMP version. /// /// This value will be found in an IPv4 packet's Protocol field (for ICMPv4 /// packets) or an IPv6 fixed header's or last extension header's Next /// Heeader field (for ICMPv6 packets). const ICMP_IP_PROTO: IpProto; /// Compute the length of the header of the packet prefix stored in `bytes`. /// /// Given the prefix of a packet stored in `bytes`, compute the length of /// the header of that packet, or `bytes.len()` if `bytes` does not contain /// the entire header. If the version is IPv6, the returned length should /// include all extension headers. fn header_len(bytes: &[u8]) -> usize; } impl IcmpIpExt for Ipv4 { type IcmpMessageType = Icmpv4MessageType; const ICMP_IP_PROTO: IpProto = IpProto::Icmp; fn header_len(bytes: &[u8]) -> usize { if bytes.len() < ipv4::IPV4_MIN_HDR_LEN { return bytes.len(); } let (header_prefix, _) = LayoutVerified::<_, ipv4::HeaderPrefix>::new_unaligned_from_prefix(bytes).unwrap(); cmp::min(header_prefix.ihl() as usize * 4, bytes.len()) } } impl IcmpIpExt for Ipv6 { type IcmpMessageType = Icmpv6MessageType; const ICMP_IP_PROTO: IpProto = IpProto::Icmpv6; // TODO: Re-implement this in terms of partial parsing, and then get rid of // the `header_len` method. fn header_len(_bytes: &[u8]) -> usize { // NOTE: We panic here rather than doing log_unimplemented! because // there's no sane default value for this function. If it's called, it // doesn't make sense for the program to continue executing; if we did, // it would cause bugs in the caller. unimplemented!() } } /// An ICMP or ICMPv6 packet /// /// 'IcmpPacketType' is implemented by `Icmpv4Packet` and `Icmpv6Packet` pub trait IcmpPacketType<B: ByteSlice, I: Ip>: Sized + ParsablePacket<B, IcmpParseArgs<I::Addr>, Error = ParseError> { } impl<B: ByteSlice> IcmpPacketType<B, Ipv4> for Icmpv4Packet<B> {} impl<B: ByteSlice> IcmpPacketType<B, Ipv6> for Icmpv6Packet<B> {} // TODO(joshlf): Once we have generic associated types, refactor this so that we // don't have to bind B ahead of time. Removing that requirement would make some // APIs (in particular, IcmpPacketBuilder) simpler by removing the B parameter // from them as well. /// `MessageBody` represents the parsed body of the ICMP packet. /// /// - For messages that expect no body, the `MessageBody` is of type `()`. /// - For NDP messages, the `MessageBody` is of the type `ndp::Options`. 
/// - For all other messages, the `MessageBody` will be of the type /// `OriginalPacket`, which is a thin wrapper around `B`. pub trait MessageBody<B>: Sized { /// Whether or not a message body is expected in an ICMP packet. const EXPECTS_BODY: bool = true; /// Parse the MessageBody from the provided bytes. fn parse(bytes: B) -> ParseResult<Self> where B: ByteSlice; /// The length of the underlying buffer. fn len(&self) -> usize where B: ByteSlice; /// Is the body empty? /// /// `b.is_empty()` is equivalent to `b.len() == 0`. fn is_empty(&self) -> bool where B: ByteSlice, { self.len() == 0 } /// Return the underlying bytes. fn bytes(&self) -> &[u8] where B: Deref<Target = [u8]>; } impl<B> MessageBody<B> for () { const EXPECTS_BODY: bool = false; fn parse(bytes: B) -> ParseResult<()> where B: ByteSlice, { if !bytes.is_empty() { return debug_err!(Err(ParseError::Format), "unexpected message body"); } Ok(()) } fn len(&self) -> usize { 0 } fn bytes(&self) -> &[u8] { &[] } } /// A thin wrapper around B which implements `MessageBody`. #[derive(Debug)] pub struct OriginalPacket<B>(B); impl<B: ByteSlice + Deref<Target = [u8]>> OriginalPacket<B> { /// Returns the the body of the original packet. pub fn body<I: IcmpIpExt>(&self) -> &[u8] { // TODO(joshlf): Can these debug_asserts be triggered by external input? let header_len = I::header_len(&self.0); debug_assert!(header_len <= self.0.len()); debug_assert!(I::VERSION.is_v6() || self.0.len() - header_len == 8); &self.0[header_len..] } } impl<B> MessageBody<B> for OriginalPacket<B> { fn parse(bytes: B) -> ParseResult<OriginalPacket<B>> { Ok(OriginalPacket(bytes)) } fn len(&self) -> usize where B: ByteSlice, { self.0.len() } fn bytes(&self) -> &[u8] where B: Deref<Target = [u8]>, { &self.0 } } impl<B, O: for<'a> OptionsImpl<'a>> MessageBody<B> for Options<B, O> { fn parse(bytes: B) -> ParseResult<Options<B, O>> where B: ByteSlice, { Self::parse(bytes).map_err(|_e| debug_err!(ParseError::Format, "unable to parse options")) } fn len(&self) -> usize where B: ByteSlice, { self.bytes().len() } fn bytes(&self) -> &[u8] where B: Deref<Target = [u8]>, { self.bytes() } } /// An ICMP message. pub trait IcmpMessage<I: IcmpIpExt, B: ByteSlice>: Sized + Copy + FromBytes + AsBytes + Unaligned { /// The type of codes used with this message. /// /// The ICMP header includes an 8-bit "code" field. For a given message /// type, different values of this field carry different meanings. Not all /// code values are used - some may be invalid. This type represents a /// parsed code. For example, for TODO, it is the TODO type. type Code: Into<u8> + Copy + Debug; /// The type of the body used with this message. type Body: MessageBody<B>; /// The type corresponding to this message type. /// /// The value of the "type" field in the ICMP header corresponding to /// messages of this type. const TYPE: I::IcmpMessageType; /// Parse a `Code` from an 8-bit number. /// /// Parse a `Code` from the 8-bit "code" field in the ICMP header. Not all /// values for this field are valid. If an invalid value is passed, /// `code_from_u8` returns `None`. fn code_from_u8(code: u8) -> Option<Self::Code>; } /// The type of an ICMP message. /// /// `IcmpMessageType` is implemented by `Icmpv4MessageType` and /// `Icmpv6MessageType`. pub trait IcmpMessageType: TryFrom<u8> + Into<u8> + Copy { /// Is this an error message? /// /// For ICMP, this is true for the Destination Unreachable, Redirect, Source /// Quench, Time Exceeded, and Parameter Problem message types. 
For ICMPv6, /// this is true for the Destination Unreachable, Packet Too Big, Time /// Exceeded, and Parameter Problem message types. fn is_err(self) -> bool; } #[derive(Copy, Clone, Debug, FromBytes, Unaligned)] #[repr(C)] struct Header<M> { prefix: HeaderPrefix, message: M, } // So long as `M: Unaligned`, there will be no padding between the // `HeaderPrefix` and `M`. Since `HeaderPrefix` itself is `Unaligned`, the // alignment of `Header<M>` will be 1, meaning that no post-padding will need to // be added to get to a multiple of the alignment. Since there is no padding, // then so long as `M: AsBytes`, all of `Header<M>: AsBytes`. unsafe impl<M: AsBytes + Unaligned> AsBytes for Header<M> { // We're doing a bad thing, but it's necessary until derive(AsBytes) // supports type parameters. fn only_derive_is_allowed_to_implement_this_trait() {} } /// A partially parsed and not yet validated ICMP packet. /// /// An `IcmpPacketRaw` provides minimal parsing of an ICMP packet. Namely, it /// only requires that the header and message (in ICMPv6, these are both /// considered part of the header) are present, and that the header has the /// expected message type. The body may be missing (or an unexpected body may be /// present). Other than the message type, no header, message, or body field /// values will be validated. /// /// [`IcmpPacket`] provides a [`FromRaw`] implementation that can be used to /// validate an [`IcmpPacketRaw`]. #[derive(Debug)] pub struct IcmpPacketRaw<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> { header: LayoutVerified<B, Header<M>>, message_body: B, _marker: PhantomData<I>, } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacketRaw<I, B, M> { /// Get the ICMP message. pub fn message(&self) -> &M { &self.header.message } } /// An ICMP packet. /// /// An `IcmpPacket` shares its underlying memory with the byte slice it was /// parsed from, meaning that no copying or extra allocation is necessary. #[derive(Debug)] pub struct IcmpPacket<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> { header: LayoutVerified<B, Header<M>>, message_body: M::Body, _marker: PhantomData<I>, } /// Arguments required to parse an ICMP packet. pub struct IcmpParseArgs<A: IpAddress> { src_ip: A, dst_ip: A, } impl<A: IpAddress> IcmpParseArgs<A> { /// Construct a new `IcmpParseArgs`. 
pub fn new<S: Into<A>, D: Into<A>>(src_ip: S, dst_ip: D) -> IcmpParseArgs<A> { IcmpParseArgs { src_ip: src_ip.into(), dst_ip: dst_ip.into() } } } impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, ()> for IcmpPacketRaw<I, B, M> { type Error = ParseError; fn parse_metadata(&self) -> ParseMetadata { ParseMetadata::from_packet(self.header.bytes().len(), self.message_body.len(), 0) } fn parse<BV: BufferView<B>>(mut buffer: BV, _args: ()) -> ParseResult<Self> { let header = buffer .take_obj_front::<Header<M>>() .ok_or_else(debug_err_fn!(ParseError::Format, "too few bytes for header"))?; let message_body = buffer.into_rest(); if header.prefix.msg_type != M::TYPE.into() { return debug_err!(Err(ParseError::NotExpected), "unexpected message type"); } Ok(IcmpPacketRaw { header, message_body, _marker: PhantomData }) } } impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> FromRaw<IcmpPacketRaw<I, B, M>, IcmpParseArgs<I::Addr>> for IcmpPacket<I, B, M> { type Error = ParseError; fn try_from_raw_with( raw: IcmpPacketRaw<I, B, M>, args: IcmpParseArgs<I::Addr>, ) -> ParseResult<Self> { let IcmpPacketRaw { header, message_body, _marker } = raw; if !M::Body::EXPECTS_BODY && !message_body.is_empty() { return debug_err!(Err(ParseError::Format), "unexpected message body"); } let _: M::Code = M::code_from_u8(header.prefix.code).ok_or_else(debug_err_fn!( ParseError::Format, "unrecognized code: {}", header.prefix.code ))?; let checksum = Self::compute_checksum(&header, &message_body, args.src_ip, args.dst_ip) .ok_or_else(debug_err_fn!(ParseError::Format, "packet too large"))?; if checksum != [0, 0] { return debug_err!(Err(ParseError::Checksum), "invalid checksum"); } let message_body = M::Body::parse(message_body)?; Ok(IcmpPacket { header, message_body, _marker }) } } impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, IcmpParseArgs<I::Addr>> for IcmpPacket<I, B, M> { type Error = ParseError; fn parse_metadata(&self) -> ParseMetadata { ParseMetadata::from_packet(self.header.bytes().len(), self.message_body.len(), 0) } fn parse<BV: BufferView<B>>(buffer: BV, args: IcmpParseArgs<I::Addr>) -> ParseResult<Self> { IcmpPacketRaw::parse(buffer, ()).and_then(|p| IcmpPacket::try_from_raw_with(p, args)) } } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacket<I, B, M> { /// Get the ICMP message. pub fn message(&self) -> &M { &self.header.message } /// Get the ICMP body. pub fn body(&self) -> &M::Body { &self.message_body } /// Get the ICMP message code. /// /// The code provides extra details about the message. Each message type has /// its own set of codes that are allowed. pub fn code(&self) -> M::Code { // infallible since it was validated in parse M::code_from_u8(self.header.prefix.code).unwrap() } /// Construct a builder with the same contents as this packet. pub fn builder(&self, src_ip: I::Addr, dst_ip: I::Addr) -> IcmpPacketBuilder<I, B, M> { IcmpPacketBuilder { src_ip, dst_ip, code: self.code(), msg: *self.message() } } } fn compute_checksum_fragmented< I: IcmpIpExt, B: ByteSlice, BB: packet::Fragment, M: IcmpMessage<I, B>, >( header: &Header<M>, message_body: &FragmentedByteSlice<'_, BB>, src_ip: I::Addr, dst_ip: I::Addr, ) -> Option<[u8; 2]> { let mut c = Checksum::new(); if I::VERSION.is_v6()
c.add_bytes(&[header.prefix.msg_type, header.prefix.code]); c.add_bytes(&header.prefix.checksum); c.add_bytes(header.message.as_bytes()); for p in message_body.iter_fragments() { c.add_bytes(p); } Some(c.checksum()) } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacket<I, B, M> { /// Compute the checksum, including the checksum field itself. /// /// `compute_checksum` returns `None` if the version is IPv6 and the total /// ICMP packet length overflows a u32. fn compute_checksum( header: &Header<M>, message_body: &[u8], src_ip: I::Addr, dst_ip: I::Addr, ) -> Option<[u8; 2]> { let mut body = [message_body]; compute_checksum_fragmented(header, &body.as_fragmented_byte_slice(), src_ip, dst_ip) } } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B, Body = OriginalPacket<B>>> IcmpPacket<I, B, M> { /// Get the body of the packet that caused this ICMP message. /// /// This ICMP message contains some of the bytes of the packet that caused /// this message to be emitted. `original_packet_body` returns as much of /// the body of that packet as is contained in this message. For IPv4, this /// is guaranteed to be 8 bytes. For IPv6, there are no guarantees about the /// length. pub fn original_packet_body(&self) -> &[u8] { self.message_body.body::<I>() } /// Returns the original packt that caused this ICMP message. /// /// This ICMP message contains some of the bytes of the packet that caused /// this message to be emitted. `original_packet` returns as much of the /// body of that packet as is contained in this message. For IPv4, this is /// guaranteed to be 8 bytes. For IPv6, there are no guarantees about the /// length. pub fn original_packet(&self) -> &OriginalPacket<B> { &self.message_body } } impl<B: ByteSlice, M: IcmpMessage<Ipv4, B, Body = OriginalPacket<B>>> IcmpPacket<Ipv4, B, M> { /// Attempt to partially parse the original packet as an IPv4 packet. /// /// `f` will be invoked on the result of calling `Ipv4PacketRaw::parse` on /// the original packet. pub fn with_original_packet<O, F: FnOnce(Result<Ipv4PacketRaw<&[u8]>, &[u8]>) -> O>( &self, f: F, ) -> O { let mut bv = self.message_body.0.deref(); f(Ipv4PacketRaw::parse(&mut bv, ()).map_err(|_| self.message_body.0.deref())) } } impl<B: ByteSlice, M: IcmpMessage<Ipv6, B, Body = OriginalPacket<B>>> IcmpPacket<Ipv6, B, M> { /// Attempt to partially parse the original packet as an IPv6 packet. /// /// `f` will be invoked on the result of calling `Ipv6PacketRaw::parse` on /// the original packet. pub fn with_original_packet<O, F: FnOnce(Result<Ipv6PacketRaw<&[u8]>, &[u8]>) -> O>( &self, f: F, ) -> O { let mut bv = self.message_body.0.deref(); f(Ipv6PacketRaw::parse(&mut bv, ()).map_err(|_| self.message_body.0.deref())) } } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B, Body = ndp::Options<B>>> IcmpPacket<I, B, M> { /// Get the pared list of NDP options from the ICMP message. pub fn ndp_options(&self) -> &ndp::Options<B> { &self.message_body } } /// A builder for ICMP packets. #[derive(Debug)] pub struct IcmpPacketBuilder<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> { src_ip: I::Addr, dst_ip: I::Addr, code: M::Code, msg: M, } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacketBuilder<I, B, M> { /// Construct a new `IcmpPacketBuilder`. 
pub fn new<S: Into<I::Addr>, D: Into<I::Addr>>( src_ip: S, dst_ip: D, code: M::Code, msg: M, ) -> IcmpPacketBuilder<I, B, M> { IcmpPacketBuilder { src_ip: src_ip.into(), dst_ip: dst_ip.into(), code, msg } } } // TODO(joshlf): Figure out a way to split body and non-body message types by // trait and implement PacketBuilder for some and InnerPacketBuilder for others. impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> PacketBuilder for IcmpPacketBuilder<I, B, M> { fn constraints(&self) -> PacketConstraints { // The maximum body length constraint to make sure the body length // doesn't overflow the 32-bit length field in the pseudo-header used // for calculating the checksum. // // Note that, for messages that don't take bodies, it's important that // we don't just set this to 0. Trying to serialize a body in a message // type which doesn't take bodies is a programmer error, so we should // panic in that case. Setting the max_body_len to 0 would surface the // issue as an MTU error, which would hide the underlying problem. // Instead, we assert in serialize. Eventually, we will hopefully figure // out a way to implement InnerPacketBuilder (rather than PacketBuilder) // for these message types, and this won't be an issue anymore. PacketConstraints::new(mem::size_of::<Header<M>>(), 0, 0, core::u32::MAX as usize) } fn serialize(&self, buffer: &mut SerializeBuffer<'_>) { use packet::BufferViewMut; let (mut prefix, message_body, _) = buffer.parts(); // implements BufferViewMut, giving us take_obj_xxx_zero methods let mut prefix = &mut prefix; assert!( M::Body::EXPECTS_BODY || message_body.is_empty(), "body provided for message that doesn't take a body" ); // SECURITY: Use _zero constructors to ensure we zero memory to prevent // leaking information from packets previously stored in this buffer. let mut header = prefix.take_obj_front_zero::<Header<M>>().expect("too few bytes for ICMP message"); header.prefix.set_msg_type(M::TYPE); header.prefix.code = self.code.into(); header.message = self.msg; let checksum = compute_checksum_fragmented(&header, message_body, self.src_ip, self.dst_ip) .unwrap_or_else(|| { panic!( "total ICMP packet length of {} overflows 32-bit length field of pseudo-header", header.bytes().len() + message_body.len(), ) }); header.prefix.checksum = checksum; } } /// The type of ICMP codes that are unused. /// /// Some ICMP messages do not use codes. In Rust, the `IcmpMessage::Code` type /// associated with these messages is `IcmpUnusedCode`. The only valid numerical /// value for this code is 0. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct IcmpUnusedCode; impl From<IcmpUnusedCode> for u8 { fn from(_: IcmpUnusedCode) -> u8 { 0 } } #[derive(Copy, Clone, Debug, Eq, PartialEq, FromBytes, AsBytes, Unaligned)] #[repr(C)] struct IdAndSeq { id: U16, seq: U16, } impl IdAndSeq { fn new(id: u16, seq: u16) -> IdAndSeq { IdAndSeq { id: U16::new(id), seq: U16::new(seq) } } } #[cfg(test)] mod tests { use packet::ParseBuffer; use super::*; #[test] fn test_partial_parse() { // Test various behaviors of parsing the `IcmpPacketRaw` type. let reference_header = Header { prefix: HeaderPrefix { msg_type: <IcmpEchoRequest as IcmpMessage<Ipv4, &[u8]>>::TYPE.into(), code: 0, checksum: [0, 0], }, message: IcmpEchoRequest::new(1, 1), }; // Test that a too-short header is always rejected even if its contents // are otherwise valid (the checksum here is probably invalid, but we // explicitly check that it's a `Format` error, not a `Checksum` // error). 
let mut buf = &reference_header.as_bytes()[..7]; assert_eq!( buf.parse::<IcmpPacketRaw<Ipv4, _, IcmpEchoRequest>>().unwrap_err(), ParseError::Format ); // Test that a properly-sized header is rejected if the message type is wrong. let mut header = reference_header; header.prefix.msg_type = <IcmpEchoReply as IcmpMessage<Ipv4, &[u8]>>::TYPE.into(); let mut buf = header.as_bytes(); assert_eq!( buf.parse::<IcmpPacketRaw<Ipv4, _, IcmpEchoRequest>>().unwrap_err(), ParseError::NotExpected ); // Test that an invalid code is accepted. let mut header = reference_header; header.prefix.code = 0xFF; let mut buf = header.as_bytes(); assert!(buf.parse::<IcmpPacketRaw<Ipv4, _, IcmpEchoRequest>>().is_ok()); // Test that an invalid checksum is accepted. Instead of calculating the // correct checksum, we just provide two different checksums. They can't // both be valid. let mut buf = reference_header.as_bytes(); assert!(buf.parse::<IcmpPacketRaw<Ipv4, _, IcmpEchoRequest>>().is_ok()); let mut header = reference_header; header.prefix.checksum = [1, 1]; let mut buf = header.as_bytes(); assert!(buf.parse::<IcmpPacketRaw<Ipv4, _, IcmpEchoRequest>>().is_ok()); } }
{ c.add_bytes(src_ip.bytes()); c.add_bytes(dst_ip.bytes()); let icmpv6_len = mem::size_of::<Header<M>>() + message_body.len(); let mut len_bytes = [0; 4]; NetworkEndian::write_u32(&mut len_bytes, icmpv6_len.try_into().ok()?); c.add_bytes(&len_bytes[..]); c.add_bytes(&[0, 0, 0]); c.add_bytes(&[IpProto::Icmpv6.into()]); }
conditional_block
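The middle filled in by this `conditional_block` row is the ICMPv6 branch of `compute_checksum_fragmented`: before the ICMP header and body are summed, it folds in the IPv6 pseudo-header as source address, destination address, a 32-bit big-endian upper-layer length, three zero bytes, and the Next Header value (ICMPv6 is protocol 58). Below is a minimal standalone sketch of that exact byte sequence fed through a toy ones'-complement accumulator; the all-zero addresses, the echo-request header bytes, and the helper `ones_complement_sum` are illustrative stand-ins and not the crate's `Checksum` type.

// Standalone sketch of the ICMPv6 pseudo-header bytes used in the checksum.
fn ones_complement_sum(bytes: &[u8]) -> u16 {
    let mut sum: u32 = 0;
    for chunk in bytes.chunks(2) {
        // Pad an odd trailing byte with zero, as the Internet checksum requires.
        let word = u16::from_be_bytes([chunk[0], *chunk.get(1).unwrap_or(&0)]);
        sum += u32::from(word);
    }
    // Fold the carries back into the low 16 bits, then take the complement.
    while sum >> 16 != 0 {
        sum = (sum & 0xffff) + (sum >> 16);
    }
    !(sum as u16)
}

fn main() {
    let src = [0u8; 16]; // hypothetical source address (::)
    let dst = [0u8; 16]; // hypothetical destination address (::)
    // ICMPv6 Echo Request header with the checksum field zeroed:
    // type = 128, code = 0, checksum = [0, 0], id = 1, seq = 1.
    let icmp = [128u8, 0, 0, 0, 0, 1, 0, 1];

    let mut bytes = Vec::new();
    bytes.extend_from_slice(&src);
    bytes.extend_from_slice(&dst);
    bytes.extend_from_slice(&(icmp.len() as u32).to_be_bytes()); // upper-layer length
    bytes.extend_from_slice(&[0, 0, 0, 58]); // three zero bytes + Next Header (ICMPv6)
    bytes.extend_from_slice(&icmp); // then the ICMP header and (empty) body

    println!("checksum = {:#06x}", ones_complement_sum(&bytes));
}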
mod.rs
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! Parsing and serialization of Internet Control Message Protocol (ICMP) packets. #[macro_use] mod macros; mod common; mod icmpv4; mod icmpv6; pub mod mld; pub mod ndp; #[cfg(test)] mod testdata; pub use self::common::*; pub use self::icmpv4::*; pub use self::icmpv6::*; use core::cmp; use core::convert::{TryFrom, TryInto}; use core::fmt::Debug; use core::marker::PhantomData; use core::mem; use core::ops::Deref; use byteorder::{ByteOrder, NetworkEndian}; use internet_checksum::Checksum; use net_types::ip::{Ip, IpAddress, Ipv4, Ipv6}; use never::Never; use packet::records::options::{Options, OptionsImpl}; use packet::{ AsFragmentedByteSlice, BufferView, FragmentedByteSlice, FromRaw, PacketBuilder, PacketConstraints, ParsablePacket, ParseMetadata, SerializeBuffer, }; use zerocopy::{AsBytes, ByteSlice, FromBytes, LayoutVerified, Unaligned}; use crate::error::{ParseError, ParseResult}; use crate::ip::IpProto; use crate::ipv4::{self, Ipv4PacketRaw}; use crate::ipv6::Ipv6PacketRaw; use crate::U16; #[derive(Copy, Clone, Default, Debug, FromBytes, AsBytes, Unaligned)] #[repr(C)] struct HeaderPrefix { msg_type: u8, code: u8, checksum: [u8; 2], /* NOTE: The "Rest of Header" field is stored in message types rather than * in the HeaderPrefix. This helps consolidate how callers access data about the * packet, and is consistent with ICMPv6, which treats the field as part of * messages rather than the header. */ } impl HeaderPrefix { fn set_msg_type<T: Into<u8>>(&mut self, msg_type: T) { self.msg_type = msg_type.into(); } } /// Peek at an ICMP header to see what message type is present. /// /// Since `IcmpPacket` is statically typed with the message type expected, this /// type must be known ahead of time before calling `parse`. If multiple /// different types are valid in a given parsing context, and so the caller /// cannot know ahead of time which type to use, `peek_message_type` can be used /// to peek at the header first to figure out which static type should be used /// in a subsequent call to `parse`. /// /// Note that `peek_message_type` only inspects certain fields in the header, /// and so `peek_message_type` succeeding does not guarantee that a subsequent /// call to `parse` will also succeed. pub fn peek_message_type<MessageType: TryFrom<u8>>(bytes: &[u8]) -> ParseResult<MessageType> { let (hdr_pfx, _) = LayoutVerified::<_, HeaderPrefix>::new_unaligned_from_prefix(bytes) .ok_or_else(debug_err_fn!(ParseError::Format, "too few bytes for header"))?; MessageType::try_from(hdr_pfx.msg_type).map_err(|_| { debug_err!(ParseError::NotSupported, "unrecognized message type: {:x}", hdr_pfx.msg_type,) }) } /// An extension trait adding ICMP-related associated types to `Ipv4` and `Ipv6`. /// /// This trait is kept separate from `IcmpIpExt` to not require a type parameter /// that implements `ByteSlice`. pub trait IcmpIpTypes: Ip { /// The type of an ICMP parameter problem code. /// /// For `Ipv4`, this is `Icmpv4ParameterProblemCode`, and for `Ipv6` this /// is `Icmpv6ParameterProblemCode`. type ParameterProblemCode: PartialEq + Send + Sync + Debug; /// The type of an ICMP parameter problem pointer. /// /// For `Ipv4`, this is `u8`, and for `Ipv6` this is `u32`. type ParameterProblemPointer: PartialEq + Send + Sync + Debug; /// The type of an ICMP parameter header length. /// /// For `Ipv4`, this is `usize`, and for `Ipv6` this is `()`. 
type HeaderLen: PartialEq + Send + Sync + Debug; } // A default implementation for any I: Ip. This is to convince the Rust compiler // that, given an I: Ip, it's guaranteed to implement IcmpIpTypes. We humans know // that Ipv4 and Ipv6 are the only types implementing Ip and so, since we // implement IcmpIpTypes for both of these types, this is fine. The compiler isn't // so smart. This implementation should never actually be used. impl<I: Ip> IcmpIpTypes for I { default type ParameterProblemCode = Never; default type ParameterProblemPointer = Never; default type HeaderLen = Never; } impl IcmpIpTypes for Ipv4 { type ParameterProblemCode = Icmpv4ParameterProblemCode; type ParameterProblemPointer = u8; type HeaderLen = usize; } impl IcmpIpTypes for Ipv6 { type ParameterProblemCode = Icmpv6ParameterProblemCode; type ParameterProblemPointer = u32; type HeaderLen = (); } /// An extension trait adding ICMP-related functionality to `Ipv4` and `Ipv6`. pub trait IcmpIpExt: Ip { /// The type of ICMP messages. /// /// For `Ipv4`, this is `Icmpv4MessageType`, and for `Ipv6`, this is /// `Icmpv6MessageType`. type IcmpMessageType: IcmpMessageType; /// The identifier for this ICMP version. /// /// This value will be found in an IPv4 packet's Protocol field (for ICMPv4 /// packets) or an IPv6 fixed header's or last extension header's Next /// Heeader field (for ICMPv6 packets). const ICMP_IP_PROTO: IpProto; /// Compute the length of the header of the packet prefix stored in `bytes`. /// /// Given the prefix of a packet stored in `bytes`, compute the length of /// the header of that packet, or `bytes.len()` if `bytes` does not contain /// the entire header. If the version is IPv6, the returned length should /// include all extension headers. fn header_len(bytes: &[u8]) -> usize; } impl IcmpIpExt for Ipv4 { type IcmpMessageType = Icmpv4MessageType; const ICMP_IP_PROTO: IpProto = IpProto::Icmp; fn header_len(bytes: &[u8]) -> usize { if bytes.len() < ipv4::IPV4_MIN_HDR_LEN { return bytes.len(); } let (header_prefix, _) = LayoutVerified::<_, ipv4::HeaderPrefix>::new_unaligned_from_prefix(bytes).unwrap(); cmp::min(header_prefix.ihl() as usize * 4, bytes.len()) } } impl IcmpIpExt for Ipv6 { type IcmpMessageType = Icmpv6MessageType; const ICMP_IP_PROTO: IpProto = IpProto::Icmpv6; // TODO: Re-implement this in terms of partial parsing, and then get rid of // the `header_len` method. fn header_len(_bytes: &[u8]) -> usize { // NOTE: We panic here rather than doing log_unimplemented! because // there's no sane default value for this function. If it's called, it // doesn't make sense for the program to continue executing; if we did, // it would cause bugs in the caller. unimplemented!() } } /// An ICMP or ICMPv6 packet /// /// 'IcmpPacketType' is implemented by `Icmpv4Packet` and `Icmpv6Packet` pub trait IcmpPacketType<B: ByteSlice, I: Ip>: Sized + ParsablePacket<B, IcmpParseArgs<I::Addr>, Error = ParseError> { } impl<B: ByteSlice> IcmpPacketType<B, Ipv4> for Icmpv4Packet<B> {} impl<B: ByteSlice> IcmpPacketType<B, Ipv6> for Icmpv6Packet<B> {} // TODO(joshlf): Once we have generic associated types, refactor this so that we // don't have to bind B ahead of time. Removing that requirement would make some // APIs (in particular, IcmpPacketBuilder) simpler by removing the B parameter // from them as well. /// `MessageBody` represents the parsed body of the ICMP packet. /// /// - For messages that expect no body, the `MessageBody` is of type `()`. /// - For NDP messages, the `MessageBody` is of the type `ndp::Options`. 
/// - For all other messages, the `MessageBody` will be of the type /// `OriginalPacket`, which is a thin wrapper around `B`. pub trait MessageBody<B>: Sized { /// Whether or not a message body is expected in an ICMP packet. const EXPECTS_BODY: bool = true; /// Parse the MessageBody from the provided bytes. fn parse(bytes: B) -> ParseResult<Self> where B: ByteSlice; /// The length of the underlying buffer. fn len(&self) -> usize where B: ByteSlice; /// Is the body empty? /// /// `b.is_empty()` is equivalent to `b.len() == 0`. fn is_empty(&self) -> bool where B: ByteSlice, { self.len() == 0 } /// Return the underlying bytes. fn bytes(&self) -> &[u8] where B: Deref<Target = [u8]>; } impl<B> MessageBody<B> for () { const EXPECTS_BODY: bool = false; fn parse(bytes: B) -> ParseResult<()> where B: ByteSlice, { if !bytes.is_empty() { return debug_err!(Err(ParseError::Format), "unexpected message body"); } Ok(()) } fn len(&self) -> usize { 0 } fn bytes(&self) -> &[u8] { &[] } } /// A thin wrapper around B which implements `MessageBody`. #[derive(Debug)] pub struct OriginalPacket<B>(B); impl<B: ByteSlice + Deref<Target = [u8]>> OriginalPacket<B> { /// Returns the the body of the original packet. pub fn body<I: IcmpIpExt>(&self) -> &[u8] { // TODO(joshlf): Can these debug_asserts be triggered by external input? let header_len = I::header_len(&self.0); debug_assert!(header_len <= self.0.len()); debug_assert!(I::VERSION.is_v6() || self.0.len() - header_len == 8); &self.0[header_len..] } } impl<B> MessageBody<B> for OriginalPacket<B> { fn parse(bytes: B) -> ParseResult<OriginalPacket<B>> { Ok(OriginalPacket(bytes)) } fn len(&self) -> usize where B: ByteSlice, { self.0.len() } fn bytes(&self) -> &[u8] where B: Deref<Target = [u8]>, { &self.0 } } impl<B, O: for<'a> OptionsImpl<'a>> MessageBody<B> for Options<B, O> { fn parse(bytes: B) -> ParseResult<Options<B, O>> where B: ByteSlice, { Self::parse(bytes).map_err(|_e| debug_err!(ParseError::Format, "unable to parse options")) } fn len(&self) -> usize where B: ByteSlice, { self.bytes().len() } fn bytes(&self) -> &[u8] where B: Deref<Target = [u8]>, { self.bytes() } } /// An ICMP message. pub trait IcmpMessage<I: IcmpIpExt, B: ByteSlice>: Sized + Copy + FromBytes + AsBytes + Unaligned { /// The type of codes used with this message. /// /// The ICMP header includes an 8-bit "code" field. For a given message /// type, different values of this field carry different meanings. Not all /// code values are used - some may be invalid. This type represents a /// parsed code. For example, for TODO, it is the TODO type. type Code: Into<u8> + Copy + Debug; /// The type of the body used with this message. type Body: MessageBody<B>; /// The type corresponding to this message type. /// /// The value of the "type" field in the ICMP header corresponding to /// messages of this type. const TYPE: I::IcmpMessageType; /// Parse a `Code` from an 8-bit number. /// /// Parse a `Code` from the 8-bit "code" field in the ICMP header. Not all /// values for this field are valid. If an invalid value is passed, /// `code_from_u8` returns `None`. fn code_from_u8(code: u8) -> Option<Self::Code>; } /// The type of an ICMP message. /// /// `IcmpMessageType` is implemented by `Icmpv4MessageType` and /// `Icmpv6MessageType`. pub trait IcmpMessageType: TryFrom<u8> + Into<u8> + Copy { /// Is this an error message? /// /// For ICMP, this is true for the Destination Unreachable, Redirect, Source /// Quench, Time Exceeded, and Parameter Problem message types. 
For ICMPv6, /// this is true for the Destination Unreachable, Packet Too Big, Time /// Exceeded, and Parameter Problem message types. fn is_err(self) -> bool; } #[derive(Copy, Clone, Debug, FromBytes, Unaligned)] #[repr(C)] struct Header<M> { prefix: HeaderPrefix, message: M, } // So long as `M: Unaligned`, there will be no padding between the // `HeaderPrefix` and `M`. Since `HeaderPrefix` itself is `Unaligned`, the // alignment of `Header<M>` will be 1, meaning that no post-padding will need to // be added to get to a multiple of the alignment. Since there is no padding, // then so long as `M: AsBytes`, all of `Header<M>: AsBytes`. unsafe impl<M: AsBytes + Unaligned> AsBytes for Header<M> { // We're doing a bad thing, but it's necessary until derive(AsBytes) // supports type parameters. fn only_derive_is_allowed_to_implement_this_trait() {} } /// A partially parsed and not yet validated ICMP packet. /// /// An `IcmpPacketRaw` provides minimal parsing of an ICMP packet. Namely, it /// only requires that the header and message (in ICMPv6, these are both /// considered part of the header) are present, and that the header has the /// expected message type. The body may be missing (or an unexpected body may be /// present). Other than the message type, no header, message, or body field /// values will be validated. /// /// [`IcmpPacket`] provides a [`FromRaw`] implementation that can be used to /// validate an [`IcmpPacketRaw`]. #[derive(Debug)] pub struct IcmpPacketRaw<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> { header: LayoutVerified<B, Header<M>>, message_body: B, _marker: PhantomData<I>, } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacketRaw<I, B, M> { /// Get the ICMP message. pub fn message(&self) -> &M { &self.header.message } } /// An ICMP packet. /// /// An `IcmpPacket` shares its underlying memory with the byte slice it was /// parsed from, meaning that no copying or extra allocation is necessary. #[derive(Debug)] pub struct IcmpPacket<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> { header: LayoutVerified<B, Header<M>>, message_body: M::Body, _marker: PhantomData<I>, } /// Arguments required to parse an ICMP packet. pub struct IcmpParseArgs<A: IpAddress> { src_ip: A, dst_ip: A, } impl<A: IpAddress> IcmpParseArgs<A> { /// Construct a new `IcmpParseArgs`. pub fn
<S: Into<A>, D: Into<A>>(src_ip: S, dst_ip: D) -> IcmpParseArgs<A> { IcmpParseArgs { src_ip: src_ip.into(), dst_ip: dst_ip.into() } } } impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, ()> for IcmpPacketRaw<I, B, M> { type Error = ParseError; fn parse_metadata(&self) -> ParseMetadata { ParseMetadata::from_packet(self.header.bytes().len(), self.message_body.len(), 0) } fn parse<BV: BufferView<B>>(mut buffer: BV, _args: ()) -> ParseResult<Self> { let header = buffer .take_obj_front::<Header<M>>() .ok_or_else(debug_err_fn!(ParseError::Format, "too few bytes for header"))?; let message_body = buffer.into_rest(); if header.prefix.msg_type != M::TYPE.into() { return debug_err!(Err(ParseError::NotExpected), "unexpected message type"); } Ok(IcmpPacketRaw { header, message_body, _marker: PhantomData }) } } impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> FromRaw<IcmpPacketRaw<I, B, M>, IcmpParseArgs<I::Addr>> for IcmpPacket<I, B, M> { type Error = ParseError; fn try_from_raw_with( raw: IcmpPacketRaw<I, B, M>, args: IcmpParseArgs<I::Addr>, ) -> ParseResult<Self> { let IcmpPacketRaw { header, message_body, _marker } = raw; if !M::Body::EXPECTS_BODY && !message_body.is_empty() { return debug_err!(Err(ParseError::Format), "unexpected message body"); } let _: M::Code = M::code_from_u8(header.prefix.code).ok_or_else(debug_err_fn!( ParseError::Format, "unrecognized code: {}", header.prefix.code ))?; let checksum = Self::compute_checksum(&header, &message_body, args.src_ip, args.dst_ip) .ok_or_else(debug_err_fn!(ParseError::Format, "packet too large"))?; if checksum != [0, 0] { return debug_err!(Err(ParseError::Checksum), "invalid checksum"); } let message_body = M::Body::parse(message_body)?; Ok(IcmpPacket { header, message_body, _marker }) } } impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, IcmpParseArgs<I::Addr>> for IcmpPacket<I, B, M> { type Error = ParseError; fn parse_metadata(&self) -> ParseMetadata { ParseMetadata::from_packet(self.header.bytes().len(), self.message_body.len(), 0) } fn parse<BV: BufferView<B>>(buffer: BV, args: IcmpParseArgs<I::Addr>) -> ParseResult<Self> { IcmpPacketRaw::parse(buffer, ()).and_then(|p| IcmpPacket::try_from_raw_with(p, args)) } } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacket<I, B, M> { /// Get the ICMP message. pub fn message(&self) -> &M { &self.header.message } /// Get the ICMP body. pub fn body(&self) -> &M::Body { &self.message_body } /// Get the ICMP message code. /// /// The code provides extra details about the message. Each message type has /// its own set of codes that are allowed. pub fn code(&self) -> M::Code { // infallible since it was validated in parse M::code_from_u8(self.header.prefix.code).unwrap() } /// Construct a builder with the same contents as this packet. 
pub fn builder(&self, src_ip: I::Addr, dst_ip: I::Addr) -> IcmpPacketBuilder<I, B, M> { IcmpPacketBuilder { src_ip, dst_ip, code: self.code(), msg: *self.message() } } } fn compute_checksum_fragmented< I: IcmpIpExt, B: ByteSlice, BB: packet::Fragment, M: IcmpMessage<I, B>, >( header: &Header<M>, message_body: &FragmentedByteSlice<'_, BB>, src_ip: I::Addr, dst_ip: I::Addr, ) -> Option<[u8; 2]> { let mut c = Checksum::new(); if I::VERSION.is_v6() { c.add_bytes(src_ip.bytes()); c.add_bytes(dst_ip.bytes()); let icmpv6_len = mem::size_of::<Header<M>>() + message_body.len(); let mut len_bytes = [0; 4]; NetworkEndian::write_u32(&mut len_bytes, icmpv6_len.try_into().ok()?); c.add_bytes(&len_bytes[..]); c.add_bytes(&[0, 0, 0]); c.add_bytes(&[IpProto::Icmpv6.into()]); } c.add_bytes(&[header.prefix.msg_type, header.prefix.code]); c.add_bytes(&header.prefix.checksum); c.add_bytes(header.message.as_bytes()); for p in message_body.iter_fragments() { c.add_bytes(p); } Some(c.checksum()) } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacket<I, B, M> { /// Compute the checksum, including the checksum field itself. /// /// `compute_checksum` returns `None` if the version is IPv6 and the total /// ICMP packet length overflows a u32. fn compute_checksum( header: &Header<M>, message_body: &[u8], src_ip: I::Addr, dst_ip: I::Addr, ) -> Option<[u8; 2]> { let mut body = [message_body]; compute_checksum_fragmented(header, &body.as_fragmented_byte_slice(), src_ip, dst_ip) } } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B, Body = OriginalPacket<B>>> IcmpPacket<I, B, M> { /// Get the body of the packet that caused this ICMP message. /// /// This ICMP message contains some of the bytes of the packet that caused /// this message to be emitted. `original_packet_body` returns as much of /// the body of that packet as is contained in this message. For IPv4, this /// is guaranteed to be 8 bytes. For IPv6, there are no guarantees about the /// length. pub fn original_packet_body(&self) -> &[u8] { self.message_body.body::<I>() } /// Returns the original packt that caused this ICMP message. /// /// This ICMP message contains some of the bytes of the packet that caused /// this message to be emitted. `original_packet` returns as much of the /// body of that packet as is contained in this message. For IPv4, this is /// guaranteed to be 8 bytes. For IPv6, there are no guarantees about the /// length. pub fn original_packet(&self) -> &OriginalPacket<B> { &self.message_body } } impl<B: ByteSlice, M: IcmpMessage<Ipv4, B, Body = OriginalPacket<B>>> IcmpPacket<Ipv4, B, M> { /// Attempt to partially parse the original packet as an IPv4 packet. /// /// `f` will be invoked on the result of calling `Ipv4PacketRaw::parse` on /// the original packet. pub fn with_original_packet<O, F: FnOnce(Result<Ipv4PacketRaw<&[u8]>, &[u8]>) -> O>( &self, f: F, ) -> O { let mut bv = self.message_body.0.deref(); f(Ipv4PacketRaw::parse(&mut bv, ()).map_err(|_| self.message_body.0.deref())) } } impl<B: ByteSlice, M: IcmpMessage<Ipv6, B, Body = OriginalPacket<B>>> IcmpPacket<Ipv6, B, M> { /// Attempt to partially parse the original packet as an IPv6 packet. /// /// `f` will be invoked on the result of calling `Ipv6PacketRaw::parse` on /// the original packet. 
pub fn with_original_packet<O, F: FnOnce(Result<Ipv6PacketRaw<&[u8]>, &[u8]>) -> O>( &self, f: F, ) -> O { let mut bv = self.message_body.0.deref(); f(Ipv6PacketRaw::parse(&mut bv, ()).map_err(|_| self.message_body.0.deref())) } } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B, Body = ndp::Options<B>>> IcmpPacket<I, B, M> { /// Get the pared list of NDP options from the ICMP message. pub fn ndp_options(&self) -> &ndp::Options<B> { &self.message_body } } /// A builder for ICMP packets. #[derive(Debug)] pub struct IcmpPacketBuilder<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> { src_ip: I::Addr, dst_ip: I::Addr, code: M::Code, msg: M, } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacketBuilder<I, B, M> { /// Construct a new `IcmpPacketBuilder`. pub fn new<S: Into<I::Addr>, D: Into<I::Addr>>( src_ip: S, dst_ip: D, code: M::Code, msg: M, ) -> IcmpPacketBuilder<I, B, M> { IcmpPacketBuilder { src_ip: src_ip.into(), dst_ip: dst_ip.into(), code, msg } } } // TODO(joshlf): Figure out a way to split body and non-body message types by // trait and implement PacketBuilder for some and InnerPacketBuilder for others. impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> PacketBuilder for IcmpPacketBuilder<I, B, M> { fn constraints(&self) -> PacketConstraints { // The maximum body length constraint to make sure the body length // doesn't overflow the 32-bit length field in the pseudo-header used // for calculating the checksum. // // Note that, for messages that don't take bodies, it's important that // we don't just set this to 0. Trying to serialize a body in a message // type which doesn't take bodies is a programmer error, so we should // panic in that case. Setting the max_body_len to 0 would surface the // issue as an MTU error, which would hide the underlying problem. // Instead, we assert in serialize. Eventually, we will hopefully figure // out a way to implement InnerPacketBuilder (rather than PacketBuilder) // for these message types, and this won't be an issue anymore. PacketConstraints::new(mem::size_of::<Header<M>>(), 0, 0, core::u32::MAX as usize) } fn serialize(&self, buffer: &mut SerializeBuffer<'_>) { use packet::BufferViewMut; let (mut prefix, message_body, _) = buffer.parts(); // implements BufferViewMut, giving us take_obj_xxx_zero methods let mut prefix = &mut prefix; assert!( M::Body::EXPECTS_BODY || message_body.is_empty(), "body provided for message that doesn't take a body" ); // SECURITY: Use _zero constructors to ensure we zero memory to prevent // leaking information from packets previously stored in this buffer. let mut header = prefix.take_obj_front_zero::<Header<M>>().expect("too few bytes for ICMP message"); header.prefix.set_msg_type(M::TYPE); header.prefix.code = self.code.into(); header.message = self.msg; let checksum = compute_checksum_fragmented(&header, message_body, self.src_ip, self.dst_ip) .unwrap_or_else(|| { panic!( "total ICMP packet length of {} overflows 32-bit length field of pseudo-header", header.bytes().len() + message_body.len(), ) }); header.prefix.checksum = checksum; } } /// The type of ICMP codes that are unused. /// /// Some ICMP messages do not use codes. In Rust, the `IcmpMessage::Code` type /// associated with these messages is `IcmpUnusedCode`. The only valid numerical /// value for this code is 0. 
#[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct IcmpUnusedCode; impl From<IcmpUnusedCode> for u8 { fn from(_: IcmpUnusedCode) -> u8 { 0 } } #[derive(Copy, Clone, Debug, Eq, PartialEq, FromBytes, AsBytes, Unaligned)] #[repr(C)] struct IdAndSeq { id: U16, seq: U16, } impl IdAndSeq { fn new(id: u16, seq: u16) -> IdAndSeq { IdAndSeq { id: U16::new(id), seq: U16::new(seq) } } } #[cfg(test)] mod tests { use packet::ParseBuffer; use super::*; #[test] fn test_partial_parse() { // Test various behaviors of parsing the `IcmpPacketRaw` type. let reference_header = Header { prefix: HeaderPrefix { msg_type: <IcmpEchoRequest as IcmpMessage<Ipv4, &[u8]>>::TYPE.into(), code: 0, checksum: [0, 0], }, message: IcmpEchoRequest::new(1, 1), }; // Test that a too-short header is always rejected even if its contents // are otherwise valid (the checksum here is probably invalid, but we // explicitly check that it's a `Format` error, not a `Checksum` // error). let mut buf = &reference_header.as_bytes()[..7]; assert_eq!( buf.parse::<IcmpPacketRaw<Ipv4, _, IcmpEchoRequest>>().unwrap_err(), ParseError::Format ); // Test that a properly-sized header is rejected if the message type is wrong. let mut header = reference_header; header.prefix.msg_type = <IcmpEchoReply as IcmpMessage<Ipv4, &[u8]>>::TYPE.into(); let mut buf = header.as_bytes(); assert_eq!( buf.parse::<IcmpPacketRaw<Ipv4, _, IcmpEchoRequest>>().unwrap_err(), ParseError::NotExpected ); // Test that an invalid code is accepted. let mut header = reference_header; header.prefix.code = 0xFF; let mut buf = header.as_bytes(); assert!(buf.parse::<IcmpPacketRaw<Ipv4, _, IcmpEchoRequest>>().is_ok()); // Test that an invalid checksum is accepted. Instead of calculating the // correct checksum, we just provide two different checksums. They can't // both be valid. let mut buf = reference_header.as_bytes(); assert!(buf.parse::<IcmpPacketRaw<Ipv4, _, IcmpEchoRequest>>().is_ok()); let mut header = reference_header; header.prefix.checksum = [1, 1]; let mut buf = header.as_bytes(); assert!(buf.parse::<IcmpPacketRaw<Ipv4, _, IcmpEchoRequest>>().is_ok()); } }
new
identifier_name
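The missing identifier in this row is `new` on `IcmpParseArgs`, the argument struct a caller builds before parsing a statically typed `IcmpPacket`. As the module's `peek_message_type` docs explain, when several message types are valid in a parsing context the caller first peeks at the type byte to decide which static type to parse as. The sketch below illustrates that peek-then-dispatch pattern in isolation; `MsgType` and the code values 0 (Echo Reply) and 8 (Echo Request) are a small ICMPv4 stand-in, not the crate's `Icmpv4MessageType` or its real parse entry points.

// Standalone illustration of the peek-then-parse dispatch pattern.
use std::convert::TryFrom;

enum MsgType {
    EchoReply,   // ICMPv4 type 0
    EchoRequest, // ICMPv4 type 8
}

impl TryFrom<u8> for MsgType {
    type Error = u8;
    fn try_from(b: u8) -> Result<Self, u8> {
        match b {
            0 => Ok(MsgType::EchoReply),
            8 => Ok(MsgType::EchoRequest),
            other => Err(other),
        }
    }
}

// Inspect only the leading type byte; succeeding here does not guarantee the
// full parse will succeed, mirroring the caveat in peek_message_type's docs.
fn peek_message_type(bytes: &[u8]) -> Result<MsgType, &'static str> {
    let first = *bytes.first().ok_or("too few bytes for header")?;
    MsgType::try_from(first).map_err(|_| "unrecognized message type")
}

fn main() {
    let packet = [8u8, 0, 0xf7, 0xfe, 0, 1, 0, 1];
    match peek_message_type(&packet) {
        Ok(MsgType::EchoRequest) => println!("parse as an Echo Request packet"),
        Ok(MsgType::EchoReply) => println!("parse as an Echo Reply packet"),
        Err(e) => println!("error: {}", e),
    }
}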
mod.rs
// Copyright 2018 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! Parsing and serialization of Internet Control Message Protocol (ICMP) packets. #[macro_use] mod macros; mod common; mod icmpv4; mod icmpv6; pub mod mld; pub mod ndp; #[cfg(test)] mod testdata; pub use self::common::*; pub use self::icmpv4::*; pub use self::icmpv6::*; use core::cmp; use core::convert::{TryFrom, TryInto}; use core::fmt::Debug; use core::marker::PhantomData; use core::mem; use core::ops::Deref; use byteorder::{ByteOrder, NetworkEndian}; use internet_checksum::Checksum; use net_types::ip::{Ip, IpAddress, Ipv4, Ipv6}; use never::Never; use packet::records::options::{Options, OptionsImpl}; use packet::{ AsFragmentedByteSlice, BufferView, FragmentedByteSlice, FromRaw, PacketBuilder, PacketConstraints, ParsablePacket, ParseMetadata, SerializeBuffer, }; use zerocopy::{AsBytes, ByteSlice, FromBytes, LayoutVerified, Unaligned}; use crate::error::{ParseError, ParseResult}; use crate::ip::IpProto; use crate::ipv4::{self, Ipv4PacketRaw}; use crate::ipv6::Ipv6PacketRaw; use crate::U16; #[derive(Copy, Clone, Default, Debug, FromBytes, AsBytes, Unaligned)] #[repr(C)] struct HeaderPrefix { msg_type: u8, code: u8, checksum: [u8; 2], /* NOTE: The "Rest of Header" field is stored in message types rather than * in the HeaderPrefix. This helps consolidate how callers access data about the * packet, and is consistent with ICMPv6, which treats the field as part of * messages rather than the header. */ } impl HeaderPrefix { fn set_msg_type<T: Into<u8>>(&mut self, msg_type: T) { self.msg_type = msg_type.into(); } } /// Peek at an ICMP header to see what message type is present. /// /// Since `IcmpPacket` is statically typed with the message type expected, this /// type must be known ahead of time before calling `parse`. If multiple /// different types are valid in a given parsing context, and so the caller /// cannot know ahead of time which type to use, `peek_message_type` can be used /// to peek at the header first to figure out which static type should be used /// in a subsequent call to `parse`. /// /// Note that `peek_message_type` only inspects certain fields in the header, /// and so `peek_message_type` succeeding does not guarantee that a subsequent /// call to `parse` will also succeed. pub fn peek_message_type<MessageType: TryFrom<u8>>(bytes: &[u8]) -> ParseResult<MessageType> { let (hdr_pfx, _) = LayoutVerified::<_, HeaderPrefix>::new_unaligned_from_prefix(bytes) .ok_or_else(debug_err_fn!(ParseError::Format, "too few bytes for header"))?; MessageType::try_from(hdr_pfx.msg_type).map_err(|_| { debug_err!(ParseError::NotSupported, "unrecognized message type: {:x}", hdr_pfx.msg_type,) }) } /// An extension trait adding ICMP-related associated types to `Ipv4` and `Ipv6`. /// /// This trait is kept separate from `IcmpIpExt` to not require a type parameter /// that implements `ByteSlice`. pub trait IcmpIpTypes: Ip { /// The type of an ICMP parameter problem code. /// /// For `Ipv4`, this is `Icmpv4ParameterProblemCode`, and for `Ipv6` this /// is `Icmpv6ParameterProblemCode`. type ParameterProblemCode: PartialEq + Send + Sync + Debug; /// The type of an ICMP parameter problem pointer. /// /// For `Ipv4`, this is `u8`, and for `Ipv6` this is `u32`. type ParameterProblemPointer: PartialEq + Send + Sync + Debug; /// The type of an ICMP parameter header length. /// /// For `Ipv4`, this is `usize`, and for `Ipv6` this is `()`. 
type HeaderLen: PartialEq + Send + Sync + Debug; } // A default implementation for any I: Ip. This is to convince the Rust compiler // that, given an I: Ip, it's guaranteed to implement IcmpIpTypes. We humans know // that Ipv4 and Ipv6 are the only types implementing Ip and so, since we // implement IcmpIpTypes for both of these types, this is fine. The compiler isn't // so smart. This implementation should never actually be used. impl<I: Ip> IcmpIpTypes for I { default type ParameterProblemCode = Never; default type ParameterProblemPointer = Never; default type HeaderLen = Never; } impl IcmpIpTypes for Ipv4 { type ParameterProblemCode = Icmpv4ParameterProblemCode; type ParameterProblemPointer = u8; type HeaderLen = usize; } impl IcmpIpTypes for Ipv6 { type ParameterProblemCode = Icmpv6ParameterProblemCode; type ParameterProblemPointer = u32; type HeaderLen = (); } /// An extension trait adding ICMP-related functionality to `Ipv4` and `Ipv6`. pub trait IcmpIpExt: Ip { /// The type of ICMP messages. /// /// For `Ipv4`, this is `Icmpv4MessageType`, and for `Ipv6`, this is /// `Icmpv6MessageType`. type IcmpMessageType: IcmpMessageType; /// The identifier for this ICMP version. /// /// This value will be found in an IPv4 packet's Protocol field (for ICMPv4 /// packets) or an IPv6 fixed header's or last extension header's Next /// Heeader field (for ICMPv6 packets). const ICMP_IP_PROTO: IpProto; /// Compute the length of the header of the packet prefix stored in `bytes`. /// /// Given the prefix of a packet stored in `bytes`, compute the length of /// the header of that packet, or `bytes.len()` if `bytes` does not contain /// the entire header. If the version is IPv6, the returned length should /// include all extension headers. fn header_len(bytes: &[u8]) -> usize; } impl IcmpIpExt for Ipv4 { type IcmpMessageType = Icmpv4MessageType; const ICMP_IP_PROTO: IpProto = IpProto::Icmp; fn header_len(bytes: &[u8]) -> usize { if bytes.len() < ipv4::IPV4_MIN_HDR_LEN { return bytes.len(); } let (header_prefix, _) = LayoutVerified::<_, ipv4::HeaderPrefix>::new_unaligned_from_prefix(bytes).unwrap(); cmp::min(header_prefix.ihl() as usize * 4, bytes.len()) } } impl IcmpIpExt for Ipv6 { type IcmpMessageType = Icmpv6MessageType; const ICMP_IP_PROTO: IpProto = IpProto::Icmpv6; // TODO: Re-implement this in terms of partial parsing, and then get rid of // the `header_len` method. fn header_len(_bytes: &[u8]) -> usize { // NOTE: We panic here rather than doing log_unimplemented! because // there's no sane default value for this function. If it's called, it // doesn't make sense for the program to continue executing; if we did, // it would cause bugs in the caller. unimplemented!() } } /// An ICMP or ICMPv6 packet /// /// 'IcmpPacketType' is implemented by `Icmpv4Packet` and `Icmpv6Packet` pub trait IcmpPacketType<B: ByteSlice, I: Ip>: Sized + ParsablePacket<B, IcmpParseArgs<I::Addr>, Error = ParseError> { } impl<B: ByteSlice> IcmpPacketType<B, Ipv4> for Icmpv4Packet<B> {} impl<B: ByteSlice> IcmpPacketType<B, Ipv6> for Icmpv6Packet<B> {} // TODO(joshlf): Once we have generic associated types, refactor this so that we // don't have to bind B ahead of time. Removing that requirement would make some // APIs (in particular, IcmpPacketBuilder) simpler by removing the B parameter // from them as well. /// `MessageBody` represents the parsed body of the ICMP packet. /// /// - For messages that expect no body, the `MessageBody` is of type `()`. /// - For NDP messages, the `MessageBody` is of the type `ndp::Options`. 
/// - For all other messages, the `MessageBody` will be of the type /// `OriginalPacket`, which is a thin wrapper around `B`. pub trait MessageBody<B>: Sized { /// Whether or not a message body is expected in an ICMP packet. const EXPECTS_BODY: bool = true; /// Parse the MessageBody from the provided bytes. fn parse(bytes: B) -> ParseResult<Self> where B: ByteSlice; /// The length of the underlying buffer. fn len(&self) -> usize where B: ByteSlice; /// Is the body empty? /// /// `b.is_empty()` is equivalent to `b.len() == 0`. fn is_empty(&self) -> bool where B: ByteSlice, { self.len() == 0 } /// Return the underlying bytes. fn bytes(&self) -> &[u8] where B: Deref<Target = [u8]>; } impl<B> MessageBody<B> for () { const EXPECTS_BODY: bool = false; fn parse(bytes: B) -> ParseResult<()> where B: ByteSlice, { if !bytes.is_empty() { return debug_err!(Err(ParseError::Format), "unexpected message body"); } Ok(()) } fn len(&self) -> usize { 0 } fn bytes(&self) -> &[u8] { &[] } } /// A thin wrapper around B which implements `MessageBody`. #[derive(Debug)] pub struct OriginalPacket<B>(B); impl<B: ByteSlice + Deref<Target = [u8]>> OriginalPacket<B> { /// Returns the the body of the original packet. pub fn body<I: IcmpIpExt>(&self) -> &[u8] { // TODO(joshlf): Can these debug_asserts be triggered by external input? let header_len = I::header_len(&self.0); debug_assert!(header_len <= self.0.len()); debug_assert!(I::VERSION.is_v6() || self.0.len() - header_len == 8); &self.0[header_len..] } } impl<B> MessageBody<B> for OriginalPacket<B> { fn parse(bytes: B) -> ParseResult<OriginalPacket<B>> { Ok(OriginalPacket(bytes)) } fn len(&self) -> usize where B: ByteSlice, { self.0.len() } fn bytes(&self) -> &[u8] where B: Deref<Target = [u8]>, { &self.0 } } impl<B, O: for<'a> OptionsImpl<'a>> MessageBody<B> for Options<B, O> { fn parse(bytes: B) -> ParseResult<Options<B, O>> where B: ByteSlice, { Self::parse(bytes).map_err(|_e| debug_err!(ParseError::Format, "unable to parse options")) } fn len(&self) -> usize where B: ByteSlice, { self.bytes().len() } fn bytes(&self) -> &[u8] where B: Deref<Target = [u8]>, { self.bytes() } } /// An ICMP message. pub trait IcmpMessage<I: IcmpIpExt, B: ByteSlice>: Sized + Copy + FromBytes + AsBytes + Unaligned { /// The type of codes used with this message. /// /// The ICMP header includes an 8-bit "code" field. For a given message /// type, different values of this field carry different meanings. Not all /// code values are used - some may be invalid. This type represents a /// parsed code. For example, for TODO, it is the TODO type. type Code: Into<u8> + Copy + Debug; /// The type of the body used with this message. type Body: MessageBody<B>; /// The type corresponding to this message type. /// /// The value of the "type" field in the ICMP header corresponding to /// messages of this type. const TYPE: I::IcmpMessageType; /// Parse a `Code` from an 8-bit number. /// /// Parse a `Code` from the 8-bit "code" field in the ICMP header. Not all /// values for this field are valid. If an invalid value is passed, /// `code_from_u8` returns `None`. fn code_from_u8(code: u8) -> Option<Self::Code>; } /// The type of an ICMP message. /// /// `IcmpMessageType` is implemented by `Icmpv4MessageType` and /// `Icmpv6MessageType`. pub trait IcmpMessageType: TryFrom<u8> + Into<u8> + Copy { /// Is this an error message? /// /// For ICMP, this is true for the Destination Unreachable, Redirect, Source /// Quench, Time Exceeded, and Parameter Problem message types. 
For ICMPv6, /// this is true for the Destination Unreachable, Packet Too Big, Time /// Exceeded, and Parameter Problem message types. fn is_err(self) -> bool; } #[derive(Copy, Clone, Debug, FromBytes, Unaligned)] #[repr(C)] struct Header<M> { prefix: HeaderPrefix, message: M, } // So long as `M: Unaligned`, there will be no padding between the // `HeaderPrefix` and `M`. Since `HeaderPrefix` itself is `Unaligned`, the // alignment of `Header<M>` will be 1, meaning that no post-padding will need to // be added to get to a multiple of the alignment. Since there is no padding, // then so long as `M: AsBytes`, all of `Header<M>: AsBytes`. unsafe impl<M: AsBytes + Unaligned> AsBytes for Header<M> { // We're doing a bad thing, but it's necessary until derive(AsBytes) // supports type parameters. fn only_derive_is_allowed_to_implement_this_trait() {} } /// A partially parsed and not yet validated ICMP packet. /// /// An `IcmpPacketRaw` provides minimal parsing of an ICMP packet. Namely, it /// only requires that the header and message (in ICMPv6, these are both /// considered part of the header) are present, and that the header has the /// expected message type. The body may be missing (or an unexpected body may be /// present). Other than the message type, no header, message, or body field /// values will be validated. /// /// [`IcmpPacket`] provides a [`FromRaw`] implementation that can be used to /// validate an [`IcmpPacketRaw`]. #[derive(Debug)] pub struct IcmpPacketRaw<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> { header: LayoutVerified<B, Header<M>>, message_body: B, _marker: PhantomData<I>, } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacketRaw<I, B, M> { /// Get the ICMP message. pub fn message(&self) -> &M { &self.header.message } } /// An ICMP packet. /// /// An `IcmpPacket` shares its underlying memory with the byte slice it was /// parsed from, meaning that no copying or extra allocation is necessary. #[derive(Debug)] pub struct IcmpPacket<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> { header: LayoutVerified<B, Header<M>>, message_body: M::Body, _marker: PhantomData<I>, } /// Arguments required to parse an ICMP packet. pub struct IcmpParseArgs<A: IpAddress> { src_ip: A, dst_ip: A, } impl<A: IpAddress> IcmpParseArgs<A> { /// Construct a new `IcmpParseArgs`. 
pub fn new<S: Into<A>, D: Into<A>>(src_ip: S, dst_ip: D) -> IcmpParseArgs<A> { IcmpParseArgs { src_ip: src_ip.into(), dst_ip: dst_ip.into() } } } impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, ()> for IcmpPacketRaw<I, B, M> { type Error = ParseError; fn parse_metadata(&self) -> ParseMetadata { ParseMetadata::from_packet(self.header.bytes().len(), self.message_body.len(), 0) } fn parse<BV: BufferView<B>>(mut buffer: BV, _args: ()) -> ParseResult<Self> { let header = buffer .take_obj_front::<Header<M>>() .ok_or_else(debug_err_fn!(ParseError::Format, "too few bytes for header"))?; let message_body = buffer.into_rest(); if header.prefix.msg_type != M::TYPE.into() { return debug_err!(Err(ParseError::NotExpected), "unexpected message type"); } Ok(IcmpPacketRaw { header, message_body, _marker: PhantomData }) } } impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> FromRaw<IcmpPacketRaw<I, B, M>, IcmpParseArgs<I::Addr>> for IcmpPacket<I, B, M> { type Error = ParseError; fn try_from_raw_with( raw: IcmpPacketRaw<I, B, M>, args: IcmpParseArgs<I::Addr>, ) -> ParseResult<Self> { let IcmpPacketRaw { header, message_body, _marker } = raw; if !M::Body::EXPECTS_BODY && !message_body.is_empty() { return debug_err!(Err(ParseError::Format), "unexpected message body"); } let _: M::Code = M::code_from_u8(header.prefix.code).ok_or_else(debug_err_fn!( ParseError::Format, "unrecognized code: {}", header.prefix.code ))?; let checksum = Self::compute_checksum(&header, &message_body, args.src_ip, args.dst_ip) .ok_or_else(debug_err_fn!(ParseError::Format, "packet too large"))?; if checksum != [0, 0] { return debug_err!(Err(ParseError::Checksum), "invalid checksum"); } let message_body = M::Body::parse(message_body)?; Ok(IcmpPacket { header, message_body, _marker }) } } impl<B: ByteSlice, I: IcmpIpExt, M: IcmpMessage<I, B>> ParsablePacket<B, IcmpParseArgs<I::Addr>> for IcmpPacket<I, B, M>
ParseMetadata::from_packet(self.header.bytes().len(), self.message_body.len(), 0) } fn parse<BV: BufferView<B>>(buffer: BV, args: IcmpParseArgs<I::Addr>) -> ParseResult<Self> { IcmpPacketRaw::parse(buffer, ()).and_then(|p| IcmpPacket::try_from_raw_with(p, args)) } } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacket<I, B, M> { /// Get the ICMP message. pub fn message(&self) -> &M { &self.header.message } /// Get the ICMP body. pub fn body(&self) -> &M::Body { &self.message_body } /// Get the ICMP message code. /// /// The code provides extra details about the message. Each message type has /// its own set of codes that are allowed. pub fn code(&self) -> M::Code { // infallible since it was validated in parse M::code_from_u8(self.header.prefix.code).unwrap() } /// Construct a builder with the same contents as this packet. pub fn builder(&self, src_ip: I::Addr, dst_ip: I::Addr) -> IcmpPacketBuilder<I, B, M> { IcmpPacketBuilder { src_ip, dst_ip, code: self.code(), msg: *self.message() } } } fn compute_checksum_fragmented< I: IcmpIpExt, B: ByteSlice, BB: packet::Fragment, M: IcmpMessage<I, B>, >( header: &Header<M>, message_body: &FragmentedByteSlice<'_, BB>, src_ip: I::Addr, dst_ip: I::Addr, ) -> Option<[u8; 2]> { let mut c = Checksum::new(); if I::VERSION.is_v6() { c.add_bytes(src_ip.bytes()); c.add_bytes(dst_ip.bytes()); let icmpv6_len = mem::size_of::<Header<M>>() + message_body.len(); let mut len_bytes = [0; 4]; NetworkEndian::write_u32(&mut len_bytes, icmpv6_len.try_into().ok()?); c.add_bytes(&len_bytes[..]); c.add_bytes(&[0, 0, 0]); c.add_bytes(&[IpProto::Icmpv6.into()]); } c.add_bytes(&[header.prefix.msg_type, header.prefix.code]); c.add_bytes(&header.prefix.checksum); c.add_bytes(header.message.as_bytes()); for p in message_body.iter_fragments() { c.add_bytes(p); } Some(c.checksum()) } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacket<I, B, M> { /// Compute the checksum, including the checksum field itself. /// /// `compute_checksum` returns `None` if the version is IPv6 and the total /// ICMP packet length overflows a u32. fn compute_checksum( header: &Header<M>, message_body: &[u8], src_ip: I::Addr, dst_ip: I::Addr, ) -> Option<[u8; 2]> { let mut body = [message_body]; compute_checksum_fragmented(header, &body.as_fragmented_byte_slice(), src_ip, dst_ip) } } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B, Body = OriginalPacket<B>>> IcmpPacket<I, B, M> { /// Get the body of the packet that caused this ICMP message. /// /// This ICMP message contains some of the bytes of the packet that caused /// this message to be emitted. `original_packet_body` returns as much of /// the body of that packet as is contained in this message. For IPv4, this /// is guaranteed to be 8 bytes. For IPv6, there are no guarantees about the /// length. pub fn original_packet_body(&self) -> &[u8] { self.message_body.body::<I>() } /// Returns the original packt that caused this ICMP message. /// /// This ICMP message contains some of the bytes of the packet that caused /// this message to be emitted. `original_packet` returns as much of the /// body of that packet as is contained in this message. For IPv4, this is /// guaranteed to be 8 bytes. For IPv6, there are no guarantees about the /// length. pub fn original_packet(&self) -> &OriginalPacket<B> { &self.message_body } } impl<B: ByteSlice, M: IcmpMessage<Ipv4, B, Body = OriginalPacket<B>>> IcmpPacket<Ipv4, B, M> { /// Attempt to partially parse the original packet as an IPv4 packet. 
/// /// `f` will be invoked on the result of calling `Ipv4PacketRaw::parse` on /// the original packet. pub fn with_original_packet<O, F: FnOnce(Result<Ipv4PacketRaw<&[u8]>, &[u8]>) -> O>( &self, f: F, ) -> O { let mut bv = self.message_body.0.deref(); f(Ipv4PacketRaw::parse(&mut bv, ()).map_err(|_| self.message_body.0.deref())) } } impl<B: ByteSlice, M: IcmpMessage<Ipv6, B, Body = OriginalPacket<B>>> IcmpPacket<Ipv6, B, M> { /// Attempt to partially parse the original packet as an IPv6 packet. /// /// `f` will be invoked on the result of calling `Ipv6PacketRaw::parse` on /// the original packet. pub fn with_original_packet<O, F: FnOnce(Result<Ipv6PacketRaw<&[u8]>, &[u8]>) -> O>( &self, f: F, ) -> O { let mut bv = self.message_body.0.deref(); f(Ipv6PacketRaw::parse(&mut bv, ()).map_err(|_| self.message_body.0.deref())) } } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B, Body = ndp::Options<B>>> IcmpPacket<I, B, M> { /// Get the pared list of NDP options from the ICMP message. pub fn ndp_options(&self) -> &ndp::Options<B> { &self.message_body } } /// A builder for ICMP packets. #[derive(Debug)] pub struct IcmpPacketBuilder<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> { src_ip: I::Addr, dst_ip: I::Addr, code: M::Code, msg: M, } impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> IcmpPacketBuilder<I, B, M> { /// Construct a new `IcmpPacketBuilder`. pub fn new<S: Into<I::Addr>, D: Into<I::Addr>>( src_ip: S, dst_ip: D, code: M::Code, msg: M, ) -> IcmpPacketBuilder<I, B, M> { IcmpPacketBuilder { src_ip: src_ip.into(), dst_ip: dst_ip.into(), code, msg } } } // TODO(joshlf): Figure out a way to split body and non-body message types by // trait and implement PacketBuilder for some and InnerPacketBuilder for others. impl<I: IcmpIpExt, B: ByteSlice, M: IcmpMessage<I, B>> PacketBuilder for IcmpPacketBuilder<I, B, M> { fn constraints(&self) -> PacketConstraints { // The maximum body length constraint to make sure the body length // doesn't overflow the 32-bit length field in the pseudo-header used // for calculating the checksum. // // Note that, for messages that don't take bodies, it's important that // we don't just set this to 0. Trying to serialize a body in a message // type which doesn't take bodies is a programmer error, so we should // panic in that case. Setting the max_body_len to 0 would surface the // issue as an MTU error, which would hide the underlying problem. // Instead, we assert in serialize. Eventually, we will hopefully figure // out a way to implement InnerPacketBuilder (rather than PacketBuilder) // for these message types, and this won't be an issue anymore. PacketConstraints::new(mem::size_of::<Header<M>>(), 0, 0, core::u32::MAX as usize) } fn serialize(&self, buffer: &mut SerializeBuffer<'_>) { use packet::BufferViewMut; let (mut prefix, message_body, _) = buffer.parts(); // implements BufferViewMut, giving us take_obj_xxx_zero methods let mut prefix = &mut prefix; assert!( M::Body::EXPECTS_BODY || message_body.is_empty(), "body provided for message that doesn't take a body" ); // SECURITY: Use _zero constructors to ensure we zero memory to prevent // leaking information from packets previously stored in this buffer. 
let mut header = prefix.take_obj_front_zero::<Header<M>>().expect("too few bytes for ICMP message"); header.prefix.set_msg_type(M::TYPE); header.prefix.code = self.code.into(); header.message = self.msg; let checksum = compute_checksum_fragmented(&header, message_body, self.src_ip, self.dst_ip) .unwrap_or_else(|| { panic!( "total ICMP packet length of {} overflows 32-bit length field of pseudo-header", header.bytes().len() + message_body.len(), ) }); header.prefix.checksum = checksum; } } /// The type of ICMP codes that are unused. /// /// Some ICMP messages do not use codes. In Rust, the `IcmpMessage::Code` type /// associated with these messages is `IcmpUnusedCode`. The only valid numerical /// value for this code is 0. #[derive(Copy, Clone, Debug, Eq, PartialEq)] pub struct IcmpUnusedCode; impl From<IcmpUnusedCode> for u8 { fn from(_: IcmpUnusedCode) -> u8 { 0 } } #[derive(Copy, Clone, Debug, Eq, PartialEq, FromBytes, AsBytes, Unaligned)] #[repr(C)] struct IdAndSeq { id: U16, seq: U16, } impl IdAndSeq { fn new(id: u16, seq: u16) -> IdAndSeq { IdAndSeq { id: U16::new(id), seq: U16::new(seq) } } } #[cfg(test)] mod tests { use packet::ParseBuffer; use super::*; #[test] fn test_partial_parse() { // Test various behaviors of parsing the `IcmpPacketRaw` type. let reference_header = Header { prefix: HeaderPrefix { msg_type: <IcmpEchoRequest as IcmpMessage<Ipv4, &[u8]>>::TYPE.into(), code: 0, checksum: [0, 0], }, message: IcmpEchoRequest::new(1, 1), }; // Test that a too-short header is always rejected even if its contents // are otherwise valid (the checksum here is probably invalid, but we // explicitly check that it's a `Format` error, not a `Checksum` // error). let mut buf = &reference_header.as_bytes()[..7]; assert_eq!( buf.parse::<IcmpPacketRaw<Ipv4, _, IcmpEchoRequest>>().unwrap_err(), ParseError::Format ); // Test that a properly-sized header is rejected if the message type is wrong. let mut header = reference_header; header.prefix.msg_type = <IcmpEchoReply as IcmpMessage<Ipv4, &[u8]>>::TYPE.into(); let mut buf = header.as_bytes(); assert_eq!( buf.parse::<IcmpPacketRaw<Ipv4, _, IcmpEchoRequest>>().unwrap_err(), ParseError::NotExpected ); // Test that an invalid code is accepted. let mut header = reference_header; header.prefix.code = 0xFF; let mut buf = header.as_bytes(); assert!(buf.parse::<IcmpPacketRaw<Ipv4, _, IcmpEchoRequest>>().is_ok()); // Test that an invalid checksum is accepted. Instead of calculating the // correct checksum, we just provide two different checksums. They can't // both be valid. let mut buf = reference_header.as_bytes(); assert!(buf.parse::<IcmpPacketRaw<Ipv4, _, IcmpEchoRequest>>().is_ok()); let mut header = reference_header; header.prefix.checksum = [1, 1]; let mut buf = header.as_bytes(); assert!(buf.parse::<IcmpPacketRaw<Ipv4, _, IcmpEchoRequest>>().is_ok()); } }
{ type Error = ParseError; fn parse_metadata(&self) -> ParseMetadata {
random_line_split
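The checksum logic in compute_checksum_fragmented above folds an IPv6 pseudo-header (source and destination addresses, a 32-bit upper-layer length, three zero bytes, and the next-header value 58 for ICMPv6) into a standard ones'-complement sum over the ICMP header and body. A minimal Python sketch of that calculation, with illustrative names and without the u32-overflow guard the Rust code performs:

import struct

def ones_complement_checksum(data: bytes) -> int:
    # Sum the data as big-endian 16-bit words, fold the carries, and complement.
    if len(data) % 2:
        data += b"\x00"
    total = sum(struct.unpack("!%dH" % (len(data) // 2), data))
    while total >> 16:
        total = (total & 0xFFFF) + (total >> 16)
    return (~total) & 0xFFFF

def icmpv6_checksum(src: bytes, dst: bytes, icmp_bytes: bytes) -> int:
    # icmp_bytes is the ICMPv6 header plus body, with the checksum field zeroed.
    assert len(src) == 16 and len(dst) == 16
    pseudo = (src + dst
              + struct.pack("!I", len(icmp_bytes))  # 32-bit upper-layer length
              + b"\x00\x00\x00" + bytes([58]))      # zero padding + next-header (ICMPv6)
    return ones_complement_checksum(pseudo + icmp_bytes)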
calc_codon_usage.py
# Copyright (C) 2017 William M. Jacobs # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or (at # your option) any later version. # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import argparse, sys, os, random, math, gzip, pickle, os.path from collections import defaultdict from itertools import combinations, combinations_with_replacement import numpy as np import scipy.stats from codons import codon_to_aa, codon_table #read a fasta file def read_fasta(path): seqs = [] keys = [] with open(path, 'r') as f: for line in f: if len(line) > 1 and '#' not in line: if '>' in line: seqs.append('') keys.append(line[1:].strip()) else: seqs[-1] += line.strip() return {keys[i] : seqs[i] for i in range(len(keys))} aa_codons = {aa : [c for c in codon_to_aa if codon_to_aa[c] == aa] \ for aa in codon_to_aa.values() if aa != 'Stop'} #determines whether or not a codon is rare def israre(codon_usage, rare_model, rare_threshold, c): if rare_model == 'no_norm': if codon_usage[c] <= rare_threshold: return True else: return False elif rare_model == 'cmax_norm': if codon_usage[c] / max(codon_usage[cc] for cc in aa_codons[codon_to_aa[c]]) <= rare_threshold: return True else: return False def calc_codon_usage(fasta, abundances=None, output="", rare_model='no_norm', rare_threshold=0.1, max_len_diff=0.2, group_dpercentile=10, wt_gi='gi|556503834|ref|NC_000913.3|', gi_index=None, verbose=False): #read fasta files seqs = {} if isinstance(fasta, str): gene = "".join(os.path.basename(fasta).split(".")[:-1]) seqs[gene] = read_fasta(fasta) elif isinstance(fasta, (list, tuple)): for path in fasta: gene = "".join(os.path.basename(path).split(".")[:-1]) seqs[gene] = read_fasta(path) if verbose: print("Loaded sequences for %d genes" % len(seqs)) gis = sorted(set(gi for gene in seqs for gi in seqs[gene].keys())) #read abundance files try: with open(abundances, 'r') as f: abundances = {line.split()[0] : float(line.split()[1]) for line in f if len(line) > 1 and line[0] != '#'} except Exception as e: abundances = {} ''' if gi_index != None: with open(gi_index, 'r') as f: gi_index = {line.split()[0] : ' '.join(line.split()[1:]) \ for line in f if len(line) > 1 and line[0] != '#'} print("GIs:") for gi in gis: print("%32s: %s" % (gi, gi_index[gi])) ''' #delete the sequences whose length differs from the WT too much nonwt_gis = [gi for gi in gis if gi != wt_gi] for gene in seqs: if wt_gi in seqs[gene]: wtlen = len(seqs[gene][wt_gi]) - seqs[gene][wt_gi].count('-') for gi in nonwt_gis: if gi in seqs[gene]: gilen = len(seqs[gene][gi]) - seqs[gene][gi].count('-') if abs(1. 
- gilen / wtlen) > max_len_diff: del seqs[gene][gi] rerun_flag = False try: # split sequences into deciles based on rare codon usage (calculated from first run) with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'rb') as f: input_relative_usage = pickle.load(f)['overall_codon_usage'] def get_frac_rare(seq): return np.mean([sum(1 if israre(input_relative_usage[gi], rare_model, \ rare_threshold, seq[gi][3*i:3*(i + 1)]) else 0 \ for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \ and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') / \ sum(1 for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \ and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') \ for gi in gis if gi in seq]) frac_rare = {gene : get_frac_rare(seq) for gene,seq in seqs.items() if len(seq) > 0} groups = ['ND'] + [(np.percentile(list(frac_rare.values()), percentile), \ np.percentile(list(frac_rare.values()), percentile \ + group_dpercentile)) \ for percentile in range(0, 100, group_dpercentile)][::-1] def get_gene_group(gene):
gene_group_labels = ['%05.3f:%05.3f' % (groups[i][0], groups[i][1]) \ if i > 0 else 'ND' for i in range(len(groups))] except IOError: #this is the first run, get general usage info rerun_flag = True groups = ['all'] def get_gene_group(gene): return 0 gene_group_labels = ['all'] except KeyError: #code was run in the same output directory, but on a different set of inputs (input_codon_usage.p.gz isn't correct) os.remove(os.path.join(output, 'input_codon_usage.p.gz')) rerun_flag = True groups = ['all'] def get_gene_group(gene): return 0 gene_group_labels = ['all'] gene_groups = {gene : get_gene_group(gene) for gene in seqs} if verbose: print("Gene groups:") for i in range(len(gene_group_labels)): print("%11s: n = %3d" % (gene_group_labels[i], \ sum(1 for gene in seqs if gene_groups[gene] == i))) #compute codon usage computed_codon_usage = {} computed_codon_usage_unw = {} computed_codon_usage_groupw = {} absolute_usage = {} relative_usage = {} relative_usage_unw = {} relative_usage_groupw = {} for gi in gis: computed_codon_usage[gi] = defaultdict(int) computed_codon_usage_unw[gi] = defaultdict(int) computed_codon_usage_groupw[gi] = [defaultdict(int) for i in range(len(groups))] for gene,gene_seqs in seqs.items(): if gi in gene_seqs: seq = gene_seqs[gi] for i in range(len(seq) // 3): c = seq[3*i:3*(i + 1)] if c != '---' and codon_to_aa[c] != 'Stop': if gene in abundances: computed_codon_usage[gi][c] += abundances[gene] else: computed_codon_usage[gi][c] += 1 computed_codon_usage_unw[gi][c] += 1 computed_codon_usage_groupw[gi][gene_groups[gene]][c] += 1 codons_total_gi = sum(computed_codon_usage[gi].values()) absolute_usage[gi] = {c : x / codons_total_gi for c,x in computed_codon_usage[gi].items()} relative_usage[gi] = {} relative_usage_unw[gi] = {} relative_usage_groupw[gi] = {i : {} for i in range(len(groups))} for aa in aa_codons: aa_total_gi = 0 aa_total_unw_gi = 0 for c in list(codon_to_aa): if codon_to_aa[c] == aa: aa_total_gi = aa_total_gi + computed_codon_usage[gi][c] aa_total_unw_gi = aa_total_unw_gi + computed_codon_usage_unw[gi][c] for c in aa_codons[aa]: try: relative_usage[gi][c] = computed_codon_usage[gi][c] / aa_total_gi relative_usage_unw[gi][c] = computed_codon_usage_unw[gi][c] / aa_total_unw_gi except: relative_usage[gi][c] = 1.0/len([c in aa_codons[aa]]) relative_usage_unw[gi][c] = 1.0/len([c in aa_codons[aa]]) for i in range(len(groups)): aa_total_groupw_gi_i = sum(computed_codon_usage_groupw[gi][i][c] for c in aa_codons[aa]) for c in aa_codons[aa]: if aa_total_groupw_gi_i > 0: relative_usage_groupw[gi][i][c] \ = computed_codon_usage_groupw[gi][i][c] / aa_total_groupw_gi_i else: relative_usage_groupw[gi][i][c] = 0 if rerun_flag: #first run through, print general codon usage data if verbose: print("Writing input_codon_usage.p.gz") with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'wb') as f: pickle.dump({'groups' : groups, 'gene_groups' : gene_groups, 'overall_codon_usage' : relative_usage, 'unweighted_codon_usage' : relative_usage_unw, 'gene_group_codon_usage' : relative_usage_groupw}, f) if verbose: print("WARNING: Rerun analysis to compute frac-rare groups") else: #second run through, print group codon usage data codon_list = sorted(c for c in codon_to_aa if codon_to_aa[c] != 'Stop') rare_codons = {} all_rare_codons = defaultdict(int) for gi in gis: rare_codons[gi] = sorted(c for c in codon_list \ if israre(relative_usage[gi], rare_model, \ rare_threshold, c)) for c in rare_codons[gi]: all_rare_codons[c] += 1 if verbose: print("Always common codons:", ' '.join(c 
for c in sorted(codon_list) \ if c not in all_rare_codons)) print("Rare codons:") for c in sorted(all_rare_codons, key=lambda y: (-all_rare_codons[y], y)): print("%s %s %d" % (c, codon_to_aa[c], all_rare_codons[c])) print("Writing rare_codons.dat") with open(os.path.join(output, 'rare_codons.dat'), 'w') as f: for gi in gis: f.write("%s %s\n" % (gi, ','.join("%s:%5.3f" % (c, relative_usage_unw[gi][c]) \ for c in sorted(rare_codons[gi])))) codon_list_aa_sorted = sorted(codon_list, \ key=lambda y: (codon_to_aa[y], \ relative_usage_groupw[wt_gi][len(groups)-1][y])) if verbose: print("Writing codon_usage.dat") with open(os.path.join(output, 'codon_usage.dat'), 'w') as f: f.write("# GI gene_group_index gene_group codon_index " "amino_acid codon israre relative_usage\n") for gi in gis: for c in codon_list_aa_sorted: if c in rare_codons[gi]: israrecodon = 1 else: israrecodon = 0 for i in range(len(gene_group_labels)): f.write("%32s %2d %s %2d %s %s %d %6.4f\n" % \ (gi, i, gene_group_labels[i], codon_list_aa_sorted.index(c), \ codon_to_aa[c], c, israrecodon, relative_usage_groupw[gi][i][c])) f.write("\n") f.write("\n") if verbose: print("Writing codon_usage_wt.dat") with open(os.path.join(output, 'codon_usage_wt.dat'), 'w') as f: f.write("# GI gene_group_index gene_group codon_index " "amino_acid codon israre relative_usage\n") for c in codon_list_aa_sorted: if c in rare_codons[wt_gi]: israrecodon = 1 else: israrecodon = 0 for i in range(len(gene_group_labels)): f.write("%32s %2d %s %2d %s %s %d %6.4f\n" % \ (wt_gi, i, gene_group_labels[i], codon_list_aa_sorted.index(c), \ codon_to_aa[c], c, israrecodon, relative_usage_groupw[wt_gi][i][c])) f.write("\n") if verbose: print("Writing codon_usage.p.gz") with gzip.open(os.path.join(output, 'codon_usage.p.gz'), 'wb') as f: pickle.dump({'groups' : groups, 'gene_groups' : gene_groups, 'overall_codon_usage' : relative_usage, 'unweighted_codon_usage' : relative_usage_unw, 'gene_group_codon_usage' : relative_usage_groupw}, f) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('fasta', type=str, nargs='+', help="path to input MSA fasta file(s)") parser.add_argument('--abundances', default=None, type=str, help="path to protein abundance file") parser.add_argument('--output', type=str, metavar='PATH', default="", help="path to the directory into which the output should be written") parser.add_argument('--rare-model', choices={'no_norm', 'cmax_norm'}, default='no_norm', \ help="normalization mode for defining rare codons ['no_norm']") parser.add_argument('--rare-threshold', type=float, default=0.1, \ help="threshold for codon rarity [0.1]") parser.add_argument('--max-len-diff', type=float, default=0.2, metavar='DIFF', \ help="maximum relative sequence-length difference compared to the WT [0.2]") parser.add_argument('--group-dpercentile', type=int, default=10, metavar='D', \ help="percentile width for gene-group calculations [10]") parser.add_argument('--wt-gi', type=str, default='gi|556503834|ref|NC_000913.3|', \ help="GI for WT sequence") parser.add_argument('--gi-index', type=str, default=None, \ help="path to index of GIs versus subject titles [None]") args = parser.parse_args() calc_codon_usage(args.fasta, args.abundances, args.output, args.rare_model, args.rare_threshold, args.max_len_diff, args.group_dpercentile, args.wt_gi, args.gi_index)
if len(seqs[gene]) == 0: return 0 else: x = get_frac_rare(seqs[gene]) for i in range(1, len(groups)): if x >= groups[i][0] and x <= groups[i][1]: return i
identifier_body
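The israre() helper above supports two rarity rules: no_norm compares a codon's relative usage directly against the threshold, while cmax_norm first divides by the usage of the most common synonymous codon. A self-contained illustration with invented relative-usage values for the six arginine codons (not measured data):

# Made-up relative usages for the arginine codons; they sum to 1.0.
rel_usage = {'CGT': 0.38, 'CGC': 0.36, 'CGG': 0.11, 'CGA': 0.07, 'AGA': 0.05, 'AGG': 0.03}
threshold = 0.1

rare_no_norm = {c for c, u in rel_usage.items() if u <= threshold}
# -> {'CGA', 'AGA', 'AGG'}: relative usage at or below the threshold

cmax = max(rel_usage.values())
rare_cmax_norm = {c for c, u in rel_usage.items() if u / cmax <= threshold}
# -> {'AGG'}: only codons used at most 10% as often as the top synonymous codon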
calc_codon_usage.py
# Copyright (C) 2017 William M. Jacobs # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or (at # your option) any later version. # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import argparse, sys, os, random, math, gzip, pickle, os.path from collections import defaultdict from itertools import combinations, combinations_with_replacement import numpy as np import scipy.stats from codons import codon_to_aa, codon_table #read a fasta file def read_fasta(path): seqs = [] keys = [] with open(path, 'r') as f: for line in f: if len(line) > 1 and '#' not in line: if '>' in line: seqs.append('') keys.append(line[1:].strip()) else: seqs[-1] += line.strip() return {keys[i] : seqs[i] for i in range(len(keys))} aa_codons = {aa : [c for c in codon_to_aa if codon_to_aa[c] == aa] \ for aa in codon_to_aa.values() if aa != 'Stop'} #determines whether or not a codon is rare def israre(codon_usage, rare_model, rare_threshold, c): if rare_model == 'no_norm': if codon_usage[c] <= rare_threshold: return True else: return False elif rare_model == 'cmax_norm': if codon_usage[c] / max(codon_usage[cc] for cc in aa_codons[codon_to_aa[c]]) <= rare_threshold: return True else: return False def calc_codon_usage(fasta, abundances=None, output="", rare_model='no_norm', rare_threshold=0.1, max_len_diff=0.2, group_dpercentile=10, wt_gi='gi|556503834|ref|NC_000913.3|', gi_index=None, verbose=False): #read fasta files seqs = {} if isinstance(fasta, str): gene = "".join(os.path.basename(fasta).split(".")[:-1]) seqs[gene] = read_fasta(fasta) elif isinstance(fasta, (list, tuple)): for path in fasta: gene = "".join(os.path.basename(path).split(".")[:-1]) seqs[gene] = read_fasta(path) if verbose: print("Loaded sequences for %d genes" % len(seqs)) gis = sorted(set(gi for gene in seqs for gi in seqs[gene].keys())) #read abundance files try:
abundances = {line.split()[0] : float(line.split()[1]) for line in f if len(line) > 1 and line[0] != '#'} except Exception as e: abundances = {} ''' if gi_index != None: with open(gi_index, 'r') as f: gi_index = {line.split()[0] : ' '.join(line.split()[1:]) \ for line in f if len(line) > 1 and line[0] != '#'} print("GIs:") for gi in gis: print("%32s: %s" % (gi, gi_index[gi])) ''' #delete the sequences whose length differs from the WT too much nonwt_gis = [gi for gi in gis if gi != wt_gi] for gene in seqs: if wt_gi in seqs[gene]: wtlen = len(seqs[gene][wt_gi]) - seqs[gene][wt_gi].count('-') for gi in nonwt_gis: if gi in seqs[gene]: gilen = len(seqs[gene][gi]) - seqs[gene][gi].count('-') if abs(1. - gilen / wtlen) > max_len_diff: del seqs[gene][gi] rerun_flag = False try: # split sequences into deciles based on rare codon usage (calculated from first run) with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'rb') as f: input_relative_usage = pickle.load(f)['overall_codon_usage'] def get_frac_rare(seq): return np.mean([sum(1 if israre(input_relative_usage[gi], rare_model, \ rare_threshold, seq[gi][3*i:3*(i + 1)]) else 0 \ for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \ and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') / \ sum(1 for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \ and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') \ for gi in gis if gi in seq]) frac_rare = {gene : get_frac_rare(seq) for gene,seq in seqs.items() if len(seq) > 0} groups = ['ND'] + [(np.percentile(list(frac_rare.values()), percentile), \ np.percentile(list(frac_rare.values()), percentile \ + group_dpercentile)) \ for percentile in range(0, 100, group_dpercentile)][::-1] def get_gene_group(gene): if len(seqs[gene]) == 0: return 0 else: x = get_frac_rare(seqs[gene]) for i in range(1, len(groups)): if x >= groups[i][0] and x <= groups[i][1]: return i gene_group_labels = ['%05.3f:%05.3f' % (groups[i][0], groups[i][1]) \ if i > 0 else 'ND' for i in range(len(groups))] except IOError: #this is the first run, get general usage info rerun_flag = True groups = ['all'] def get_gene_group(gene): return 0 gene_group_labels = ['all'] except KeyError: #code was run in the same output directory, but on a different set of inputs (input_codon_usage.p.gz isn't correct) os.remove(os.path.join(output, 'input_codon_usage.p.gz')) rerun_flag = True groups = ['all'] def get_gene_group(gene): return 0 gene_group_labels = ['all'] gene_groups = {gene : get_gene_group(gene) for gene in seqs} if verbose: print("Gene groups:") for i in range(len(gene_group_labels)): print("%11s: n = %3d" % (gene_group_labels[i], \ sum(1 for gene in seqs if gene_groups[gene] == i))) #compute codon usage computed_codon_usage = {} computed_codon_usage_unw = {} computed_codon_usage_groupw = {} absolute_usage = {} relative_usage = {} relative_usage_unw = {} relative_usage_groupw = {} for gi in gis: computed_codon_usage[gi] = defaultdict(int) computed_codon_usage_unw[gi] = defaultdict(int) computed_codon_usage_groupw[gi] = [defaultdict(int) for i in range(len(groups))] for gene,gene_seqs in seqs.items(): if gi in gene_seqs: seq = gene_seqs[gi] for i in range(len(seq) // 3): c = seq[3*i:3*(i + 1)] if c != '---' and codon_to_aa[c] != 'Stop': if gene in abundances: computed_codon_usage[gi][c] += abundances[gene] else: computed_codon_usage[gi][c] += 1 computed_codon_usage_unw[gi][c] += 1 computed_codon_usage_groupw[gi][gene_groups[gene]][c] += 1 codons_total_gi = sum(computed_codon_usage[gi].values()) 
absolute_usage[gi] = {c : x / codons_total_gi for c,x in computed_codon_usage[gi].items()} relative_usage[gi] = {} relative_usage_unw[gi] = {} relative_usage_groupw[gi] = {i : {} for i in range(len(groups))} for aa in aa_codons: aa_total_gi = 0 aa_total_unw_gi = 0 for c in list(codon_to_aa): if codon_to_aa[c] == aa: aa_total_gi = aa_total_gi + computed_codon_usage[gi][c] aa_total_unw_gi = aa_total_unw_gi + computed_codon_usage_unw[gi][c] for c in aa_codons[aa]: try: relative_usage[gi][c] = computed_codon_usage[gi][c] / aa_total_gi relative_usage_unw[gi][c] = computed_codon_usage_unw[gi][c] / aa_total_unw_gi except: relative_usage[gi][c] = 1.0/len([c in aa_codons[aa]]) relative_usage_unw[gi][c] = 1.0/len([c in aa_codons[aa]]) for i in range(len(groups)): aa_total_groupw_gi_i = sum(computed_codon_usage_groupw[gi][i][c] for c in aa_codons[aa]) for c in aa_codons[aa]: if aa_total_groupw_gi_i > 0: relative_usage_groupw[gi][i][c] \ = computed_codon_usage_groupw[gi][i][c] / aa_total_groupw_gi_i else: relative_usage_groupw[gi][i][c] = 0 if rerun_flag: #first run through, print general codon usage data if verbose: print("Writing input_codon_usage.p.gz") with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'wb') as f: pickle.dump({'groups' : groups, 'gene_groups' : gene_groups, 'overall_codon_usage' : relative_usage, 'unweighted_codon_usage' : relative_usage_unw, 'gene_group_codon_usage' : relative_usage_groupw}, f) if verbose: print("WARNING: Rerun analysis to compute frac-rare groups") else: #second run through, print group codon usage data codon_list = sorted(c for c in codon_to_aa if codon_to_aa[c] != 'Stop') rare_codons = {} all_rare_codons = defaultdict(int) for gi in gis: rare_codons[gi] = sorted(c for c in codon_list \ if israre(relative_usage[gi], rare_model, \ rare_threshold, c)) for c in rare_codons[gi]: all_rare_codons[c] += 1 if verbose: print("Always common codons:", ' '.join(c for c in sorted(codon_list) \ if c not in all_rare_codons)) print("Rare codons:") for c in sorted(all_rare_codons, key=lambda y: (-all_rare_codons[y], y)): print("%s %s %d" % (c, codon_to_aa[c], all_rare_codons[c])) print("Writing rare_codons.dat") with open(os.path.join(output, 'rare_codons.dat'), 'w') as f: for gi in gis: f.write("%s %s\n" % (gi, ','.join("%s:%5.3f" % (c, relative_usage_unw[gi][c]) \ for c in sorted(rare_codons[gi])))) codon_list_aa_sorted = sorted(codon_list, \ key=lambda y: (codon_to_aa[y], \ relative_usage_groupw[wt_gi][len(groups)-1][y])) if verbose: print("Writing codon_usage.dat") with open(os.path.join(output, 'codon_usage.dat'), 'w') as f: f.write("# GI gene_group_index gene_group codon_index " "amino_acid codon israre relative_usage\n") for gi in gis: for c in codon_list_aa_sorted: if c in rare_codons[gi]: israrecodon = 1 else: israrecodon = 0 for i in range(len(gene_group_labels)): f.write("%32s %2d %s %2d %s %s %d %6.4f\n" % \ (gi, i, gene_group_labels[i], codon_list_aa_sorted.index(c), \ codon_to_aa[c], c, israrecodon, relative_usage_groupw[gi][i][c])) f.write("\n") f.write("\n") if verbose: print("Writing codon_usage_wt.dat") with open(os.path.join(output, 'codon_usage_wt.dat'), 'w') as f: f.write("# GI gene_group_index gene_group codon_index " "amino_acid codon israre relative_usage\n") for c in codon_list_aa_sorted: if c in rare_codons[wt_gi]: israrecodon = 1 else: israrecodon = 0 for i in range(len(gene_group_labels)): f.write("%32s %2d %s %2d %s %s %d %6.4f\n" % \ (wt_gi, i, gene_group_labels[i], codon_list_aa_sorted.index(c), \ codon_to_aa[c], c, israrecodon, 
relative_usage_groupw[wt_gi][i][c])) f.write("\n") if verbose: print("Writing codon_usage.p.gz") with gzip.open(os.path.join(output, 'codon_usage.p.gz'), 'wb') as f: pickle.dump({'groups' : groups, 'gene_groups' : gene_groups, 'overall_codon_usage' : relative_usage, 'unweighted_codon_usage' : relative_usage_unw, 'gene_group_codon_usage' : relative_usage_groupw}, f) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('fasta', type=str, nargs='+', help="path to input MSA fasta file(s)") parser.add_argument('--abundances', default=None, type=str, help="path to protein abundance file") parser.add_argument('--output', type=str, metavar='PATH', default="", help="path to the directory into which the output should be written") parser.add_argument('--rare-model', choices={'no_norm', 'cmax_norm'}, default='no_norm', \ help="normalization mode for defining rare codons ['no_norm']") parser.add_argument('--rare-threshold', type=float, default=0.1, \ help="threshold for codon rarity [0.1]") parser.add_argument('--max-len-diff', type=float, default=0.2, metavar='DIFF', \ help="maximum relative sequence-length difference compared to the WT [0.2]") parser.add_argument('--group-dpercentile', type=int, default=10, metavar='D', \ help="percentile width for gene-group calculations [10]") parser.add_argument('--wt-gi', type=str, default='gi|556503834|ref|NC_000913.3|', \ help="GI for WT sequence") parser.add_argument('--gi-index', type=str, default=None, \ help="path to index of GIs versus subject titles [None]") args = parser.parse_args() calc_codon_usage(args.fasta, args.abundances, args.output, args.rare_model, args.rare_threshold, args.max_len_diff, args.group_dpercentile, args.wt_gi, args.gi_index)
with open(abundances, 'r') as f:
random_line_split
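The usage loops above normalise raw codon counts (optionally weighted by protein abundance) within each amino acid's set of synonymous codons, and fall back to a uniform distribution when an amino acid never appears. A minimal sketch of that normalisation with invented counts and a single amino acid:

from collections import defaultdict

counts = defaultdict(int, {'GAA': 120, 'GAG': 40})   # invented glutamate codon counts
synonymous = {'Glu': ['GAA', 'GAG']}

relative = {}
for aa, codons in synonymous.items():
    total = sum(counts[c] for c in codons)
    for c in codons:
        # uniform fallback if the amino acid was never observed in the sequences
        relative[c] = counts[c] / total if total else 1.0 / len(codons)
# relative == {'GAA': 0.75, 'GAG': 0.25}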
calc_codon_usage.py
# Copyright (C) 2017 William M. Jacobs # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or (at # your option) any later version. # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import argparse, sys, os, random, math, gzip, pickle, os.path from collections import defaultdict from itertools import combinations, combinations_with_replacement import numpy as np import scipy.stats from codons import codon_to_aa, codon_table #read a fasta file def read_fasta(path): seqs = [] keys = [] with open(path, 'r') as f: for line in f: if len(line) > 1 and '#' not in line: if '>' in line: seqs.append('') keys.append(line[1:].strip()) else: seqs[-1] += line.strip() return {keys[i] : seqs[i] for i in range(len(keys))} aa_codons = {aa : [c for c in codon_to_aa if codon_to_aa[c] == aa] \ for aa in codon_to_aa.values() if aa != 'Stop'} #determines whether or not a codon is rare def israre(codon_usage, rare_model, rare_threshold, c): if rare_model == 'no_norm': if codon_usage[c] <= rare_threshold: return True else:
elif rare_model == 'cmax_norm': if codon_usage[c] / max(codon_usage[cc] for cc in aa_codons[codon_to_aa[c]]) <= rare_threshold: return True else: return False def calc_codon_usage(fasta, abundances=None, output="", rare_model='no_norm', rare_threshold=0.1, max_len_diff=0.2, group_dpercentile=10, wt_gi='gi|556503834|ref|NC_000913.3|', gi_index=None, verbose=False): #read fasta files seqs = {} if isinstance(fasta, str): gene = "".join(os.path.basename(fasta).split(".")[:-1]) seqs[gene] = read_fasta(fasta) elif isinstance(fasta, (list, tuple)): for path in fasta: gene = "".join(os.path.basename(path).split(".")[:-1]) seqs[gene] = read_fasta(path) if verbose: print("Loaded sequences for %d genes" % len(seqs)) gis = sorted(set(gi for gene in seqs for gi in seqs[gene].keys())) #read abundance files try: with open(abundances, 'r') as f: abundances = {line.split()[0] : float(line.split()[1]) for line in f if len(line) > 1 and line[0] != '#'} except Exception as e: abundances = {} ''' if gi_index != None: with open(gi_index, 'r') as f: gi_index = {line.split()[0] : ' '.join(line.split()[1:]) \ for line in f if len(line) > 1 and line[0] != '#'} print("GIs:") for gi in gis: print("%32s: %s" % (gi, gi_index[gi])) ''' #delete the sequences whose length differs from the WT too much nonwt_gis = [gi for gi in gis if gi != wt_gi] for gene in seqs: if wt_gi in seqs[gene]: wtlen = len(seqs[gene][wt_gi]) - seqs[gene][wt_gi].count('-') for gi in nonwt_gis: if gi in seqs[gene]: gilen = len(seqs[gene][gi]) - seqs[gene][gi].count('-') if abs(1. - gilen / wtlen) > max_len_diff: del seqs[gene][gi] rerun_flag = False try: # split sequences into deciles based on rare codon usage (calculated from first run) with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'rb') as f: input_relative_usage = pickle.load(f)['overall_codon_usage'] def get_frac_rare(seq): return np.mean([sum(1 if israre(input_relative_usage[gi], rare_model, \ rare_threshold, seq[gi][3*i:3*(i + 1)]) else 0 \ for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \ and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') / \ sum(1 for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \ and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') \ for gi in gis if gi in seq]) frac_rare = {gene : get_frac_rare(seq) for gene,seq in seqs.items() if len(seq) > 0} groups = ['ND'] + [(np.percentile(list(frac_rare.values()), percentile), \ np.percentile(list(frac_rare.values()), percentile \ + group_dpercentile)) \ for percentile in range(0, 100, group_dpercentile)][::-1] def get_gene_group(gene): if len(seqs[gene]) == 0: return 0 else: x = get_frac_rare(seqs[gene]) for i in range(1, len(groups)): if x >= groups[i][0] and x <= groups[i][1]: return i gene_group_labels = ['%05.3f:%05.3f' % (groups[i][0], groups[i][1]) \ if i > 0 else 'ND' for i in range(len(groups))] except IOError: #this is the first run, get general usage info rerun_flag = True groups = ['all'] def get_gene_group(gene): return 0 gene_group_labels = ['all'] except KeyError: #code was run in the same output directory, but on a different set of inputs (input_codon_usage.p.gz isn't correct) os.remove(os.path.join(output, 'input_codon_usage.p.gz')) rerun_flag = True groups = ['all'] def get_gene_group(gene): return 0 gene_group_labels = ['all'] gene_groups = {gene : get_gene_group(gene) for gene in seqs} if verbose: print("Gene groups:") for i in range(len(gene_group_labels)): print("%11s: n = %3d" % (gene_group_labels[i], \ sum(1 for gene in seqs if gene_groups[gene] == i))) 
#compute codon usage computed_codon_usage = {} computed_codon_usage_unw = {} computed_codon_usage_groupw = {} absolute_usage = {} relative_usage = {} relative_usage_unw = {} relative_usage_groupw = {} for gi in gis: computed_codon_usage[gi] = defaultdict(int) computed_codon_usage_unw[gi] = defaultdict(int) computed_codon_usage_groupw[gi] = [defaultdict(int) for i in range(len(groups))] for gene,gene_seqs in seqs.items(): if gi in gene_seqs: seq = gene_seqs[gi] for i in range(len(seq) // 3): c = seq[3*i:3*(i + 1)] if c != '---' and codon_to_aa[c] != 'Stop': if gene in abundances: computed_codon_usage[gi][c] += abundances[gene] else: computed_codon_usage[gi][c] += 1 computed_codon_usage_unw[gi][c] += 1 computed_codon_usage_groupw[gi][gene_groups[gene]][c] += 1 codons_total_gi = sum(computed_codon_usage[gi].values()) absolute_usage[gi] = {c : x / codons_total_gi for c,x in computed_codon_usage[gi].items()} relative_usage[gi] = {} relative_usage_unw[gi] = {} relative_usage_groupw[gi] = {i : {} for i in range(len(groups))} for aa in aa_codons: aa_total_gi = 0 aa_total_unw_gi = 0 for c in list(codon_to_aa): if codon_to_aa[c] == aa: aa_total_gi = aa_total_gi + computed_codon_usage[gi][c] aa_total_unw_gi = aa_total_unw_gi + computed_codon_usage_unw[gi][c] for c in aa_codons[aa]: try: relative_usage[gi][c] = computed_codon_usage[gi][c] / aa_total_gi relative_usage_unw[gi][c] = computed_codon_usage_unw[gi][c] / aa_total_unw_gi except: relative_usage[gi][c] = 1.0/len([c in aa_codons[aa]]) relative_usage_unw[gi][c] = 1.0/len([c in aa_codons[aa]]) for i in range(len(groups)): aa_total_groupw_gi_i = sum(computed_codon_usage_groupw[gi][i][c] for c in aa_codons[aa]) for c in aa_codons[aa]: if aa_total_groupw_gi_i > 0: relative_usage_groupw[gi][i][c] \ = computed_codon_usage_groupw[gi][i][c] / aa_total_groupw_gi_i else: relative_usage_groupw[gi][i][c] = 0 if rerun_flag: #first run through, print general codon usage data if verbose: print("Writing input_codon_usage.p.gz") with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'wb') as f: pickle.dump({'groups' : groups, 'gene_groups' : gene_groups, 'overall_codon_usage' : relative_usage, 'unweighted_codon_usage' : relative_usage_unw, 'gene_group_codon_usage' : relative_usage_groupw}, f) if verbose: print("WARNING: Rerun analysis to compute frac-rare groups") else: #second run through, print group codon usage data codon_list = sorted(c for c in codon_to_aa if codon_to_aa[c] != 'Stop') rare_codons = {} all_rare_codons = defaultdict(int) for gi in gis: rare_codons[gi] = sorted(c for c in codon_list \ if israre(relative_usage[gi], rare_model, \ rare_threshold, c)) for c in rare_codons[gi]: all_rare_codons[c] += 1 if verbose: print("Always common codons:", ' '.join(c for c in sorted(codon_list) \ if c not in all_rare_codons)) print("Rare codons:") for c in sorted(all_rare_codons, key=lambda y: (-all_rare_codons[y], y)): print("%s %s %d" % (c, codon_to_aa[c], all_rare_codons[c])) print("Writing rare_codons.dat") with open(os.path.join(output, 'rare_codons.dat'), 'w') as f: for gi in gis: f.write("%s %s\n" % (gi, ','.join("%s:%5.3f" % (c, relative_usage_unw[gi][c]) \ for c in sorted(rare_codons[gi])))) codon_list_aa_sorted = sorted(codon_list, \ key=lambda y: (codon_to_aa[y], \ relative_usage_groupw[wt_gi][len(groups)-1][y])) if verbose: print("Writing codon_usage.dat") with open(os.path.join(output, 'codon_usage.dat'), 'w') as f: f.write("# GI gene_group_index gene_group codon_index " "amino_acid codon israre relative_usage\n") for gi in gis: for c in 
codon_list_aa_sorted: if c in rare_codons[gi]: israrecodon = 1 else: israrecodon = 0 for i in range(len(gene_group_labels)): f.write("%32s %2d %s %2d %s %s %d %6.4f\n" % \ (gi, i, gene_group_labels[i], codon_list_aa_sorted.index(c), \ codon_to_aa[c], c, israrecodon, relative_usage_groupw[gi][i][c])) f.write("\n") f.write("\n") if verbose: print("Writing codon_usage_wt.dat") with open(os.path.join(output, 'codon_usage_wt.dat'), 'w') as f: f.write("# GI gene_group_index gene_group codon_index " "amino_acid codon israre relative_usage\n") for c in codon_list_aa_sorted: if c in rare_codons[wt_gi]: israrecodon = 1 else: israrecodon = 0 for i in range(len(gene_group_labels)): f.write("%32s %2d %s %2d %s %s %d %6.4f\n" % \ (wt_gi, i, gene_group_labels[i], codon_list_aa_sorted.index(c), \ codon_to_aa[c], c, israrecodon, relative_usage_groupw[wt_gi][i][c])) f.write("\n") if verbose: print("Writing codon_usage.p.gz") with gzip.open(os.path.join(output, 'codon_usage.p.gz'), 'wb') as f: pickle.dump({'groups' : groups, 'gene_groups' : gene_groups, 'overall_codon_usage' : relative_usage, 'unweighted_codon_usage' : relative_usage_unw, 'gene_group_codon_usage' : relative_usage_groupw}, f) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('fasta', type=str, nargs='+', help="path to input MSA fasta file(s)") parser.add_argument('--abundances', default=None, type=str, help="path to protein abundance file") parser.add_argument('--output', type=str, metavar='PATH', default="", help="path to the directory into which the output should be written") parser.add_argument('--rare-model', choices={'no_norm', 'cmax_norm'}, default='no_norm', \ help="normalization mode for defining rare codons ['no_norm']") parser.add_argument('--rare-threshold', type=float, default=0.1, \ help="threshold for codon rarity [0.1]") parser.add_argument('--max-len-diff', type=float, default=0.2, metavar='DIFF', \ help="maximum relative sequence-length difference compared to the WT [0.2]") parser.add_argument('--group-dpercentile', type=int, default=10, metavar='D', \ help="percentile width for gene-group calculations [10]") parser.add_argument('--wt-gi', type=str, default='gi|556503834|ref|NC_000913.3|', \ help="GI for WT sequence") parser.add_argument('--gi-index', type=str, default=None, \ help="path to index of GIs versus subject titles [None]") args = parser.parse_args() calc_codon_usage(args.fasta, args.abundances, args.output, args.rare_model, args.rare_threshold, args.max_len_diff, args.group_dpercentile, args.wt_gi, args.gi_index)
return False
conditional_block
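On the second pass, each gene receives a fraction-of-rare-codons score and genes are binned between consecutive percentiles of that score. A short sketch of the binning with invented scores; group_dpercentile mirrors the script's default of 10:

import numpy as np

frac_rare = {'geneA': 0.02, 'geneB': 0.05, 'geneC': 0.08, 'geneD': 0.11, 'geneE': 0.14}
group_dpercentile = 10

values = list(frac_rare.values())
edges = [(np.percentile(values, p), np.percentile(values, p + group_dpercentile))
         for p in range(0, 100, group_dpercentile)]
# A gene falls in group i when edges[i][0] <= frac_rare[gene] <= edges[i][1];
# the script stores these edges in reverse order and prepends an 'ND' group for
# genes with no usable sequences.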
calc_codon_usage.py
# Copyright (C) 2017 William M. Jacobs # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or (at # your option) any later version. # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import argparse, sys, os, random, math, gzip, pickle, os.path from collections import defaultdict from itertools import combinations, combinations_with_replacement import numpy as np import scipy.stats from codons import codon_to_aa, codon_table #read a fasta file def read_fasta(path): seqs = [] keys = [] with open(path, 'r') as f: for line in f: if len(line) > 1 and '#' not in line: if '>' in line: seqs.append('') keys.append(line[1:].strip()) else: seqs[-1] += line.strip() return {keys[i] : seqs[i] for i in range(len(keys))} aa_codons = {aa : [c for c in codon_to_aa if codon_to_aa[c] == aa] \ for aa in codon_to_aa.values() if aa != 'Stop'} #determines whether or not a codon is rare def
(codon_usage, rare_model, rare_threshold, c): if rare_model == 'no_norm': if codon_usage[c] <= rare_threshold: return True else: return False elif rare_model == 'cmax_norm': if codon_usage[c] / max(codon_usage[cc] for cc in aa_codons[codon_to_aa[c]]) <= rare_threshold: return True else: return False def calc_codon_usage(fasta, abundances=None, output="", rare_model='no_norm', rare_threshold=0.1, max_len_diff=0.2, group_dpercentile=10, wt_gi='gi|556503834|ref|NC_000913.3|', gi_index=None, verbose=False): #read fasta files seqs = {} if isinstance(fasta, str): gene = "".join(os.path.basename(fasta).split(".")[:-1]) seqs[gene] = read_fasta(fasta) elif isinstance(fasta, (list, tuple)): for path in fasta: gene = "".join(os.path.basename(path).split(".")[:-1]) seqs[gene] = read_fasta(path) if verbose: print("Loaded sequences for %d genes" % len(seqs)) gis = sorted(set(gi for gene in seqs for gi in seqs[gene].keys())) #read abundance files try: with open(abundances, 'r') as f: abundances = {line.split()[0] : float(line.split()[1]) for line in f if len(line) > 1 and line[0] != '#'} except Exception as e: abundances = {} ''' if gi_index != None: with open(gi_index, 'r') as f: gi_index = {line.split()[0] : ' '.join(line.split()[1:]) \ for line in f if len(line) > 1 and line[0] != '#'} print("GIs:") for gi in gis: print("%32s: %s" % (gi, gi_index[gi])) ''' #delete the sequences whose length differs from the WT too much nonwt_gis = [gi for gi in gis if gi != wt_gi] for gene in seqs: if wt_gi in seqs[gene]: wtlen = len(seqs[gene][wt_gi]) - seqs[gene][wt_gi].count('-') for gi in nonwt_gis: if gi in seqs[gene]: gilen = len(seqs[gene][gi]) - seqs[gene][gi].count('-') if abs(1. - gilen / wtlen) > max_len_diff: del seqs[gene][gi] rerun_flag = False try: # split sequences into deciles based on rare codon usage (calculated from first run) with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'rb') as f: input_relative_usage = pickle.load(f)['overall_codon_usage'] def get_frac_rare(seq): return np.mean([sum(1 if israre(input_relative_usage[gi], rare_model, \ rare_threshold, seq[gi][3*i:3*(i + 1)]) else 0 \ for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \ and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') / \ sum(1 for i in range(len(seq[gi]) // 3) if seq[gi][3*i:3*(i + 1)] != '---' \ and codon_to_aa[seq[gi][3*i:3*(i + 1)]] != 'Stop') \ for gi in gis if gi in seq]) frac_rare = {gene : get_frac_rare(seq) for gene,seq in seqs.items() if len(seq) > 0} groups = ['ND'] + [(np.percentile(list(frac_rare.values()), percentile), \ np.percentile(list(frac_rare.values()), percentile \ + group_dpercentile)) \ for percentile in range(0, 100, group_dpercentile)][::-1] def get_gene_group(gene): if len(seqs[gene]) == 0: return 0 else: x = get_frac_rare(seqs[gene]) for i in range(1, len(groups)): if x >= groups[i][0] and x <= groups[i][1]: return i gene_group_labels = ['%05.3f:%05.3f' % (groups[i][0], groups[i][1]) \ if i > 0 else 'ND' for i in range(len(groups))] except IOError: #this is the first run, get general usage info rerun_flag = True groups = ['all'] def get_gene_group(gene): return 0 gene_group_labels = ['all'] except KeyError: #code was run in the same output directory, but on a different set of inputs (input_codon_usage.p.gz isn't correct) os.remove(os.path.join(output, 'input_codon_usage.p.gz')) rerun_flag = True groups = ['all'] def get_gene_group(gene): return 0 gene_group_labels = ['all'] gene_groups = {gene : get_gene_group(gene) for gene in seqs} if verbose: print("Gene groups:") 
for i in range(len(gene_group_labels)): print("%11s: n = %3d" % (gene_group_labels[i], \ sum(1 for gene in seqs if gene_groups[gene] == i))) #compute codon usage computed_codon_usage = {} computed_codon_usage_unw = {} computed_codon_usage_groupw = {} absolute_usage = {} relative_usage = {} relative_usage_unw = {} relative_usage_groupw = {} for gi in gis: computed_codon_usage[gi] = defaultdict(int) computed_codon_usage_unw[gi] = defaultdict(int) computed_codon_usage_groupw[gi] = [defaultdict(int) for i in range(len(groups))] for gene,gene_seqs in seqs.items(): if gi in gene_seqs: seq = gene_seqs[gi] for i in range(len(seq) // 3): c = seq[3*i:3*(i + 1)] if c != '---' and codon_to_aa[c] != 'Stop': if gene in abundances: computed_codon_usage[gi][c] += abundances[gene] else: computed_codon_usage[gi][c] += 1 computed_codon_usage_unw[gi][c] += 1 computed_codon_usage_groupw[gi][gene_groups[gene]][c] += 1 codons_total_gi = sum(computed_codon_usage[gi].values()) absolute_usage[gi] = {c : x / codons_total_gi for c,x in computed_codon_usage[gi].items()} relative_usage[gi] = {} relative_usage_unw[gi] = {} relative_usage_groupw[gi] = {i : {} for i in range(len(groups))} for aa in aa_codons: aa_total_gi = 0 aa_total_unw_gi = 0 for c in list(codon_to_aa): if codon_to_aa[c] == aa: aa_total_gi = aa_total_gi + computed_codon_usage[gi][c] aa_total_unw_gi = aa_total_unw_gi + computed_codon_usage_unw[gi][c] for c in aa_codons[aa]: try: relative_usage[gi][c] = computed_codon_usage[gi][c] / aa_total_gi relative_usage_unw[gi][c] = computed_codon_usage_unw[gi][c] / aa_total_unw_gi except: relative_usage[gi][c] = 1.0/len([c in aa_codons[aa]]) relative_usage_unw[gi][c] = 1.0/len([c in aa_codons[aa]]) for i in range(len(groups)): aa_total_groupw_gi_i = sum(computed_codon_usage_groupw[gi][i][c] for c in aa_codons[aa]) for c in aa_codons[aa]: if aa_total_groupw_gi_i > 0: relative_usage_groupw[gi][i][c] \ = computed_codon_usage_groupw[gi][i][c] / aa_total_groupw_gi_i else: relative_usage_groupw[gi][i][c] = 0 if rerun_flag: #first run through, print general codon usage data if verbose: print("Writing input_codon_usage.p.gz") with gzip.open(os.path.join(output, 'input_codon_usage.p.gz'), 'wb') as f: pickle.dump({'groups' : groups, 'gene_groups' : gene_groups, 'overall_codon_usage' : relative_usage, 'unweighted_codon_usage' : relative_usage_unw, 'gene_group_codon_usage' : relative_usage_groupw}, f) if verbose: print("WARNING: Rerun analysis to compute frac-rare groups") else: #second run through, print group codon usage data codon_list = sorted(c for c in codon_to_aa if codon_to_aa[c] != 'Stop') rare_codons = {} all_rare_codons = defaultdict(int) for gi in gis: rare_codons[gi] = sorted(c for c in codon_list \ if israre(relative_usage[gi], rare_model, \ rare_threshold, c)) for c in rare_codons[gi]: all_rare_codons[c] += 1 if verbose: print("Always common codons:", ' '.join(c for c in sorted(codon_list) \ if c not in all_rare_codons)) print("Rare codons:") for c in sorted(all_rare_codons, key=lambda y: (-all_rare_codons[y], y)): print("%s %s %d" % (c, codon_to_aa[c], all_rare_codons[c])) print("Writing rare_codons.dat") with open(os.path.join(output, 'rare_codons.dat'), 'w') as f: for gi in gis: f.write("%s %s\n" % (gi, ','.join("%s:%5.3f" % (c, relative_usage_unw[gi][c]) \ for c in sorted(rare_codons[gi])))) codon_list_aa_sorted = sorted(codon_list, \ key=lambda y: (codon_to_aa[y], \ relative_usage_groupw[wt_gi][len(groups)-1][y])) if verbose: print("Writing codon_usage.dat") with open(os.path.join(output, 
'codon_usage.dat'), 'w') as f: f.write("# GI gene_group_index gene_group codon_index " "amino_acid codon israre relative_usage\n") for gi in gis: for c in codon_list_aa_sorted: if c in rare_codons[gi]: israrecodon = 1 else: israrecodon = 0 for i in range(len(gene_group_labels)): f.write("%32s %2d %s %2d %s %s %d %6.4f\n" % \ (gi, i, gene_group_labels[i], codon_list_aa_sorted.index(c), \ codon_to_aa[c], c, israrecodon, relative_usage_groupw[gi][i][c])) f.write("\n") f.write("\n") if verbose: print("Writing codon_usage_wt.dat") with open(os.path.join(output, 'codon_usage_wt.dat'), 'w') as f: f.write("# GI gene_group_index gene_group codon_index " "amino_acid codon israre relative_usage\n") for c in codon_list_aa_sorted: if c in rare_codons[wt_gi]: israrecodon = 1 else: israrecodon = 0 for i in range(len(gene_group_labels)): f.write("%32s %2d %s %2d %s %s %d %6.4f\n" % \ (wt_gi, i, gene_group_labels[i], codon_list_aa_sorted.index(c), \ codon_to_aa[c], c, israrecodon, relative_usage_groupw[wt_gi][i][c])) f.write("\n") if verbose: print("Writing codon_usage.p.gz") with gzip.open(os.path.join(output, 'codon_usage.p.gz'), 'wb') as f: pickle.dump({'groups' : groups, 'gene_groups' : gene_groups, 'overall_codon_usage' : relative_usage, 'unweighted_codon_usage' : relative_usage_unw, 'gene_group_codon_usage' : relative_usage_groupw}, f) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('fasta', type=str, nargs='+', help="path to input MSA fasta file(s)") parser.add_argument('--abundances', default=None, type=str, help="path to protein abundance file") parser.add_argument('--output', type=str, metavar='PATH', default="", help="path to the directory into which the output should be written") parser.add_argument('--rare-model', choices={'no_norm', 'cmax_norm'}, default='no_norm', \ help="normalization mode for defining rare codons ['no_norm']") parser.add_argument('--rare-threshold', type=float, default=0.1, \ help="threshold for codon rarity [0.1]") parser.add_argument('--max-len-diff', type=float, default=0.2, metavar='DIFF', \ help="maximum relative sequence-length difference compared to the WT [0.2]") parser.add_argument('--group-dpercentile', type=int, default=10, metavar='D', \ help="percentile width for gene-group calculations [10]") parser.add_argument('--wt-gi', type=str, default='gi|556503834|ref|NC_000913.3|', \ help="GI for WT sequence") parser.add_argument('--gi-index', type=str, default=None, \ help="path to index of GIs versus subject titles [None]") args = parser.parse_args() calc_codon_usage(args.fasta, args.abundances, args.output, args.rare_model, args.rare_threshold, args.max_len_diff, args.group_dpercentile, args.wt_gi, args.gi_index)
israre
identifier_name
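Because the gene grouping depends on input_codon_usage.p.gz, which is only written on the first pass, the analysis above has to be run twice over the same inputs. A hedged usage sketch, assuming the module is importable as calc_codon_usage and using placeholder file names:

import os
from calc_codon_usage import calc_codon_usage   # assumes the module name matches the file

fasta_files = ['geneA.fasta', 'geneB.fasta']     # hypothetical MSA fasta paths
os.makedirs('results', exist_ok=True)            # the output directory must exist

calc_codon_usage(fasta_files, output='results', verbose=True)  # pass 1: writes input_codon_usage.p.gz
calc_codon_usage(fasta_files, output='results', verbose=True)  # pass 2: writes grouped codon_usage outputs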
remote_cache.rs
use std::collections::{BTreeMap, HashSet, VecDeque}; use std::ffi::OsString; use std::path::Component; use std::sync::Arc; use std::time::Instant; use async_trait::async_trait; use bazel_protos::gen::build::bazel::remote::execution::v2 as remexec; use bazel_protos::require_digest; use fs::RelativePath; use futures::FutureExt; use grpc_util::headers_to_interceptor_fn; use grpc_util::status_to_str; use hashing::Digest; use parking_lot::Mutex; use remexec::action_cache_client::ActionCacheClient; use remexec::{ActionResult, Command, FileNode, Tree}; use store::Store; use tonic::transport::Channel; use workunit_store::{with_workunit, Level, Metric, ObservationMetric, WorkunitMetadata}; use crate::remote::make_execute_request; use crate::{ Context, FallibleProcessResultWithPlatform, MultiPlatformProcess, Platform, Process, ProcessMetadata, RemoteCacheWarningsBehavior, }; /// This `CommandRunner` implementation caches results remotely using the Action Cache service /// of the Remote Execution API. /// /// This runner expects to sit between the local cache CommandRunner and the CommandRunner /// that is actually executing the Process. Thus, the local cache will be checked first, /// then the remote cache, and then execution (local or remote) as necessary if neither cache /// has a hit. On the way back out of the stack, the result will be stored remotely and /// then locally. #[derive(Clone)] pub struct CommandRunner { underlying: Arc<dyn crate::CommandRunner>, metadata: ProcessMetadata, executor: task_executor::Executor, store: Store, action_cache_client: Arc<ActionCacheClient<Channel>>, headers: BTreeMap<String, String>, platform: Platform, cache_read: bool, cache_write: bool, eager_fetch: bool, warnings_behavior: RemoteCacheWarningsBehavior, read_errors_counter: Arc<Mutex<BTreeMap<String, usize>>>, write_errors_counter: Arc<Mutex<BTreeMap<String, usize>>>, } impl CommandRunner { pub fn new( underlying: Arc<dyn crate::CommandRunner>, metadata: ProcessMetadata, executor: task_executor::Executor, store: Store, action_cache_address: &str, root_ca_certs: Option<Vec<u8>>, headers: BTreeMap<String, String>, platform: Platform, cache_read: bool, cache_write: bool, warnings_behavior: RemoteCacheWarningsBehavior, eager_fetch: bool, ) -> Result<Self, String> { let tls_client_config = if action_cache_address.starts_with("https://") { Some(grpc_util::create_tls_config(root_ca_certs)?) } else { None }; let endpoint = grpc_util::create_endpoint(&action_cache_address, tls_client_config.as_ref())?; let channel = tonic::transport::Channel::balance_list(vec![endpoint].into_iter()); let action_cache_client = Arc::new(if headers.is_empty() { ActionCacheClient::new(channel) } else { ActionCacheClient::with_interceptor(channel, headers_to_interceptor_fn(&headers)?) }); Ok(CommandRunner { underlying, metadata, executor, store, action_cache_client, headers, platform, cache_read, cache_write, eager_fetch, warnings_behavior, read_errors_counter: Arc::new(Mutex::new(BTreeMap::new())), write_errors_counter: Arc::new(Mutex::new(BTreeMap::new())), }) } /// Create a REAPI `Tree` protobuf for an output directory by traversing down from a Pants /// merged final output directory to find the specific path to extract. (REAPI requires /// output directories to be stored as `Tree` protos that contain all of the `Directory` /// protos that constitute the directory tree.) /// /// Note that the Tree does not include the directory_path as a prefix, per REAPI. This path /// gets stored on the OutputDirectory proto. 
/// /// If the output directory does not exist, then returns Ok(None). pub(crate) async fn make_tree_for_output_directory( root_directory_digest: Digest, directory_path: RelativePath, store: &Store, ) -> Result<Option<Tree>, String> { // Traverse down from the root directory digest to find the directory digest for // the output directory. let mut current_directory_digest = root_directory_digest; for next_path_component in directory_path.as_ref().components() { let next_name = match next_path_component { Component::Normal(name) => name .to_str() .ok_or_else(|| format!("unable to convert '{:?}' to string", name))?, _ => return Ok(None), }; // Load the Directory proto corresponding to `current_directory_digest`. let current_directory = match store.load_directory(current_directory_digest).await? { Some((dir, _)) => dir, None => { return Err(format!( "Directory digest {:?} was referenced in output, but was not found in store.", current_directory_digest )) } }; // Scan the current directory for the current path component. let dir_node = match current_directory .directories .iter() .find(|dn| dn.name == next_name) { Some(dn) => dn, None => return Ok(None), }; // Set the current directory digest to be the digest in the DirectoryNode just found. // If there are more path components, then the search will continue there. // Otherwise, if this loop ends then the final Directory digest has been found. current_directory_digest = require_digest(dir_node.digest.as_ref())?; } // At this point, `current_directory_digest` holds the digest of the output directory. // This will be the root of the Tree. Add it to a queue of digests to traverse. let mut tree = Tree::default(); let mut digest_queue = VecDeque::new(); digest_queue.push_back(current_directory_digest); while let Some(directory_digest) = digest_queue.pop_front() { let directory = match store.load_directory(directory_digest).await? { Some((dir, _)) => dir, None => { return Err(format!( "illegal state: directory for digest {:?} did not exist locally", &current_directory_digest )) } }; // Add all of the digests for subdirectories into the queue so they are processed // in future iterations of the loop. for subdirectory_node in &directory.directories { let subdirectory_digest = require_digest(subdirectory_node.digest.as_ref())?; digest_queue.push_back(subdirectory_digest); } // Store this directory either as the `root` or one of the `children` if not the root. if directory_digest == current_directory_digest
else { tree.children.push(directory) } } Ok(Some(tree)) } pub(crate) async fn extract_output_file( root_directory_digest: Digest, file_path: RelativePath, store: &Store, ) -> Result<Option<FileNode>, String> { // Traverse down from the root directory digest to find the directory digest for // the output directory. let mut current_directory_digest = root_directory_digest; let parent_path = file_path.as_ref().parent(); let components_opt = parent_path.map(|x| x.components()); if let Some(components) = components_opt { for next_path_component in components { let next_name = match next_path_component { Component::Normal(name) => name .to_str() .ok_or_else(|| format!("unable to convert '{:?}' to string", name))?, _ => return Ok(None), }; // Load the Directory proto corresponding to `current_directory_digest`. let current_directory = match store.load_directory(current_directory_digest).await? { Some((dir, _)) => dir, None => { return Err(format!( "Directory digest {:?} was referenced in output, but was not found in store.", current_directory_digest )) } }; // Scan the current directory for the current path component. let dir_node = match current_directory .directories .iter() .find(|dn| dn.name == next_name) { Some(dn) => dn, None => return Ok(None), }; // Set the current directory digest to be the digest in the DirectoryNode just found. // If there are more path components, then the search will continue there. // Otherwise, if this loop ends then the final Directory digest has been found. current_directory_digest = require_digest(dir_node.digest.as_ref())?; } } // Load the final directory. let directory = match store.load_directory(current_directory_digest).await? { Some((dir, _)) => dir, None => return Ok(None), }; // Search for the file. let file_base_name = file_path.as_ref().file_name().unwrap(); Ok( directory .files .iter() .find(|node| { let name = OsString::from(&node.name); name == file_base_name }) .cloned(), ) } /// Converts a REAPI `Command` and a `FallibleProcessResultWithPlatform` produced from executing /// that Command into a REAPI `ActionResult` suitable for upload to the REAPI Action Cache. /// /// This function also returns a vector of all `Digest`s referenced directly and indirectly by /// the `ActionResult` suitable for passing to `Store::ensure_remote_has_recursive`. (The /// digests may include both File and Tree digests.) pub(crate) async fn make_action_result( &self, command: &Command, result: &FallibleProcessResultWithPlatform, store: &Store, ) -> Result<(ActionResult, Vec<Digest>), String> { // Keep track of digests that need to be uploaded. let mut digests = HashSet::new(); let mut action_result = ActionResult { exit_code: result.exit_code, stdout_digest: Some(result.stdout_digest.into()), stderr_digest: Some(result.stderr_digest.into()), execution_metadata: Some(result.metadata.clone().into()), ..ActionResult::default() }; digests.insert(result.stdout_digest); digests.insert(result.stderr_digest); for output_directory in &command.output_directories { let tree = match Self::make_tree_for_output_directory( result.output_directory, RelativePath::new(output_directory).unwrap(), store, ) .await? 
{ Some(t) => t, None => continue, }; let tree_digest = crate::remote::store_proto_locally(&self.store, &tree).await?; digests.insert(tree_digest); action_result .output_directories .push(remexec::OutputDirectory { path: output_directory.to_owned(), tree_digest: Some(tree_digest.into()), }); } for output_file in &command.output_files { let file_node = match Self::extract_output_file( result.output_directory, RelativePath::new(output_file).unwrap(), store, ) .await? { Some(node) => node, None => continue, }; let digest = require_digest(file_node.digest.as_ref())?; digests.insert(digest); action_result.output_files.push({ remexec::OutputFile { digest: Some(digest.into()), path: output_file.to_owned(), is_executable: file_node.is_executable, ..remexec::OutputFile::default() } }) } Ok((action_result, digests.into_iter().collect::<Vec<_>>())) } /// Stores an execution result into the remote Action Cache. async fn update_action_cache( &self, context: &Context, request: &Process, result: &FallibleProcessResultWithPlatform, metadata: &ProcessMetadata, command: &Command, action_digest: Digest, command_digest: Digest, ) -> Result<(), String> { // Upload the action (and related data, i.e. the embedded command and input files). // Assumption: The Action and related data has already been stored locally. with_workunit( context.workunit_store.clone(), "ensure_action_uploaded".to_owned(), WorkunitMetadata { level: Level::Trace, desc: Some(format!("ensure action uploaded for {:?}", action_digest)), ..WorkunitMetadata::default() }, crate::remote::ensure_action_uploaded( &self.store, command_digest, action_digest, request.input_files, ), |_, md| md, ) .await?; // Create an ActionResult from the process result. let (action_result, digests_for_action_result) = self .make_action_result(command, result, &self.store) .await?; // Ensure that all digests referenced by directly and indirectly by the ActionResult // have been uploaded to the remote cache. 
self .store .ensure_remote_has_recursive(digests_for_action_result) .await?; let update_action_cache_request = remexec::UpdateActionResultRequest { instance_name: metadata .instance_name .as_ref() .cloned() .unwrap_or_else(|| "".to_owned()), action_digest: Some(action_digest.into()), action_result: Some(action_result), ..remexec::UpdateActionResultRequest::default() }; let mut client = self.action_cache_client.as_ref().clone(); client .update_action_result(update_action_cache_request) .await .map_err(status_to_str)?; Ok(()) } fn log_cache_error(&self, err: String, err_type: CacheErrorType) { let err_count = { let mut errors_counter = match err_type { CacheErrorType::ReadError => self.read_errors_counter.lock(), CacheErrorType::WriteError => self.write_errors_counter.lock(), }; let count = errors_counter.entry(err.clone()).or_insert(0); *count += 1; *count }; let failure_desc = match err_type { CacheErrorType::ReadError => "read from", CacheErrorType::WriteError => "write to", }; let log_msg = format!( "Failed to {} remote cache ({} occurrences so far): {}", failure_desc, err_count, err ); let log_at_warn = match self.warnings_behavior { RemoteCacheWarningsBehavior::Ignore => false, RemoteCacheWarningsBehavior::FirstOnly => err_count == 1, RemoteCacheWarningsBehavior::Backoff => err_count.is_power_of_two(), }; if log_at_warn { log::warn!("{}", log_msg); } else { log::debug!("{}", log_msg); } } } enum CacheErrorType { ReadError, WriteError, } #[async_trait] impl crate::CommandRunner for CommandRunner { async fn run( &self, req: MultiPlatformProcess, context: Context, ) -> Result<FallibleProcessResultWithPlatform, String> { let cache_lookup_start = Instant::now(); // Construct the REv2 ExecuteRequest and related data for this execution request. let request = self .extract_compatible_request(&req) .ok_or_else(|| "No compatible Process found for checking remote cache.".to_owned())?; let (action, command, _execute_request) = make_execute_request(&request, self.metadata.clone())?; // Ensure the action and command are stored locally. let (command_digest, action_digest) = with_workunit( context.workunit_store.clone(), "ensure_action_stored_locally".to_owned(), WorkunitMetadata { level: Level::Trace, desc: Some(format!("ensure action stored locally for {:?}", action)), ..WorkunitMetadata::default() }, crate::remote::ensure_action_stored_locally(&self.store, &command, &action), |_, md| md, ) .await?; let mut local_execution_future = self.underlying.run(req, context.clone()); let result = if self.cache_read { // A future to read from the cache and log the results accordingly. let cache_read_future = async { let response = with_workunit( context.workunit_store.clone(), "check_action_cache".to_owned(), WorkunitMetadata { level: Level::Trace, desc: Some(format!("check action cache for {:?}", action_digest)), ..WorkunitMetadata::default() }, crate::remote::check_action_cache( action_digest, &self.metadata, self.platform, &context, self.action_cache_client.clone(), self.store.clone(), self.eager_fetch, ), |_, md| md, ) .await; match response { Ok(cached_response_opt) => { log::debug!( "remote cache response: digest={:?}: {:?}", action_digest, cached_response_opt ); cached_response_opt } Err(err) => { self.log_cache_error(err, CacheErrorType::ReadError); None } } } .boxed(); // We speculate between reading from the remote cache vs. running locally. If there was a // cache hit, we return early because there will be no need to write to the cache. 
Otherwise, // we run the process locally and will possibly write it to the cache later. tokio::select! { cache_result = cache_read_future => { if let Some(cached_response) = cache_result { let lookup_elapsed = cache_lookup_start.elapsed(); context.workunit_store.increment_counter(Metric::RemoteCacheSpeculationRemoteCompletedFirst, 1); if let Some(time_saved) = cached_response.metadata.time_saved_from_cache(lookup_elapsed) { let time_saved = time_saved.as_millis() as u64; context .workunit_store .increment_counter(Metric::RemoteCacheTotalTimeSavedMs, time_saved); context .workunit_store .record_observation(ObservationMetric::RemoteCacheTimeSavedMs, time_saved); } return Ok(cached_response); } else { // Note that we don't increment a counter here, as there is nothing of note in this // scenario: the remote cache did not save unnecessary local work, nor was the remote // trip unusually slow such that local execution was faster. local_execution_future.await? } } local_result = &mut local_execution_future => { context.workunit_store.increment_counter(Metric::RemoteCacheSpeculationLocalCompletedFirst, 1); local_result? } } } else { local_execution_future.await? }; if result.exit_code == 0 && self.cache_write { let command_runner = self.clone(); let result = result.clone(); let context2 = context.clone(); // NB: We use `TaskExecutor::spawn` instead of `tokio::spawn` to ensure logging still works. let cache_write_future = async move { context2 .workunit_store .increment_counter(Metric::RemoteCacheWriteStarted, 1); let write_result = command_runner .update_action_cache( &context2, &request, &result, &command_runner.metadata, &command, action_digest, command_digest, ) .await; context2 .workunit_store .increment_counter(Metric::RemoteCacheWriteFinished, 1); if let Err(err) = write_result { command_runner.log_cache_error(err, CacheErrorType::WriteError); context2 .workunit_store .increment_counter(Metric::RemoteCacheWriteErrors, 1); }; } .boxed(); let _write_join = self.executor.spawn(with_workunit( context.workunit_store, "remote_cache_write".to_owned(), WorkunitMetadata { level: Level::Trace, ..WorkunitMetadata::default() }, cache_write_future, |_, md| md, )); } Ok(result) } fn extract_compatible_request(&self, req: &MultiPlatformProcess) -> Option<Process> { self.underlying.extract_compatible_request(req) } }
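The log_cache_error method above throttles repeated cache failures per distinct error message. Below is a minimal, self-contained sketch of just that throttling rule; the enum, the plain BTreeMap, and the boolean return value are simplified stand-ins for the real RemoteCacheWarningsBehavior, the Mutex-guarded counters, and the log::warn!/log::debug! calls, and are not part of remote_cache.rs.

// Standalone sketch (not part of remote_cache.rs): the same error-throttling idea as
// `log_cache_error`, with a plain BTreeMap (no Mutex) and bools instead of log macros.
// "Backoff" warns on the 1st, 2nd, 4th, 8th, ... occurrence of a given error.
use std::collections::BTreeMap;

enum WarningsBehavior {
    Ignore,
    FirstOnly,
    Backoff,
}

struct ErrorLog {
    behavior: WarningsBehavior,
    counts: BTreeMap<String, usize>,
}

impl ErrorLog {
    // Returns true when this occurrence should be surfaced at warning level.
    fn record(&mut self, err: &str) -> bool {
        let count = self.counts.entry(err.to_owned()).or_insert(0);
        *count += 1;
        match self.behavior {
            WarningsBehavior::Ignore => false,
            WarningsBehavior::FirstOnly => *count == 1,
            WarningsBehavior::Backoff => count.is_power_of_two(),
        }
    }
}

fn main() {
    let mut log = ErrorLog { behavior: WarningsBehavior::Backoff, counts: BTreeMap::new() };
    let warned: Vec<bool> = (0..5).map(|_| log.record("cache unreachable")).collect();
    // Occurrences 1, 2 and 4 warn; 3 and 5 would be demoted to debug in the real code.
    assert_eq!(warned, vec![true, true, false, true, false]);
}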
{ tree.root = Some(directory); }
conditional_block
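The conditional_block filled in above is the point in make_tree_for_output_directory where a traversed Directory becomes either the Tree root or one of its children. Here is a minimal sketch of that queue-based traversal; the u64 digests, HashMap store, and DirProto/TreeProto structs are simplified stand-ins for the real Digest, Store, and remexec protos.

use std::collections::{HashMap, VecDeque};

// Simplified stand-ins for remexec::Directory and remexec::Tree.
#[derive(Clone, Default)]
struct DirProto {
    subdir_digests: Vec<u64>,
}

#[derive(Default)]
struct TreeProto {
    root: Option<DirProto>,
    children: Vec<DirProto>,
}

// Breadth-first walk: the requested digest becomes `root`, every transitive
// subdirectory becomes a child, mirroring make_tree_for_output_directory.
fn tree_from_digest(store: &HashMap<u64, DirProto>, root_digest: u64) -> Option<TreeProto> {
    let mut tree = TreeProto::default();
    let mut queue = VecDeque::new();
    queue.push_back(root_digest);
    while let Some(digest) = queue.pop_front() {
        let dir = store.get(&digest)?.clone();
        // Enqueue children so every transitive Directory ends up in the Tree.
        queue.extend(dir.subdir_digests.iter().copied());
        if digest == root_digest {
            tree.root = Some(dir);
        } else {
            tree.children.push(dir);
        }
    }
    Some(tree)
}

fn main() {
    let mut store = HashMap::new();
    store.insert(2, DirProto::default());
    store.insert(1, DirProto { subdir_digests: vec![2] });
    let tree = tree_from_digest(&store, 1).unwrap();
    assert!(tree.root.is_some());
    assert_eq!(tree.children.len(), 1);
}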
remote_cache.rs
use std::collections::{BTreeMap, HashSet, VecDeque}; use std::ffi::OsString; use std::path::Component; use std::sync::Arc; use std::time::Instant; use async_trait::async_trait; use bazel_protos::gen::build::bazel::remote::execution::v2 as remexec; use bazel_protos::require_digest; use fs::RelativePath; use futures::FutureExt; use grpc_util::headers_to_interceptor_fn; use grpc_util::status_to_str; use hashing::Digest; use parking_lot::Mutex; use remexec::action_cache_client::ActionCacheClient; use remexec::{ActionResult, Command, FileNode, Tree}; use store::Store; use tonic::transport::Channel; use workunit_store::{with_workunit, Level, Metric, ObservationMetric, WorkunitMetadata}; use crate::remote::make_execute_request; use crate::{ Context, FallibleProcessResultWithPlatform, MultiPlatformProcess, Platform, Process, ProcessMetadata, RemoteCacheWarningsBehavior, }; /// This `CommandRunner` implementation caches results remotely using the Action Cache service /// of the Remote Execution API. /// /// This runner expects to sit between the local cache CommandRunner and the CommandRunner /// that is actually executing the Process. Thus, the local cache will be checked first, /// then the remote cache, and then execution (local or remote) as necessary if neither cache /// has a hit. On the way back out of the stack, the result will be stored remotely and /// then locally. #[derive(Clone)] pub struct CommandRunner { underlying: Arc<dyn crate::CommandRunner>, metadata: ProcessMetadata, executor: task_executor::Executor, store: Store, action_cache_client: Arc<ActionCacheClient<Channel>>, headers: BTreeMap<String, String>, platform: Platform, cache_read: bool, cache_write: bool, eager_fetch: bool, warnings_behavior: RemoteCacheWarningsBehavior, read_errors_counter: Arc<Mutex<BTreeMap<String, usize>>>, write_errors_counter: Arc<Mutex<BTreeMap<String, usize>>>, } impl CommandRunner { pub fn new( underlying: Arc<dyn crate::CommandRunner>, metadata: ProcessMetadata, executor: task_executor::Executor, store: Store, action_cache_address: &str, root_ca_certs: Option<Vec<u8>>, headers: BTreeMap<String, String>, platform: Platform, cache_read: bool, cache_write: bool, warnings_behavior: RemoteCacheWarningsBehavior, eager_fetch: bool, ) -> Result<Self, String> { let tls_client_config = if action_cache_address.starts_with("https://") { Some(grpc_util::create_tls_config(root_ca_certs)?) } else { None }; let endpoint = grpc_util::create_endpoint(&action_cache_address, tls_client_config.as_ref())?; let channel = tonic::transport::Channel::balance_list(vec![endpoint].into_iter()); let action_cache_client = Arc::new(if headers.is_empty() { ActionCacheClient::new(channel) } else { ActionCacheClient::with_interceptor(channel, headers_to_interceptor_fn(&headers)?) }); Ok(CommandRunner { underlying, metadata, executor, store, action_cache_client, headers, platform, cache_read, cache_write, eager_fetch, warnings_behavior, read_errors_counter: Arc::new(Mutex::new(BTreeMap::new())), write_errors_counter: Arc::new(Mutex::new(BTreeMap::new())), }) } /// Create a REAPI `Tree` protobuf for an output directory by traversing down from a Pants /// merged final output directory to find the specific path to extract. (REAPI requires /// output directories to be stored as `Tree` protos that contain all of the `Directory` /// protos that constitute the directory tree.) /// /// Note that the Tree does not include the directory_path as a prefix, per REAPI. This path /// gets stored on the OutputDirectory proto. 
/// /// If the output directory does not exist, then returns Ok(None). pub(crate) async fn make_tree_for_output_directory( root_directory_digest: Digest, directory_path: RelativePath, store: &Store, ) -> Result<Option<Tree>, String> { // Traverse down from the root directory digest to find the directory digest for // the output directory. let mut current_directory_digest = root_directory_digest; for next_path_component in directory_path.as_ref().components() { let next_name = match next_path_component { Component::Normal(name) => name .to_str() .ok_or_else(|| format!("unable to convert '{:?}' to string", name))?, _ => return Ok(None), }; // Load the Directory proto corresponding to `current_directory_digest`. let current_directory = match store.load_directory(current_directory_digest).await? { Some((dir, _)) => dir, None => { return Err(format!( "Directory digest {:?} was referenced in output, but was not found in store.", current_directory_digest )) } }; // Scan the current directory for the current path component. let dir_node = match current_directory .directories .iter() .find(|dn| dn.name == next_name) { Some(dn) => dn, None => return Ok(None), }; // Set the current directory digest to be the digest in the DirectoryNode just found. // If there are more path components, then the search will continue there. // Otherwise, if this loop ends then the final Directory digest has been found. current_directory_digest = require_digest(dir_node.digest.as_ref())?; } // At this point, `current_directory_digest` holds the digest of the output directory. // This will be the root of the Tree. Add it to a queue of digests to traverse. let mut tree = Tree::default(); let mut digest_queue = VecDeque::new(); digest_queue.push_back(current_directory_digest); while let Some(directory_digest) = digest_queue.pop_front() { let directory = match store.load_directory(directory_digest).await? { Some((dir, _)) => dir, None => { return Err(format!( "illegal state: directory for digest {:?} did not exist locally", &current_directory_digest )) } }; // Add all of the digests for subdirectories into the queue so they are processed // in future iterations of the loop. for subdirectory_node in &directory.directories { let subdirectory_digest = require_digest(subdirectory_node.digest.as_ref())?; digest_queue.push_back(subdirectory_digest); } // Store this directory either as the `root` or one of the `children` if not the root. if directory_digest == current_directory_digest { tree.root = Some(directory); } else { tree.children.push(directory) } } Ok(Some(tree)) } pub(crate) async fn extract_output_file( root_directory_digest: Digest, file_path: RelativePath, store: &Store, ) -> Result<Option<FileNode>, String> { // Traverse down from the root directory digest to find the directory digest for // the output directory. let mut current_directory_digest = root_directory_digest; let parent_path = file_path.as_ref().parent(); let components_opt = parent_path.map(|x| x.components()); if let Some(components) = components_opt { for next_path_component in components { let next_name = match next_path_component { Component::Normal(name) => name .to_str() .ok_or_else(|| format!("unable to convert '{:?}' to string", name))?, _ => return Ok(None), }; // Load the Directory proto corresponding to `current_directory_digest`. let current_directory = match store.load_directory(current_directory_digest).await? 
{ Some((dir, _)) => dir, None => { return Err(format!( "Directory digest {:?} was referenced in output, but was not found in store.", current_directory_digest )) } }; // Scan the current directory for the current path component. let dir_node = match current_directory .directories .iter() .find(|dn| dn.name == next_name) { Some(dn) => dn, None => return Ok(None), }; // Set the current directory digest to be the digest in the DirectoryNode just found. // If there are more path components, then the search will continue there. // Otherwise, if this loop ends then the final Directory digest has been found. current_directory_digest = require_digest(dir_node.digest.as_ref())?; } } // Load the final directory. let directory = match store.load_directory(current_directory_digest).await? { Some((dir, _)) => dir, None => return Ok(None), }; // Search for the file. let file_base_name = file_path.as_ref().file_name().unwrap(); Ok( directory .files .iter() .find(|node| { let name = OsString::from(&node.name); name == file_base_name }) .cloned(), ) } /// Converts a REAPI `Command` and a `FallibleProcessResultWithPlatform` produced from executing /// that Command into a REAPI `ActionResult` suitable for upload to the REAPI Action Cache. /// /// This function also returns a vector of all `Digest`s referenced directly and indirectly by /// the `ActionResult` suitable for passing to `Store::ensure_remote_has_recursive`. (The /// digests may include both File and Tree digests.) pub(crate) async fn make_action_result( &self, command: &Command, result: &FallibleProcessResultWithPlatform, store: &Store, ) -> Result<(ActionResult, Vec<Digest>), String> { // Keep track of digests that need to be uploaded. let mut digests = HashSet::new(); let mut action_result = ActionResult { exit_code: result.exit_code, stdout_digest: Some(result.stdout_digest.into()), stderr_digest: Some(result.stderr_digest.into()), execution_metadata: Some(result.metadata.clone().into()), ..ActionResult::default() }; digests.insert(result.stdout_digest); digests.insert(result.stderr_digest); for output_directory in &command.output_directories { let tree = match Self::make_tree_for_output_directory( result.output_directory, RelativePath::new(output_directory).unwrap(), store, ) .await? { Some(t) => t, None => continue, }; let tree_digest = crate::remote::store_proto_locally(&self.store, &tree).await?; digests.insert(tree_digest); action_result .output_directories .push(remexec::OutputDirectory { path: output_directory.to_owned(), tree_digest: Some(tree_digest.into()), }); } for output_file in &command.output_files { let file_node = match Self::extract_output_file( result.output_directory, RelativePath::new(output_file).unwrap(), store, ) .await? { Some(node) => node, None => continue, }; let digest = require_digest(file_node.digest.as_ref())?; digests.insert(digest); action_result.output_files.push({ remexec::OutputFile { digest: Some(digest.into()), path: output_file.to_owned(), is_executable: file_node.is_executable, ..remexec::OutputFile::default() } }) } Ok((action_result, digests.into_iter().collect::<Vec<_>>())) } /// Stores an execution result into the remote Action Cache. async fn update_action_cache( &self, context: &Context, request: &Process, result: &FallibleProcessResultWithPlatform, metadata: &ProcessMetadata, command: &Command, action_digest: Digest, command_digest: Digest, ) -> Result<(), String> { // Upload the action (and related data, i.e. the embedded command and input files). 
// Assumption: The Action and related data has already been stored locally. with_workunit( context.workunit_store.clone(), "ensure_action_uploaded".to_owned(), WorkunitMetadata { level: Level::Trace, desc: Some(format!("ensure action uploaded for {:?}", action_digest)), ..WorkunitMetadata::default() }, crate::remote::ensure_action_uploaded( &self.store, command_digest, action_digest, request.input_files, ), |_, md| md, ) .await?; // Create an ActionResult from the process result. let (action_result, digests_for_action_result) = self .make_action_result(command, result, &self.store) .await?; // Ensure that all digests referenced by directly and indirectly by the ActionResult // have been uploaded to the remote cache. self .store .ensure_remote_has_recursive(digests_for_action_result) .await?; let update_action_cache_request = remexec::UpdateActionResultRequest { instance_name: metadata .instance_name .as_ref() .cloned() .unwrap_or_else(|| "".to_owned()), action_digest: Some(action_digest.into()), action_result: Some(action_result), ..remexec::UpdateActionResultRequest::default() }; let mut client = self.action_cache_client.as_ref().clone(); client .update_action_result(update_action_cache_request) .await
Ok(()) } fn log_cache_error(&self, err: String, err_type: CacheErrorType) { let err_count = { let mut errors_counter = match err_type { CacheErrorType::ReadError => self.read_errors_counter.lock(), CacheErrorType::WriteError => self.write_errors_counter.lock(), }; let count = errors_counter.entry(err.clone()).or_insert(0); *count += 1; *count }; let failure_desc = match err_type { CacheErrorType::ReadError => "read from", CacheErrorType::WriteError => "write to", }; let log_msg = format!( "Failed to {} remote cache ({} occurrences so far): {}", failure_desc, err_count, err ); let log_at_warn = match self.warnings_behavior { RemoteCacheWarningsBehavior::Ignore => false, RemoteCacheWarningsBehavior::FirstOnly => err_count == 1, RemoteCacheWarningsBehavior::Backoff => err_count.is_power_of_two(), }; if log_at_warn { log::warn!("{}", log_msg); } else { log::debug!("{}", log_msg); } } } enum CacheErrorType { ReadError, WriteError, } #[async_trait] impl crate::CommandRunner for CommandRunner { async fn run( &self, req: MultiPlatformProcess, context: Context, ) -> Result<FallibleProcessResultWithPlatform, String> { let cache_lookup_start = Instant::now(); // Construct the REv2 ExecuteRequest and related data for this execution request. let request = self .extract_compatible_request(&req) .ok_or_else(|| "No compatible Process found for checking remote cache.".to_owned())?; let (action, command, _execute_request) = make_execute_request(&request, self.metadata.clone())?; // Ensure the action and command are stored locally. let (command_digest, action_digest) = with_workunit( context.workunit_store.clone(), "ensure_action_stored_locally".to_owned(), WorkunitMetadata { level: Level::Trace, desc: Some(format!("ensure action stored locally for {:?}", action)), ..WorkunitMetadata::default() }, crate::remote::ensure_action_stored_locally(&self.store, &command, &action), |_, md| md, ) .await?; let mut local_execution_future = self.underlying.run(req, context.clone()); let result = if self.cache_read { // A future to read from the cache and log the results accordingly. let cache_read_future = async { let response = with_workunit( context.workunit_store.clone(), "check_action_cache".to_owned(), WorkunitMetadata { level: Level::Trace, desc: Some(format!("check action cache for {:?}", action_digest)), ..WorkunitMetadata::default() }, crate::remote::check_action_cache( action_digest, &self.metadata, self.platform, &context, self.action_cache_client.clone(), self.store.clone(), self.eager_fetch, ), |_, md| md, ) .await; match response { Ok(cached_response_opt) => { log::debug!( "remote cache response: digest={:?}: {:?}", action_digest, cached_response_opt ); cached_response_opt } Err(err) => { self.log_cache_error(err, CacheErrorType::ReadError); None } } } .boxed(); // We speculate between reading from the remote cache vs. running locally. If there was a // cache hit, we return early because there will be no need to write to the cache. Otherwise, // we run the process locally and will possibly write it to the cache later. tokio::select! 
{ cache_result = cache_read_future => { if let Some(cached_response) = cache_result { let lookup_elapsed = cache_lookup_start.elapsed(); context.workunit_store.increment_counter(Metric::RemoteCacheSpeculationRemoteCompletedFirst, 1); if let Some(time_saved) = cached_response.metadata.time_saved_from_cache(lookup_elapsed) { let time_saved = time_saved.as_millis() as u64; context .workunit_store .increment_counter(Metric::RemoteCacheTotalTimeSavedMs, time_saved); context .workunit_store .record_observation(ObservationMetric::RemoteCacheTimeSavedMs, time_saved); } return Ok(cached_response); } else { // Note that we don't increment a counter here, as there is nothing of note in this // scenario: the remote cache did not save unnecessary local work, nor was the remote // trip unusually slow such that local execution was faster. local_execution_future.await? } } local_result = &mut local_execution_future => { context.workunit_store.increment_counter(Metric::RemoteCacheSpeculationLocalCompletedFirst, 1); local_result? } } } else { local_execution_future.await? }; if result.exit_code == 0 && self.cache_write { let command_runner = self.clone(); let result = result.clone(); let context2 = context.clone(); // NB: We use `TaskExecutor::spawn` instead of `tokio::spawn` to ensure logging still works. let cache_write_future = async move { context2 .workunit_store .increment_counter(Metric::RemoteCacheWriteStarted, 1); let write_result = command_runner .update_action_cache( &context2, &request, &result, &command_runner.metadata, &command, action_digest, command_digest, ) .await; context2 .workunit_store .increment_counter(Metric::RemoteCacheWriteFinished, 1); if let Err(err) = write_result { command_runner.log_cache_error(err, CacheErrorType::WriteError); context2 .workunit_store .increment_counter(Metric::RemoteCacheWriteErrors, 1); }; } .boxed(); let _write_join = self.executor.spawn(with_workunit( context.workunit_store, "remote_cache_write".to_owned(), WorkunitMetadata { level: Level::Trace, ..WorkunitMetadata::default() }, cache_write_future, |_, md| md, )); } Ok(result) } fn extract_compatible_request(&self, req: &MultiPlatformProcess) -> Option<Process> { self.underlying.extract_compatible_request(req) } }
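Both make_tree_for_output_directory and extract_output_file above resolve a RelativePath by walking its components one Directory at a time, returning Ok(None) when a component is missing or is not a plain name. A simplified sketch of that walk follows; the u64 digests and nested HashMaps are stand-ins for the real Store and Directory protos and are not part of remote_cache.rs.

use std::collections::HashMap;
use std::path::{Component, Path};

// Toy "store": each directory digest maps to its named child directories.
type Digest = u64;
type DirMap = HashMap<Digest, HashMap<String, Digest>>;

// Walks `path` one Normal component at a time, as the two helpers above do.
// Returns Ok(None) when the path does not exist (or contains `..`/`/` components),
// and Err only for the "referenced but missing from the store" case.
fn resolve(store: &DirMap, root: Digest, path: &Path) -> Result<Option<Digest>, String> {
    let mut current = root;
    for component in path.components() {
        let name = match component {
            Component::Normal(name) => name
                .to_str()
                .ok_or_else(|| format!("unable to convert '{:?}' to string", name))?,
            _ => return Ok(None),
        };
        let children = store
            .get(&current)
            .ok_or_else(|| format!("digest {} referenced but not in store", current))?;
        current = match children.get(name) {
            Some(digest) => *digest,
            None => return Ok(None),
        };
    }
    Ok(Some(current))
}

fn main() {
    let mut store = DirMap::new();
    store.insert(1, [("out".to_owned(), 2)].into_iter().collect());
    store.insert(2, HashMap::new());
    assert_eq!(resolve(&store, 1, Path::new("out")), Ok(Some(2)));
    assert_eq!(resolve(&store, 1, Path::new("missing")), Ok(None));
}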
.map_err(status_to_str)?;
random_line_split
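The random_line_split fill above is the `.map_err(status_to_str)?` call that turns a gRPC status from update_action_result into this module's String-based errors. Below is a small self-contained sketch of that convention; FakeStatus and the local status_to_str are made-up stand-ins for tonic::Status and grpc_util::status_to_str.

struct FakeStatus {
    message: String,
}

fn status_to_str(status: FakeStatus) -> String {
    status.message
}

// Pretend RPC that either succeeds or fails with a status, like update_action_result.
fn update_action_result_stub(fail: bool) -> Result<(), FakeStatus> {
    if fail {
        Err(FakeStatus { message: "UNAVAILABLE: cache endpoint unreachable".to_owned() })
    } else {
        Ok(())
    }
}

// All fallible paths in this module surface errors as Strings; `map_err` plus `?`
// keeps the call site as terse as the real client.update_action_result(...) call.
fn update_cache(fail: bool) -> Result<(), String> {
    update_action_result_stub(fail).map_err(status_to_str)?;
    Ok(())
}

fn main() {
    assert!(update_cache(false).is_ok());
    assert_eq!(update_cache(true).unwrap_err(), "UNAVAILABLE: cache endpoint unreachable");
}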
remote_cache.rs
use std::collections::{BTreeMap, HashSet, VecDeque}; use std::ffi::OsString; use std::path::Component; use std::sync::Arc; use std::time::Instant; use async_trait::async_trait; use bazel_protos::gen::build::bazel::remote::execution::v2 as remexec; use bazel_protos::require_digest; use fs::RelativePath; use futures::FutureExt; use grpc_util::headers_to_interceptor_fn; use grpc_util::status_to_str; use hashing::Digest; use parking_lot::Mutex; use remexec::action_cache_client::ActionCacheClient; use remexec::{ActionResult, Command, FileNode, Tree}; use store::Store; use tonic::transport::Channel; use workunit_store::{with_workunit, Level, Metric, ObservationMetric, WorkunitMetadata}; use crate::remote::make_execute_request; use crate::{ Context, FallibleProcessResultWithPlatform, MultiPlatformProcess, Platform, Process, ProcessMetadata, RemoteCacheWarningsBehavior, }; /// This `CommandRunner` implementation caches results remotely using the Action Cache service /// of the Remote Execution API. /// /// This runner expects to sit between the local cache CommandRunner and the CommandRunner /// that is actually executing the Process. Thus, the local cache will be checked first, /// then the remote cache, and then execution (local or remote) as necessary if neither cache /// has a hit. On the way back out of the stack, the result will be stored remotely and /// then locally. #[derive(Clone)] pub struct CommandRunner { underlying: Arc<dyn crate::CommandRunner>, metadata: ProcessMetadata, executor: task_executor::Executor, store: Store, action_cache_client: Arc<ActionCacheClient<Channel>>, headers: BTreeMap<String, String>, platform: Platform, cache_read: bool, cache_write: bool, eager_fetch: bool, warnings_behavior: RemoteCacheWarningsBehavior, read_errors_counter: Arc<Mutex<BTreeMap<String, usize>>>, write_errors_counter: Arc<Mutex<BTreeMap<String, usize>>>, } impl CommandRunner { pub fn new( underlying: Arc<dyn crate::CommandRunner>, metadata: ProcessMetadata, executor: task_executor::Executor, store: Store, action_cache_address: &str, root_ca_certs: Option<Vec<u8>>, headers: BTreeMap<String, String>, platform: Platform, cache_read: bool, cache_write: bool, warnings_behavior: RemoteCacheWarningsBehavior, eager_fetch: bool, ) -> Result<Self, String> { let tls_client_config = if action_cache_address.starts_with("https://") { Some(grpc_util::create_tls_config(root_ca_certs)?) } else { None }; let endpoint = grpc_util::create_endpoint(&action_cache_address, tls_client_config.as_ref())?; let channel = tonic::transport::Channel::balance_list(vec![endpoint].into_iter()); let action_cache_client = Arc::new(if headers.is_empty() { ActionCacheClient::new(channel) } else { ActionCacheClient::with_interceptor(channel, headers_to_interceptor_fn(&headers)?) }); Ok(CommandRunner { underlying, metadata, executor, store, action_cache_client, headers, platform, cache_read, cache_write, eager_fetch, warnings_behavior, read_errors_counter: Arc::new(Mutex::new(BTreeMap::new())), write_errors_counter: Arc::new(Mutex::new(BTreeMap::new())), }) } /// Create a REAPI `Tree` protobuf for an output directory by traversing down from a Pants /// merged final output directory to find the specific path to extract. (REAPI requires /// output directories to be stored as `Tree` protos that contain all of the `Directory` /// protos that constitute the directory tree.) /// /// Note that the Tree does not include the directory_path as a prefix, per REAPI. This path /// gets stored on the OutputDirectory proto. 
/// /// If the output directory does not exist, then returns Ok(None). pub(crate) async fn make_tree_for_output_directory( root_directory_digest: Digest, directory_path: RelativePath, store: &Store, ) -> Result<Option<Tree>, String> { // Traverse down from the root directory digest to find the directory digest for // the output directory. let mut current_directory_digest = root_directory_digest; for next_path_component in directory_path.as_ref().components() { let next_name = match next_path_component { Component::Normal(name) => name .to_str() .ok_or_else(|| format!("unable to convert '{:?}' to string", name))?, _ => return Ok(None), }; // Load the Directory proto corresponding to `current_directory_digest`. let current_directory = match store.load_directory(current_directory_digest).await? { Some((dir, _)) => dir, None => { return Err(format!( "Directory digest {:?} was referenced in output, but was not found in store.", current_directory_digest )) } }; // Scan the current directory for the current path component. let dir_node = match current_directory .directories .iter() .find(|dn| dn.name == next_name) { Some(dn) => dn, None => return Ok(None), }; // Set the current directory digest to be the digest in the DirectoryNode just found. // If there are more path components, then the search will continue there. // Otherwise, if this loop ends then the final Directory digest has been found. current_directory_digest = require_digest(dir_node.digest.as_ref())?; } // At this point, `current_directory_digest` holds the digest of the output directory. // This will be the root of the Tree. Add it to a queue of digests to traverse. let mut tree = Tree::default(); let mut digest_queue = VecDeque::new(); digest_queue.push_back(current_directory_digest); while let Some(directory_digest) = digest_queue.pop_front() { let directory = match store.load_directory(directory_digest).await? { Some((dir, _)) => dir, None => { return Err(format!( "illegal state: directory for digest {:?} did not exist locally", &current_directory_digest )) } }; // Add all of the digests for subdirectories into the queue so they are processed // in future iterations of the loop. for subdirectory_node in &directory.directories { let subdirectory_digest = require_digest(subdirectory_node.digest.as_ref())?; digest_queue.push_back(subdirectory_digest); } // Store this directory either as the `root` or one of the `children` if not the root. if directory_digest == current_directory_digest { tree.root = Some(directory); } else { tree.children.push(directory) } } Ok(Some(tree)) } pub(crate) async fn extract_output_file( root_directory_digest: Digest, file_path: RelativePath, store: &Store, ) -> Result<Option<FileNode>, String> { // Traverse down from the root directory digest to find the directory digest for // the output directory. let mut current_directory_digest = root_directory_digest; let parent_path = file_path.as_ref().parent(); let components_opt = parent_path.map(|x| x.components()); if let Some(components) = components_opt { for next_path_component in components { let next_name = match next_path_component { Component::Normal(name) => name .to_str() .ok_or_else(|| format!("unable to convert '{:?}' to string", name))?, _ => return Ok(None), }; // Load the Directory proto corresponding to `current_directory_digest`. let current_directory = match store.load_directory(current_directory_digest).await? 
{ Some((dir, _)) => dir, None => { return Err(format!( "Directory digest {:?} was referenced in output, but was not found in store.", current_directory_digest )) } }; // Scan the current directory for the current path component. let dir_node = match current_directory .directories .iter() .find(|dn| dn.name == next_name) { Some(dn) => dn, None => return Ok(None), }; // Set the current directory digest to be the digest in the DirectoryNode just found. // If there are more path components, then the search will continue there. // Otherwise, if this loop ends then the final Directory digest has been found. current_directory_digest = require_digest(dir_node.digest.as_ref())?; } } // Load the final directory. let directory = match store.load_directory(current_directory_digest).await? { Some((dir, _)) => dir, None => return Ok(None), }; // Search for the file. let file_base_name = file_path.as_ref().file_name().unwrap(); Ok( directory .files .iter() .find(|node| { let name = OsString::from(&node.name); name == file_base_name }) .cloned(), ) } /// Converts a REAPI `Command` and a `FallibleProcessResultWithPlatform` produced from executing /// that Command into a REAPI `ActionResult` suitable for upload to the REAPI Action Cache. /// /// This function also returns a vector of all `Digest`s referenced directly and indirectly by /// the `ActionResult` suitable for passing to `Store::ensure_remote_has_recursive`. (The /// digests may include both File and Tree digests.) pub(crate) async fn make_action_result( &self, command: &Command, result: &FallibleProcessResultWithPlatform, store: &Store, ) -> Result<(ActionResult, Vec<Digest>), String> { // Keep track of digests that need to be uploaded. let mut digests = HashSet::new(); let mut action_result = ActionResult { exit_code: result.exit_code, stdout_digest: Some(result.stdout_digest.into()), stderr_digest: Some(result.stderr_digest.into()), execution_metadata: Some(result.metadata.clone().into()), ..ActionResult::default() }; digests.insert(result.stdout_digest); digests.insert(result.stderr_digest); for output_directory in &command.output_directories { let tree = match Self::make_tree_for_output_directory( result.output_directory, RelativePath::new(output_directory).unwrap(), store, ) .await? { Some(t) => t, None => continue, }; let tree_digest = crate::remote::store_proto_locally(&self.store, &tree).await?; digests.insert(tree_digest); action_result .output_directories .push(remexec::OutputDirectory { path: output_directory.to_owned(), tree_digest: Some(tree_digest.into()), }); } for output_file in &command.output_files { let file_node = match Self::extract_output_file( result.output_directory, RelativePath::new(output_file).unwrap(), store, ) .await? { Some(node) => node, None => continue, }; let digest = require_digest(file_node.digest.as_ref())?; digests.insert(digest); action_result.output_files.push({ remexec::OutputFile { digest: Some(digest.into()), path: output_file.to_owned(), is_executable: file_node.is_executable, ..remexec::OutputFile::default() } }) } Ok((action_result, digests.into_iter().collect::<Vec<_>>())) } /// Stores an execution result into the remote Action Cache. async fn update_action_cache( &self, context: &Context, request: &Process, result: &FallibleProcessResultWithPlatform, metadata: &ProcessMetadata, command: &Command, action_digest: Digest, command_digest: Digest, ) -> Result<(), String> { // Upload the action (and related data, i.e. the embedded command and input files). 
// Assumption: The Action and related data has already been stored locally. with_workunit( context.workunit_store.clone(), "ensure_action_uploaded".to_owned(), WorkunitMetadata { level: Level::Trace, desc: Some(format!("ensure action uploaded for {:?}", action_digest)), ..WorkunitMetadata::default() }, crate::remote::ensure_action_uploaded( &self.store, command_digest, action_digest, request.input_files, ), |_, md| md, ) .await?; // Create an ActionResult from the process result. let (action_result, digests_for_action_result) = self .make_action_result(command, result, &self.store) .await?; // Ensure that all digests referenced by directly and indirectly by the ActionResult // have been uploaded to the remote cache. self .store .ensure_remote_has_recursive(digests_for_action_result) .await?; let update_action_cache_request = remexec::UpdateActionResultRequest { instance_name: metadata .instance_name .as_ref() .cloned() .unwrap_or_else(|| "".to_owned()), action_digest: Some(action_digest.into()), action_result: Some(action_result), ..remexec::UpdateActionResultRequest::default() }; let mut client = self.action_cache_client.as_ref().clone(); client .update_action_result(update_action_cache_request) .await .map_err(status_to_str)?; Ok(()) } fn log_cache_error(&self, err: String, err_type: CacheErrorType) { let err_count = { let mut errors_counter = match err_type { CacheErrorType::ReadError => self.read_errors_counter.lock(), CacheErrorType::WriteError => self.write_errors_counter.lock(), }; let count = errors_counter.entry(err.clone()).or_insert(0); *count += 1; *count }; let failure_desc = match err_type { CacheErrorType::ReadError => "read from", CacheErrorType::WriteError => "write to", }; let log_msg = format!( "Failed to {} remote cache ({} occurrences so far): {}", failure_desc, err_count, err ); let log_at_warn = match self.warnings_behavior { RemoteCacheWarningsBehavior::Ignore => false, RemoteCacheWarningsBehavior::FirstOnly => err_count == 1, RemoteCacheWarningsBehavior::Backoff => err_count.is_power_of_two(), }; if log_at_warn { log::warn!("{}", log_msg); } else { log::debug!("{}", log_msg); } } } enum CacheErrorType { ReadError, WriteError, } #[async_trait] impl crate::CommandRunner for CommandRunner { async fn run( &self, req: MultiPlatformProcess, context: Context, ) -> Result<FallibleProcessResultWithPlatform, String> { let cache_lookup_start = Instant::now(); // Construct the REv2 ExecuteRequest and related data for this execution request. let request = self .extract_compatible_request(&req) .ok_or_else(|| "No compatible Process found for checking remote cache.".to_owned())?; let (action, command, _execute_request) = make_execute_request(&request, self.metadata.clone())?; // Ensure the action and command are stored locally. let (command_digest, action_digest) = with_workunit( context.workunit_store.clone(), "ensure_action_stored_locally".to_owned(), WorkunitMetadata { level: Level::Trace, desc: Some(format!("ensure action stored locally for {:?}", action)), ..WorkunitMetadata::default() }, crate::remote::ensure_action_stored_locally(&self.store, &command, &action), |_, md| md, ) .await?; let mut local_execution_future = self.underlying.run(req, context.clone()); let result = if self.cache_read { // A future to read from the cache and log the results accordingly. 
let cache_read_future = async { let response = with_workunit( context.workunit_store.clone(), "check_action_cache".to_owned(), WorkunitMetadata { level: Level::Trace, desc: Some(format!("check action cache for {:?}", action_digest)), ..WorkunitMetadata::default() }, crate::remote::check_action_cache( action_digest, &self.metadata, self.platform, &context, self.action_cache_client.clone(), self.store.clone(), self.eager_fetch, ), |_, md| md, ) .await; match response { Ok(cached_response_opt) => { log::debug!( "remote cache response: digest={:?}: {:?}", action_digest, cached_response_opt ); cached_response_opt } Err(err) => { self.log_cache_error(err, CacheErrorType::ReadError); None } } } .boxed(); // We speculate between reading from the remote cache vs. running locally. If there was a // cache hit, we return early because there will be no need to write to the cache. Otherwise, // we run the process locally and will possibly write it to the cache later. tokio::select! { cache_result = cache_read_future => { if let Some(cached_response) = cache_result { let lookup_elapsed = cache_lookup_start.elapsed(); context.workunit_store.increment_counter(Metric::RemoteCacheSpeculationRemoteCompletedFirst, 1); if let Some(time_saved) = cached_response.metadata.time_saved_from_cache(lookup_elapsed) { let time_saved = time_saved.as_millis() as u64; context .workunit_store .increment_counter(Metric::RemoteCacheTotalTimeSavedMs, time_saved); context .workunit_store .record_observation(ObservationMetric::RemoteCacheTimeSavedMs, time_saved); } return Ok(cached_response); } else { // Note that we don't increment a counter here, as there is nothing of note in this // scenario: the remote cache did not save unnecessary local work, nor was the remote // trip unusually slow such that local execution was faster. local_execution_future.await? } } local_result = &mut local_execution_future => { context.workunit_store.increment_counter(Metric::RemoteCacheSpeculationLocalCompletedFirst, 1); local_result? } } } else { local_execution_future.await? }; if result.exit_code == 0 && self.cache_write { let command_runner = self.clone(); let result = result.clone(); let context2 = context.clone(); // NB: We use `TaskExecutor::spawn` instead of `tokio::spawn` to ensure logging still works. let cache_write_future = async move { context2 .workunit_store .increment_counter(Metric::RemoteCacheWriteStarted, 1); let write_result = command_runner .update_action_cache( &context2, &request, &result, &command_runner.metadata, &command, action_digest, command_digest, ) .await; context2 .workunit_store .increment_counter(Metric::RemoteCacheWriteFinished, 1); if let Err(err) = write_result { command_runner.log_cache_error(err, CacheErrorType::WriteError); context2 .workunit_store .increment_counter(Metric::RemoteCacheWriteErrors, 1); }; } .boxed(); let _write_join = self.executor.spawn(with_workunit( context.workunit_store, "remote_cache_write".to_owned(), WorkunitMetadata { level: Level::Trace, ..WorkunitMetadata::default() }, cache_write_future, |_, md| md, )); } Ok(result) } fn
(&self, req: &MultiPlatformProcess) -> Option<Process> { self.underlying.extract_compatible_request(req) } }
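CommandRunner::run above races the cache lookup against local execution with tokio::select!, keeping the local future alive so a cache miss can still fall back to it. Below is a stripped-down sketch of that speculation; check_action_cache and run_locally are toy stand-ins with arbitrary sleeps, and it assumes the tokio runtime and macro features that the surrounding module already depends on.

use std::time::Duration;

// Stand-ins for the cache check and the underlying runner; timings are arbitrary.
async fn check_action_cache() -> Option<&'static str> {
    tokio::time::sleep(Duration::from_millis(10)).await;
    None // pretend the lookup was a miss
}

async fn run_locally() -> &'static str {
    tokio::time::sleep(Duration::from_millis(50)).await;
    "local result"
}

#[tokio::main]
async fn main() {
    // Keep the local future alive across the race, so a cache miss can fall back
    // to it without restarting the work (as `local_execution_future` does above).
    let mut local = Box::pin(run_locally());
    let result = tokio::select! {
        cache = check_action_cache() => {
            match cache {
                Some(hit) => hit,             // the real code returns the cached response here
                None => local.as_mut().await, // miss: finish the speculative local run
            }
        }
        local_result = &mut local => local_result,
    };
    println!("{}", result);
}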
extract_compatible_request
identifier_name
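The identifier filled in above, extract_compatible_request, is pure delegation: the caching runner forwards the call to the runner it wraps, exactly as the module's doc comment describes ("sits between" other runners). A toy sketch of that decorator arrangement, with a made-up Runner trait in place of crate::CommandRunner:

use std::sync::Arc;

// Toy analogue of crate::CommandRunner: the caching runner wraps another runner
// and forwards the non-caching parts of the interface unchanged.
trait Runner {
    fn run(&self, req: &str) -> String;
    fn supports(&self, req: &str) -> bool;
}

struct LocalRunner;

impl Runner for LocalRunner {
    fn run(&self, req: &str) -> String {
        format!("ran {} locally", req)
    }
    fn supports(&self, _req: &str) -> bool {
        true
    }
}

struct CachingRunner {
    underlying: Arc<dyn Runner>,
}

impl Runner for CachingRunner {
    fn run(&self, req: &str) -> String {
        // A real implementation would consult the cache first, as `run` above does.
        self.underlying.run(req)
    }
    // Pure delegation, like extract_compatible_request in the code above.
    fn supports(&self, req: &str) -> bool {
        self.underlying.supports(req)
    }
}

fn main() {
    let runner = CachingRunner { underlying: Arc::new(LocalRunner) };
    assert!(runner.supports("echo hello"));
    println!("{}", runner.run("echo hello"));
}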
bare_index.rs
use crate::{Crate, Error, IndexConfig}; use std::marker::PhantomPinned; use std::{ io, path::{Path, PathBuf}, }; /// Access to a "bare" git index that fetches files directly from the repo instead of local checkout /// /// Uses Cargo's cache pub struct BareIndex { path: PathBuf, pub url: String, } impl BareIndex { /// Creates a bare index from a provided URL, opening the same location on /// disk that cargo uses for that registry index. pub fn from_url(url: &str) -> Result<Self, Error> { let (dir_name, canonical_url) = url_to_local_dir(url)?; let mut path = home::cargo_home().unwrap_or_default(); path.push("registry/index"); path.push(dir_name); Ok(Self { path, url: canonical_url, }) } /// Creates a bare index at the provided path with the specified repository URL. #[inline] pub fn with_path(path: PathBuf, url: &str) -> Self { Self { path, url: url.to_owned(), } } /// Creates an index for the default crates.io registry, using the same /// disk location as cargo itself. #[inline] pub fn new_cargo_default() -> Self { // UNWRAP: The default index git URL is known to safely convert to a path. Self::from_url(crate::INDEX_GIT_URL).unwrap() } /// Opens the local index, which acts as a kind of lock for source control /// operations #[inline] pub fn open_or_clone(&self) -> Result<BareIndexRepo<'_>, Error> { BareIndexRepo::new(self) } /// Get the index directory. #[inline] pub fn path(&self) -> &Path { &self.path } } /// Self-referential struct where `Tree` borrows from `Repository` struct UnsafeRepoTree { /// Warning: order of the fields is necessary for safety. `tree` must Drop before `repo`. tree: git2::Tree<'static>, repo: Box<git2::Repository>, // Currently !Unpin is Rust's heuristic for self-referential structs _self_referential: PhantomPinned, } /// Opened instance of [`BareIndex`] pub struct BareIndexRepo<'a> { inner: &'a BareIndex, head_str: String, rt: UnsafeRepoTree, } impl<'a> BareIndexRepo<'a> { fn new(index: &'a BareIndex) -> Result<Self, Error> { let exists = git2::Repository::discover(&index.path) .map(|repository| { repository .find_remote("origin") .ok() // Cargo creates a checkout without an origin set, // so default to true in case of missing origin .map_or(true, |remote| { remote.url().map_or(true, |url| url == index.url) }) }) .unwrap_or(false); let repo = if !exists { let mut opts = git2::RepositoryInitOptions::new(); opts.external_template(false); let repo = git2::Repository::init_opts(&index.path, &opts)?; { let mut origin_remote = repo .find_remote("origin") .or_else(|_| repo.remote_anonymous(&index.url))?; origin_remote.fetch( &[ "HEAD:refs/remotes/origin/HEAD", "master:refs/remotes/origin/master", ], Some(&mut crate::fetch_opts()), None, )?; } repo } else { git2::Repository::open(&index.path)? }; // It's going to be used in a self-referential type. Boxing prevents it from being moved // and adds a layer of indirection that will hopefully not upset noalias analysis. let repo = Box::new(repo); let head = repo // Fallback to HEAD, as a fresh clone won't have a FETCH_HEAD .refname_to_id("FETCH_HEAD") .or_else(|_| repo.refname_to_id("HEAD"))?; let head_str = head.to_string(); let tree = { let commit = repo.find_commit(head)?; let tree = commit.tree()?; // See `UnsafeRepoTree` unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) } }; Ok(Self { inner: index, head_str, rt: UnsafeRepoTree { repo, tree, _self_referential: PhantomPinned, }, }) } /// Fetches latest from the remote index repository. 
Note that using this /// method will mean no cache entries will be used if a new commit is fetched /// from the repository, as their commit version will no longer match. pub fn
(&mut self) -> Result<(), Error> { { let mut origin_remote = self .rt .repo .find_remote("origin") .or_else(|_| self.rt.repo.remote_anonymous(&self.inner.url))?; origin_remote.fetch( &[ "HEAD:refs/remotes/origin/HEAD", "master:refs/remotes/origin/master", ], Some(&mut crate::fetch_opts()), None, )?; } let head = self .rt .repo .refname_to_id("FETCH_HEAD") .or_else(|_| self.rt.repo.refname_to_id("HEAD"))?; let head_str = head.to_string(); let commit = self.rt.repo.find_commit(head)?; let tree = commit.tree()?; // See `UnsafeRepoTree` let tree = unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) }; self.head_str = head_str; self.rt.tree = tree; Ok(()) } /// Reads a crate from the index, it will attempt to use a cached entry if /// one is available, otherwise it will fallback to reading the crate /// directly from the git blob containing the crate information. pub fn crate_(&self, name: &str) -> Option<Crate> { let rel_path = match crate::crate_name_to_relative_path(name) { Some(rp) => rp, None => return None, }; // Attempt to load the .cache/ entry first, this is purely an acceleration // mechanism and can fail for a few reasons that are non-fatal { let mut cache_path = self.inner.path.join(".cache"); cache_path.push(&rel_path); if let Ok(cache_bytes) = std::fs::read(&cache_path) { if let Ok(krate) = Crate::from_cache_slice(&cache_bytes, &self.head_str) { return Some(krate); } } } // Fallback to reading the blob directly via git if we don't have a // valid cache entry self.crate_from_rel_path(&rel_path).ok() } fn crate_from_rel_path(&self, path: &str) -> Result<Crate, Error> { let entry = self.rt.tree.get_path(&Path::new(path))?; let object = entry.to_object(&self.rt.repo)?; let blob = object .as_blob() .ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, path.to_owned())))?; Crate::from_slice(blob.content()).map_err(Error::Io) } /// Retrieve an iterator over all the crates in the index. /// skips crates that can not be parsed. #[inline] pub fn crates(&self) -> Crates<'_> { Crates { blobs: self.crates_refs(), } } /// Retrieve an iterator over all the crates in the index. /// Returns opaque reference for each crate in the index, which can be used with [`CrateRef::parse`] fn crates_refs(&self) -> CrateRefs<'_> { let mut stack = Vec::with_capacity(800); // Scan only directories at top level (skip config.json, etc.) for entry in self.rt.tree.iter() { let entry = entry.to_object(&self.rt.repo).unwrap(); if entry.as_tree().is_some() { stack.push(entry); } } CrateRefs { stack, rt: &self.rt, } } /// Get the global configuration of the index. pub fn index_config(&self) -> Result<IndexConfig, Error> { let entry = self.rt.tree.get_path(&Path::new("config.json"))?; let object = entry.to_object(&self.rt.repo)?; let blob = object .as_blob() .ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, "config.json")))?; serde_json::from_slice(blob.content()).map_err(Error::Json) } } /// Iterator over all crates in the index, but returns opaque objects that can be parsed separately. /// /// See [`CrateRef::parse`]. struct CrateRefs<'a> { stack: Vec<git2::Object<'a>>, rt: &'a UnsafeRepoTree, } /// Opaque representation of a crate in the index. See [`CrateRef::parse`]. 
pub(crate) struct CrateRef<'a>(pub(crate) git2::Object<'a>); impl CrateRef<'_> { #[inline] /// Parse a crate from [`BareIndex::crates_blobs`] iterator pub fn parse(&self) -> Option<Crate> { Crate::from_slice(self.as_slice()?).ok() } /// Raw crate data that can be parsed with [`Crate::from_slice`] pub fn as_slice(&self) -> Option<&[u8]> { Some(self.0.as_blob()?.content()) } } impl<'a> Iterator for CrateRefs<'a> { type Item = CrateRef<'a>; fn next(&mut self) -> Option<Self::Item> { while let Some(last) = self.stack.pop() { match last.as_tree() { None => return Some(CrateRef(last)), Some(tree) => { for entry in tree.iter().rev() { self.stack.push(entry.to_object(&self.rt.repo).unwrap()); } continue; } } } None } } pub struct Crates<'a> { blobs: CrateRefs<'a>, } impl<'a> Iterator for Crates<'a> { type Item = Crate; fn next(&mut self) -> Option<Self::Item> { while let Some(next) = self.blobs.next() { if let Some(k) = CrateRef::parse(&next) { return Some(k); } } None } } /// Converts a full url, eg https://github.com/rust-lang/crates.io-index, into /// the root directory name where cargo itself will fetch it on disk fn url_to_local_dir(url: &str) -> Result<(String, String), Error> { fn to_hex(num: u64) -> String { const CHARS: &[u8] = b"0123456789abcdef"; let bytes = &[ num as u8, (num >> 8) as u8, (num >> 16) as u8, (num >> 24) as u8, (num >> 32) as u8, (num >> 40) as u8, (num >> 48) as u8, (num >> 56) as u8, ]; let mut output = vec![0u8; 16]; let mut ind = 0; for &byte in bytes { output[ind] = CHARS[(byte >> 4) as usize]; output[ind + 1] = CHARS[(byte & 0xf) as usize]; ind += 2; } String::from_utf8(output).expect("valid utf-8 hex string") } #[allow(deprecated)] fn hash_u64(url: &str) -> u64 { use std::hash::{Hash, Hasher, SipHasher}; let mut hasher = SipHasher::new_with_keys(0, 0); // Registry 2usize.hash(&mut hasher); // Url url.hash(&mut hasher); hasher.finish() } // Ensure we have a registry or bare url let (url, scheme_ind) = { let scheme_ind = url .find("://") .ok_or_else(|| Error::Url(format!("'{}' is not a valid url", url)))?; let scheme_str = &url[..scheme_ind]; if let Some(ind) = scheme_str.find('+') { if &scheme_str[..ind] != "registry" { return Err(Error::Url(format!("'{}' is not a valid registry url", url))); } (&url[ind + 1..], scheme_ind - ind - 1) } else { (url, scheme_ind) } }; // Could use the Url crate for this, but it's simple enough and we don't // need to deal with every possible url (I hope...) 
let host = match url[scheme_ind + 3..].find('/') { Some(end) => &url[scheme_ind + 3..scheme_ind + 3 + end], None => &url[scheme_ind + 3..], }; // cargo special cases github.com for reasons, so do the same let mut canonical = if host == "github.com" { url.to_lowercase() } else { url.to_owned() }; // Chop off any query params/fragments if let Some(hash) = canonical.rfind('#') { canonical.truncate(hash); } if let Some(query) = canonical.rfind('?') { canonical.truncate(query); } let ident = to_hex(hash_u64(&canonical)); if canonical.ends_with('/') { canonical.pop(); } if canonical.ends_with(".git") { canonical.truncate(canonical.len() - 4); } Ok((format!("{}-{}", host, ident), canonical)) } #[cfg(test)] mod test { #[test] fn matches_cargo() { assert_eq!( super::url_to_local_dir(crate::INDEX_GIT_URL).unwrap(), ( "github.com-1ecc6299db9ec823".to_owned(), crate::INDEX_GIT_URL.to_owned() ) ); // I've confirmed this also works with a custom registry, unfortunately // that one includes a secret key as part of the url which would allow // anyone to publish to the registry, so uhh...here's a fake one instead assert_eq!( super::url_to_local_dir( "https://dl.cloudsmith.io/aBcW1234aBcW1234/embark/rust/cargo/index.git" ) .unwrap(), ( "dl.cloudsmith.io-ff79e51ddd2b38fd".to_owned(), "https://dl.cloudsmith.io/aBcW1234aBcW1234/embark/rust/cargo/index".to_owned() ) ); // Ensure we actually strip off the irrelevant parts of a url, note that // the .git suffix is not part of the canonical url, but *is* used when hashing assert_eq!( super::url_to_local_dir(&format!( "registry+{}.git?one=1&two=2#fragment", crate::INDEX_GIT_URL )) .unwrap(), ( "github.com-c786010fb7ef2e6e".to_owned(), crate::INDEX_GIT_URL.to_owned() ) ); } #[test] fn bare_iterator() { use super::BareIndex; let tmp_dir = tempdir::TempDir::new("bare_iterator").unwrap(); let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL); let repo = index .open_or_clone() .expect("Failed to clone crates.io index"); let mut found_gcc_crate = false; for c in repo.crates() { if c.name() == "gcc" { found_gcc_crate = true; } } assert!(found_gcc_crate); } #[test] fn clones_bare_index() { use super::BareIndex; let tmp_dir = tempdir::TempDir::new("clones_bare_index").unwrap(); let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL); let mut repo = index .open_or_clone() .expect("Failed to clone crates.io index"); fn test_sval(repo: &super::BareIndexRepo<'_>) { let krate = repo .crate_("sval") .expect("Could not find the crate sval in the index"); let version = krate .versions() .iter() .find(|v| v.version() == "0.0.1") .expect("Version 0.0.1 of sval does not exist?"); let dep_with_package_name = version .dependencies() .iter() .find(|d| d.name() == "serde_lib") .expect("sval does not have expected dependency?"); assert_ne!( dep_with_package_name.name(), dep_with_package_name.package().unwrap() ); assert_eq!( dep_with_package_name.crate_name(), dep_with_package_name.package().unwrap() ); } test_sval(&repo); repo.retrieve().expect("Failed to fetch crates.io index"); test_sval(&repo); } #[test] fn opens_bare_index() { use super::BareIndex; let tmp_dir = tempdir::TempDir::new("opens_bare_index").unwrap(); let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL); { let _ = index .open_or_clone() .expect("Failed to clone crates.io index"); } let mut repo = index .open_or_clone() .expect("Failed to open crates.io index"); fn test_sval(repo: &super::BareIndexRepo<'_>) { let krate = repo .crate_("sval") 
.expect("Could not find the crate sval in the index"); let version = krate .versions() .iter() .find(|v| v.version() == "0.0.1") .expect("Version 0.0.1 of sval does not exist?"); let dep_with_package_name = version .dependencies() .iter() .find(|d| d.name() == "serde_lib") .expect("sval does not have expected dependency?"); assert_ne!( dep_with_package_name.name(), dep_with_package_name.package().unwrap() ); assert_eq!( dep_with_package_name.crate_name(), dep_with_package_name.package().unwrap() ); } test_sval(&repo); repo.retrieve().expect("Failed to fetch crates.io index"); test_sval(&repo); } }
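CrateRefs::next above walks the git tree iteratively with an explicit Vec stack, yielding blobs and pushing subtree entries in reverse. A sketch of the same traversal over a toy Node enum instead of git2 objects (not part of bare_index.rs):

// Sketch of the explicit-stack traversal used by the CrateRefs iterator above.
enum Node {
    Blob(&'static str),
    Tree(Vec<Node>),
}

struct Blobs {
    stack: Vec<Node>,
}

impl Iterator for Blobs {
    type Item = &'static str;

    fn next(&mut self) -> Option<Self::Item> {
        while let Some(node) = self.stack.pop() {
            match node {
                // Leaves are yielded directly, like crate files in the index.
                Node::Blob(name) => return Some(name),
                // Subtrees are expanded in reverse so their entries pop in order.
                Node::Tree(children) => self.stack.extend(children.into_iter().rev()),
            }
        }
        None
    }
}

fn main() {
    // Two top-level directories, as crates_refs() would seed them (non-tree entries
    // such as config.json are skipped by the real code before this point).
    let top_level = vec![
        Node::Tree(vec![Node::Blob("ab/cd/abcd-crate")]),
        Node::Tree(vec![Node::Blob("se/rd/serde")]),
    ];
    let blobs = Blobs { stack: top_level };
    let names: Vec<_> = blobs.collect();
    assert_eq!(names, vec!["se/rd/serde", "ab/cd/abcd-crate"]);
}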
retrieve
identifier_name
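Putting the pieces above together, a typical caller would use BareIndex and BareIndexRepo roughly as in the sketch below. This is illustrative only, not part of bare_index.rs: it assumes the surrounding crate is consumed under the name crates_index, that cloning the crates.io index over the network is acceptable, and it collapses error handling to expect.

fn main() {
    // Same on-disk location cargo itself uses for the default registry.
    let index = crates_index::BareIndex::new_cargo_default();
    let mut repo = index.open_or_clone().expect("failed to open or clone the index");

    // Lookup goes through the .cache/ entry first, then falls back to the git blob.
    if let Some(krate) = repo.crate_("serde") {
        println!("{} has {} published versions", krate.name(), krate.versions().len());
    }

    // Fetch the latest commit; cache entries tied to the old head are bypassed afterwards.
    repo.retrieve().expect("failed to fetch the index");

    // Iterate every parseable crate in the index (this walks the whole git tree).
    let total = repo.crates().count();
    println!("{} crates in the index", total);
}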
bare_index.rs
use crate::{Crate, Error, IndexConfig}; use std::marker::PhantomPinned; use std::{ io, path::{Path, PathBuf}, }; /// Access to a "bare" git index that fetches files directly from the repo instead of local checkout /// /// Uses Cargo's cache pub struct BareIndex { path: PathBuf, pub url: String, } impl BareIndex { /// Creates a bare index from a provided URL, opening the same location on /// disk that cargo uses for that registry index. pub fn from_url(url: &str) -> Result<Self, Error> { let (dir_name, canonical_url) = url_to_local_dir(url)?; let mut path = home::cargo_home().unwrap_or_default(); path.push("registry/index"); path.push(dir_name); Ok(Self { path, url: canonical_url, }) } /// Creates a bare index at the provided path with the specified repository URL. #[inline] pub fn with_path(path: PathBuf, url: &str) -> Self { Self { path, url: url.to_owned(), } } /// Creates an index for the default crates.io registry, using the same /// disk location as cargo itself. #[inline] pub fn new_cargo_default() -> Self { // UNWRAP: The default index git URL is known to safely convert to a path. Self::from_url(crate::INDEX_GIT_URL).unwrap() } /// Opens the local index, which acts as a kind of lock for source control /// operations #[inline] pub fn open_or_clone(&self) -> Result<BareIndexRepo<'_>, Error> { BareIndexRepo::new(self) } /// Get the index directory. #[inline] pub fn path(&self) -> &Path { &self.path } } /// Self-referential struct where `Tree` borrows from `Repository` struct UnsafeRepoTree { /// Warning: order of the fields is necessary for safety. `tree` must Drop before `repo`. tree: git2::Tree<'static>, repo: Box<git2::Repository>, // Currently !Unpin is Rust's heuristic for self-referential structs _self_referential: PhantomPinned, } /// Opened instance of [`BareIndex`] pub struct BareIndexRepo<'a> { inner: &'a BareIndex, head_str: String, rt: UnsafeRepoTree, } impl<'a> BareIndexRepo<'a> { fn new(index: &'a BareIndex) -> Result<Self, Error> { let exists = git2::Repository::discover(&index.path) .map(|repository| { repository .find_remote("origin") .ok() // Cargo creates a checkout without an origin set, // so default to true in case of missing origin .map_or(true, |remote| { remote.url().map_or(true, |url| url == index.url) }) }) .unwrap_or(false); let repo = if !exists { let mut opts = git2::RepositoryInitOptions::new(); opts.external_template(false); let repo = git2::Repository::init_opts(&index.path, &opts)?; { let mut origin_remote = repo .find_remote("origin") .or_else(|_| repo.remote_anonymous(&index.url))?; origin_remote.fetch( &[ "HEAD:refs/remotes/origin/HEAD", "master:refs/remotes/origin/master", ], Some(&mut crate::fetch_opts()), None, )?; } repo } else { git2::Repository::open(&index.path)? }; // It's going to be used in a self-referential type. Boxing prevents it from being moved // and adds a layer of indirection that will hopefully not upset noalias analysis. let repo = Box::new(repo); let head = repo // Fallback to HEAD, as a fresh clone won't have a FETCH_HEAD .refname_to_id("FETCH_HEAD") .or_else(|_| repo.refname_to_id("HEAD"))?; let head_str = head.to_string(); let tree = { let commit = repo.find_commit(head)?; let tree = commit.tree()?; // See `UnsafeRepoTree` unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) } }; Ok(Self { inner: index, head_str, rt: UnsafeRepoTree { repo, tree, _self_referential: PhantomPinned, }, }) } /// Fetches latest from the remote index repository. 
Note that using this /// method will mean no cache entries will be used, if a new commit is fetched /// from the repository, as their commit version will no longer match. pub fn retrieve(&mut self) -> Result<(), Error> { { let mut origin_remote = self .rt .repo .find_remote("origin") .or_else(|_| self.rt.repo.remote_anonymous(&self.inner.url))?; origin_remote.fetch( &[ "HEAD:refs/remotes/origin/HEAD", "master:refs/remotes/origin/master", ], Some(&mut crate::fetch_opts()), None, )?; } let head = self .rt .repo .refname_to_id("FETCH_HEAD") .or_else(|_| self.rt.repo.refname_to_id("HEAD"))?; let head_str = head.to_string(); let commit = self.rt.repo.find_commit(head)?; let tree = commit.tree()?; // See `UnsafeRepoTree` let tree = unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) }; self.head_str = head_str; self.rt.tree = tree; Ok(()) } /// Reads a crate from the index, it will attempt to use a cached entry if /// one is available, otherwise it will fallback to reading the crate /// directly from the git blob containing the crate information. pub fn crate_(&self, name: &str) -> Option<Crate> { let rel_path = match crate::crate_name_to_relative_path(name) { Some(rp) => rp, None => return None, }; // Attempt to load the .cache/ entry first, this is purely an acceleration // mechanism and can fail for a few reasons that are non-fatal { let mut cache_path = self.inner.path.join(".cache"); cache_path.push(&rel_path); if let Ok(cache_bytes) = std::fs::read(&cache_path) { if let Ok(krate) = Crate::from_cache_slice(&cache_bytes, &self.head_str) { return Some(krate); } } } // Fallback to reading the blob directly via git if we don't have a // valid cache entry self.crate_from_rel_path(&rel_path).ok() } fn crate_from_rel_path(&self, path: &str) -> Result<Crate, Error> { let entry = self.rt.tree.get_path(&Path::new(path))?; let object = entry.to_object(&self.rt.repo)?; let blob = object .as_blob() .ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, path.to_owned())))?; Crate::from_slice(blob.content()).map_err(Error::Io) } /// Retrieve an iterator over all the crates in the index. /// skips crates that can not be parsed. #[inline] pub fn crates(&self) -> Crates<'_> { Crates { blobs: self.crates_refs(), } } /// Retrieve an iterator over all the crates in the index. /// Returns opaque reference for each crate in the index, which can be used with [`CrateRef::parse`] fn crates_refs(&self) -> CrateRefs<'_> { let mut stack = Vec::with_capacity(800); // Scan only directories at top level (skip config.json, etc.) for entry in self.rt.tree.iter() { let entry = entry.to_object(&self.rt.repo).unwrap(); if entry.as_tree().is_some() { stack.push(entry); } } CrateRefs { stack, rt: &self.rt, } } /// Get the global configuration of the index. pub fn index_config(&self) -> Result<IndexConfig, Error> { let entry = self.rt.tree.get_path(&Path::new("config.json"))?; let object = entry.to_object(&self.rt.repo)?; let blob = object .as_blob() .ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, "config.json")))?; serde_json::from_slice(blob.content()).map_err(Error::Json) } } /// Iterator over all crates in the index, but returns opaque objects that can be parsed separately. /// /// See [`CrateRef::parse`]. struct CrateRefs<'a> { stack: Vec<git2::Object<'a>>, rt: &'a UnsafeRepoTree, } /// Opaque representation of a crate in the index. See [`CrateRef::parse`]. 
pub(crate) struct CrateRef<'a>(pub(crate) git2::Object<'a>); impl CrateRef<'_> { #[inline] /// Parse a crate from [`BareIndex::crates_blobs`] iterator pub fn parse(&self) -> Option<Crate> { Crate::from_slice(self.as_slice()?).ok() } /// Raw crate data that can be parsed with [`Crate::from_slice`] pub fn as_slice(&self) -> Option<&[u8]> { Some(self.0.as_blob()?.content()) } } impl<'a> Iterator for CrateRefs<'a> { type Item = CrateRef<'a>; fn next(&mut self) -> Option<Self::Item> { while let Some(last) = self.stack.pop() { match last.as_tree() { None => return Some(CrateRef(last)), Some(tree) => { for entry in tree.iter().rev() { self.stack.push(entry.to_object(&self.rt.repo).unwrap()); } continue; } } } None } } pub struct Crates<'a> { blobs: CrateRefs<'a>, } impl<'a> Iterator for Crates<'a> { type Item = Crate; fn next(&mut self) -> Option<Self::Item> { while let Some(next) = self.blobs.next() { if let Some(k) = CrateRef::parse(&next) { return Some(k); } } None } } /// Converts a full url, eg https://github.com/rust-lang/crates.io-index, into /// the root directory name where cargo itself will fetch it on disk fn url_to_local_dir(url: &str) -> Result<(String, String), Error> { fn to_hex(num: u64) -> String { const CHARS: &[u8] = b"0123456789abcdef"; let bytes = &[ num as u8, (num >> 8) as u8, (num >> 16) as u8, (num >> 24) as u8, (num >> 32) as u8, (num >> 40) as u8, (num >> 48) as u8, (num >> 56) as u8, ]; let mut output = vec![0u8; 16]; let mut ind = 0; for &byte in bytes { output[ind] = CHARS[(byte >> 4) as usize]; output[ind + 1] = CHARS[(byte & 0xf) as usize]; ind += 2; } String::from_utf8(output).expect("valid utf-8 hex string") } #[allow(deprecated)] fn hash_u64(url: &str) -> u64 { use std::hash::{Hash, Hasher, SipHasher}; let mut hasher = SipHasher::new_with_keys(0, 0); // Registry 2usize.hash(&mut hasher); // Url url.hash(&mut hasher); hasher.finish() } // Ensure we have a registry or bare url let (url, scheme_ind) = { let scheme_ind = url .find("://") .ok_or_else(|| Error::Url(format!("'{}' is not a valid url", url)))?; let scheme_str = &url[..scheme_ind]; if let Some(ind) = scheme_str.find('+') { if &scheme_str[..ind] != "registry" { return Err(Error::Url(format!("'{}' is not a valid registry url", url))); } (&url[ind + 1..], scheme_ind - ind - 1) } else { (url, scheme_ind) } }; // Could use the Url crate for this, but it's simple enough and we don't // need to deal with every possible url (I hope...) 
let host = match url[scheme_ind + 3..].find('/') { Some(end) => &url[scheme_ind + 3..scheme_ind + 3 + end], None => &url[scheme_ind + 3..], }; // cargo special cases github.com for reasons, so do the same let mut canonical = if host == "github.com" { url.to_lowercase() } else { url.to_owned() }; // Chop off any query params/fragments if let Some(hash) = canonical.rfind('#') { canonical.truncate(hash); } if let Some(query) = canonical.rfind('?') { canonical.truncate(query); } let ident = to_hex(hash_u64(&canonical)); if canonical.ends_with('/') { canonical.pop(); } if canonical.ends_with(".git") { canonical.truncate(canonical.len() - 4); } Ok((format!("{}-{}", host, ident), canonical)) } #[cfg(test)] mod test { #[test] fn matches_cargo() { assert_eq!( super::url_to_local_dir(crate::INDEX_GIT_URL).unwrap(), ( "github.com-1ecc6299db9ec823".to_owned(), crate::INDEX_GIT_URL.to_owned() ) ); // I've confirmed this also works with a custom registry, unfortunately // that one includes a secret key as part of the url which would allow // anyone to publish to the registry, so uhh...here's a fake one instead assert_eq!( super::url_to_local_dir( "https://dl.cloudsmith.io/aBcW1234aBcW1234/embark/rust/cargo/index.git" ) .unwrap(), ( "dl.cloudsmith.io-ff79e51ddd2b38fd".to_owned(), "https://dl.cloudsmith.io/aBcW1234aBcW1234/embark/rust/cargo/index".to_owned() ) ); // Ensure we actually strip off the irrelevant parts of a url, note that // the .git suffix is not part of the canonical url, but *is* used when hashing assert_eq!( super::url_to_local_dir(&format!( "registry+{}.git?one=1&two=2#fragment", crate::INDEX_GIT_URL )) .unwrap(), ( "github.com-c786010fb7ef2e6e".to_owned(), crate::INDEX_GIT_URL.to_owned() ) ); } #[test] fn bare_iterator() { use super::BareIndex; let tmp_dir = tempdir::TempDir::new("bare_iterator").unwrap(); let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL); let repo = index .open_or_clone() .expect("Failed to clone crates.io index"); let mut found_gcc_crate = false; for c in repo.crates() { if c.name() == "gcc" { found_gcc_crate = true; } } assert!(found_gcc_crate); } #[test] fn clones_bare_index() { use super::BareIndex; let tmp_dir = tempdir::TempDir::new("clones_bare_index").unwrap(); let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL); let mut repo = index .open_or_clone() .expect("Failed to clone crates.io index"); fn test_sval(repo: &super::BareIndexRepo<'_>) { let krate = repo .crate_("sval") .expect("Could not find the crate sval in the index"); let version = krate .versions() .iter() .find(|v| v.version() == "0.0.1") .expect("Version 0.0.1 of sval does not exist?"); let dep_with_package_name = version .dependencies() .iter() .find(|d| d.name() == "serde_lib") .expect("sval does not have expected dependency?"); assert_ne!( dep_with_package_name.name(), dep_with_package_name.package().unwrap() ); assert_eq!( dep_with_package_name.crate_name(), dep_with_package_name.package().unwrap() ); } test_sval(&repo); repo.retrieve().expect("Failed to fetch crates.io index"); test_sval(&repo); } #[test] fn opens_bare_index() { use super::BareIndex; let tmp_dir = tempdir::TempDir::new("opens_bare_index").unwrap(); let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL); { let _ = index .open_or_clone() .expect("Failed to clone crates.io index"); } let mut repo = index .open_or_clone() .expect("Failed to open crates.io index"); fn test_sval(repo: &super::BareIndexRepo<'_>)
test_sval(&repo); repo.retrieve().expect("Failed to fetch crates.io index"); test_sval(&repo); } }
{ let krate = repo .crate_("sval") .expect("Could not find the crate sval in the index"); let version = krate .versions() .iter() .find(|v| v.version() == "0.0.1") .expect("Version 0.0.1 of sval does not exist?"); let dep_with_package_name = version .dependencies() .iter() .find(|d| d.name() == "serde_lib") .expect("sval does not have expected dependency?"); assert_ne!( dep_with_package_name.name(), dep_with_package_name.package().unwrap() ); assert_eq!( dep_with_package_name.crate_name(), dep_with_package_name.package().unwrap() ); }
identifier_body
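Taken together, `BareIndex`, `BareIndexRepo`, and `Crate` give a small read-only API over cargo's bare registry clone. A minimal usage sketch, assuming `BareIndex` and `Error` from this module are in scope (the function name and printed output are illustrative only):

// Usage sketch (assumed imports; not part of the module above).
fn print_sval_versions() -> Result<(), Error> {
    // Same on-disk location cargo itself uses for crates.io.
    let index = BareIndex::new_cargo_default();

    // First call clones the bare index; later calls reuse the existing clone.
    let mut repo = index.open_or_clone()?;

    // Fetch the latest commit before querying (this invalidates .cache entries).
    repo.retrieve()?;

    // Cache-accelerated single-crate lookup.
    if let Some(krate) = repo.crate_("sval") {
        for v in krate.versions() {
            println!("sval {}", v.version());
        }
    }
    Ok(())
}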
bare_index.rs
use crate::{Crate, Error, IndexConfig}; use std::marker::PhantomPinned; use std::{ io, path::{Path, PathBuf}, }; /// Access to a "bare" git index that fetches files directly from the repo instead of local checkout /// /// Uses Cargo's cache pub struct BareIndex { path: PathBuf, pub url: String, } impl BareIndex { /// Creates a bare index from a provided URL, opening the same location on /// disk that cargo uses for that registry index. pub fn from_url(url: &str) -> Result<Self, Error> { let (dir_name, canonical_url) = url_to_local_dir(url)?; let mut path = home::cargo_home().unwrap_or_default(); path.push("registry/index"); path.push(dir_name); Ok(Self { path, url: canonical_url, }) }
pub fn with_path(path: PathBuf, url: &str) -> Self { Self { path, url: url.to_owned(), } } /// Creates an index for the default crates.io registry, using the same /// disk location as cargo itself. #[inline] pub fn new_cargo_default() -> Self { // UNWRAP: The default index git URL is known to safely convert to a path. Self::from_url(crate::INDEX_GIT_URL).unwrap() } /// Opens the local index, which acts as a kind of lock for source control /// operations #[inline] pub fn open_or_clone(&self) -> Result<BareIndexRepo<'_>, Error> { BareIndexRepo::new(self) } /// Get the index directory. #[inline] pub fn path(&self) -> &Path { &self.path } } /// Self-referential struct where `Tree` borrows from `Repository` struct UnsafeRepoTree { /// Warning: order of the fields is necessary for safety. `tree` must Drop before `repo`. tree: git2::Tree<'static>, repo: Box<git2::Repository>, // Currently !Unpin is Rust's heuristic for self-referential structs _self_referential: PhantomPinned, } /// Opened instance of [`BareIndex`] pub struct BareIndexRepo<'a> { inner: &'a BareIndex, head_str: String, rt: UnsafeRepoTree, } impl<'a> BareIndexRepo<'a> { fn new(index: &'a BareIndex) -> Result<Self, Error> { let exists = git2::Repository::discover(&index.path) .map(|repository| { repository .find_remote("origin") .ok() // Cargo creates a checkout without an origin set, // so default to true in case of missing origin .map_or(true, |remote| { remote.url().map_or(true, |url| url == index.url) }) }) .unwrap_or(false); let repo = if !exists { let mut opts = git2::RepositoryInitOptions::new(); opts.external_template(false); let repo = git2::Repository::init_opts(&index.path, &opts)?; { let mut origin_remote = repo .find_remote("origin") .or_else(|_| repo.remote_anonymous(&index.url))?; origin_remote.fetch( &[ "HEAD:refs/remotes/origin/HEAD", "master:refs/remotes/origin/master", ], Some(&mut crate::fetch_opts()), None, )?; } repo } else { git2::Repository::open(&index.path)? }; // It's going to be used in a self-referential type. Boxing prevents it from being moved // and adds a layer of indirection that will hopefully not upset noalias analysis. let repo = Box::new(repo); let head = repo // Fallback to HEAD, as a fresh clone won't have a FETCH_HEAD .refname_to_id("FETCH_HEAD") .or_else(|_| repo.refname_to_id("HEAD"))?; let head_str = head.to_string(); let tree = { let commit = repo.find_commit(head)?; let tree = commit.tree()?; // See `UnsafeRepoTree` unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) } }; Ok(Self { inner: index, head_str, rt: UnsafeRepoTree { repo, tree, _self_referential: PhantomPinned, }, }) } /// Fetches latest from the remote index repository. Note that using this /// method will mean no cache entries will be used, if a new commit is fetched /// from the repository, as their commit version will no longer match. 
pub fn retrieve(&mut self) -> Result<(), Error> { { let mut origin_remote = self .rt .repo .find_remote("origin") .or_else(|_| self.rt.repo.remote_anonymous(&self.inner.url))?; origin_remote.fetch( &[ "HEAD:refs/remotes/origin/HEAD", "master:refs/remotes/origin/master", ], Some(&mut crate::fetch_opts()), None, )?; } let head = self .rt .repo .refname_to_id("FETCH_HEAD") .or_else(|_| self.rt.repo.refname_to_id("HEAD"))?; let head_str = head.to_string(); let commit = self.rt.repo.find_commit(head)?; let tree = commit.tree()?; // See `UnsafeRepoTree` let tree = unsafe { std::mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) }; self.head_str = head_str; self.rt.tree = tree; Ok(()) } /// Reads a crate from the index, it will attempt to use a cached entry if /// one is available, otherwise it will fallback to reading the crate /// directly from the git blob containing the crate information. pub fn crate_(&self, name: &str) -> Option<Crate> { let rel_path = match crate::crate_name_to_relative_path(name) { Some(rp) => rp, None => return None, }; // Attempt to load the .cache/ entry first, this is purely an acceleration // mechanism and can fail for a few reasons that are non-fatal { let mut cache_path = self.inner.path.join(".cache"); cache_path.push(&rel_path); if let Ok(cache_bytes) = std::fs::read(&cache_path) { if let Ok(krate) = Crate::from_cache_slice(&cache_bytes, &self.head_str) { return Some(krate); } } } // Fallback to reading the blob directly via git if we don't have a // valid cache entry self.crate_from_rel_path(&rel_path).ok() } fn crate_from_rel_path(&self, path: &str) -> Result<Crate, Error> { let entry = self.rt.tree.get_path(&Path::new(path))?; let object = entry.to_object(&self.rt.repo)?; let blob = object .as_blob() .ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, path.to_owned())))?; Crate::from_slice(blob.content()).map_err(Error::Io) } /// Retrieve an iterator over all the crates in the index. /// skips crates that can not be parsed. #[inline] pub fn crates(&self) -> Crates<'_> { Crates { blobs: self.crates_refs(), } } /// Retrieve an iterator over all the crates in the index. /// Returns opaque reference for each crate in the index, which can be used with [`CrateRef::parse`] fn crates_refs(&self) -> CrateRefs<'_> { let mut stack = Vec::with_capacity(800); // Scan only directories at top level (skip config.json, etc.) for entry in self.rt.tree.iter() { let entry = entry.to_object(&self.rt.repo).unwrap(); if entry.as_tree().is_some() { stack.push(entry); } } CrateRefs { stack, rt: &self.rt, } } /// Get the global configuration of the index. pub fn index_config(&self) -> Result<IndexConfig, Error> { let entry = self.rt.tree.get_path(&Path::new("config.json"))?; let object = entry.to_object(&self.rt.repo)?; let blob = object .as_blob() .ok_or_else(|| Error::Io(io::Error::new(io::ErrorKind::NotFound, "config.json")))?; serde_json::from_slice(blob.content()).map_err(Error::Json) } } /// Iterator over all crates in the index, but returns opaque objects that can be parsed separately. /// /// See [`CrateRef::parse`]. struct CrateRefs<'a> { stack: Vec<git2::Object<'a>>, rt: &'a UnsafeRepoTree, } /// Opaque representation of a crate in the index. See [`CrateRef::parse`]. 
pub(crate) struct CrateRef<'a>(pub(crate) git2::Object<'a>); impl CrateRef<'_> { #[inline] /// Parse a crate from [`BareIndex::crates_blobs`] iterator pub fn parse(&self) -> Option<Crate> { Crate::from_slice(self.as_slice()?).ok() } /// Raw crate data that can be parsed with [`Crate::from_slice`] pub fn as_slice(&self) -> Option<&[u8]> { Some(self.0.as_blob()?.content()) } } impl<'a> Iterator for CrateRefs<'a> { type Item = CrateRef<'a>; fn next(&mut self) -> Option<Self::Item> { while let Some(last) = self.stack.pop() { match last.as_tree() { None => return Some(CrateRef(last)), Some(tree) => { for entry in tree.iter().rev() { self.stack.push(entry.to_object(&self.rt.repo).unwrap()); } continue; } } } None } } pub struct Crates<'a> { blobs: CrateRefs<'a>, } impl<'a> Iterator for Crates<'a> { type Item = Crate; fn next(&mut self) -> Option<Self::Item> { while let Some(next) = self.blobs.next() { if let Some(k) = CrateRef::parse(&next) { return Some(k); } } None } } /// Converts a full url, eg https://github.com/rust-lang/crates.io-index, into /// the root directory name where cargo itself will fetch it on disk fn url_to_local_dir(url: &str) -> Result<(String, String), Error> { fn to_hex(num: u64) -> String { const CHARS: &[u8] = b"0123456789abcdef"; let bytes = &[ num as u8, (num >> 8) as u8, (num >> 16) as u8, (num >> 24) as u8, (num >> 32) as u8, (num >> 40) as u8, (num >> 48) as u8, (num >> 56) as u8, ]; let mut output = vec![0u8; 16]; let mut ind = 0; for &byte in bytes { output[ind] = CHARS[(byte >> 4) as usize]; output[ind + 1] = CHARS[(byte & 0xf) as usize]; ind += 2; } String::from_utf8(output).expect("valid utf-8 hex string") } #[allow(deprecated)] fn hash_u64(url: &str) -> u64 { use std::hash::{Hash, Hasher, SipHasher}; let mut hasher = SipHasher::new_with_keys(0, 0); // Registry 2usize.hash(&mut hasher); // Url url.hash(&mut hasher); hasher.finish() } // Ensure we have a registry or bare url let (url, scheme_ind) = { let scheme_ind = url .find("://") .ok_or_else(|| Error::Url(format!("'{}' is not a valid url", url)))?; let scheme_str = &url[..scheme_ind]; if let Some(ind) = scheme_str.find('+') { if &scheme_str[..ind] != "registry" { return Err(Error::Url(format!("'{}' is not a valid registry url", url))); } (&url[ind + 1..], scheme_ind - ind - 1) } else { (url, scheme_ind) } }; // Could use the Url crate for this, but it's simple enough and we don't // need to deal with every possible url (I hope...) 
let host = match url[scheme_ind + 3..].find('/') { Some(end) => &url[scheme_ind + 3..scheme_ind + 3 + end], None => &url[scheme_ind + 3..], }; // cargo special cases github.com for reasons, so do the same let mut canonical = if host == "github.com" { url.to_lowercase() } else { url.to_owned() }; // Chop off any query params/fragments if let Some(hash) = canonical.rfind('#') { canonical.truncate(hash); } if let Some(query) = canonical.rfind('?') { canonical.truncate(query); } let ident = to_hex(hash_u64(&canonical)); if canonical.ends_with('/') { canonical.pop(); } if canonical.ends_with(".git") { canonical.truncate(canonical.len() - 4); } Ok((format!("{}-{}", host, ident), canonical)) } #[cfg(test)] mod test { #[test] fn matches_cargo() { assert_eq!( super::url_to_local_dir(crate::INDEX_GIT_URL).unwrap(), ( "github.com-1ecc6299db9ec823".to_owned(), crate::INDEX_GIT_URL.to_owned() ) ); // I've confirmed this also works with a custom registry, unfortunately // that one includes a secret key as part of the url which would allow // anyone to publish to the registry, so uhh...here's a fake one instead assert_eq!( super::url_to_local_dir( "https://dl.cloudsmith.io/aBcW1234aBcW1234/embark/rust/cargo/index.git" ) .unwrap(), ( "dl.cloudsmith.io-ff79e51ddd2b38fd".to_owned(), "https://dl.cloudsmith.io/aBcW1234aBcW1234/embark/rust/cargo/index".to_owned() ) ); // Ensure we actually strip off the irrelevant parts of a url, note that // the .git suffix is not part of the canonical url, but *is* used when hashing assert_eq!( super::url_to_local_dir(&format!( "registry+{}.git?one=1&two=2#fragment", crate::INDEX_GIT_URL )) .unwrap(), ( "github.com-c786010fb7ef2e6e".to_owned(), crate::INDEX_GIT_URL.to_owned() ) ); } #[test] fn bare_iterator() { use super::BareIndex; let tmp_dir = tempdir::TempDir::new("bare_iterator").unwrap(); let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL); let repo = index .open_or_clone() .expect("Failed to clone crates.io index"); let mut found_gcc_crate = false; for c in repo.crates() { if c.name() == "gcc" { found_gcc_crate = true; } } assert!(found_gcc_crate); } #[test] fn clones_bare_index() { use super::BareIndex; let tmp_dir = tempdir::TempDir::new("clones_bare_index").unwrap(); let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL); let mut repo = index .open_or_clone() .expect("Failed to clone crates.io index"); fn test_sval(repo: &super::BareIndexRepo<'_>) { let krate = repo .crate_("sval") .expect("Could not find the crate sval in the index"); let version = krate .versions() .iter() .find(|v| v.version() == "0.0.1") .expect("Version 0.0.1 of sval does not exist?"); let dep_with_package_name = version .dependencies() .iter() .find(|d| d.name() == "serde_lib") .expect("sval does not have expected dependency?"); assert_ne!( dep_with_package_name.name(), dep_with_package_name.package().unwrap() ); assert_eq!( dep_with_package_name.crate_name(), dep_with_package_name.package().unwrap() ); } test_sval(&repo); repo.retrieve().expect("Failed to fetch crates.io index"); test_sval(&repo); } #[test] fn opens_bare_index() { use super::BareIndex; let tmp_dir = tempdir::TempDir::new("opens_bare_index").unwrap(); let index = BareIndex::with_path(tmp_dir.path().to_owned(), crate::INDEX_GIT_URL); { let _ = index .open_or_clone() .expect("Failed to clone crates.io index"); } let mut repo = index .open_or_clone() .expect("Failed to open crates.io index"); fn test_sval(repo: &super::BareIndexRepo<'_>) { let krate = repo .crate_("sval") 
.expect("Could not find the crate sval in the index"); let version = krate .versions() .iter() .find(|v| v.version() == "0.0.1") .expect("Version 0.0.1 of sval does not exist?"); let dep_with_package_name = version .dependencies() .iter() .find(|d| d.name() == "serde_lib") .expect("sval does not have expected dependency?"); assert_ne!( dep_with_package_name.name(), dep_with_package_name.package().unwrap() ); assert_eq!( dep_with_package_name.crate_name(), dep_with_package_name.package().unwrap() ); } test_sval(&repo); repo.retrieve().expect("Failed to fetch crates.io index"); test_sval(&repo); } }
/// Creates a bare index at the provided path with the specified repository URL. #[inline]
random_line_split
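`crate_()` above probes cargo's `.cache/` mirror of the index before falling back to git objects, joining the path returned by `crate_name_to_relative_path`. That helper is not shown in this dump; the sketch below reconstructs the mapping from the well-known registry index layout, so treat it as an assumption about what the real function returns rather than a copy of it:

// Sketch of the registry index layout used for both the git tree and the
// `.cache/` directory: "1/a", "2/ab", "3/a/abc", "ab/cd/abcdef", ...
fn crate_relative_path(name: &str) -> Option<String> {
    if name.is_empty() || !name.is_ascii() {
        return None;
    }
    let lower = name.to_ascii_lowercase();
    let path = match lower.len() {
        1 => format!("1/{}", lower),
        2 => format!("2/{}", lower),
        3 => format!("3/{}/{}", &lower[..1], lower),
        _ => format!("{}/{}/{}", &lower[..2], &lower[2..4], lower),
    };
    Some(path)
}

// e.g. crate_relative_path("serde") == Some("se/rd/serde".to_string()),
// so the cache probe would be "<index path>/.cache/se/rd/serde".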
encode.rs
use std::collections::{HashMap, HashSet, BTreeMap}; use std::fmt; use std::str::FromStr; use regex::Regex; use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; use package::Package; use package_id::PackageId; use source::SourceId; use util::{CraftResult, Graph, Config, internal, ChainError, CraftError}; use workspace::Workspace; use super::Resolve; #[derive(RustcEncodable, RustcDecodable, Debug)] pub struct EncodableResolve { package: Option<Vec<EncodableDependency>>, /// `root` is optional to allow forward compatibility. root: Option<EncodableDependency>, metadata: Option<Metadata>, } pub type Metadata = BTreeMap<String, String>; impl EncodableResolve { pub fn into_resolve(self, ws: &Workspace) -> CraftResult<Resolve> { let path_deps = build_path_deps(ws); let packages = { let mut packages = self.package.unwrap_or(Vec::new()); if let Some(root) = self.root { packages.insert(0, root); } packages }; // `PackageId`s in the lock file don't include the `source` part // for workspace members, so we reconstruct proper ids. let (live_pkgs, all_pkgs) = { let mut live_pkgs = HashMap::new(); let mut all_pkgs = HashSet::new(); for pkg in packages.iter() { let enc_id = EncodablePackageId { name: pkg.name.clone(), version: pkg.version.clone(), source: pkg.source.clone(), }; if !all_pkgs.insert(enc_id.clone()) { return Err(internal(format!("package `{}` is specified twice in the lockfile", pkg.name))); } let id = match pkg.source.as_ref().or(path_deps.get(&pkg.name)) { // We failed to find a local package in the workspace. // It must have been removed and should be ignored. None => continue, Some(source) => PackageId::new(&pkg.name, &pkg.version, source)?, }; assert!(live_pkgs.insert(enc_id, (id, pkg)).is_none()) } (live_pkgs, all_pkgs) }; let lookup_id = |enc_id: &EncodablePackageId| -> CraftResult<Option<PackageId>> { match live_pkgs.get(enc_id) { Some(&(ref id, _)) => Ok(Some(id.clone())), None => { if all_pkgs.contains(enc_id) { // Package is found in the lockfile, but it is // no longer a member of the workspace. Ok(None) } else { Err(internal(format!("package `{}` is specified as a dependency, but is missing from the \ package list", enc_id))) } } } }; let g = { let mut g = Graph::new(); for &(ref id, _) in live_pkgs.values() { g.add(id.clone(), &[]); } for &(ref id, ref pkg) in live_pkgs.values() { let deps = match pkg.dependencies { Some(ref deps) => deps, None => continue, }; for edge in deps.iter() { if let Some(to_depend_on) = lookup_id(edge)? { g.link(id.clone(), to_depend_on); } } } g }; let replacements = { let mut replacements = HashMap::new(); for &(ref id, ref pkg) in live_pkgs.values() { if let Some(ref replace) = pkg.replace { assert!(pkg.dependencies.is_none()); if let Some(replace_id) = lookup_id(replace)? { replacements.insert(id.clone(), replace_id); } } } replacements }; let mut metadata = self.metadata.unwrap_or(BTreeMap::new()); // Parse out all package checksums. After we do this we can be in a few // situations: // // * We parsed no checksums. In this situation we're dealing with an old // lock file and we're gonna fill them all in. // * We parsed some checksums, but not one for all packages listed. It // could have been the case that some were listed, then an older Craft // client added more dependencies, and now we're going to fill in the // missing ones. // * There are too many checksums listed, indicative of an older Craft // client removing a package but not updating the checksums listed. 
// // In all of these situations they're part of normal usage, so we don't // really worry about it. We just try to slurp up as many checksums as // possible. let mut checksums = HashMap::new(); let prefix = "checksum "; let mut to_remove = Vec::new(); for (k, v) in metadata.iter().filter(|p| p.0.starts_with(prefix)) { to_remove.push(k.to_string()); let k = &k[prefix.len()..]; let enc_id: EncodablePackageId = k.parse() .chain_error(|| internal("invalid encoding of checksum in lockfile"))?; let id = match lookup_id(&enc_id) { Ok(Some(id)) => id, _ => continue, }; let v = if v == "<none>" { None } else { Some(v.to_string()) }; checksums.insert(id, v); } for k in to_remove { metadata.remove(&k); } Ok(Resolve { graph: g, features: HashMap::new(), replacements: replacements, checksums: checksums, metadata: metadata, }) } } fn build_path_deps(ws: &Workspace) -> HashMap<String, SourceId> { // If a chest is *not* a path source, then we're probably in a situation // such as `craft install` with a lock file from a remote dependency. In // that case we don't need to fixup any path dependencies (as they're not // actually path dependencies any more), so we ignore them. let members = ws.members() .filter(|p| p.package_id().source_id().is_path()) .collect::<Vec<_>>(); let mut ret = HashMap::new(); for member in members.iter() { ret.insert(member.package_id().name().to_string(), member.package_id().source_id().clone()); } for member in members.iter() { build(member, ws.config(), &mut ret); } return ret; fn build(pkg: &Package, config: &Config, ret: &mut HashMap<String, SourceId>) { let replace = pkg.manifest().replace(); let deps = pkg.dependencies() .iter() .chain(replace.iter().map(|p| &p.1)) .filter(|d| !ret.contains_key(d.name())) .map(|d| d.source_id()) .filter(|id| id.is_path()) .filter_map(|id| id.url().to_file_path().ok()) .map(|path| path.join("Craft.toml")) .filter_map(|path| Package::for_path(&path, config).ok()) .collect::<Vec<_>>(); for pkg in deps { ret.insert(pkg.name().to_string(), pkg.package_id().source_id().clone()); build(&pkg, config, ret); } } } #[derive(RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord, PartialEq, Eq)] pub struct EncodableDependency { name: String, version: String, source: Option<SourceId>, dependencies: Option<Vec<EncodablePackageId>>, replace: Option<EncodablePackageId>, } #[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Clone)] pub struct EncodablePackageId { name: String, version: String, source: Option<SourceId>, } impl fmt::Display for EncodablePackageId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{} {}", self.name, self.version)?; if let Some(ref s) = self.source { write!(f, " ({})", s.to_url())?; } Ok(()) } } impl FromStr for EncodablePackageId { type Err = Box<CraftError>; fn from_str(s: &str) -> CraftResult<EncodablePackageId> { let regex = Regex::new(r"^([^ ]+) ([^ ]+)(?: \(([^\)]+)\))?$").unwrap(); let captures = regex.captures(s).ok_or_else(|| internal("invalid serialized PackageId"))?; let name = captures.at(1).unwrap(); let version = captures.at(2).unwrap(); let source_id = match captures.at(3) { Some(s) => Some(SourceId::from_url(s)?), None => None, }; Ok(EncodablePackageId { name: name.to_string(), version: version.to_string(), source: source_id, }) } } impl Encodable for EncodablePackageId { fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> { self.to_string().encode(s) } } impl Decodable for EncodablePackageId { fn decode<D: Decoder>(d: &mut D) -> Result<EncodablePackageId, D::Error> { 
String::decode(d).and_then(|string| { string.parse::<EncodablePackageId>() .map_err(|e| d.error(&e.to_string())) }) } } pub struct WorkspaceResolve<'a, 'cfg: 'a> { pub ws: &'a Workspace<'cfg>, pub resolve: &'a Resolve, pub use_root_key: bool, } impl<'a, 'cfg> Encodable for WorkspaceResolve<'a, 'cfg> { fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> { let mut ids: Vec<&PackageId> = self.resolve.graph.iter().collect(); ids.sort(); let root = self.ws .members() .max_by_key(|member| member.name()) .unwrap() .package_id(); let encodable = ids.iter() .filter_map(|&id| { if self.use_root_key && root == id { return None; } Some(encodable_resolve_node(id, self.resolve)) }) .collect::<Vec<_>>(); let mut metadata = self.resolve.metadata.clone(); for id in ids.iter().filter(|id| !id.source_id().is_path()) { let checksum = match self.resolve.checksums[*id] { Some(ref s) => &s[..], None => "<none>", }; let id = encodable_package_id(id); metadata.insert(format!("checksum {}", id.to_string()), checksum.to_string()); } let metadata = if metadata.len() == 0 { None } else { Some(metadata) }; let root = if self.use_root_key { Some(encodable_resolve_node(&root, self.resolve)) } else { None }; EncodableResolve { package: Some(encodable), root: root, metadata: metadata, } .encode(s) } } fn encodable_resolve_node(id: &PackageId, resolve: &Resolve) -> EncodableDependency { let (replace, deps) = match resolve.replacement(id) { Some(id) => (Some(encodable_package_id(id)), None), None => { let mut deps = resolve.graph .edges(id) .into_iter() .flat_map(|a| a) .map(encodable_package_id) .collect::<Vec<_>>(); deps.sort(); (None, Some(deps)) } }; let source = if id.source_id().is_path() { None } else { Some(id.source_id().clone()) }; EncodableDependency { name: id.name().to_string(), version: id.version().to_string(), source: source, dependencies: deps, replace: replace, } } fn encodable_package_id(id: &PackageId) -> EncodablePackageId
{ let source = if id.source_id().is_path() { None } else { Some(id.source_id().with_precise(None)) }; EncodablePackageId { name: id.name().to_string(), version: id.version().to_string(), source: source, } }
identifier_body
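The checksum loop in `into_resolve` strips the `"checksum "` prefix from each metadata key, parses the remainder as an `EncodablePackageId`, and treats the literal value `"<none>"` as a missing checksum. A minimal standalone sketch of that key/value convention, using plain strings instead of the crate's `PackageId` machinery:

use std::collections::BTreeMap;

// Sketch: extract (package id string, optional checksum) pairs from lockfile
// metadata shaped like
//   "checksum serde 1.0.0 (registry+https://...)" => "<hash>" | "<none>"
fn parse_checksums(metadata: &BTreeMap<String, String>) -> Vec<(String, Option<String>)> {
    const PREFIX: &str = "checksum ";
    metadata
        .iter()
        .filter(|(k, _)| k.starts_with(PREFIX))
        .map(|(k, v)| {
            let id = k[PREFIX.len()..].to_string();
            let sum = if v == "<none>" { None } else { Some(v.clone()) };
            (id, sum)
        })
        .collect()
}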
encode.rs
use std::collections::{HashMap, HashSet, BTreeMap}; use std::fmt; use std::str::FromStr; use regex::Regex; use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; use package::Package; use package_id::PackageId; use source::SourceId; use util::{CraftResult, Graph, Config, internal, ChainError, CraftError}; use workspace::Workspace; use super::Resolve; #[derive(RustcEncodable, RustcDecodable, Debug)] pub struct EncodableResolve { package: Option<Vec<EncodableDependency>>, /// `root` is optional to allow forward compatibility. root: Option<EncodableDependency>, metadata: Option<Metadata>, } pub type Metadata = BTreeMap<String, String>; impl EncodableResolve { pub fn into_resolve(self, ws: &Workspace) -> CraftResult<Resolve> { let path_deps = build_path_deps(ws); let packages = { let mut packages = self.package.unwrap_or(Vec::new()); if let Some(root) = self.root { packages.insert(0, root); } packages }; // `PackageId`s in the lock file don't include the `source` part // for workspace members, so we reconstruct proper ids. let (live_pkgs, all_pkgs) = { let mut live_pkgs = HashMap::new(); let mut all_pkgs = HashSet::new(); for pkg in packages.iter() { let enc_id = EncodablePackageId { name: pkg.name.clone(), version: pkg.version.clone(), source: pkg.source.clone(), }; if !all_pkgs.insert(enc_id.clone()) { return Err(internal(format!("package `{}` is specified twice in the lockfile", pkg.name))); } let id = match pkg.source.as_ref().or(path_deps.get(&pkg.name)) { // We failed to find a local package in the workspace. // It must have been removed and should be ignored. None => continue, Some(source) => PackageId::new(&pkg.name, &pkg.version, source)?, }; assert!(live_pkgs.insert(enc_id, (id, pkg)).is_none()) } (live_pkgs, all_pkgs) }; let lookup_id = |enc_id: &EncodablePackageId| -> CraftResult<Option<PackageId>> { match live_pkgs.get(enc_id) { Some(&(ref id, _)) => Ok(Some(id.clone())), None => { if all_pkgs.contains(enc_id) { // Package is found in the lockfile, but it is // no longer a member of the workspace. Ok(None) } else { Err(internal(format!("package `{}` is specified as a dependency, but is missing from the \ package list", enc_id))) } } } }; let g = { let mut g = Graph::new(); for &(ref id, _) in live_pkgs.values() { g.add(id.clone(), &[]); } for &(ref id, ref pkg) in live_pkgs.values() { let deps = match pkg.dependencies { Some(ref deps) => deps, None => continue, }; for edge in deps.iter() { if let Some(to_depend_on) = lookup_id(edge)? { g.link(id.clone(), to_depend_on); } } } g }; let replacements = { let mut replacements = HashMap::new(); for &(ref id, ref pkg) in live_pkgs.values() { if let Some(ref replace) = pkg.replace { assert!(pkg.dependencies.is_none()); if let Some(replace_id) = lookup_id(replace)? { replacements.insert(id.clone(), replace_id); } } } replacements }; let mut metadata = self.metadata.unwrap_or(BTreeMap::new()); // Parse out all package checksums. After we do this we can be in a few // situations: // // * We parsed no checksums. In this situation we're dealing with an old // lock file and we're gonna fill them all in. // * We parsed some checksums, but not one for all packages listed. It // could have been the case that some were listed, then an older Craft // client added more dependencies, and now we're going to fill in the // missing ones. // * There are too many checksums listed, indicative of an older Craft // client removing a package but not updating the checksums listed. 
// // In all of these situations they're part of normal usage, so we don't // really worry about it. We just try to slurp up as many checksums as // possible. let mut checksums = HashMap::new(); let prefix = "checksum "; let mut to_remove = Vec::new(); for (k, v) in metadata.iter().filter(|p| p.0.starts_with(prefix)) { to_remove.push(k.to_string()); let k = &k[prefix.len()..]; let enc_id: EncodablePackageId = k.parse() .chain_error(|| internal("invalid encoding of checksum in lockfile"))?; let id = match lookup_id(&enc_id) { Ok(Some(id)) => id, _ => continue, }; let v = if v == "<none>" { None } else { Some(v.to_string()) }; checksums.insert(id, v); } for k in to_remove { metadata.remove(&k); } Ok(Resolve { graph: g, features: HashMap::new(), replacements: replacements, checksums: checksums, metadata: metadata, }) } } fn build_path_deps(ws: &Workspace) -> HashMap<String, SourceId> { // If a chest is *not* a path source, then we're probably in a situation // such as `craft install` with a lock file from a remote dependency. In // that case we don't need to fixup any path dependencies (as they're not // actually path dependencies any more), so we ignore them. let members = ws.members() .filter(|p| p.package_id().source_id().is_path()) .collect::<Vec<_>>(); let mut ret = HashMap::new(); for member in members.iter() { ret.insert(member.package_id().name().to_string(), member.package_id().source_id().clone()); } for member in members.iter() { build(member, ws.config(), &mut ret); } return ret; fn build(pkg: &Package, config: &Config, ret: &mut HashMap<String, SourceId>) { let replace = pkg.manifest().replace(); let deps = pkg.dependencies() .iter() .chain(replace.iter().map(|p| &p.1)) .filter(|d| !ret.contains_key(d.name())) .map(|d| d.source_id()) .filter(|id| id.is_path()) .filter_map(|id| id.url().to_file_path().ok()) .map(|path| path.join("Craft.toml")) .filter_map(|path| Package::for_path(&path, config).ok()) .collect::<Vec<_>>(); for pkg in deps { ret.insert(pkg.name().to_string(), pkg.package_id().source_id().clone()); build(&pkg, config, ret); } } } #[derive(RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord, PartialEq, Eq)] pub struct EncodableDependency { name: String, version: String, source: Option<SourceId>, dependencies: Option<Vec<EncodablePackageId>>, replace: Option<EncodablePackageId>, } #[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Clone)] pub struct EncodablePackageId { name: String, version: String, source: Option<SourceId>, } impl fmt::Display for EncodablePackageId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{} {}", self.name, self.version)?; if let Some(ref s) = self.source { write!(f, " ({})", s.to_url())?; } Ok(()) } } impl FromStr for EncodablePackageId { type Err = Box<CraftError>; fn
(s: &str) -> CraftResult<EncodablePackageId> { let regex = Regex::new(r"^([^ ]+) ([^ ]+)(?: \(([^\)]+)\))?$").unwrap(); let captures = regex.captures(s).ok_or_else(|| internal("invalid serialized PackageId"))?; let name = captures.at(1).unwrap(); let version = captures.at(2).unwrap(); let source_id = match captures.at(3) { Some(s) => Some(SourceId::from_url(s)?), None => None, }; Ok(EncodablePackageId { name: name.to_string(), version: version.to_string(), source: source_id, }) } } impl Encodable for EncodablePackageId { fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> { self.to_string().encode(s) } } impl Decodable for EncodablePackageId { fn decode<D: Decoder>(d: &mut D) -> Result<EncodablePackageId, D::Error> { String::decode(d).and_then(|string| { string.parse::<EncodablePackageId>() .map_err(|e| d.error(&e.to_string())) }) } } pub struct WorkspaceResolve<'a, 'cfg: 'a> { pub ws: &'a Workspace<'cfg>, pub resolve: &'a Resolve, pub use_root_key: bool, } impl<'a, 'cfg> Encodable for WorkspaceResolve<'a, 'cfg> { fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> { let mut ids: Vec<&PackageId> = self.resolve.graph.iter().collect(); ids.sort(); let root = self.ws .members() .max_by_key(|member| member.name()) .unwrap() .package_id(); let encodable = ids.iter() .filter_map(|&id| { if self.use_root_key && root == id { return None; } Some(encodable_resolve_node(id, self.resolve)) }) .collect::<Vec<_>>(); let mut metadata = self.resolve.metadata.clone(); for id in ids.iter().filter(|id| !id.source_id().is_path()) { let checksum = match self.resolve.checksums[*id] { Some(ref s) => &s[..], None => "<none>", }; let id = encodable_package_id(id); metadata.insert(format!("checksum {}", id.to_string()), checksum.to_string()); } let metadata = if metadata.len() == 0 { None } else { Some(metadata) }; let root = if self.use_root_key { Some(encodable_resolve_node(&root, self.resolve)) } else { None }; EncodableResolve { package: Some(encodable), root: root, metadata: metadata, } .encode(s) } } fn encodable_resolve_node(id: &PackageId, resolve: &Resolve) -> EncodableDependency { let (replace, deps) = match resolve.replacement(id) { Some(id) => (Some(encodable_package_id(id)), None), None => { let mut deps = resolve.graph .edges(id) .into_iter() .flat_map(|a| a) .map(encodable_package_id) .collect::<Vec<_>>(); deps.sort(); (None, Some(deps)) } }; let source = if id.source_id().is_path() { None } else { Some(id.source_id().clone()) }; EncodableDependency { name: id.name().to_string(), version: id.version().to_string(), source: source, dependencies: deps, replace: replace, } } fn encodable_package_id(id: &PackageId) -> EncodablePackageId { let source = if id.source_id().is_path() { None } else { Some(id.source_id().with_precise(None)) }; EncodablePackageId { name: id.name().to_string(), version: id.version().to_string(), source: source, } }
from_str
identifier_name
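The `from_str` body filled in for this example parses strings of the form `name version (source-url)`, with the source part optional. The same parse, sketched against the current `regex` crate API (the listing above uses the older `captures.at(..)` accessor from the rustc-serialize era), returning plain strings instead of constructing an `EncodablePackageId`:

use regex::Regex;

// Sketch: split "serde 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)"
// into (name, version, optional source url).
fn parse_package_id(s: &str) -> Option<(String, String, Option<String>)> {
    let re = Regex::new(r"^([^ ]+) ([^ ]+)(?: \(([^\)]+)\))?$").ok()?;
    let caps = re.captures(s)?;
    Some((
        caps[1].to_string(),
        caps[2].to_string(),
        caps.get(3).map(|m| m.as_str().to_string()),
    ))
}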
encode.rs
use std::collections::{HashMap, HashSet, BTreeMap}; use std::fmt; use std::str::FromStr; use regex::Regex; use rustc_serialize::{Encodable, Encoder, Decodable, Decoder}; use package::Package; use package_id::PackageId; use source::SourceId; use util::{CraftResult, Graph, Config, internal, ChainError, CraftError}; use workspace::Workspace; use super::Resolve; #[derive(RustcEncodable, RustcDecodable, Debug)] pub struct EncodableResolve { package: Option<Vec<EncodableDependency>>, /// `root` is optional to allow forward compatibility. root: Option<EncodableDependency>, metadata: Option<Metadata>, } pub type Metadata = BTreeMap<String, String>; impl EncodableResolve { pub fn into_resolve(self, ws: &Workspace) -> CraftResult<Resolve> { let path_deps = build_path_deps(ws); let packages = { let mut packages = self.package.unwrap_or(Vec::new()); if let Some(root) = self.root { packages.insert(0, root); } packages }; // `PackageId`s in the lock file don't include the `source` part // for workspace members, so we reconstruct proper ids. let (live_pkgs, all_pkgs) = { let mut live_pkgs = HashMap::new(); let mut all_pkgs = HashSet::new(); for pkg in packages.iter() { let enc_id = EncodablePackageId { name: pkg.name.clone(), version: pkg.version.clone(), source: pkg.source.clone(), }; if !all_pkgs.insert(enc_id.clone()) { return Err(internal(format!("package `{}` is specified twice in the lockfile", pkg.name))); } let id = match pkg.source.as_ref().or(path_deps.get(&pkg.name)) { // We failed to find a local package in the workspace. // It must have been removed and should be ignored. None => continue, Some(source) => PackageId::new(&pkg.name, &pkg.version, source)?, }; assert!(live_pkgs.insert(enc_id, (id, pkg)).is_none()) } (live_pkgs, all_pkgs) }; let lookup_id = |enc_id: &EncodablePackageId| -> CraftResult<Option<PackageId>> { match live_pkgs.get(enc_id) { Some(&(ref id, _)) => Ok(Some(id.clone())), None => { if all_pkgs.contains(enc_id) { // Package is found in the lockfile, but it is // no longer a member of the workspace. Ok(None) } else { Err(internal(format!("package `{}` is specified as a dependency, but is missing from the \ package list", enc_id))) } } } }; let g = { let mut g = Graph::new(); for &(ref id, _) in live_pkgs.values() { g.add(id.clone(), &[]); } for &(ref id, ref pkg) in live_pkgs.values() { let deps = match pkg.dependencies { Some(ref deps) => deps, None => continue, };
} } } g }; let replacements = { let mut replacements = HashMap::new(); for &(ref id, ref pkg) in live_pkgs.values() { if let Some(ref replace) = pkg.replace { assert!(pkg.dependencies.is_none()); if let Some(replace_id) = lookup_id(replace)? { replacements.insert(id.clone(), replace_id); } } } replacements }; let mut metadata = self.metadata.unwrap_or(BTreeMap::new()); // Parse out all package checksums. After we do this we can be in a few // situations: // // * We parsed no checksums. In this situation we're dealing with an old // lock file and we're gonna fill them all in. // * We parsed some checksums, but not one for all packages listed. It // could have been the case that some were listed, then an older Craft // client added more dependencies, and now we're going to fill in the // missing ones. // * There are too many checksums listed, indicative of an older Craft // client removing a package but not updating the checksums listed. // // In all of these situations they're part of normal usage, so we don't // really worry about it. We just try to slurp up as many checksums as // possible. let mut checksums = HashMap::new(); let prefix = "checksum "; let mut to_remove = Vec::new(); for (k, v) in metadata.iter().filter(|p| p.0.starts_with(prefix)) { to_remove.push(k.to_string()); let k = &k[prefix.len()..]; let enc_id: EncodablePackageId = k.parse() .chain_error(|| internal("invalid encoding of checksum in lockfile"))?; let id = match lookup_id(&enc_id) { Ok(Some(id)) => id, _ => continue, }; let v = if v == "<none>" { None } else { Some(v.to_string()) }; checksums.insert(id, v); } for k in to_remove { metadata.remove(&k); } Ok(Resolve { graph: g, features: HashMap::new(), replacements: replacements, checksums: checksums, metadata: metadata, }) } } fn build_path_deps(ws: &Workspace) -> HashMap<String, SourceId> { // If a chest is *not* a path source, then we're probably in a situation // such as `craft install` with a lock file from a remote dependency. In // that case we don't need to fixup any path dependencies (as they're not // actually path dependencies any more), so we ignore them. 
let members = ws.members() .filter(|p| p.package_id().source_id().is_path()) .collect::<Vec<_>>(); let mut ret = HashMap::new(); for member in members.iter() { ret.insert(member.package_id().name().to_string(), member.package_id().source_id().clone()); } for member in members.iter() { build(member, ws.config(), &mut ret); } return ret; fn build(pkg: &Package, config: &Config, ret: &mut HashMap<String, SourceId>) { let replace = pkg.manifest().replace(); let deps = pkg.dependencies() .iter() .chain(replace.iter().map(|p| &p.1)) .filter(|d| !ret.contains_key(d.name())) .map(|d| d.source_id()) .filter(|id| id.is_path()) .filter_map(|id| id.url().to_file_path().ok()) .map(|path| path.join("Craft.toml")) .filter_map(|path| Package::for_path(&path, config).ok()) .collect::<Vec<_>>(); for pkg in deps { ret.insert(pkg.name().to_string(), pkg.package_id().source_id().clone()); build(&pkg, config, ret); } } } #[derive(RustcEncodable, RustcDecodable, Debug, PartialOrd, Ord, PartialEq, Eq)] pub struct EncodableDependency { name: String, version: String, source: Option<SourceId>, dependencies: Option<Vec<EncodablePackageId>>, replace: Option<EncodablePackageId>, } #[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Hash, Clone)] pub struct EncodablePackageId { name: String, version: String, source: Option<SourceId>, } impl fmt::Display for EncodablePackageId { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{} {}", self.name, self.version)?; if let Some(ref s) = self.source { write!(f, " ({})", s.to_url())?; } Ok(()) } } impl FromStr for EncodablePackageId { type Err = Box<CraftError>; fn from_str(s: &str) -> CraftResult<EncodablePackageId> { let regex = Regex::new(r"^([^ ]+) ([^ ]+)(?: \(([^\)]+)\))?$").unwrap(); let captures = regex.captures(s).ok_or_else(|| internal("invalid serialized PackageId"))?; let name = captures.at(1).unwrap(); let version = captures.at(2).unwrap(); let source_id = match captures.at(3) { Some(s) => Some(SourceId::from_url(s)?), None => None, }; Ok(EncodablePackageId { name: name.to_string(), version: version.to_string(), source: source_id, }) } } impl Encodable for EncodablePackageId { fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> { self.to_string().encode(s) } } impl Decodable for EncodablePackageId { fn decode<D: Decoder>(d: &mut D) -> Result<EncodablePackageId, D::Error> { String::decode(d).and_then(|string| { string.parse::<EncodablePackageId>() .map_err(|e| d.error(&e.to_string())) }) } } pub struct WorkspaceResolve<'a, 'cfg: 'a> { pub ws: &'a Workspace<'cfg>, pub resolve: &'a Resolve, pub use_root_key: bool, } impl<'a, 'cfg> Encodable for WorkspaceResolve<'a, 'cfg> { fn encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> { let mut ids: Vec<&PackageId> = self.resolve.graph.iter().collect(); ids.sort(); let root = self.ws .members() .max_by_key(|member| member.name()) .unwrap() .package_id(); let encodable = ids.iter() .filter_map(|&id| { if self.use_root_key && root == id { return None; } Some(encodable_resolve_node(id, self.resolve)) }) .collect::<Vec<_>>(); let mut metadata = self.resolve.metadata.clone(); for id in ids.iter().filter(|id| !id.source_id().is_path()) { let checksum = match self.resolve.checksums[*id] { Some(ref s) => &s[..], None => "<none>", }; let id = encodable_package_id(id); metadata.insert(format!("checksum {}", id.to_string()), checksum.to_string()); } let metadata = if metadata.len() == 0 { None } else { Some(metadata) }; let root = if self.use_root_key { Some(encodable_resolve_node(&root, self.resolve)) 
} else { None }; EncodableResolve { package: Some(encodable), root: root, metadata: metadata, } .encode(s) } } fn encodable_resolve_node(id: &PackageId, resolve: &Resolve) -> EncodableDependency { let (replace, deps) = match resolve.replacement(id) { Some(id) => (Some(encodable_package_id(id)), None), None => { let mut deps = resolve.graph .edges(id) .into_iter() .flat_map(|a| a) .map(encodable_package_id) .collect::<Vec<_>>(); deps.sort(); (None, Some(deps)) } }; let source = if id.source_id().is_path() { None } else { Some(id.source_id().clone()) }; EncodableDependency { name: id.name().to_string(), version: id.version().to_string(), source: source, dependencies: deps, replace: replace, } } fn encodable_package_id(id: &PackageId) -> EncodablePackageId { let source = if id.source_id().is_path() { None } else { Some(id.source_id().with_precise(None)) }; EncodablePackageId { name: id.name().to_string(), version: id.version().to_string(), source: source, } }
for edge in deps.iter() { if let Some(to_depend_on) = lookup_id(edge)? { g.link(id.clone(), to_depend_on);
random_line_split
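The elided span for this example is the inner loop that links each live package to the ids of its resolved dependencies via `g.link(..)`, after every node was registered with `g.add(id, &[])`. The real `Graph` lives in `util` and is generic over `PackageId`; the stand-in below only sketches the add/link pattern with `String` keys, as an assumption about the minimal interface being used:

use std::collections::{BTreeMap, BTreeSet};

// Stand-in for the `util::Graph` usage in `into_resolve` (sketch only).
#[derive(Default)]
struct DepGraph {
    edges: BTreeMap<String, BTreeSet<String>>,
}

impl DepGraph {
    // Mirrors `g.add(id, &[])`: ensure the node exists even with no edges.
    fn add(&mut self, id: &str) {
        self.edges.entry(id.to_string()).or_default();
    }

    // Mirrors `g.link(id, dep)`: record that `id` depends on `dep`.
    fn link(&mut self, id: &str, dep: &str) {
        self.edges
            .entry(id.to_string())
            .or_default()
            .insert(dep.to_string());
    }
}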
MCObserver.py
#!/usr/bin/env python3 ########################################################################################################################## # # 2017/03 Thomas Britton # # Options: # MC variation can be changed by supplying "variation=xxxxx" option otherwise default: mc # the number of events to be generated per file (except for any remainder) can be set by "per_file=xxxx" default: 1000 # # If the user does not want genr8, geant, smearing, reconstruction to be performed the sequence will be terminated at the first instance of genr8=0,geant=0,mcsmear=0,recon=0 default: all on # Similarly, if the user wishes to retain the files created by any step you can supply the cleangenr8=0, cleangeant=0, cleanmcsmear=0, or cleanrecon=0 options. By default all but the reconstruction files # are cleaned. # # The reconstruction step is multi-threaded, for this step, if enabled, the script will use 4 threads. This threading can be changed with the "numthreads=xxx" option # # By default the job will run interactively in the local directory. If the user wishes to submit the jobs to swif the option "swif=1" must be supplied. # # SWIF DOCUMENTATION: # https://scicomp.jlab.org/docs/swif # https://scicomp.jlab.org/docs/swif-cli # https://scicomp.jlab.org/help/swif/add-job.txt #consider phase! # ########################################################################################################################## import MySQLdb #import MySQLdb.cursors from os import environ from optparse import OptionParser import os.path #import mysql.connector import time import os import getpass import sys import re import subprocess from subprocess import call import socket import glob import json import time from datetime import timedelta from datetime import datetime import smtplib from email.message import EmailMessage from multiprocessing import Process import random import pipes import random import pwd MCWRAPPER_BOT_HOST_NAME=str(socket.gethostname()) dbhost = "hallddb.jlab.org" dbuser = 'mcuser' dbpass = '' dbname = 'gluex_mc' try: dbcnx=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname) dbcursor=dbcnx.cursor(MySQLdb.cursors.DictCursor) except: print("WARNING: CANNOT CONNECT TO DATABASE. 
JOBS WILL NOT BE CONTROLLED OR MONITORED") pass runner_name=pwd.getpwuid( os.getuid() )[0] if( not (runner_name=="tbritton" or runner_name=="mcwrap")): print("ERROR: You must be tbritton or mcwrap to run this script") sys.exit(1) def exists_remote(host, path): """Test if a file exists at path on a host accessible with SSH.""" status = subprocess.call( ['ssh', host, 'test -f {}'.format(pipes.quote(path))]) if status == 0: return True if status == 1: return False raise Exception('SSH failed') def CheckForFile(rootLoc,expFile): found=False subloc="hddm" parse_expFile=expFile.split(".") #print(parse_expFile[len(parse_expFile)-1]) if(parse_expFile[len(parse_expFile)-1]=="root"): subloc="root/monitoring_hists" #if(os.path.isfile('/osgpool/halld/tbritton/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ): if(os.path.isfile('/osgpool/halld/'+runner_name+'/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/work/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ): found=True else: print(rootLoc+"/"+subloc+"/"+expFile+" NOT FOUND") return found def checkJobFilesForCompletion(comp_assignment): #OutstandingProjectsQuery="SELECT * FROM Project WHERE (Is_Dispatched != '0' && Tested != '-1' && Tested != '2' ) && Notified is NULL" #dbcursor.execute(OutstandingProjectsQuery) #OutstandingProjects=dbcursor.fetchall() dbcnx_comp=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname) dbcursor_comp=dbcnx_comp.cursor(MySQLdb.cursors.DictCursor) outdir_root="/osgpool/halld/"+runner_name+"/REQUESTEDMC_OUTPUT/" print("checking "+str(len(comp_assignment))) for attempt in comp_assignment:#OutstandingProjects: jobinfoq="SELECT * from Jobs where ID="+str(attempt["Job_ID"]) dbcursor.execute(jobinfoq) job = dbcursor.fetchall()[0] projq="SELECT * From Project Where ID="+str(job["Project_ID"]) dbcursor.execute(projq) proj = dbcursor.fetchall()[0] locparts=proj['OutputLocation'].split("/") #print("~~~~~~~~~~~~~~~~~~") #print("ProjID:",proj['ID']) files=[] dirs=[] #print locparts[len(locparts)-2] for r, dirs, files in os.walk(outdir_root+locparts[len(locparts)-2]) : files = [f for f in files if not f[0] == '.'] dirs[:] = [d for d in dirs if not d[0] == '.'] #print("NumFiles:",len(files)) #print(dirs) #DISTINCT ID ------in query below #print(fulfilledJobs) #print("Jobs fulfilled:",str(len(fulfilledJobs))) if(proj["Tested"]==2 or proj["Tested"]==3): continue rootLoc=proj['OutputLocation'].split("REQUESTED_MC")[1]#.replace("/","") nullify_list=[] #print("Data already Verified?",job['DataVerified']) if(job['DataVerified'] !=0 ): continue STANDARD_NAME=str(job['RunNumber']).zfill(6)+'_'+str(job['FileNumber']).zfill(3) if(proj['Generator']!="file:"): STANDARD_NAME=proj['Generator']+'_'+STANDARD_NAME #print(STANDARD_NAME) #check if postprocessor is being run postproc_append="" if(proj['GenPostProcessing'] != None and proj['GenPostProcessing'] != ""): print("Postprocessing:",proj['GenPostProcessing']) postproc_append="_"+proj['GenPostProcessing'].split(":")[0] Expected_returned_files=[] 
if(str(proj['RunGeneration'])=="1" and str(proj['SaveGeneration'])=="1" and str(proj['Generator'])!="particle_gun"): Expected_returned_files.append(STANDARD_NAME+postproc_append+".hddm") if(str(proj['RunGeant'])=="1" and str(proj['SaveGeant'])=="1"): Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+postproc_append+'.hddm') if(str(proj['RunSmear'])=="1" and str(proj['SaveSmear'])=="1"): Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+'_smeared'+postproc_append+'.hddm') if(str(proj['RunReconstruction'])=="1" and str(proj['SaveReconstruction'])=="1"): Expected_returned_files.append('dana_rest_'+STANDARD_NAME+postproc_append+'.hddm') Expected_returned_files.append('hd_root_'+STANDARD_NAME+postproc_append+'.root') found_AllexpFile=True for expFile in Expected_returned_files: #print(expFile) #print("checking for",expFile,"@",rootLoc) found=CheckForFile(rootLoc,expFile) if not found: #print(expFile+" NOT FOUND!!!!") found_AllexpFile=False break if found_AllexpFile: Update_q="UPDATE Attempts Set Status=44,ExitCode=0 where ID="+str(attempt["ID"]) print(Update_q) dbcursor_comp.execute(Update_q) dbcnx_comp.commit() else: continue ########################################################## MAIN ########################################################## def array_split(lst,n): to_return=[] for i in range(0,n): to_return.append([]) for count, ele in enumerate(lst): #print(ele) index=count%n #print(index) to_return[index].append(ele) #print(count) #print(len(to_return)) return to_return def main(argv):
if __name__ == "__main__": main(sys.argv[1:])
runnum=0 runmax=-1 spawnNum=10 numOverRide=False if(len(argv) !=0): numOverRide=True numprocesses_running=subprocess.check_output(["echo `ps all -u "+runner_name+" | grep MCObserver.py | grep -v grep | wc -l`"], shell=True) print(int(numprocesses_running)) if(int(numprocesses_running) <2 or numOverRide): while(runnum<runmax or runmax==-1): runnum=runnum+1 try: queryosgjobs="SELECT * from Attempts WHERE BatchSystem='OSG' && SubmitHost=\""+MCWRAPPER_BOT_HOST_NAME+"\" && Status !='4' && Status !='3' && Status!= '6' && Status != '5' && Status != '44';"# || (Status='4' && ExitCode != 0 && ProgramFailed is NULL) ORDER BY ID desc;" #print queryosgjobs dbcursor.execute(queryosgjobs) Alljobs = list(dbcursor.fetchall()) #print(Alljobs[:5]) random.shuffle(Alljobs) #print(Alljobs[:5]) Monitoring_assignments=array_split(Alljobs,spawnNum) spawns=[] for i in range(0,spawnNum): time.sleep(random.randint(1,spawnNum)) print("block "+str(i)) print(len(Monitoring_assignments[i])) if(len(Monitoring_assignments[i])>0): p=Process(target=checkJobFilesForCompletion,args=(Monitoring_assignments[i],)) p.daemon = True spawns.append(p) #p.join() for i in range(0,len(spawns)): #print("join "+str(i)) time.sleep(random.randint(1,spawnNum)) spawns[i].start() #time.sleep(2) for i in range(0,len(spawns)): if spawns[i].is_alive(): #print("join "+str(i)) spawns[i].join() except Exception as e: print(e) break dbcnx.close()
identifier_body
MCObserver.py
#!/usr/bin/env python3 ########################################################################################################################## # # 2017/03 Thomas Britton # # Options: # MC variation can be changed by supplying "variation=xxxxx" option otherwise default: mc # the number of events to be generated per file (except for any remainder) can be set by "per_file=xxxx" default: 1000 # # If the user does not want genr8, geant, smearing, reconstruction to be performed the sequence will be terminated at the first instance of genr8=0,geant=0,mcsmear=0,recon=0 default: all on # Similarly, if the user wishes to retain the files created by any step you can supply the cleangenr8=0, cleangeant=0, cleanmcsmear=0, or cleanrecon=0 options. By default all but the reconstruction files # are cleaned. # # The reconstruction step is multi-threaded, for this step, if enabled, the script will use 4 threads. This threading can be changed with the "numthreads=xxx" option # # By default the job will run interactively in the local directory. If the user wishes to submit the jobs to swif the option "swif=1" must be supplied. # # SWIF DOCUMENTATION: # https://scicomp.jlab.org/docs/swif # https://scicomp.jlab.org/docs/swif-cli # https://scicomp.jlab.org/help/swif/add-job.txt #consider phase! # ########################################################################################################################## import MySQLdb #import MySQLdb.cursors from os import environ from optparse import OptionParser import os.path #import mysql.connector import time import os import getpass import sys import re import subprocess from subprocess import call import socket import glob import json import time from datetime import timedelta from datetime import datetime import smtplib from email.message import EmailMessage from multiprocessing import Process import random import pipes import random import pwd MCWRAPPER_BOT_HOST_NAME=str(socket.gethostname()) dbhost = "hallddb.jlab.org" dbuser = 'mcuser' dbpass = '' dbname = 'gluex_mc' try: dbcnx=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname) dbcursor=dbcnx.cursor(MySQLdb.cursors.DictCursor) except: print("WARNING: CANNOT CONNECT TO DATABASE. 
JOBS WILL NOT BE CONTROLLED OR MONITORED") pass runner_name=pwd.getpwuid( os.getuid() )[0] if( not (runner_name=="tbritton" or runner_name=="mcwrap")): print("ERROR: You must be tbritton or mcwrap to run this script") sys.exit(1) def exists_remote(host, path): """Test if a file exists at path on a host accessible with SSH.""" status = subprocess.call( ['ssh', host, 'test -f {}'.format(pipes.quote(path))]) if status == 0: return True if status == 1: return False raise Exception('SSH failed') def CheckForFile(rootLoc,expFile): found=False subloc="hddm" parse_expFile=expFile.split(".") #print(parse_expFile[len(parse_expFile)-1]) if(parse_expFile[len(parse_expFile)-1]=="root"): subloc="root/monitoring_hists" #if(os.path.isfile('/osgpool/halld/tbritton/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ): if(os.path.isfile('/osgpool/halld/'+runner_name+'/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/work/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ): found=True else: print(rootLoc+"/"+subloc+"/"+expFile+" NOT FOUND") return found def checkJobFilesForCompletion(comp_assignment): #OutstandingProjectsQuery="SELECT * FROM Project WHERE (Is_Dispatched != '0' && Tested != '-1' && Tested != '2' ) && Notified is NULL" #dbcursor.execute(OutstandingProjectsQuery) #OutstandingProjects=dbcursor.fetchall() dbcnx_comp=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname) dbcursor_comp=dbcnx_comp.cursor(MySQLdb.cursors.DictCursor) outdir_root="/osgpool/halld/"+runner_name+"/REQUESTEDMC_OUTPUT/" print("checking "+str(len(comp_assignment))) for attempt in comp_assignment:#OutstandingProjects: jobinfoq="SELECT * from Jobs where ID="+str(attempt["Job_ID"]) dbcursor.execute(jobinfoq) job = dbcursor.fetchall()[0] projq="SELECT * From Project Where ID="+str(job["Project_ID"]) dbcursor.execute(projq) proj = dbcursor.fetchall()[0] locparts=proj['OutputLocation'].split("/") #print("~~~~~~~~~~~~~~~~~~") #print("ProjID:",proj['ID']) files=[] dirs=[] #print locparts[len(locparts)-2] for r, dirs, files in os.walk(outdir_root+locparts[len(locparts)-2]) : files = [f for f in files if not f[0] == '.'] dirs[:] = [d for d in dirs if not d[0] == '.'] #print("NumFiles:",len(files)) #print(dirs) #DISTINCT ID ------in query below #print(fulfilledJobs) #print("Jobs fulfilled:",str(len(fulfilledJobs))) if(proj["Tested"]==2 or proj["Tested"]==3): continue rootLoc=proj['OutputLocation'].split("REQUESTED_MC")[1]#.replace("/","") nullify_list=[] #print("Data already Verified?",job['DataVerified']) if(job['DataVerified'] !=0 ): continue STANDARD_NAME=str(job['RunNumber']).zfill(6)+'_'+str(job['FileNumber']).zfill(3) if(proj['Generator']!="file:"): STANDARD_NAME=proj['Generator']+'_'+STANDARD_NAME #print(STANDARD_NAME) #check if postprocessor is being run postproc_append="" if(proj['GenPostProcessing'] != None and proj['GenPostProcessing'] != ""): print("Postprocessing:",proj['GenPostProcessing']) postproc_append="_"+proj['GenPostProcessing'].split(":")[0] Expected_returned_files=[] 
if(str(proj['RunGeneration'])=="1" and str(proj['SaveGeneration'])=="1" and str(proj['Generator'])!="particle_gun"): Expected_returned_files.append(STANDARD_NAME+postproc_append+".hddm") if(str(proj['RunGeant'])=="1" and str(proj['SaveGeant'])=="1"): Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+postproc_append+'.hddm') if(str(proj['RunSmear'])=="1" and str(proj['SaveSmear'])=="1"): Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+'_smeared'+postproc_append+'.hddm') if(str(proj['RunReconstruction'])=="1" and str(proj['SaveReconstruction'])=="1"): Expected_returned_files.append('dana_rest_'+STANDARD_NAME+postproc_append+'.hddm') Expected_returned_files.append('hd_root_'+STANDARD_NAME+postproc_append+'.root') found_AllexpFile=True for expFile in Expected_returned_files: #print(expFile) #print("checking for",expFile,"@",rootLoc) found=CheckForFile(rootLoc,expFile) if not found: #print(expFile+" NOT FOUND!!!!") found_AllexpFile=False break if found_AllexpFile: Update_q="UPDATE Attempts Set Status=44,ExitCode=0 where ID="+str(attempt["ID"]) print(Update_q) dbcursor_comp.execute(Update_q) dbcnx_comp.commit() else: continue ########################################################## MAIN ########################################################## def array_split(lst,n): to_return=[] for i in range(0,n): to_return.append([]) for count, ele in enumerate(lst): #print(ele) index=count%n #print(index) to_return[index].append(ele) #print(count) #print(len(to_return)) return to_return def
(argv): runnum=0 runmax=-1 spawnNum=10 numOverRide=False if(len(argv) !=0): numOverRide=True numprocesses_running=subprocess.check_output(["echo `ps all -u "+runner_name+" | grep MCObserver.py | grep -v grep | wc -l`"], shell=True) print(int(numprocesses_running)) if(int(numprocesses_running) <2 or numOverRide): while(runnum<runmax or runmax==-1): runnum=runnum+1 try: queryosgjobs="SELECT * from Attempts WHERE BatchSystem='OSG' && SubmitHost=\""+MCWRAPPER_BOT_HOST_NAME+"\" && Status !='4' && Status !='3' && Status!= '6' && Status != '5' && Status != '44';"# || (Status='4' && ExitCode != 0 && ProgramFailed is NULL) ORDER BY ID desc;" #print queryosgjobs dbcursor.execute(queryosgjobs) Alljobs = list(dbcursor.fetchall()) #print(Alljobs[:5]) random.shuffle(Alljobs) #print(Alljobs[:5]) Monitoring_assignments=array_split(Alljobs,spawnNum) spawns=[] for i in range(0,spawnNum): time.sleep(random.randint(1,spawnNum)) print("block "+str(i)) print(len(Monitoring_assignments[i])) if(len(Monitoring_assignments[i])>0): p=Process(target=checkJobFilesForCompletion,args=(Monitoring_assignments[i],)) p.daemon = True spawns.append(p) #p.join() for i in range(0,len(spawns)): #print("join "+str(i)) time.sleep(random.randint(1,spawnNum)) spawns[i].start() #time.sleep(2) for i in range(0,len(spawns)): if spawns[i].is_alive(): #print("join "+str(i)) spawns[i].join() except Exception as e: print(e) break dbcnx.close() if __name__ == "__main__": main(sys.argv[1:])
main
identifier_name
MCObserver.py
#!/usr/bin/env python3 ########################################################################################################################## # # 2017/03 Thomas Britton # # Options: # MC variation can be changed by supplying "variation=xxxxx" option otherwise default: mc # the number of events to be generated per file (except for any remainder) can be set by "per_file=xxxx" default: 1000 # # If the user does not want genr8, geant, smearing, reconstruction to be performed the sequence will be terminated at the first instance of genr8=0,geant=0,mcsmear=0,recon=0 default: all on # Similarly, if the user wishes to retain the files created by any step you can supply the cleangenr8=0, cleangeant=0, cleanmcsmear=0, or cleanrecon=0 options. By default all but the reconstruction files # are cleaned. # # The reconstruction step is multi-threaded, for this step, if enabled, the script will use 4 threads. This threading can be changed with the "numthreads=xxx" option # # By default the job will run interactively in the local directory. If the user wishes to submit the jobs to swif the option "swif=1" must be supplied. # # SWIF DOCUMENTATION: # https://scicomp.jlab.org/docs/swif # https://scicomp.jlab.org/docs/swif-cli # https://scicomp.jlab.org/help/swif/add-job.txt #consider phase! # ########################################################################################################################## import MySQLdb #import MySQLdb.cursors from os import environ from optparse import OptionParser import os.path #import mysql.connector import time import os import getpass import sys import re import subprocess from subprocess import call import socket import glob import json import time from datetime import timedelta from datetime import datetime import smtplib from email.message import EmailMessage from multiprocessing import Process import random import pipes import random import pwd MCWRAPPER_BOT_HOST_NAME=str(socket.gethostname()) dbhost = "hallddb.jlab.org" dbuser = 'mcuser' dbpass = '' dbname = 'gluex_mc' try: dbcnx=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname) dbcursor=dbcnx.cursor(MySQLdb.cursors.DictCursor) except: print("WARNING: CANNOT CONNECT TO DATABASE. 
JOBS WILL NOT BE CONTROLLED OR MONITORED") pass runner_name=pwd.getpwuid( os.getuid() )[0] if( not (runner_name=="tbritton" or runner_name=="mcwrap")): print("ERROR: You must be tbritton or mcwrap to run this script") sys.exit(1) def exists_remote(host, path): """Test if a file exists at path on a host accessible with SSH.""" status = subprocess.call( ['ssh', host, 'test -f {}'.format(pipes.quote(path))]) if status == 0: return True if status == 1: return False raise Exception('SSH failed') def CheckForFile(rootLoc,expFile): found=False subloc="hddm" parse_expFile=expFile.split(".") #print(parse_expFile[len(parse_expFile)-1]) if(parse_expFile[len(parse_expFile)-1]=="root"): subloc="root/monitoring_hists" #if(os.path.isfile('/osgpool/halld/tbritton/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ): if(os.path.isfile('/osgpool/halld/'+runner_name+'/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/work/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ): found=True else: print(rootLoc+"/"+subloc+"/"+expFile+" NOT FOUND") return found def checkJobFilesForCompletion(comp_assignment): #OutstandingProjectsQuery="SELECT * FROM Project WHERE (Is_Dispatched != '0' && Tested != '-1' && Tested != '2' ) && Notified is NULL" #dbcursor.execute(OutstandingProjectsQuery) #OutstandingProjects=dbcursor.fetchall() dbcnx_comp=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname) dbcursor_comp=dbcnx_comp.cursor(MySQLdb.cursors.DictCursor) outdir_root="/osgpool/halld/"+runner_name+"/REQUESTEDMC_OUTPUT/" print("checking "+str(len(comp_assignment))) for attempt in comp_assignment:#OutstandingProjects: jobinfoq="SELECT * from Jobs where ID="+str(attempt["Job_ID"]) dbcursor.execute(jobinfoq) job = dbcursor.fetchall()[0] projq="SELECT * From Project Where ID="+str(job["Project_ID"]) dbcursor.execute(projq) proj = dbcursor.fetchall()[0] locparts=proj['OutputLocation'].split("/") #print("~~~~~~~~~~~~~~~~~~") #print("ProjID:",proj['ID']) files=[] dirs=[] #print locparts[len(locparts)-2] for r, dirs, files in os.walk(outdir_root+locparts[len(locparts)-2]) : files = [f for f in files if not f[0] == '.'] dirs[:] = [d for d in dirs if not d[0] == '.'] #print("NumFiles:",len(files)) #print(dirs) #DISTINCT ID ------in query below #print(fulfilledJobs) #print("Jobs fulfilled:",str(len(fulfilledJobs))) if(proj["Tested"]==2 or proj["Tested"]==3): continue rootLoc=proj['OutputLocation'].split("REQUESTED_MC")[1]#.replace("/","") nullify_list=[] #print("Data already Verified?",job['DataVerified']) if(job['DataVerified'] !=0 ): continue STANDARD_NAME=str(job['RunNumber']).zfill(6)+'_'+str(job['FileNumber']).zfill(3) if(proj['Generator']!="file:"): STANDARD_NAME=proj['Generator']+'_'+STANDARD_NAME #print(STANDARD_NAME) #check if postprocessor is being run postproc_append="" if(proj['GenPostProcessing'] != None and proj['GenPostProcessing'] != ""): print("Postprocessing:",proj['GenPostProcessing']) postproc_append="_"+proj['GenPostProcessing'].split(":")[0] Expected_returned_files=[] 
if(str(proj['RunGeneration'])=="1" and str(proj['SaveGeneration'])=="1" and str(proj['Generator'])!="particle_gun"): Expected_returned_files.append(STANDARD_NAME+postproc_append+".hddm") if(str(proj['RunGeant'])=="1" and str(proj['SaveGeant'])=="1"): Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+postproc_append+'.hddm') if(str(proj['RunSmear'])=="1" and str(proj['SaveSmear'])=="1"): Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+'_smeared'+postproc_append+'.hddm') if(str(proj['RunReconstruction'])=="1" and str(proj['SaveReconstruction'])=="1"): Expected_returned_files.append('dana_rest_'+STANDARD_NAME+postproc_append+'.hddm') Expected_returned_files.append('hd_root_'+STANDARD_NAME+postproc_append+'.root') found_AllexpFile=True for expFile in Expected_returned_files: #print(expFile) #print("checking for",expFile,"@",rootLoc) found=CheckForFile(rootLoc,expFile) if not found: #print(expFile+" NOT FOUND!!!!") found_AllexpFile=False break if found_AllexpFile: Update_q="UPDATE Attempts Set Status=44,ExitCode=0 where ID="+str(attempt["ID"]) print(Update_q) dbcursor_comp.execute(Update_q) dbcnx_comp.commit() else: continue ########################################################## MAIN ########################################################## def array_split(lst,n): to_return=[] for i in range(0,n): to_return.append([]) for count, ele in enumerate(lst): #print(ele) index=count%n #print(index) to_return[index].append(ele) #print(count) #print(len(to_return)) return to_return def main(argv): runnum=0 runmax=-1 spawnNum=10 numOverRide=False
print(int(numprocesses_running)) if(int(numprocesses_running) <2 or numOverRide): while(runnum<runmax or runmax==-1): runnum=runnum+1 try: queryosgjobs="SELECT * from Attempts WHERE BatchSystem='OSG' && SubmitHost=\""+MCWRAPPER_BOT_HOST_NAME+"\" && Status !='4' && Status !='3' && Status!= '6' && Status != '5' && Status != '44';"# || (Status='4' && ExitCode != 0 && ProgramFailed is NULL) ORDER BY ID desc;" #print queryosgjobs dbcursor.execute(queryosgjobs) Alljobs = list(dbcursor.fetchall()) #print(Alljobs[:5]) random.shuffle(Alljobs) #print(Alljobs[:5]) Monitoring_assignments=array_split(Alljobs,spawnNum) spawns=[] for i in range(0,spawnNum): time.sleep(random.randint(1,spawnNum)) print("block "+str(i)) print(len(Monitoring_assignments[i])) if(len(Monitoring_assignments[i])>0): p=Process(target=checkJobFilesForCompletion,args=(Monitoring_assignments[i],)) p.daemon = True spawns.append(p) #p.join() for i in range(0,len(spawns)): #print("join "+str(i)) time.sleep(random.randint(1,spawnNum)) spawns[i].start() #time.sleep(2) for i in range(0,len(spawns)): if spawns[i].is_alive(): #print("join "+str(i)) spawns[i].join() except Exception as e: print(e) break dbcnx.close() if __name__ == "__main__": main(sys.argv[1:])
if(len(argv) !=0): numOverRide=True numprocesses_running=subprocess.check_output(["echo `ps all -u "+runner_name+" | grep MCObserver.py | grep -v grep | wc -l`"], shell=True)
random_line_split
MCObserver.py
#!/usr/bin/env python3 ########################################################################################################################## # # 2017/03 Thomas Britton # # Options: # MC variation can be changed by supplying "variation=xxxxx" option otherwise default: mc # the number of events to be generated per file (except for any remainder) can be set by "per_file=xxxx" default: 1000 # # If the user does not want genr8, geant, smearing, reconstruction to be performed the sequence will be terminated at the first instance of genr8=0,geant=0,mcsmear=0,recon=0 default: all on # Similarly, if the user wishes to retain the files created by any step you can supply the cleangenr8=0, cleangeant=0, cleanmcsmear=0, or cleanrecon=0 options. By default all but the reconstruction files # are cleaned. # # The reconstruction step is multi-threaded, for this step, if enabled, the script will use 4 threads. This threading can be changed with the "numthreads=xxx" option # # By default the job will run interactively in the local directory. If the user wishes to submit the jobs to swif the option "swif=1" must be supplied. # # SWIF DOCUMENTATION: # https://scicomp.jlab.org/docs/swif # https://scicomp.jlab.org/docs/swif-cli # https://scicomp.jlab.org/help/swif/add-job.txt #consider phase! # ########################################################################################################################## import MySQLdb #import MySQLdb.cursors from os import environ from optparse import OptionParser import os.path #import mysql.connector import time import os import getpass import sys import re import subprocess from subprocess import call import socket import glob import json import time from datetime import timedelta from datetime import datetime import smtplib from email.message import EmailMessage from multiprocessing import Process import random import pipes import random import pwd MCWRAPPER_BOT_HOST_NAME=str(socket.gethostname()) dbhost = "hallddb.jlab.org" dbuser = 'mcuser' dbpass = '' dbname = 'gluex_mc' try: dbcnx=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname) dbcursor=dbcnx.cursor(MySQLdb.cursors.DictCursor) except: print("WARNING: CANNOT CONNECT TO DATABASE. 
JOBS WILL NOT BE CONTROLLED OR MONITORED") pass runner_name=pwd.getpwuid( os.getuid() )[0] if( not (runner_name=="tbritton" or runner_name=="mcwrap")): print("ERROR: You must be tbritton or mcwrap to run this script") sys.exit(1) def exists_remote(host, path): """Test if a file exists at path on a host accessible with SSH.""" status = subprocess.call( ['ssh', host, 'test -f {}'.format(pipes.quote(path))]) if status == 0: return True if status == 1: return False raise Exception('SSH failed') def CheckForFile(rootLoc,expFile): found=False subloc="hddm" parse_expFile=expFile.split(".") #print(parse_expFile[len(parse_expFile)-1]) if(parse_expFile[len(parse_expFile)-1]=="root"): subloc="root/monitoring_hists" #if(os.path.isfile('/osgpool/halld/tbritton/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or os.path.isfile('/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ): if(os.path.isfile('/osgpool/halld/'+runner_name+'/REQUESTEDMC_OUTPUT/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/lustre19/expphy/cache/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/mss/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) or exists_remote(runner_name+'@dtn1902','/work/halld/gluex_simulations/REQUESTED_MC/'+rootLoc+"/"+subloc+"/"+expFile) ): found=True else: print(rootLoc+"/"+subloc+"/"+expFile+" NOT FOUND") return found def checkJobFilesForCompletion(comp_assignment): #OutstandingProjectsQuery="SELECT * FROM Project WHERE (Is_Dispatched != '0' && Tested != '-1' && Tested != '2' ) && Notified is NULL" #dbcursor.execute(OutstandingProjectsQuery) #OutstandingProjects=dbcursor.fetchall() dbcnx_comp=MySQLdb.connect(host=dbhost, user=dbuser, db=dbname) dbcursor_comp=dbcnx_comp.cursor(MySQLdb.cursors.DictCursor) outdir_root="/osgpool/halld/"+runner_name+"/REQUESTEDMC_OUTPUT/" print("checking "+str(len(comp_assignment))) for attempt in comp_assignment:#OutstandingProjects: jobinfoq="SELECT * from Jobs where ID="+str(attempt["Job_ID"]) dbcursor.execute(jobinfoq) job = dbcursor.fetchall()[0] projq="SELECT * From Project Where ID="+str(job["Project_ID"]) dbcursor.execute(projq) proj = dbcursor.fetchall()[0] locparts=proj['OutputLocation'].split("/") #print("~~~~~~~~~~~~~~~~~~") #print("ProjID:",proj['ID']) files=[] dirs=[] #print locparts[len(locparts)-2] for r, dirs, files in os.walk(outdir_root+locparts[len(locparts)-2]) : files = [f for f in files if not f[0] == '.'] dirs[:] = [d for d in dirs if not d[0] == '.'] #print("NumFiles:",len(files)) #print(dirs) #DISTINCT ID ------in query below #print(fulfilledJobs) #print("Jobs fulfilled:",str(len(fulfilledJobs))) if(proj["Tested"]==2 or proj["Tested"]==3): continue rootLoc=proj['OutputLocation'].split("REQUESTED_MC")[1]#.replace("/","") nullify_list=[] #print("Data already Verified?",job['DataVerified']) if(job['DataVerified'] !=0 ): continue STANDARD_NAME=str(job['RunNumber']).zfill(6)+'_'+str(job['FileNumber']).zfill(3) if(proj['Generator']!="file:"): STANDARD_NAME=proj['Generator']+'_'+STANDARD_NAME #print(STANDARD_NAME) #check if postprocessor is being run postproc_append="" if(proj['GenPostProcessing'] != None and proj['GenPostProcessing'] != ""): print("Postprocessing:",proj['GenPostProcessing']) postproc_append="_"+proj['GenPostProcessing'].split(":")[0] Expected_returned_files=[] 
if(str(proj['RunGeneration'])=="1" and str(proj['SaveGeneration'])=="1" and str(proj['Generator'])!="particle_gun"): Expected_returned_files.append(STANDARD_NAME+postproc_append+".hddm") if(str(proj['RunGeant'])=="1" and str(proj['SaveGeant'])=="1"): Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+postproc_append+'.hddm') if(str(proj['RunSmear'])=="1" and str(proj['SaveSmear'])=="1"): Expected_returned_files.append(STANDARD_NAME+'_geant'+str(proj['GeantVersion'])+'_smeared'+postproc_append+'.hddm') if(str(proj['RunReconstruction'])=="1" and str(proj['SaveReconstruction'])=="1"): Expected_returned_files.append('dana_rest_'+STANDARD_NAME+postproc_append+'.hddm') Expected_returned_files.append('hd_root_'+STANDARD_NAME+postproc_append+'.root') found_AllexpFile=True for expFile in Expected_returned_files: #print(expFile) #print("checking for",expFile,"@",rootLoc) found=CheckForFile(rootLoc,expFile) if not found: #print(expFile+" NOT FOUND!!!!") found_AllexpFile=False break if found_AllexpFile: Update_q="UPDATE Attempts Set Status=44,ExitCode=0 where ID="+str(attempt["ID"]) print(Update_q) dbcursor_comp.execute(Update_q) dbcnx_comp.commit() else: continue ########################################################## MAIN ########################################################## def array_split(lst,n): to_return=[] for i in range(0,n): to_return.append([]) for count, ele in enumerate(lst): #print(ele) index=count%n #print(index) to_return[index].append(ele) #print(count) #print(len(to_return)) return to_return def main(argv): runnum=0 runmax=-1 spawnNum=10 numOverRide=False if(len(argv) !=0): numOverRide=True numprocesses_running=subprocess.check_output(["echo `ps all -u "+runner_name+" | grep MCObserver.py | grep -v grep | wc -l`"], shell=True) print(int(numprocesses_running)) if(int(numprocesses_running) <2 or numOverRide): while(runnum<runmax or runmax==-1): runnum=runnum+1 try: queryosgjobs="SELECT * from Attempts WHERE BatchSystem='OSG' && SubmitHost=\""+MCWRAPPER_BOT_HOST_NAME+"\" && Status !='4' && Status !='3' && Status!= '6' && Status != '5' && Status != '44';"# || (Status='4' && ExitCode != 0 && ProgramFailed is NULL) ORDER BY ID desc;" #print queryosgjobs dbcursor.execute(queryosgjobs) Alljobs = list(dbcursor.fetchall()) #print(Alljobs[:5]) random.shuffle(Alljobs) #print(Alljobs[:5]) Monitoring_assignments=array_split(Alljobs,spawnNum) spawns=[] for i in range(0,spawnNum): time.sleep(random.randint(1,spawnNum)) print("block "+str(i)) print(len(Monitoring_assignments[i])) if(len(Monitoring_assignments[i])>0): p=Process(target=checkJobFilesForCompletion,args=(Monitoring_assignments[i],)) p.daemon = True spawns.append(p) #p.join() for i in range(0,len(spawns)): #print("join "+str(i)) time.sleep(random.randint(1,spawnNum)) spawns[i].start() #time.sleep(2) for i in range(0,len(spawns)):
except Exception as e: print(e) break dbcnx.close() if __name__ == "__main__": main(sys.argv[1:])
if spawns[i].is_alive(): #print("join "+str(i)) spawns[i].join()
conditional_block
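The MCObserver.py examples above all include an `array_split` helper that deals the outstanding job attempts out round-robin to `spawnNum` monitoring processes. The sketch below is not the script's code; it is a minimal, self-contained Python illustration of the same modulo-based partitioning, where the name `round_robin_split` and the sample attempt list are chosen purely for demonstration.

```python
# Illustrative sketch (not MCObserver.py itself) of the round-robin partitioning
# that array_split performs before each bucket is handed to a multiprocessing.Process.
# round_robin_split and the sample data are hypothetical names for this example.

def round_robin_split(items, n):
    """Distribute items into n buckets by index modulo n."""
    buckets = [[] for _ in range(n)]
    for index, item in enumerate(items):
        buckets[index % n].append(item)
    return buckets


if __name__ == "__main__":
    attempts = [{"ID": i} for i in range(7)]   # stand-in for rows fetched from the Attempts table
    for block, assignment in enumerate(round_robin_split(attempts, 3)):
        print("block", block, [a["ID"] for a in assignment])
    # block 0 [0, 3, 6]
    # block 1 [1, 4]
    # block 2 [2, 5]
```

Dealing by index modulo n keeps the bucket sizes within one element of each other, which is why the script can shuffle `Alljobs` once and still give each spawned process a comparable amount of checking work.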
connector.js
connectorInit = function () { var connector = this; var element = connector.getElement(); var appBody = "<div id=\"visualization\"></div>\n"; element.innerHTML = appBody; //Добавляем кастомный стиль выбора элементов // var customSelectionStyle = ".vis-item.vis-selected { box-shadow: 0 0 30px black; }" // addStyle(customSelectionStyle); var now = Date.now(); var minusDay = 0.5 var plusDay = 0.5 var options = { maxHeight: 400, stack: true, //Могут ли налаживаться компоненты друг на дружку - false - налаживаются groupEditable: true, //Можно ли драгать группы horizontalScroll: true, verticalScroll: true, zoomKey: "ctrlKey", orientation: { //Настройки осей axis: "top", item: "top", }, showCurrentTime: false, moment: function (date) { return vis.moment(date).utc(); //Для показа временной линии по Гринвичу - а то смещенеие в зависимости от временной зоны }, start: Date.now() - 1000 * 60 * 60 * 24 * minusDay, // minus days end: Date.now() + 1000 * 60 * 60 * 24 * plusDay, // plus days }; var items = new vis.DataSet(); var groups = new vis.DataSet(); var lastSelectedItem = ""; var windowStartTime = ""; var windowEndTime = ""; function isContainsStyleInHtml(styleName){ var styletags = document.getElementsByTagName("style"); //loop over all the style tags for(var i = 0; i < styletags.length; i++) { var selectedStyle = styletags[i].innerHTML; // console.log(styletags[i].innerHTML) if(selectedStyle.includes(styleName)){ // console.log("Contains" +styletags[i].innerHTML +"||"); return true; } else{ // console.log("Not contains"); } } return false; } // Handle changes from the server-side connector.onStateChange = function () { var state = connector.getState(); var data = state.data; items = data.usesItems console.log("Items", items); groups = data.usesGroups lastSelectedItem = data.lastSelectedItem windowStartTime = data.windowStartTime windowEndTime = data.windowEndTime console.log("State data: ", data); items.forEach(function(item, i, arr) { var color = item.color if(color==null || color=="") { color = getRandomColor(); } var temple = '.vis-background.'; if(item.itemType=="item"){ temple = '.vis-item.'; } var styleName = temple+'bg-'+item.id; var isContains = isContainsStyleInHtml(styleName); if(isContains){ console.log("Contains " +styleName+"||"); } else{ console.log("Not contains "+styleName); addStyle(styleName+'{background-color:'+color+'; color:white; font-size:14px;}') } }); timeline.setGroups(groups); timeline.setItems(items); //Смещаем окно на последнее местоположение if(windowStartTime ===null || windowStartTime === "" ||windowEndTime ===null || windowEndTime === "" ){ } else{ console.log("Set windowStartTime ", windowStartTime); console.log("Set windowEndTime ", windowEndTime); // timeline.setWindow(windowStartTime,windowEndTime); options.start = windowStartTime; options.end = windowEndTime; timeline.setOptions(options); } if(lastSelectedItem === null || lastSelectedItem === ""){ } else{ //Перемещаем на последний выбранный элемент console.log("Last selected item id for move ",lastSelectedItem) timeline.setSelection(lastSelectedItem); setTimeout(() =>{ moveToItem(lastSelectedItem, timeline); console.log("moveToItem is end "); setTimeout(() =>{ options.start = windowStartTime; options.end = windowEndTime; timeline.setOptions(options); }, 600); }, 300); }
timeline = new vis.Timeline(container); timeline.setOptions(options); timeline.setGroups(groups); timeline.setItems(items); timeline.on("click", (e) => { connector.onClick(e) }); timeline.on("dblclick", (e) => { connector.onDoubleClick(e); }); timeline.on('select', function (e) { console.log('selected items: ' + e.items); var itemID = e.items[0]; if(itemID === null || itemID === ""){ console.log("Empty object"); timeline.setSelection(lastSelectedItem); } else{ console.log("Object with ID "+itemID); lastSelectedItem = itemID connector.onItemClick(itemID); } }); timeline.on("rangechanged", function (properties) { // console.log("rangechanged", properties); console.log("Save windowStartTime", properties.start); console.log("Save windowEndTime", properties.end); connector.onRangeChanged(properties.start,properties.end); }); function getRandomColor() { return "#"+((1<<24)*Math.random()|0).toString(16); }; function addStyle(styleText) { var style = document.createElement('style'); style.type = 'text/css'; style.innerHTML = styleText; document.getElementsByTagName('head')[0].appendChild(style); }; /* Work-Around */ // This is a quick-and-dirty animation for scrolling var animateScroll = function(from, to, duration, timeline) { var initTime = new Date().valueOf(); //var duration = 500; var easingFunction = function(t) { return t < .5 ? 2 * t * t : -1 + (4 - 2 * t) * t }; var defer = $.Deferred(); var next = function() { var now = new Date().valueOf(); var time = now - initTime; var ease = easingFunction(time / duration); var done = time > duration; var s = done ? to : (from + (to - from) * ease); timeline._setScrollTop(-s); timeline._redraw(); if (!done) { //setTimeout(next, 20); window.requestAnimationFrame(next); } else { defer.resolve(); } }; next(); return defer.promise(); }; var moveToItem = function(eventId, timeline, duration) { console.log("moveToItem:", lastSelectedItem); duration = 200; var event = timeline.itemSet.items[eventId]; var leftHeight = timeline.props.leftContainer.height; var contentHeight = timeline.props.left.height; var alreadyVisible = false; if (event.displayed) { alreadyVisible = true; if (!event.selected) { timeline.setSelection(eventId); } } var groupId = event.data.group; var group = timeline.itemSet.groups[groupId] || { top: 0, height: 0 }; // Use a default if we don't have a group var offset = group.top; var orientation = timeline.timeAxis.options.orientation.axis; var eventTop = function(event, group) { if (orientation == "bottom") { return group.height - event.top - event.height; } else { return event.top; } }; var currentScrollHeight = timeline._getScrollTop() * -1; var targetOffset = offset + eventTop(event, group); var height = event.height; if (targetOffset < currentScrollHeight) { if (offset + leftHeight <= offset + eventTop(event, group) + height) { offset += eventTop(event, group) - timeline.itemSet.options.margin.item.vertical; } } else { if (targetOffset + height > currentScrollHeight + leftHeight) { offset += eventTop(event, group) + height - leftHeight + timeline.itemSet.options.margin.item.vertical; } } offset = Math.min(offset, contentHeight - leftHeight); if (targetOffset + height > currentScrollHeight + leftHeight || targetOffset < currentScrollHeight) { animateScroll(currentScrollHeight, offset, duration, timeline); timeline.setSelection(eventId); timeline.focus(eventId); } }; function funForCall(){ console.log("call a funForCall function from java !") } // function debounce(func, wait = 100) { // let timeout; // return function (...args) { // 
clearTimeout(timeout); // timeout = setTimeout(() => { // func.apply(this, args); // }, wait); // }; // } // // let groupFocus = (e) => { // let vGroups = timeline.getVisibleGroups(); // let vItems = vGroups.reduce((res, groupId) => { // let group = timeline.itemSet.groups[groupId]; // if (group.items) { // res = res.concat(Object.keys(group.items)); // } // return res; // }, []); // timeline.focus(vItems); // }; // this.timeline.on("scroll", debounce(groupFocus, 200)); // Enabling the next line leads to a continuous since calling focus might scroll vertically even if it shouldn't // this.timeline.on("scrollSide", debounce(groupFocus, 200)) }
}; // create a Timeline var container = document.getElementById("visualization");
random_line_split
connector.js
connectorInit = function () { var connector = this; var element = connector.getElement(); var appBody = "<div id=\"visualization\"></div>\n"; element.innerHTML = appBody; //Добавляем кастомный стиль выбора элементов // var customSelectionStyle = ".vis-item.vis-selected { box-shadow: 0 0 30px black; }" // addStyle(customSelectionStyle); var now = Date.now(); var minusDay = 0.5 var plusDay = 0.5 var options = { maxHeight: 400, stack: true, //Могут ли налаживаться компоненты друг на дружку - false - налаживаются groupEditable: true, //Можно ли драгать группы horizontalScroll: true, verticalScroll: true, zoomKey: "ctrlKey", orientation: { //Настройки осей axis: "top", item: "top", }, showCurrentTime: false, moment: function (date) { return vis.moment(date).utc(); //Для показа временной линии по Гринвичу - а то смещенеие в зависимости от временной зоны }, start: Date.now() - 1000 * 60 * 60 * 24 * minusDay, // minus days end: Date.now() + 1000 * 60 * 60 * 24 * plusDay, // plus days }; var items = new vis.DataSet(); var groups = new vis.DataSet(); var lastSelectedItem = ""; var windowStartTime = ""; var windowEndTime = ""; function isContainsStyleInHtml(styleName){ var styletags = document.getElementsByTagName("style"); //loop over all the style tags for(var i = 0; i < styletags.length; i
var selectedStyle = styletags[i].innerHTML; // console.log(styletags[i].innerHTML) if(selectedStyle.includes(styleName)){ // console.log("Contains" +styletags[i].innerHTML +"||"); return true; } else{ // console.log("Not contains"); } } return false; } // Handle changes from the server-side connector.onStateChange = function () { var state = connector.getState(); var data = state.data; items = data.usesItems console.log("Items", items); groups = data.usesGroups lastSelectedItem = data.lastSelectedItem windowStartTime = data.windowStartTime windowEndTime = data.windowEndTime console.log("State data: ", data); items.forEach(function(item, i, arr) { var color = item.color if(color==null || color=="") { color = getRandomColor(); } var temple = '.vis-background.'; if(item.itemType=="item"){ temple = '.vis-item.'; } var styleName = temple+'bg-'+item.id; var isContains = isContainsStyleInHtml(styleName); if(isContains){ console.log("Contains " +styleName+"||"); } else{ console.log("Not contains "+styleName); addStyle(styleName+'{background-color:'+color+'; color:white; font-size:14px;}') } }); timeline.setGroups(groups); timeline.setItems(items); //Смещаем окно на последнее местоположение if(windowStartTime ===null || windowStartTime === "" ||windowEndTime ===null || windowEndTime === "" ){ } else{ console.log("Set windowStartTime ", windowStartTime); console.log("Set windowEndTime ", windowEndTime); // timeline.setWindow(windowStartTime,windowEndTime); options.start = windowStartTime; options.end = windowEndTime; timeline.setOptions(options); } if(lastSelectedItem === null || lastSelectedItem === ""){ } else{ //Перемещаем на последний выбранный элемент console.log("Last selected item id for move ",lastSelectedItem) timeline.setSelection(lastSelectedItem); setTimeout(() =>{ moveToItem(lastSelectedItem, timeline); console.log("moveToItem is end "); setTimeout(() =>{ options.start = windowStartTime; options.end = windowEndTime; timeline.setOptions(options); }, 600); }, 300); } }; // create a Timeline var container = document.getElementById("visualization"); timeline = new vis.Timeline(container); timeline.setOptions(options); timeline.setGroups(groups); timeline.setItems(items); timeline.on("click", (e) => { connector.onClick(e) }); timeline.on("dblclick", (e) => { connector.onDoubleClick(e); }); timeline.on('select', function (e) { console.log('selected items: ' + e.items); var itemID = e.items[0]; if(itemID === null || itemID === ""){ console.log("Empty object"); timeline.setSelection(lastSelectedItem); } else{ console.log("Object with ID "+itemID); lastSelectedItem = itemID connector.onItemClick(itemID); } }); timeline.on("rangechanged", function (properties) { // console.log("rangechanged", properties); console.log("Save windowStartTime", properties.start); console.log("Save windowEndTime", properties.end); connector.onRangeChanged(properties.start,properties.end); }); function getRandomColor() { return "#"+((1<<24)*Math.random()|0).toString(16); }; function addStyle(styleText) { var style = document.createElement('style'); style.type = 'text/css'; style.innerHTML = styleText; document.getElementsByTagName('head')[0].appendChild(style); }; /* Work-Around */ // This is a quick-and-dirty animation for scrolling var animateScroll = function(from, to, duration, timeline) { var initTime = new Date().valueOf(); //var duration = 500; var easingFunction = function(t) { return t < .5 ? 
2 * t * t : -1 + (4 - 2 * t) * t }; var defer = $.Deferred(); var next = function() { var now = new Date().valueOf(); var time = now - initTime; var ease = easingFunction(time / duration); var done = time > duration; var s = done ? to : (from + (to - from) * ease); timeline._setScrollTop(-s); timeline._redraw(); if (!done) { //setTimeout(next, 20); window.requestAnimationFrame(next); } else { defer.resolve(); } }; next(); return defer.promise(); }; var moveToItem = function(eventId, timeline, duration) { console.log("moveToItem:", lastSelectedItem); duration = 200; var event = timeline.itemSet.items[eventId]; var leftHeight = timeline.props.leftContainer.height; var contentHeight = timeline.props.left.height; var alreadyVisible = false; if (event.displayed) { alreadyVisible = true; if (!event.selected) { timeline.setSelection(eventId); } } var groupId = event.data.group; var group = timeline.itemSet.groups[groupId] || { top: 0, height: 0 }; // Use a default if we don't have a group var offset = group.top; var orientation = timeline.timeAxis.options.orientation.axis; var eventTop = function(event, group) { if (orientation == "bottom") { return group.height - event.top - event.height; } else { return event.top; } }; var currentScrollHeight = timeline._getScrollTop() * -1; var targetOffset = offset + eventTop(event, group); var height = event.height; if (targetOffset < currentScrollHeight) { if (offset + leftHeight <= offset + eventTop(event, group) + height) { offset += eventTop(event, group) - timeline.itemSet.options.margin.item.vertical; } } else { if (targetOffset + height > currentScrollHeight + leftHeight) { offset += eventTop(event, group) + height - leftHeight + timeline.itemSet.options.margin.item.vertical; } } offset = Math.min(offset, contentHeight - leftHeight); if (targetOffset + height > currentScrollHeight + leftHeight || targetOffset < currentScrollHeight) { animateScroll(currentScrollHeight, offset, duration, timeline); timeline.setSelection(eventId); timeline.focus(eventId); } }; function funForCall(){ console.log("call a funForCall function from java !") } // function debounce(func, wait = 100) { // let timeout; // return function (...args) { // clearTimeout(timeout); // timeout = setTimeout(() => { // func.apply(this, args); // }, wait); // }; // } // // let groupFocus = (e) => { // let vGroups = timeline.getVisibleGroups(); // let vItems = vGroups.reduce((res, groupId) => { // let group = timeline.itemSet.groups[groupId]; // if (group.items) { // res = res.concat(Object.keys(group.items)); // } // return res; // }, []); // timeline.focus(vItems); // }; // this.timeline.on("scroll", debounce(groupFocus, 200)); // Enabling the next line leads to a continuous since calling focus might scroll vertically even if it shouldn't // this.timeline.on("scrollSide", debounce(groupFocus, 200)) }
++) {
identifier_name
connector.js
connectorInit = function () { var connector = this; var element = connector.getElement(); var appBody = "<div id=\"visualization\"></div>\n"; element.innerHTML = appBody; //Добавляем кастомный стиль выбора элементов // var customSelectionStyle = ".vis-item.vis-selected { box-shadow: 0 0 30px black; }" // addStyle(customSelectionStyle); var now = Date.now(); var minusDay = 0.5 var plusDay = 0.5 var options = { maxHeight: 400, stack: true, //Могут ли налаживаться компоненты друг на дружку - false - налаживаются groupEditable: true, //Можно ли драгать группы horizontalScroll: true, verticalScroll: true, zoomKey: "ctrlKey", orientation: { //Настройки осей axis: "top", item: "top", }, showCurrentTime: false, moment: function (date) { return vis.moment(date).utc(); //Для показа временной линии по Гринвичу - а то смещенеие в зависимости от временной зоны }, start: Date.now() - 1000 * 60 * 60 * 24 * minusDay, // minus days end: Date.now() + 1000 * 60 * 60 * 24 * plusDay, // plus days }; var items = new vis.DataSet(); var groups = new vis.DataSet(); var lastSelectedItem = ""; var windowStartTime = ""; var windowEndTime = ""; function isContainsStyleInHtml(styleName){ var styletags = document.getElementsByTagName("style"); //loop over all the style tags for(var i = 0; i < styletags.length; i++) {
s = data.usesItems console.log("Items", items); groups = data.usesGroups lastSelectedItem = data.lastSelectedItem windowStartTime = data.windowStartTime windowEndTime = data.windowEndTime console.log("State data: ", data); items.forEach(function(item, i, arr) { var color = item.color if(color==null || color=="") { color = getRandomColor(); } var temple = '.vis-background.'; if(item.itemType=="item"){ temple = '.vis-item.'; } var styleName = temple+'bg-'+item.id; var isContains = isContainsStyleInHtml(styleName); if(isContains){ console.log("Contains " +styleName+"||"); } else{ console.log("Not contains "+styleName); addStyle(styleName+'{background-color:'+color+'; color:white; font-size:14px;}') } }); timeline.setGroups(groups); timeline.setItems(items); //Смещаем окно на последнее местоположение if(windowStartTime ===null || windowStartTime === "" ||windowEndTime ===null || windowEndTime === "" ){ } else{ console.log("Set windowStartTime ", windowStartTime); console.log("Set windowEndTime ", windowEndTime); // timeline.setWindow(windowStartTime,windowEndTime); options.start = windowStartTime; options.end = windowEndTime; timeline.setOptions(options); } if(lastSelectedItem === null || lastSelectedItem === ""){ } else{ //Перемещаем на последний выбранный элемент console.log("Last selected item id for move ",lastSelectedItem) timeline.setSelection(lastSelectedItem); setTimeout(() =>{ moveToItem(lastSelectedItem, timeline); console.log("moveToItem is end "); setTimeout(() =>{ options.start = windowStartTime; options.end = windowEndTime; timeline.setOptions(options); }, 600); }, 300); } }; // create a Timeline var container = document.getElementById("visualization"); timeline = new vis.Timeline(container); timeline.setOptions(options); timeline.setGroups(groups); timeline.setItems(items); timeline.on("click", (e) => { connector.onClick(e) }); timeline.on("dblclick", (e) => { connector.onDoubleClick(e); }); timeline.on('select', function (e) { console.log('selected items: ' + e.items); var itemID = e.items[0]; if(itemID === null || itemID === ""){ console.log("Empty object"); timeline.setSelection(lastSelectedItem); } else{ console.log("Object with ID "+itemID); lastSelectedItem = itemID connector.onItemClick(itemID); } }); timeline.on("rangechanged", function (properties) { // console.log("rangechanged", properties); console.log("Save windowStartTime", properties.start); console.log("Save windowEndTime", properties.end); connector.onRangeChanged(properties.start,properties.end); }); function getRandomColor() { return "#"+((1<<24)*Math.random()|0).toString(16); }; function addStyle(styleText) { var style = document.createElement('style'); style.type = 'text/css'; style.innerHTML = styleText; document.getElementsByTagName('head')[0].appendChild(style); }; /* Work-Around */ // This is a quick-and-dirty animation for scrolling var animateScroll = function(from, to, duration, timeline) { var initTime = new Date().valueOf(); //var duration = 500; var easingFunction = function(t) { return t < .5 ? 2 * t * t : -1 + (4 - 2 * t) * t }; var defer = $.Deferred(); var next = function() { var now = new Date().valueOf(); var time = now - initTime; var ease = easingFunction(time / duration); var done = time > duration; var s = done ? 
to : (from + (to - from) * ease); timeline._setScrollTop(-s); timeline._redraw(); if (!done) { //setTimeout(next, 20); window.requestAnimationFrame(next); } else { defer.resolve(); } }; next(); return defer.promise(); }; var moveToItem = function(eventId, timeline, duration) { console.log("moveToItem:", lastSelectedItem); duration = 200; var event = timeline.itemSet.items[eventId]; var leftHeight = timeline.props.leftContainer.height; var contentHeight = timeline.props.left.height; var alreadyVisible = false; if (event.displayed) { alreadyVisible = true; if (!event.selected) { timeline.setSelection(eventId); } } var groupId = event.data.group; var group = timeline.itemSet.groups[groupId] || { top: 0, height: 0 }; // Use a default if we don't have a group var offset = group.top; var orientation = timeline.timeAxis.options.orientation.axis; var eventTop = function(event, group) { if (orientation == "bottom") { return group.height - event.top - event.height; } else { return event.top; } }; var currentScrollHeight = timeline._getScrollTop() * -1; var targetOffset = offset + eventTop(event, group); var height = event.height; if (targetOffset < currentScrollHeight) { if (offset + leftHeight <= offset + eventTop(event, group) + height) { offset += eventTop(event, group) - timeline.itemSet.options.margin.item.vertical; } } else { if (targetOffset + height > currentScrollHeight + leftHeight) { offset += eventTop(event, group) + height - leftHeight + timeline.itemSet.options.margin.item.vertical; } } offset = Math.min(offset, contentHeight - leftHeight); if (targetOffset + height > currentScrollHeight + leftHeight || targetOffset < currentScrollHeight) { animateScroll(currentScrollHeight, offset, duration, timeline); timeline.setSelection(eventId); timeline.focus(eventId); } }; function funForCall(){ console.log("call a funForCall function from java !") } // function debounce(func, wait = 100) { // let timeout; // return function (...args) { // clearTimeout(timeout); // timeout = setTimeout(() => { // func.apply(this, args); // }, wait); // }; // } // // let groupFocus = (e) => { // let vGroups = timeline.getVisibleGroups(); // let vItems = vGroups.reduce((res, groupId) => { // let group = timeline.itemSet.groups[groupId]; // if (group.items) { // res = res.concat(Object.keys(group.items)); // } // return res; // }, []); // timeline.focus(vItems); // }; // this.timeline.on("scroll", debounce(groupFocus, 200)); // Enabling the next line leads to a continuous since calling focus might scroll vertically even if it shouldn't // this.timeline.on("scrollSide", debounce(groupFocus, 200)) }
var selectedStyle = styletags[i].innerHTML; // console.log(styletags[i].innerHTML) if(selectedStyle.includes(styleName)){ // console.log("Contains" +styletags[i].innerHTML +"||"); return true; } else{ // console.log("Not contains"); } } return false; } // Handle changes from the server-side connector.onStateChange = function () { var state = connector.getState(); var data = state.data; item
identifier_body
connector.js
connectorInit = function () { var connector = this; var element = connector.getElement(); var appBody = "<div id=\"visualization\"></div>\n"; element.innerHTML = appBody; //Добавляем кастомный стиль выбора элементов // var customSelectionStyle = ".vis-item.vis-selected { box-shadow: 0 0 30px black; }" // addStyle(customSelectionStyle); var now = Date.now(); var minusDay = 0.5 var plusDay = 0.5 var options = { maxHeight: 400, stack: true, //Могут ли налаживаться компоненты друг на дружку - false - налаживаются groupEditable: true, //Можно ли драгать группы horizontalScroll: true, verticalScroll: true, zoomKey: "ctrlKey", orientation: { //Настройки осей axis: "top", item: "top", }, showCurrentTime: false, moment: function (date) { return vis.moment(date).utc(); //Для показа временной линии по Гринвичу - а то смещенеие в зависимости от временной зоны }, start: Date.now() - 1000 * 60 * 60 * 24 * minusDay, // minus days end: Date.now() + 1000 * 60 * 60 * 24 * plusDay, // plus days }; var items = new vis.DataSet(); var groups = new vis.DataSet(); var lastSelectedItem = ""; var windowStartTime = ""; var windowEndTime = ""; function isContainsStyleInHtml(styleName){ var styletags = document.getElementsByTagName("style"); //loop over all the style tags for(var i = 0; i < styletags.length; i++) { var selectedStyle = styletags[i].innerHTML; // console.log(styletags[i].innerHTML) if(selectedStyle.includes(styleName)){ // console.log("Contains" +styletags[i].innerHTML +"||"); return true; } else{ // console.log("Not contains"); } } return false; } // Handle changes from the server-side connector.onStateChange = function () { var state = connector.getState(); var data = state.data; items = data.usesItems console.log("Items", items); groups = data.usesGroups lastSelectedItem = data.lastSelectedItem windowStartTime = data.windowStartTime windowEndTime = data.windowEndTime console.log("State data: ", data); items.forEach(function(item, i, arr) { var color = item.color if(color==null || color=="") { color = getRandomColor(); } var temple = '.vis-background.'; if(item.itemType=="item"){ temple = '.vis-item.'; } var styleName = temple+'bg-'+item.id; var isContains = isContainsStyleInHtml(styleName); if(isContains){ console.log("Contains " +styleName+"||"); } else{ console.log("Not contains "+styleName); addStyle(styleName+'{background-color:'+color+'; color:white; font-size:14px;}') } }); timeline.setGroups(groups); timeline.setItems(items); //Смещаем окно на последнее местоположение if(windowStartTime ===null || windowStartTime === "" ||windowEndTime ===null || windowEndTime === "" ){ } else{ console.log("Set windowStartTime ", windowStartTime); console.log("Set windowEndTime ", windowEndTime); // timeline.setWindow(windowStartTime,windowEndTime); options.start = windowStartTime; options.end = windowEndTime; timeline.setOptions(options); } if(lastSelectedItem === null || lastSelectedItem === ""){ } else{ //Перемещаем на последний выбранный элемент console.log("Last selected item id for move ",lastSelectedItem) timeline.setSelection(lastSelectedItem); setTimeout(() =>{ moveToItem(lastSelectedItem, timeline); console.log("moveToItem is end "); setTimeout(() =>{ options.start = windowStartTime; options.end = windowEndTime; timeline.setOptions(options); }, 600); }, 300); } }; // create a Timeline var container = document.getElementById("visualization"); timeline = new vis.Timeline(container); timeline.setOptions(options); timeline.setGroups(groups); timeline.setItems(items); timeline.on("click", (e) => { 
connector.onClick(e) }); timeline.on("dblclick", (e) => { connector.onDoubleClick(e); }); timeline.on('select', function (e) { console.log('selected items: ' + e.items); var itemID = e.items[0]; if(itemID === null || itemID === ""){ console.log("Empty object"); timeline.setSelection(lastSelectedItem); } else{ console.log("Object with ID "+itemID); lastSelectedItem = itemID connector.onItemClick(itemID); } }); timeline.on("rangechanged", function (properties) { // console.log("rangechanged", properties); console.log("Save windowStartTime", properties.start); console.log("Save windowEndTime", properties.end); connector.onRangeChanged(properties.start,properties.end); }); function getRandomColor() { return "#"+((1<<24)*Math.random()|0).toString(16); }; function addStyle(styleText) { var style = document.createElement('style'); style.type = 'text/css'; style.innerHTML = styleText; document.getElementsByTagName('head')[0].appendChild(style); }; /* Work-Around */ // This is a quick-and-dirty animation for scrolling var animateScroll = function(from, to, duration, timeline) { var initTime = new Date().valueOf(); //var duration = 500; var easingFunction = function(t) { return t < .5 ? 2 * t * t : -1 + (4 - 2 * t) * t }; var defer = $.Deferred(); var next = function() { var now = new Date().valueOf(); var time = now - initTime; var ease = easingFunction(time / duration); var done = time > duration; var s = done ? to : (from + (to - from) * ease); timeline._setScrollTop(-s); timeline._redraw(); if (!done) { //setTimeout(next, 20); window.requestAnimationFrame(next); } else { defer.resolve(); } }; next(); return defer.promise(); }; var moveToItem = function(eventId, timeline, duration) { console.log("moveToItem:", lastSelectedItem); duration = 200; var event = timeline.itemSet.items[eventId]; var leftHeight = timeline.props.leftContainer.height; var contentHeight = timeline.props.left.height; var alreadyVisible = false; if (event.displayed) { alreadyVisible = true; if (!event.selected) { timeline.setSelection(eventId); } } var groupId = event.data.group; var group = timeline.itemSet.groups[groupId] || { top: 0, height: 0 }; // Use a default if we don't have a group var offset = group.top; var orientation = timeline.timeAxis.options.orientation.axis; var eventTop = function(event, group) { if (orientation == "bottom") { return group.height - event.top - event.height; } else { return event.top; } }; var currentScrollHeight = timeline._getScrollTop() * -1; var targetOffset = offset + eventTop(event, group); var height = event.height; if (targetOffset < currentScrollHeight) { if (offset + leftHeight <= offset + eventTop(event, group) + height) { offset += eventTop(event, group) - timeline.itemSet.options.margin.item.vertical; } } else { if (targetOffset + height > currentScrollHe
entHeight - leftHeight); if (targetOffset + height > currentScrollHeight + leftHeight || targetOffset < currentScrollHeight) { animateScroll(currentScrollHeight, offset, duration, timeline); timeline.setSelection(eventId); timeline.focus(eventId); } }; function funForCall(){ console.log("call a funForCall function from java !") } // function debounce(func, wait = 100) { // let timeout; // return function (...args) { // clearTimeout(timeout); // timeout = setTimeout(() => { // func.apply(this, args); // }, wait); // }; // } // // let groupFocus = (e) => { // let vGroups = timeline.getVisibleGroups(); // let vItems = vGroups.reduce((res, groupId) => { // let group = timeline.itemSet.groups[groupId]; // if (group.items) { // res = res.concat(Object.keys(group.items)); // } // return res; // }, []); // timeline.focus(vItems); // }; // this.timeline.on("scroll", debounce(groupFocus, 200)); // Enabling the next line leads to a continuous since calling focus might scroll vertically even if it shouldn't // this.timeline.on("scrollSide", debounce(groupFocus, 200)) }
ight + leftHeight) { offset += eventTop(event, group) + height - leftHeight + timeline.itemSet.options.margin.item.vertical; } } offset = Math.min(offset, cont
conditional_block
test_conv_layer.py
import pytest import numpy as np from collections import OrderedDict from contextlib import closing import neon as ng import neon.transformers as ngt from neon.testing import executor from neon.frontend.common import utils from neon.op_graph.axes import IncompatibleAxesError from neon.frontend import Convolution, Deconvolution, Sequential from neon.frontend import ConstantInit, Rectlin, GaussianInit, make_bound_computation def reference_conv1d(inputs, filters, activation, strides=1, padding=0): # for now: assert strides == 1 assert padding == 0 # inputs: features, time steps (conv axis), batch size # filters: input feature dimension/channels, T=1, R=filter_width, S=1, K=num_filters # result: K, 1, time_steps - S + 1, 1, batch size filters = np.squeeze(filters) # input channels, filter_width, num_filters feature_dimension, time_steps_in, batch_size = inputs.shape filter_width = filters.shape[1] K = filters.shape[-1] time_steps_out = time_steps_in - filter_width + 1 result = np.zeros((K, time_steps_out, batch_size)) # TODO: refactor to make this more efficient for t in range(time_steps_out):
result = activation(result) # expand dimensions from K, time_steps, batch_size to (K, 1, time_steps, 1, batch_size) result = np.expand_dims(np.expand_dims(result, axis=1), axis=3) return result # TODO: Remove these to conftest.py @pytest.fixture(params=[1]) def input_size(request): return request.param @pytest.fixture(params=[16]) def output_size(request): return request.param @pytest.fixture(params=[4]) def batch_size(request): return request.param @pytest.fixture def width_axis(width): return ng.make_axis(length=width, name="W") @pytest.fixture def conv1d_placeholder(channel_axis, width_axis, batch_axis): return ng.placeholder((channel_axis, width_axis, batch_axis)) @pytest.fixture def conv1d_no_channel_axis(width_axis, batch_axis): return ng.placeholder((width_axis, batch_axis)) @pytest.fixture def spatial_onehot(input_size, width, batch_size): value = np.zeros((input_size, width, batch_size)) value[:, width // 2, :] = 1 return value @pytest.mark.xfail(reason='1d conv not supported') def test_causal_convolution(conv1d_placeholder, spatial_onehot, output_size, width): """ Test that causal convolutions only operate on leftward inputs""" conv_layer = Convolution((3, output_size), lambda x: 1, padding="causal") output = conv_layer(conv1d_placeholder) output_width = output.axes.find_by_name("W")[0].length assert output_width == width, "Causal convolution output width != " \ "input width: {} != {}".format(output_width, width) with executor(output, conv1d_placeholder) as comp: output_val = comp(spatial_onehot) # First 1 is at width // 2, so anything before that should be 0 assert (output_val[:, :width // 2] == 0).all(), "Acausal outputs in causal convolution" @pytest.mark.xfail(reason='1d conv not supported') @pytest.mark.parametrize("stride", (1, 3)) def test_same_convolution(conv1d_placeholder, spatial_onehot, output_size, width, stride): """ Test that 'same' always results in out_size = np.ceil(in_size / stride) """ conv_layer = Convolution((3, output_size), lambda x: 1, strides=stride, padding="same") output = conv_layer(conv1d_placeholder) output_width = output.axes.find_by_name("W")[0].length assert output_width == np.ceil(width / float(stride)), ("Same convolution output width != " "ceil(input_width / stride): {} != " "ceil({} / {})").format(output_width, width, stride) @pytest.mark.xfail(reason='1d conv not supported') def test_axis_preservation(conv1d_placeholder, output_size): """ Test that axes into a conv are the same as axes out""" conv_layer = Convolution((3, output_size), lambda x: 1) output = conv_layer(conv1d_placeholder) assert output.axes == conv1d_placeholder.axes, ("Output axes are not the same as input axes: " "{} != {}").format(output.axes, conv1d_placeholder.axes) @pytest.mark.xfail(reason='1d conv and channel name not supported') def test_channel_axis_introduction(conv1d_no_channel_axis, output_size, channel_axis): """ Test that a channel axis is added when it doesn't exist in the input""" conv_layer = Convolution((3, output_size), lambda x: 1) output = conv_layer(conv1d_no_channel_axis) t_axes = conv1d_no_channel_axis.axes + channel_axis assert output.axes.is_equal_set(t_axes), ("Output axes are not input axes + channel axis:" "{} != {} + {}").format(output.axes, conv1d_no_channel_axis.axes, channel_axis) @pytest.mark.xfail(reason='1d conv and channel name not supported') def test_alternate_spatial_axes(conv1d_placeholder, output_size, width_axis): """ Test that spatial axis names are modifiable """ width_axis.name = "time" assert 
len(conv1d_placeholder.axes.find_by_name("time")) == 1 conv_layer = Convolution((3, output_size), lambda x: 1) with pytest.raises(IncompatibleAxesError): conv_layer(conv1d_placeholder) # As a dictionary output = conv_layer(conv1d_placeholder, spatial_axes={"W": "time"}) assert output.axes == conv1d_placeholder.axes # As a tuple output = conv_layer(conv1d_placeholder, spatial_axes=("D", "H", "time")) assert output.axes == conv1d_placeholder.axes @pytest.mark.xfail(reason='1d conv and channel name not supported') def test_alternate_channel_axes(conv1d_placeholder, output_size, channel_axis): """ Test that channel axis names are modifiable""" channel_axis.name = "channel" assert len(conv1d_placeholder.axes.find_by_name("channel")) == 1 conv_layer = Convolution((3, output_size), lambda x: 1) with pytest.raises(IncompatibleAxesError): conv_layer(conv1d_placeholder) output = conv_layer(conv1d_placeholder, channel_axes="channel") assert output.axes == conv1d_placeholder.axes @pytest.mark.xfail(reason='resolution issue') @pytest.mark.parametrize('dilation', [1, 2, 3]) def test_dilated_conv(dilation): """Test that the dilated convolution layer output matches expected. This test compares the maximum output value to an expected max output value. The expected value is computed based on the dilation parameter. The test also checks that the output size matches the expected size based on the dilaton parameter value.""" image_size = 3 batch_size = 1 init_val = 0.1 conv_size = 3 pad = 3 N_filters = 1 image_channels = 3 model = Sequential([Convolution((conv_size, conv_size, N_filters), filter_init=ConstantInit(val=init_val), padding=pad, dilation=dilation)]) X = np.ones(shape=(batch_size, 3, image_size, image_size)) # Create dummy image data = {'image': X, 'iteration': 1} data_size = OrderedDict([('N', batch_size), ('C', 3), ('H', image_size), ('W', image_size)]) ax = [ng.make_axis(length=data_size[k], name=k) for k in list(data_size.keys())] p_axes = ng.make_axes(ax) named_inputs = {'image': ng.placeholder(p_axes)} outputs = model(named_inputs['image']) named_outputs = {outputs.name: outputs} with closing(ngt.make_transformer()) as transformer: m = make_bound_computation(transformer, named_outputs, named_inputs) output = m(data)[list(m(data).keys())[0]] filter_size = dilation * (conv_size - 1) + 1 # Compute expected filter size # Compute the expected output size based on convolution parameters out_size = (image_size + 2 * pad - filter_size) + 1 filt_tmp = np.zeros(filter_size) filt_tmp[0::dilation] = 1 # max overlap between dilated filter and image (in 1-d) max_overlap = int(np.min([filter_size, image_size])) exp_max_output = init_val * image_channels * (np.sum(filt_tmp[0: max_overlap]))**2 # Expected max output changes for different dilation parameter values# assert int(10 * np.max(output)) == int(10 * exp_max_output), \ ("Dilated conv max outputs do not match expected: " "{} != {}").format(np.max(output), init_val * conv_size * ((image_size - (dilation - 1))**2)) assert np.shape(output) == (batch_size, N_filters, out_size, out_size), \ ("Dilated conv output is not expected size: " "{} != {}").format(np.shape(output), (batch_size, N_filters, out_size, out_size)) @pytest.mark.xfail(reason='Not implemented') @pytest.mark.parametrize('filter_width', [3]) @pytest.mark.parametrize('num_filters', [2]) @pytest.mark.parametrize('strides', [1]) @pytest.mark.parametrize('padding', [0]) @pytest.mark.parametrize('time_steps', [5]) @pytest.mark.parametrize('feature_dimension', [4]) 
@pytest.mark.parametrize('batch_size', [2]) def test_conv1d(transformer_factory, filter_width, num_filters, strides, padding, time_steps, feature_dimension, batch_size): dilation = 1 # reference conv does not support dilation F = ng.make_axis(name='F', length=feature_dimension) REC = ng.make_axis(name='REC', length=time_steps) N = ng.make_axis(name='N', length=batch_size) in_axes = ng.make_axes([F, REC, N]) inputs = ng.placeholder(axes=in_axes) input_vals = np.random.randn(*in_axes.lengths) filter_init = GaussianInit() conv1d = Convolution((filter_width, num_filters), filter_init, strides=strides, padding=padding, dilation=dilation, bias_init=None, activation=Rectlin(), batch_norm=None) result_op = conv1d(inputs, channel_axes='F', spatial_axes={'W': 'REC'}) with closing(ngt.make_transformer()) as transformer: result_comp = transformer.add_computation(ng.computation(result_op, inputs)) filter_vals = transformer.add_computation(ng.computation(conv1d.conv.W))() result_ng = result_comp(input_vals) result_np = np.squeeze(reference_conv1d(input_vals, filter_vals, lambda x: np.maximum(0, x))) ng.testing.assert_allclose(result_ng, result_np) @pytest.mark.xfail(reason='Not implemented') @pytest.mark.transformer_dependent def test_deconv(): """ basic test of deconv fprop. ngraph/tests/test_conv.py tests ng.deconvolution bprop """ # filter params R, S = 5, 5 fshape = (R, S, 1) strides = 2 filter_val_nz = np.arange(1, R * S + 1).reshape(R, S) filter_val = np.zeros(fshape) filter_val[:, :, 0] = filter_val_nz deconv = Deconvolution(fshape, filter_init=ConstantInit(filter_val), strides=strides, padding=0, dilation=1) N = ng.make_axis(name='N', length=1) # batch image_shape = (1, 8, 8) # CHW image_axes = ng.make_axes([ng.make_axis(name=nm, length=l) for nm, l in zip('CHW', image_shape)]) image_axes |= N image = ng.placeholder(axes=image_axes) output = deconv(image) with closing(ngt.make_transformer()) as transformer: comp = transformer.add_computation(ng.computation(output, image)) input_val = np.zeros(image_shape + (N.length, ), dtype=float) input_val[0, 0, 0] = 1 input_val[0, 5, 5] = 1 input_val[0, 7, 7] = 1 result = comp(input_val) feature_map = np.squeeze(result) assert (feature_map[:5, :5] == filter_val_nz).all() result2 = filter_val_nz.copy() result2[-1, -1] = 26 assert (feature_map[10:15, 10:15] == result2).all() result3 = filter_val_nz.copy() result3[0, 0] = 26 assert (feature_map[-5:, -5:] == result3).all() @pytest.mark.parametrize("input_size", (10, 25)) @pytest.mark.parametrize("filter_size", (3, 4)) @pytest.mark.parametrize("padding", ((0, 0), (3, 4))) @pytest.mark.parametrize("stride", (1, 3)) def test_conv_inverts_deconv(transformer_factory, input_size, filter_size, padding, stride): """ Test that conv and deconv are inverse operations given the same parameters""" # convolutions whose output size are not an even multiple of stride cannot be exactly inverted a = (input_size + sum(padding) - filter_size) % stride conv_output = utils.conv_output_dim(input_size, filter_size, padding, stride) deconv_output = utils.deconv_output_dim(conv_output, filter_size, padding, stride) assert deconv_output == (input_size - a), ("Convolution and Deconvolution do not invert:\n" "output ({}) != input ({}) - a ({})\n" "filter: {}, padding: {}, stride: {}" ).format(deconv_output, input_size, a, filter_size, padding, stride)
for k in range(K): for n in range(batch_size): result[k, t, n] = np.sum(inputs[:, t:t + filter_width, n] * filters[:, :, k])
conditional_block
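The reference_conv1d helper in the file above computes the 1D convolution with three nested Python loops and carries a TODO about efficiency. Below is a minimal vectorized sketch of the same stride-1, zero-padding computation, assuming NumPy only; the name fast_conv1d and the helper itself are illustrative and not part of the test file.

import numpy as np

def fast_conv1d(inputs, filters):
    # inputs:  (feature_dimension, time_steps_in, batch_size)
    # filters: (feature_dimension, filter_width, num_filters), i.e. already squeezed
    filter_width = filters.shape[1]
    time_steps_out = inputs.shape[1] - filter_width + 1
    # Collect all sliding windows: windows[c, t, w, n] == inputs[c, t + w, n]
    windows = np.stack(
        [inputs[:, t:t + filter_width, :] for t in range(time_steps_out)], axis=1
    )
    # Contract the channel (c) and width (w) axes against the filters (c, w, k)
    result = np.tensordot(windows, filters, axes=([0, 2], [0, 1]))  # (t, n, k)
    return np.moveaxis(result, -1, 0)  # (k, t, n), matching the reference before activation

Applying the activation afterwards, e.g. np.maximum(0, fast_conv1d(x, w)), should reproduce the loop-based reference output up to the extra singleton axes it inserts.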
test_conv_layer.py
import pytest import numpy as np from collections import OrderedDict from contextlib import closing import neon as ng import neon.transformers as ngt from neon.testing import executor from neon.frontend.common import utils from neon.op_graph.axes import IncompatibleAxesError from neon.frontend import Convolution, Deconvolution, Sequential from neon.frontend import ConstantInit, Rectlin, GaussianInit, make_bound_computation def reference_conv1d(inputs, filters, activation, strides=1, padding=0): # for now: assert strides == 1 assert padding == 0 # inputs: features, time steps (conv axis), batch size # filters: input feature dimension/channels, T=1, R=filter_width, S=1, K=num_filters # result: K, 1, time_steps - S + 1, 1, batch size filters = np.squeeze(filters) # input channels, filter_width, num_filters feature_dimension, time_steps_in, batch_size = inputs.shape filter_width = filters.shape[1] K = filters.shape[-1] time_steps_out = time_steps_in - filter_width + 1 result = np.zeros((K, time_steps_out, batch_size)) # TODO: refactor to make this more efficient for t in range(time_steps_out): for k in range(K): for n in range(batch_size): result[k, t, n] = np.sum(inputs[:, t:t + filter_width, n] * filters[:, :, k]) result = activation(result) # expand dimensions from K, time_steps, batch_size to (K, 1, time_steps, 1, batch_size) result = np.expand_dims(np.expand_dims(result, axis=1), axis=3) return result # TODO: Remove these to conftest.py @pytest.fixture(params=[1]) def input_size(request): return request.param @pytest.fixture(params=[16]) def output_size(request): return request.param @pytest.fixture(params=[4]) def batch_size(request): return request.param @pytest.fixture def width_axis(width): return ng.make_axis(length=width, name="W") @pytest.fixture def conv1d_placeholder(channel_axis, width_axis, batch_axis): return ng.placeholder((channel_axis, width_axis, batch_axis)) @pytest.fixture def conv1d_no_channel_axis(width_axis, batch_axis): return ng.placeholder((width_axis, batch_axis)) @pytest.fixture def spatial_onehot(input_size, width, batch_size): value = np.zeros((input_size, width, batch_size)) value[:, width // 2, :] = 1 return value @pytest.mark.xfail(reason='1d conv not supported') def test_causal_convolution(conv1d_placeholder, spatial_onehot, output_size, width): """ Test that causal convolutions only operate on leftward inputs""" conv_layer = Convolution((3, output_size), lambda x: 1, padding="causal") output = conv_layer(conv1d_placeholder) output_width = output.axes.find_by_name("W")[0].length assert output_width == width, "Causal convolution output width != " \ "input width: {} != {}".format(output_width, width) with executor(output, conv1d_placeholder) as comp: output_val = comp(spatial_onehot) # First 1 is at width // 2, so anything before that should be 0 assert (output_val[:, :width // 2] == 0).all(), "Acausal outputs in causal convolution" @pytest.mark.xfail(reason='1d conv not supported') @pytest.mark.parametrize("stride", (1, 3)) def test_same_convolution(conv1d_placeholder, spatial_onehot, output_size, width, stride):
@pytest.mark.xfail(reason='1d conv not supported') def test_axis_preservation(conv1d_placeholder, output_size): """ Test that axes into a conv are the same as axes out""" conv_layer = Convolution((3, output_size), lambda x: 1) output = conv_layer(conv1d_placeholder) assert output.axes == conv1d_placeholder.axes, ("Output axes are not the same as input axes: " "{} != {}").format(output.axes, conv1d_placeholder.axes) @pytest.mark.xfail(reason='1d conv and channel name not supported') def test_channel_axis_introduction(conv1d_no_channel_axis, output_size, channel_axis): """ Test that a channel axis is added when it doesn't exist in the input""" conv_layer = Convolution((3, output_size), lambda x: 1) output = conv_layer(conv1d_no_channel_axis) t_axes = conv1d_no_channel_axis.axes + channel_axis assert output.axes.is_equal_set(t_axes), ("Output axes are not input axes + channel axis:" "{} != {} + {}").format(output.axes, conv1d_no_channel_axis.axes, channel_axis) @pytest.mark.xfail(reason='1d conv and channel name not supported') def test_alternate_spatial_axes(conv1d_placeholder, output_size, width_axis): """ Test that spatial axis names are modifiable """ width_axis.name = "time" assert len(conv1d_placeholder.axes.find_by_name("time")) == 1 conv_layer = Convolution((3, output_size), lambda x: 1) with pytest.raises(IncompatibleAxesError): conv_layer(conv1d_placeholder) # As a dictionary output = conv_layer(conv1d_placeholder, spatial_axes={"W": "time"}) assert output.axes == conv1d_placeholder.axes # As a tuple output = conv_layer(conv1d_placeholder, spatial_axes=("D", "H", "time")) assert output.axes == conv1d_placeholder.axes @pytest.mark.xfail(reason='1d conv and channel name not supported') def test_alternate_channel_axes(conv1d_placeholder, output_size, channel_axis): """ Test that channel axis names are modifiable""" channel_axis.name = "channel" assert len(conv1d_placeholder.axes.find_by_name("channel")) == 1 conv_layer = Convolution((3, output_size), lambda x: 1) with pytest.raises(IncompatibleAxesError): conv_layer(conv1d_placeholder) output = conv_layer(conv1d_placeholder, channel_axes="channel") assert output.axes == conv1d_placeholder.axes @pytest.mark.xfail(reason='resolution issue') @pytest.mark.parametrize('dilation', [1, 2, 3]) def test_dilated_conv(dilation): """Test that the dilated convolution layer output matches expected. This test compares the maximum output value to an expected max output value. The expected value is computed based on the dilation parameter. 
The test also checks that the output size matches the expected size based on the dilaton parameter value.""" image_size = 3 batch_size = 1 init_val = 0.1 conv_size = 3 pad = 3 N_filters = 1 image_channels = 3 model = Sequential([Convolution((conv_size, conv_size, N_filters), filter_init=ConstantInit(val=init_val), padding=pad, dilation=dilation)]) X = np.ones(shape=(batch_size, 3, image_size, image_size)) # Create dummy image data = {'image': X, 'iteration': 1} data_size = OrderedDict([('N', batch_size), ('C', 3), ('H', image_size), ('W', image_size)]) ax = [ng.make_axis(length=data_size[k], name=k) for k in list(data_size.keys())] p_axes = ng.make_axes(ax) named_inputs = {'image': ng.placeholder(p_axes)} outputs = model(named_inputs['image']) named_outputs = {outputs.name: outputs} with closing(ngt.make_transformer()) as transformer: m = make_bound_computation(transformer, named_outputs, named_inputs) output = m(data)[list(m(data).keys())[0]] filter_size = dilation * (conv_size - 1) + 1 # Compute expected filter size # Compute the expected output size based on convolution parameters out_size = (image_size + 2 * pad - filter_size) + 1 filt_tmp = np.zeros(filter_size) filt_tmp[0::dilation] = 1 # max overlap between dilated filter and image (in 1-d) max_overlap = int(np.min([filter_size, image_size])) exp_max_output = init_val * image_channels * (np.sum(filt_tmp[0: max_overlap]))**2 # Expected max output changes for different dilation parameter values# assert int(10 * np.max(output)) == int(10 * exp_max_output), \ ("Dilated conv max outputs do not match expected: " "{} != {}").format(np.max(output), init_val * conv_size * ((image_size - (dilation - 1))**2)) assert np.shape(output) == (batch_size, N_filters, out_size, out_size), \ ("Dilated conv output is not expected size: " "{} != {}").format(np.shape(output), (batch_size, N_filters, out_size, out_size)) @pytest.mark.xfail(reason='Not implemented') @pytest.mark.parametrize('filter_width', [3]) @pytest.mark.parametrize('num_filters', [2]) @pytest.mark.parametrize('strides', [1]) @pytest.mark.parametrize('padding', [0]) @pytest.mark.parametrize('time_steps', [5]) @pytest.mark.parametrize('feature_dimension', [4]) @pytest.mark.parametrize('batch_size', [2]) def test_conv1d(transformer_factory, filter_width, num_filters, strides, padding, time_steps, feature_dimension, batch_size): dilation = 1 # reference conv does not support dilation F = ng.make_axis(name='F', length=feature_dimension) REC = ng.make_axis(name='REC', length=time_steps) N = ng.make_axis(name='N', length=batch_size) in_axes = ng.make_axes([F, REC, N]) inputs = ng.placeholder(axes=in_axes) input_vals = np.random.randn(*in_axes.lengths) filter_init = GaussianInit() conv1d = Convolution((filter_width, num_filters), filter_init, strides=strides, padding=padding, dilation=dilation, bias_init=None, activation=Rectlin(), batch_norm=None) result_op = conv1d(inputs, channel_axes='F', spatial_axes={'W': 'REC'}) with closing(ngt.make_transformer()) as transformer: result_comp = transformer.add_computation(ng.computation(result_op, inputs)) filter_vals = transformer.add_computation(ng.computation(conv1d.conv.W))() result_ng = result_comp(input_vals) result_np = np.squeeze(reference_conv1d(input_vals, filter_vals, lambda x: np.maximum(0, x))) ng.testing.assert_allclose(result_ng, result_np) @pytest.mark.xfail(reason='Not implemented') @pytest.mark.transformer_dependent def test_deconv(): """ basic test of deconv fprop. 
ngraph/tests/test_conv.py tests ng.deconvolution bprop """ # filter params R, S = 5, 5 fshape = (R, S, 1) strides = 2 filter_val_nz = np.arange(1, R * S + 1).reshape(R, S) filter_val = np.zeros(fshape) filter_val[:, :, 0] = filter_val_nz deconv = Deconvolution(fshape, filter_init=ConstantInit(filter_val), strides=strides, padding=0, dilation=1) N = ng.make_axis(name='N', length=1) # batch image_shape = (1, 8, 8) # CHW image_axes = ng.make_axes([ng.make_axis(name=nm, length=l) for nm, l in zip('CHW', image_shape)]) image_axes |= N image = ng.placeholder(axes=image_axes) output = deconv(image) with closing(ngt.make_transformer()) as transformer: comp = transformer.add_computation(ng.computation(output, image)) input_val = np.zeros(image_shape + (N.length, ), dtype=float) input_val[0, 0, 0] = 1 input_val[0, 5, 5] = 1 input_val[0, 7, 7] = 1 result = comp(input_val) feature_map = np.squeeze(result) assert (feature_map[:5, :5] == filter_val_nz).all() result2 = filter_val_nz.copy() result2[-1, -1] = 26 assert (feature_map[10:15, 10:15] == result2).all() result3 = filter_val_nz.copy() result3[0, 0] = 26 assert (feature_map[-5:, -5:] == result3).all() @pytest.mark.parametrize("input_size", (10, 25)) @pytest.mark.parametrize("filter_size", (3, 4)) @pytest.mark.parametrize("padding", ((0, 0), (3, 4))) @pytest.mark.parametrize("stride", (1, 3)) def test_conv_inverts_deconv(transformer_factory, input_size, filter_size, padding, stride): """ Test that conv and deconv are inverse operations given the same parameters""" # convolutions whose output size are not an even multiple of stride cannot be exactly inverted a = (input_size + sum(padding) - filter_size) % stride conv_output = utils.conv_output_dim(input_size, filter_size, padding, stride) deconv_output = utils.deconv_output_dim(conv_output, filter_size, padding, stride) assert deconv_output == (input_size - a), ("Convolution and Deconvolution do not invert:\n" "output ({}) != input ({}) - a ({})\n" "filter: {}, padding: {}, stride: {}" ).format(deconv_output, input_size, a, filter_size, padding, stride)
""" Test that 'same' always results in out_size = np.ceil(in_size / stride) """ conv_layer = Convolution((3, output_size), lambda x: 1, strides=stride, padding="same") output = conv_layer(conv1d_placeholder) output_width = output.axes.find_by_name("W")[0].length assert output_width == np.ceil(width / float(stride)), ("Same convolution output width != " "ceil(input_width / stride): {} != " "ceil({} / {})").format(output_width, width, stride)
identifier_body
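For reference, test_dilated_conv above derives the effective filter width as dilation * (conv_size - 1) + 1 and the stride-1 output width as image_size + 2 * pad - filter_size + 1. A small worked check of those formulas with the sizes hard-coded in the test (a sketch, not part of the test suite):

# image_size=3, conv_size=3, pad=3 are the values hard-coded in test_dilated_conv
image_size, conv_size, pad = 3, 3, 3
for dilation in (1, 2, 3):
    filter_size = dilation * (conv_size - 1) + 1          # effective (dilated) filter width
    out_size = (image_size + 2 * pad - filter_size) + 1   # stride-1 output width
    print(dilation, filter_size, out_size)
# dilation=1 -> filter_size=3, out_size=7
# dilation=2 -> filter_size=5, out_size=5
# dilation=3 -> filter_size=7, out_size=3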
test_conv_layer.py
import pytest import numpy as np from collections import OrderedDict from contextlib import closing import neon as ng import neon.transformers as ngt from neon.testing import executor from neon.frontend.common import utils from neon.op_graph.axes import IncompatibleAxesError from neon.frontend import Convolution, Deconvolution, Sequential from neon.frontend import ConstantInit, Rectlin, GaussianInit, make_bound_computation def reference_conv1d(inputs, filters, activation, strides=1, padding=0): # for now: assert strides == 1 assert padding == 0 # inputs: features, time steps (conv axis), batch size # filters: input feature dimension/channels, T=1, R=filter_width, S=1, K=num_filters # result: K, 1, time_steps - S + 1, 1, batch size filters = np.squeeze(filters) # input channels, filter_width, num_filters feature_dimension, time_steps_in, batch_size = inputs.shape filter_width = filters.shape[1] K = filters.shape[-1] time_steps_out = time_steps_in - filter_width + 1 result = np.zeros((K, time_steps_out, batch_size)) # TODO: refactor to make this more efficient for t in range(time_steps_out): for k in range(K): for n in range(batch_size): result[k, t, n] = np.sum(inputs[:, t:t + filter_width, n] * filters[:, :, k]) result = activation(result) # expand dimensions from K, time_steps, batch_size to (K, 1, time_steps, 1, batch_size) result = np.expand_dims(np.expand_dims(result, axis=1), axis=3) return result # TODO: Remove these to conftest.py @pytest.fixture(params=[1]) def input_size(request): return request.param @pytest.fixture(params=[16]) def output_size(request): return request.param @pytest.fixture(params=[4]) def batch_size(request): return request.param @pytest.fixture def width_axis(width): return ng.make_axis(length=width, name="W") @pytest.fixture def conv1d_placeholder(channel_axis, width_axis, batch_axis): return ng.placeholder((channel_axis, width_axis, batch_axis)) @pytest.fixture def conv1d_no_channel_axis(width_axis, batch_axis): return ng.placeholder((width_axis, batch_axis)) @pytest.fixture def spatial_onehot(input_size, width, batch_size): value = np.zeros((input_size, width, batch_size)) value[:, width // 2, :] = 1 return value @pytest.mark.xfail(reason='1d conv not supported') def test_causal_convolution(conv1d_placeholder, spatial_onehot, output_size, width): """ Test that causal convolutions only operate on leftward inputs""" conv_layer = Convolution((3, output_size), lambda x: 1, padding="causal") output = conv_layer(conv1d_placeholder) output_width = output.axes.find_by_name("W")[0].length assert output_width == width, "Causal convolution output width != " \ "input width: {} != {}".format(output_width, width) with executor(output, conv1d_placeholder) as comp: output_val = comp(spatial_onehot) # First 1 is at width // 2, so anything before that should be 0 assert (output_val[:, :width // 2] == 0).all(), "Acausal outputs in causal convolution" @pytest.mark.xfail(reason='1d conv not supported') @pytest.mark.parametrize("stride", (1, 3)) def test_same_convolution(conv1d_placeholder, spatial_onehot, output_size, width, stride): """ Test that 'same' always results in out_size = np.ceil(in_size / stride) """ conv_layer = Convolution((3, output_size), lambda x: 1, strides=stride, padding="same") output = conv_layer(conv1d_placeholder) output_width = output.axes.find_by_name("W")[0].length assert output_width == np.ceil(width / float(stride)), ("Same convolution output width != " "ceil(input_width / stride): {} != " "ceil({} / {})").format(output_width, width, stride) 
@pytest.mark.xfail(reason='1d conv not supported') def test_axis_preservation(conv1d_placeholder, output_size): """ Test that axes into a conv are the same as axes out""" conv_layer = Convolution((3, output_size), lambda x: 1) output = conv_layer(conv1d_placeholder) assert output.axes == conv1d_placeholder.axes, ("Output axes are not the same as input axes: " "{} != {}").format(output.axes, conv1d_placeholder.axes) @pytest.mark.xfail(reason='1d conv and channel name not supported') def test_channel_axis_introduction(conv1d_no_channel_axis, output_size, channel_axis): """ Test that a channel axis is added when it doesn't exist in the input""" conv_layer = Convolution((3, output_size), lambda x: 1) output = conv_layer(conv1d_no_channel_axis) t_axes = conv1d_no_channel_axis.axes + channel_axis assert output.axes.is_equal_set(t_axes), ("Output axes are not input axes + channel axis:" "{} != {} + {}").format(output.axes, conv1d_no_channel_axis.axes, channel_axis) @pytest.mark.xfail(reason='1d conv and channel name not supported') def test_alternate_spatial_axes(conv1d_placeholder, output_size, width_axis): """ Test that spatial axis names are modifiable """ width_axis.name = "time" assert len(conv1d_placeholder.axes.find_by_name("time")) == 1 conv_layer = Convolution((3, output_size), lambda x: 1) with pytest.raises(IncompatibleAxesError): conv_layer(conv1d_placeholder) # As a dictionary output = conv_layer(conv1d_placeholder, spatial_axes={"W": "time"}) assert output.axes == conv1d_placeholder.axes # As a tuple output = conv_layer(conv1d_placeholder, spatial_axes=("D", "H", "time")) assert output.axes == conv1d_placeholder.axes @pytest.mark.xfail(reason='1d conv and channel name not supported') def test_alternate_channel_axes(conv1d_placeholder, output_size, channel_axis): """ Test that channel axis names are modifiable""" channel_axis.name = "channel" assert len(conv1d_placeholder.axes.find_by_name("channel")) == 1 conv_layer = Convolution((3, output_size), lambda x: 1) with pytest.raises(IncompatibleAxesError): conv_layer(conv1d_placeholder) output = conv_layer(conv1d_placeholder, channel_axes="channel") assert output.axes == conv1d_placeholder.axes @pytest.mark.xfail(reason='resolution issue') @pytest.mark.parametrize('dilation', [1, 2, 3]) def
(dilation): """Test that the dilated convolution layer output matches expected. This test compares the maximum output value to an expected max output value. The expected value is computed based on the dilation parameter. The test also checks that the output size matches the expected size based on the dilaton parameter value.""" image_size = 3 batch_size = 1 init_val = 0.1 conv_size = 3 pad = 3 N_filters = 1 image_channels = 3 model = Sequential([Convolution((conv_size, conv_size, N_filters), filter_init=ConstantInit(val=init_val), padding=pad, dilation=dilation)]) X = np.ones(shape=(batch_size, 3, image_size, image_size)) # Create dummy image data = {'image': X, 'iteration': 1} data_size = OrderedDict([('N', batch_size), ('C', 3), ('H', image_size), ('W', image_size)]) ax = [ng.make_axis(length=data_size[k], name=k) for k in list(data_size.keys())] p_axes = ng.make_axes(ax) named_inputs = {'image': ng.placeholder(p_axes)} outputs = model(named_inputs['image']) named_outputs = {outputs.name: outputs} with closing(ngt.make_transformer()) as transformer: m = make_bound_computation(transformer, named_outputs, named_inputs) output = m(data)[list(m(data).keys())[0]] filter_size = dilation * (conv_size - 1) + 1 # Compute expected filter size # Compute the expected output size based on convolution parameters out_size = (image_size + 2 * pad - filter_size) + 1 filt_tmp = np.zeros(filter_size) filt_tmp[0::dilation] = 1 # max overlap between dilated filter and image (in 1-d) max_overlap = int(np.min([filter_size, image_size])) exp_max_output = init_val * image_channels * (np.sum(filt_tmp[0: max_overlap]))**2 # Expected max output changes for different dilation parameter values# assert int(10 * np.max(output)) == int(10 * exp_max_output), \ ("Dilated conv max outputs do not match expected: " "{} != {}").format(np.max(output), init_val * conv_size * ((image_size - (dilation - 1))**2)) assert np.shape(output) == (batch_size, N_filters, out_size, out_size), \ ("Dilated conv output is not expected size: " "{} != {}").format(np.shape(output), (batch_size, N_filters, out_size, out_size)) @pytest.mark.xfail(reason='Not implemented') @pytest.mark.parametrize('filter_width', [3]) @pytest.mark.parametrize('num_filters', [2]) @pytest.mark.parametrize('strides', [1]) @pytest.mark.parametrize('padding', [0]) @pytest.mark.parametrize('time_steps', [5]) @pytest.mark.parametrize('feature_dimension', [4]) @pytest.mark.parametrize('batch_size', [2]) def test_conv1d(transformer_factory, filter_width, num_filters, strides, padding, time_steps, feature_dimension, batch_size): dilation = 1 # reference conv does not support dilation F = ng.make_axis(name='F', length=feature_dimension) REC = ng.make_axis(name='REC', length=time_steps) N = ng.make_axis(name='N', length=batch_size) in_axes = ng.make_axes([F, REC, N]) inputs = ng.placeholder(axes=in_axes) input_vals = np.random.randn(*in_axes.lengths) filter_init = GaussianInit() conv1d = Convolution((filter_width, num_filters), filter_init, strides=strides, padding=padding, dilation=dilation, bias_init=None, activation=Rectlin(), batch_norm=None) result_op = conv1d(inputs, channel_axes='F', spatial_axes={'W': 'REC'}) with closing(ngt.make_transformer()) as transformer: result_comp = transformer.add_computation(ng.computation(result_op, inputs)) filter_vals = transformer.add_computation(ng.computation(conv1d.conv.W))() result_ng = result_comp(input_vals) result_np = np.squeeze(reference_conv1d(input_vals, filter_vals, lambda x: np.maximum(0, x))) 
ng.testing.assert_allclose(result_ng, result_np) @pytest.mark.xfail(reason='Not implemented') @pytest.mark.transformer_dependent def test_deconv(): """ basic test of deconv fprop. ngraph/tests/test_conv.py tests ng.deconvolution bprop """ # filter params R, S = 5, 5 fshape = (R, S, 1) strides = 2 filter_val_nz = np.arange(1, R * S + 1).reshape(R, S) filter_val = np.zeros(fshape) filter_val[:, :, 0] = filter_val_nz deconv = Deconvolution(fshape, filter_init=ConstantInit(filter_val), strides=strides, padding=0, dilation=1) N = ng.make_axis(name='N', length=1) # batch image_shape = (1, 8, 8) # CHW image_axes = ng.make_axes([ng.make_axis(name=nm, length=l) for nm, l in zip('CHW', image_shape)]) image_axes |= N image = ng.placeholder(axes=image_axes) output = deconv(image) with closing(ngt.make_transformer()) as transformer: comp = transformer.add_computation(ng.computation(output, image)) input_val = np.zeros(image_shape + (N.length, ), dtype=float) input_val[0, 0, 0] = 1 input_val[0, 5, 5] = 1 input_val[0, 7, 7] = 1 result = comp(input_val) feature_map = np.squeeze(result) assert (feature_map[:5, :5] == filter_val_nz).all() result2 = filter_val_nz.copy() result2[-1, -1] = 26 assert (feature_map[10:15, 10:15] == result2).all() result3 = filter_val_nz.copy() result3[0, 0] = 26 assert (feature_map[-5:, -5:] == result3).all() @pytest.mark.parametrize("input_size", (10, 25)) @pytest.mark.parametrize("filter_size", (3, 4)) @pytest.mark.parametrize("padding", ((0, 0), (3, 4))) @pytest.mark.parametrize("stride", (1, 3)) def test_conv_inverts_deconv(transformer_factory, input_size, filter_size, padding, stride): """ Test that conv and deconv are inverse operations given the same parameters""" # convolutions whose output size are not an even multiple of stride cannot be exactly inverted a = (input_size + sum(padding) - filter_size) % stride conv_output = utils.conv_output_dim(input_size, filter_size, padding, stride) deconv_output = utils.deconv_output_dim(conv_output, filter_size, padding, stride) assert deconv_output == (input_size - a), ("Convolution and Deconvolution do not invert:\n" "output ({}) != input ({}) - a ({})\n" "filter: {}, padding: {}, stride: {}" ).format(deconv_output, input_size, a, filter_size, padding, stride)
test_dilated_conv
identifier_name
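test_conv_inverts_deconv relies on utils.conv_output_dim and utils.deconv_output_dim, whose implementations are not shown here. The sketch below gives the conventional floor-based formulas they are assumed to follow, plus one worked case from the test's parametrization; the function bodies are assumptions for illustration, not the library's actual code.

def conv_output_dim(input_size, filter_size, padding, stride):
    # standard floor-based convolution output size; padding is (pad_left, pad_right)
    return (input_size + sum(padding) - filter_size) // stride + 1

def deconv_output_dim(input_size, filter_size, padding, stride):
    # transposed-convolution output size, inverting the formula above
    return (input_size - 1) * stride - sum(padding) + filter_size

# Example: input_size=10, filter_size=4, padding=(3, 4), stride=3
# a = (10 + 7 - 4) % 3 = 1
# conv_output_dim   -> (10 + 7 - 4) // 3 + 1 = 5
# deconv_output_dim -> (5 - 1) * 3 - 7 + 4   = 9 = input_size - a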
test_conv_layer.py
import pytest import numpy as np from collections import OrderedDict from contextlib import closing import neon as ng import neon.transformers as ngt from neon.testing import executor from neon.frontend.common import utils from neon.op_graph.axes import IncompatibleAxesError from neon.frontend import Convolution, Deconvolution, Sequential from neon.frontend import ConstantInit, Rectlin, GaussianInit, make_bound_computation def reference_conv1d(inputs, filters, activation, strides=1, padding=0): # for now: assert strides == 1 assert padding == 0 # inputs: features, time steps (conv axis), batch size # filters: input feature dimension/channels, T=1, R=filter_width, S=1, K=num_filters # result: K, 1, time_steps - S + 1, 1, batch size filters = np.squeeze(filters) # input channels, filter_width, num_filters feature_dimension, time_steps_in, batch_size = inputs.shape filter_width = filters.shape[1] K = filters.shape[-1] time_steps_out = time_steps_in - filter_width + 1 result = np.zeros((K, time_steps_out, batch_size)) # TODO: refactor to make this more efficient for t in range(time_steps_out): for k in range(K): for n in range(batch_size): result[k, t, n] = np.sum(inputs[:, t:t + filter_width, n] * filters[:, :, k]) result = activation(result) # expand dimensions from K, time_steps, batch_size to (K, 1, time_steps, 1, batch_size) result = np.expand_dims(np.expand_dims(result, axis=1), axis=3) return result # TODO: Remove these to conftest.py @pytest.fixture(params=[1]) def input_size(request): return request.param @pytest.fixture(params=[16]) def output_size(request): return request.param @pytest.fixture(params=[4]) def batch_size(request): return request.param @pytest.fixture def width_axis(width): return ng.make_axis(length=width, name="W") @pytest.fixture def conv1d_placeholder(channel_axis, width_axis, batch_axis): return ng.placeholder((channel_axis, width_axis, batch_axis)) @pytest.fixture def conv1d_no_channel_axis(width_axis, batch_axis): return ng.placeholder((width_axis, batch_axis)) @pytest.fixture def spatial_onehot(input_size, width, batch_size): value = np.zeros((input_size, width, batch_size)) value[:, width // 2, :] = 1 return value @pytest.mark.xfail(reason='1d conv not supported') def test_causal_convolution(conv1d_placeholder, spatial_onehot, output_size, width): """ Test that causal convolutions only operate on leftward inputs""" conv_layer = Convolution((3, output_size), lambda x: 1, padding="causal") output = conv_layer(conv1d_placeholder) output_width = output.axes.find_by_name("W")[0].length assert output_width == width, "Causal convolution output width != " \ "input width: {} != {}".format(output_width, width) with executor(output, conv1d_placeholder) as comp: output_val = comp(spatial_onehot) # First 1 is at width // 2, so anything before that should be 0 assert (output_val[:, :width // 2] == 0).all(), "Acausal outputs in causal convolution" @pytest.mark.xfail(reason='1d conv not supported') @pytest.mark.parametrize("stride", (1, 3)) def test_same_convolution(conv1d_placeholder, spatial_onehot, output_size, width, stride): """ Test that 'same' always results in out_size = np.ceil(in_size / stride) """ conv_layer = Convolution((3, output_size), lambda x: 1, strides=stride, padding="same") output = conv_layer(conv1d_placeholder) output_width = output.axes.find_by_name("W")[0].length assert output_width == np.ceil(width / float(stride)), ("Same convolution output width != " "ceil(input_width / stride): {} != " "ceil({} / {})").format(output_width, width, stride) 
@pytest.mark.xfail(reason='1d conv not supported') def test_axis_preservation(conv1d_placeholder, output_size): """ Test that axes into a conv are the same as axes out""" conv_layer = Convolution((3, output_size), lambda x: 1) output = conv_layer(conv1d_placeholder) assert output.axes == conv1d_placeholder.axes, ("Output axes are not the same as input axes: " "{} != {}").format(output.axes, conv1d_placeholder.axes) @pytest.mark.xfail(reason='1d conv and channel name not supported') def test_channel_axis_introduction(conv1d_no_channel_axis, output_size, channel_axis): """ Test that a channel axis is added when it doesn't exist in the input""" conv_layer = Convolution((3, output_size), lambda x: 1) output = conv_layer(conv1d_no_channel_axis) t_axes = conv1d_no_channel_axis.axes + channel_axis assert output.axes.is_equal_set(t_axes), ("Output axes are not input axes + channel axis:" "{} != {} + {}").format(output.axes, conv1d_no_channel_axis.axes, channel_axis) @pytest.mark.xfail(reason='1d conv and channel name not supported') def test_alternate_spatial_axes(conv1d_placeholder, output_size, width_axis): """ Test that spatial axis names are modifiable """ width_axis.name = "time" assert len(conv1d_placeholder.axes.find_by_name("time")) == 1 conv_layer = Convolution((3, output_size), lambda x: 1) with pytest.raises(IncompatibleAxesError): conv_layer(conv1d_placeholder) # As a dictionary output = conv_layer(conv1d_placeholder, spatial_axes={"W": "time"}) assert output.axes == conv1d_placeholder.axes # As a tuple output = conv_layer(conv1d_placeholder, spatial_axes=("D", "H", "time")) assert output.axes == conv1d_placeholder.axes @pytest.mark.xfail(reason='1d conv and channel name not supported') def test_alternate_channel_axes(conv1d_placeholder, output_size, channel_axis): """ Test that channel axis names are modifiable""" channel_axis.name = "channel" assert len(conv1d_placeholder.axes.find_by_name("channel")) == 1 conv_layer = Convolution((3, output_size), lambda x: 1) with pytest.raises(IncompatibleAxesError): conv_layer(conv1d_placeholder) output = conv_layer(conv1d_placeholder, channel_axes="channel") assert output.axes == conv1d_placeholder.axes @pytest.mark.xfail(reason='resolution issue') @pytest.mark.parametrize('dilation', [1, 2, 3]) def test_dilated_conv(dilation): """Test that the dilated convolution layer output matches expected. This test compares the maximum output value to an expected max output value. The expected value is computed based on the dilation parameter. The test also checks that the output size matches the expected size based on the dilaton parameter value.""" image_size = 3 batch_size = 1 init_val = 0.1
N_filters = 1 image_channels = 3 model = Sequential([Convolution((conv_size, conv_size, N_filters), filter_init=ConstantInit(val=init_val), padding=pad, dilation=dilation)]) X = np.ones(shape=(batch_size, 3, image_size, image_size)) # Create dummy image data = {'image': X, 'iteration': 1} data_size = OrderedDict([('N', batch_size), ('C', 3), ('H', image_size), ('W', image_size)]) ax = [ng.make_axis(length=data_size[k], name=k) for k in list(data_size.keys())] p_axes = ng.make_axes(ax) named_inputs = {'image': ng.placeholder(p_axes)} outputs = model(named_inputs['image']) named_outputs = {outputs.name: outputs} with closing(ngt.make_transformer()) as transformer: m = make_bound_computation(transformer, named_outputs, named_inputs) output = m(data)[list(m(data).keys())[0]] filter_size = dilation * (conv_size - 1) + 1 # Compute expected filter size # Compute the expected output size based on convolution parameters out_size = (image_size + 2 * pad - filter_size) + 1 filt_tmp = np.zeros(filter_size) filt_tmp[0::dilation] = 1 # max overlap between dilated filter and image (in 1-d) max_overlap = int(np.min([filter_size, image_size])) exp_max_output = init_val * image_channels * (np.sum(filt_tmp[0: max_overlap]))**2 # Expected max output changes for different dilation parameter values# assert int(10 * np.max(output)) == int(10 * exp_max_output), \ ("Dilated conv max outputs do not match expected: " "{} != {}").format(np.max(output), init_val * conv_size * ((image_size - (dilation - 1))**2)) assert np.shape(output) == (batch_size, N_filters, out_size, out_size), \ ("Dilated conv output is not expected size: " "{} != {}").format(np.shape(output), (batch_size, N_filters, out_size, out_size)) @pytest.mark.xfail(reason='Not implemented') @pytest.mark.parametrize('filter_width', [3]) @pytest.mark.parametrize('num_filters', [2]) @pytest.mark.parametrize('strides', [1]) @pytest.mark.parametrize('padding', [0]) @pytest.mark.parametrize('time_steps', [5]) @pytest.mark.parametrize('feature_dimension', [4]) @pytest.mark.parametrize('batch_size', [2]) def test_conv1d(transformer_factory, filter_width, num_filters, strides, padding, time_steps, feature_dimension, batch_size): dilation = 1 # reference conv does not support dilation F = ng.make_axis(name='F', length=feature_dimension) REC = ng.make_axis(name='REC', length=time_steps) N = ng.make_axis(name='N', length=batch_size) in_axes = ng.make_axes([F, REC, N]) inputs = ng.placeholder(axes=in_axes) input_vals = np.random.randn(*in_axes.lengths) filter_init = GaussianInit() conv1d = Convolution((filter_width, num_filters), filter_init, strides=strides, padding=padding, dilation=dilation, bias_init=None, activation=Rectlin(), batch_norm=None) result_op = conv1d(inputs, channel_axes='F', spatial_axes={'W': 'REC'}) with closing(ngt.make_transformer()) as transformer: result_comp = transformer.add_computation(ng.computation(result_op, inputs)) filter_vals = transformer.add_computation(ng.computation(conv1d.conv.W))() result_ng = result_comp(input_vals) result_np = np.squeeze(reference_conv1d(input_vals, filter_vals, lambda x: np.maximum(0, x))) ng.testing.assert_allclose(result_ng, result_np) @pytest.mark.xfail(reason='Not implemented') @pytest.mark.transformer_dependent def test_deconv(): """ basic test of deconv fprop. 
ngraph/tests/test_conv.py tests ng.deconvolution bprop """ # filter params R, S = 5, 5 fshape = (R, S, 1) strides = 2 filter_val_nz = np.arange(1, R * S + 1).reshape(R, S) filter_val = np.zeros(fshape) filter_val[:, :, 0] = filter_val_nz deconv = Deconvolution(fshape, filter_init=ConstantInit(filter_val), strides=strides, padding=0, dilation=1) N = ng.make_axis(name='N', length=1) # batch image_shape = (1, 8, 8) # CHW image_axes = ng.make_axes([ng.make_axis(name=nm, length=l) for nm, l in zip('CHW', image_shape)]) image_axes |= N image = ng.placeholder(axes=image_axes) output = deconv(image) with closing(ngt.make_transformer()) as transformer: comp = transformer.add_computation(ng.computation(output, image)) input_val = np.zeros(image_shape + (N.length, ), dtype=float) input_val[0, 0, 0] = 1 input_val[0, 5, 5] = 1 input_val[0, 7, 7] = 1 result = comp(input_val) feature_map = np.squeeze(result) assert (feature_map[:5, :5] == filter_val_nz).all() result2 = filter_val_nz.copy() result2[-1, -1] = 26 assert (feature_map[10:15, 10:15] == result2).all() result3 = filter_val_nz.copy() result3[0, 0] = 26 assert (feature_map[-5:, -5:] == result3).all() @pytest.mark.parametrize("input_size", (10, 25)) @pytest.mark.parametrize("filter_size", (3, 4)) @pytest.mark.parametrize("padding", ((0, 0), (3, 4))) @pytest.mark.parametrize("stride", (1, 3)) def test_conv_inverts_deconv(transformer_factory, input_size, filter_size, padding, stride): """ Test that conv and deconv are inverse operations given the same parameters""" # convolutions whose output size are not an even multiple of stride cannot be exactly inverted a = (input_size + sum(padding) - filter_size) % stride conv_output = utils.conv_output_dim(input_size, filter_size, padding, stride) deconv_output = utils.deconv_output_dim(conv_output, filter_size, padding, stride) assert deconv_output == (input_size - a), ("Convolution and Deconvolution do not invert:\n" "output ({}) != input ({}) - a ({})\n" "filter: {}, padding: {}, stride: {}" ).format(deconv_output, input_size, a, filter_size, padding, stride)
conv_size = 3 pad = 3
random_line_split
controller.go
package controller import ( "context" "fmt" "math" "time" "github.com/practo/klog/v2" "github.com/prometheus/client_golang/prometheus" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" appsinformers "k8s.io/client-go/informers/apps/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" appslisters "k8s.io/client-go/listers/apps/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" v1 "github.com/practo/k8s-worker-pod-autoscaler/pkg/apis/workerpodautoscaler/v1" clientset "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/clientset/versioned" samplescheme "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/clientset/versioned/scheme" informers "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/informers/externalversions/workerpodautoscaler/v1" listers "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/listers/workerpodautoscaler/v1" queue "github.com/practo/k8s-worker-pod-autoscaler/pkg/queue" ) const controllerAgentName = "workerpodautoscaler-controller" const ( // SuccessSynced is used as part of the Event 'reason' when a WorkerPodAutoScaler is synced SuccessSynced = "Synced" // ErrResourceExists is used as part of the Event 'reason' when a WorkerPodAutoScaler fails // to sync due to a Deployment of the same name already existing. ErrResourceExists = "ErrResourceExists" // MessageResourceExists is the message used for Events when a resource // fails to sync due to a Deployment already existing MessageResourceExists = "Resource %q already exists and is not managed by WorkerPodAutoScaler" // MessageResourceSynced is the message used for an Event fired when a WorkerPodAutoScaler // is synced successfully MessageResourceSynced = "WorkerPodAutoScaler synced successfully" // WokerPodAutoScalerEventAdd stores the add event name WokerPodAutoScalerEventAdd = "add" // WokerPodAutoScalerEventUpdate stores the add event name WokerPodAutoScalerEventUpdate = "update" // WokerPodAutoScalerEventDelete stores the add event name WokerPodAutoScalerEventDelete = "delete" ) var ( loopDurationSeconds = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "controller", Name: "loop_duration_seconds", Help: "Number of seconds to complete the control loop successfully, partitioned by wpa name and namespace", }, []string{"workerpodautoscaler", "namespace"}, ) loopCountSuccess = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "wpa", Subsystem: "controller", Name: "loop_count_success", Help: "How many times the control loop executed successfully, partitioned by wpa name and namespace", }, []string{"workerpodautoscaler", "namespace"}, ) qMsgs = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "queue", Name: "messages", Help: "Number of unprocessed messages in the queue", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) qMsgsSPM = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "queue", Name: "messages_sent_per_minute", Help: "Number of messages sent to the queue per minute", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersIdle = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "idle", Help: 
"Number of idle workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersCurrent = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "current", Help: "Number of current workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersDesired = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "desired", Help: "Number of desired workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersAvailable = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "available", Help: "Number of available workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) ) func init() { prometheus.MustRegister(loopDurationSeconds) prometheus.MustRegister(loopCountSuccess) prometheus.MustRegister(qMsgs) prometheus.MustRegister(qMsgsSPM) prometheus.MustRegister(workersIdle) prometheus.MustRegister(workersCurrent) prometheus.MustRegister(workersDesired) prometheus.MustRegister(workersAvailable) } type WokerPodAutoScalerEvent struct { key string name string } // Controller is the controller implementation for WorkerPodAutoScaler resources type Controller struct { ctx context.Context // kubeclientset is a standard kubernetes clientset kubeclientset kubernetes.Interface // customclientset is a clientset for our own API group customclientset clientset.Interface deploymentLister appslisters.DeploymentLister deploymentsSynced cache.InformerSynced replicaSetLister appslisters.ReplicaSetLister replicaSetsSynced cache.InformerSynced workerPodAutoScalersLister listers.WorkerPodAutoScalerLister workerPodAutoScalersSynced cache.InformerSynced // workqueue is a rate limited work queue. This is used to queue work to be // processed instead of performing it as soon as a change happens. This // means we can ensure we only process a fixed amount of resources at a // time, and makes it easy to ensure we are never processing the same item // simultaneously in two different workers. workqueue workqueue.RateLimitingInterface // recorder is an event recorder for recording Event resources to the // Kubernetes API. recorder record.EventRecorder // defaultMaxDisruption // it is the default value for the maxDisruption in the WPA spec. // This specifies how much percentage of pods can be disrupted in a // single scale down acitivity. // Can be expressed as integers or as a percentage. defaultMaxDisruption string // QueueList keeps the list of all the queues in memeory // which is used by the core controller and the sqs exporter // scaleDownDelay after last scale up // the no of seconds to wait after the last scale up before scaling down scaleDownDelay time.Duration Queues *queue.Queues } // NewController returns a new sample controller func NewController( ctx context.Context, kubeclientset kubernetes.Interface, customclientset clientset.Interface, deploymentInformer appsinformers.DeploymentInformer, replicaSetInformer appsinformers.ReplicaSetInformer, workerPodAutoScalerInformer informers.WorkerPodAutoScalerInformer, defaultMaxDisruption string, resyncPeriod time.Duration, scaleDownDelay time.Duration, queues *queue.Queues) *Controller { // Create event broadcaster // Add sample-controller types to the default Kubernetes Scheme so Events can be // logged for sample-controller types. 
utilruntime.Must(samplescheme.AddToScheme(scheme.Scheme)) klog.V(4).Info("Creating event broadcaster") eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) controller := &Controller{ ctx: ctx, kubeclientset: kubeclientset, customclientset: customclientset, deploymentLister: deploymentInformer.Lister(), deploymentsSynced: deploymentInformer.Informer().HasSynced, replicaSetLister: replicaSetInformer.Lister(), replicaSetsSynced: replicaSetInformer.Informer().HasSynced, workerPodAutoScalersLister: workerPodAutoScalerInformer.Lister(), workerPodAutoScalersSynced: workerPodAutoScalerInformer.Informer().HasSynced, workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkerPodAutoScalers"), recorder: recorder, defaultMaxDisruption: defaultMaxDisruption, scaleDownDelay: scaleDownDelay, Queues: queues, } klog.V(4).Info("Setting up event handlers") // Set up an event handler for when WorkerPodAutoScaler resources change workerPodAutoScalerInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ AddFunc: controller.enqueueAddWorkerPodAutoScaler, UpdateFunc: func(old, new interface{}) { controller.enqueueUpdateWorkerPodAutoScaler(new) }, DeleteFunc: controller.enqueueDeleteWorkerPodAutoScaler, }, resyncPeriod) return controller } // Run will set up the event handlers for types we are interested in, as well // as syncing informer caches and starting workers. It will block until stopCh // is closed, at which point it will shutdown the workqueue and wait for // workers to finish processing their current work items. func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { defer utilruntime.HandleCrash() defer c.workqueue.ShutDown() // Start the informer factories to begin populating the informer caches klog.V(1).Info("Starting WorkerPodAutoScaler controller") // Wait for the caches to be synced before starting workers klog.V(1).Info("Waiting for informer caches to sync") if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.workerPodAutoScalersSynced); !ok { return fmt.Errorf("failed to wait for caches to sync") } klog.V(1).Info("Starting workers") // Launch two workers to process WorkerPodAutoScaler resources for i := 0; i < threadiness; i++ { // TOOD: move from stopCh to context, use: UntilWithContext() go wait.Until(c.runWorker, time.Second, stopCh) } <-stopCh klog.V(1).Info("Shutting down workers") return nil } // runWorker is a long-running function that will continually call the // processNextWorkItem function in order to read and process a message on the // workqueue. func (c *Controller) runWorker() { for c.processNextWorkItem(c.ctx) { } } // processNextWorkItem will read a single work item off the workqueue and // attempt to process it, by calling the syncHandler. func (c *Controller) processNextWorkItem(ctx context.Context) bool { obj, shutdown := c.workqueue.Get() if shutdown { return false } // We wrap this block in a func so we can defer c.workqueue.Done. err := func(obj interface{}) error { // We call Done here so the workqueue knows we have finished // processing this item. We also must remember to call Forget if we // do not want this work item being re-queued. 
For example, we do // not call Forget if a transient error occurs, instead the item is // put back on the workqueue and attempted again after a back-off // period. defer c.workqueue.Done(obj) var ok bool // We expect strings to come off the workqueue. These are of the // form namespace/name. We do this as the delayed nature of the // workqueue means the items in the informer cache may actually be // more up to date than when the item was initially put onto the // workqueue.(PS: not anymore, it's a WPA event) event, ok := obj.(WokerPodAutoScalerEvent) if !ok { // As the item in the workqueue is actually invalid, we call // Forget here else we'd go into a loop of attempting to // process a work item that is invalid. c.workqueue.Forget(obj) utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) return nil } // Run the syncHandler, passing it the namespace/name string of the // WorkerPodAutoScaler resource to be synced. if err := c.syncHandler(ctx, event); err != nil { // Put the item back on the workqueue to handle any transient errors. c.workqueue.AddRateLimited(event) return fmt.Errorf("error syncing '%s': %s, requeuing", event, err.Error()) } // Finally, if no error occurs we Forget this item so it does not // get queued again until another change happens. c.workqueue.Forget(obj) return nil }(obj) if err != nil { utilruntime.HandleError(err) return true } return true } // syncHandler compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the WorkerPodAutoScaler resource // with the current status of the resource. func (c *Controller) syncHandler(ctx context.Context, event WokerPodAutoScalerEvent) error { now := time.Now() key := event.key // Convert the namespace/name string into a distinct namespace and name namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key)) return nil } // Get the WorkerPodAutoScaler resource with this namespace/name workerPodAutoScaler, err := c.workerPodAutoScalersLister.WorkerPodAutoScalers(namespace).Get(name) if err != nil { // The WorkerPodAutoScaler resource may no longer exist, in which case we stop processing.
if errors.IsNotFound(err) { utilruntime.HandleError(fmt.Errorf("workerPodAutoScaler '%s' in work queue no longer exists", key)) c.Queues.Delete(namespace, name) return nil } return err } var currentWorkers, availableWorkers int32 deploymentName := workerPodAutoScaler.Spec.DeploymentName replicaSetName := workerPodAutoScaler.Spec.ReplicaSetName if deploymentName != "" { // Get the Deployment with the name specified in WorkerPodAutoScaler.spec deployment, err := c.deploymentLister.Deployments(workerPodAutoScaler.Namespace).Get(deploymentName) if errors.IsNotFound(err) { return fmt.Errorf("deployment %s not found in namespace %s", deploymentName, workerPodAutoScaler.Namespace) } else if err != nil { return err } currentWorkers = *deployment.Spec.Replicas availableWorkers = deployment.Status.AvailableReplicas } else if replicaSetName != "" { // Get the ReplicaSet with the name specified in WorkerPodAutoScaler.spec replicaSet, err := c.replicaSetLister.ReplicaSets(workerPodAutoScaler.Namespace).Get(replicaSetName) if errors.IsNotFound(err) { return fmt.Errorf("ReplicaSet %s not found in namespace %s", replicaSetName, workerPodAutoScaler.Namespace) } else if err != nil { return err } currentWorkers = *replicaSet.Spec.Replicas availableWorkers = replicaSet.Status.AvailableReplicas } else { // We choose to absorb the error here as the worker would requeue the // resource otherwise. Instead, the next time the resource is updated // the resource will be queued again. utilruntime.HandleError(fmt.Errorf("%s: deployment or replicaset name must be specified", key)) return nil } var secondsToProcessOneJob float64 if workerPodAutoScaler.Spec.SecondsToProcessOneJob != nil { secondsToProcessOneJob = *workerPodAutoScaler.Spec.SecondsToProcessOneJob } switch event.name { case WokerPodAutoScalerEventAdd: err = c.Queues.Add( namespace, name, workerPodAutoScaler.Spec.QueueURI, currentWorkers, secondsToProcessOneJob, ) case WokerPodAutoScalerEventUpdate: err = c.Queues.Add( namespace, name, workerPodAutoScaler.Spec.QueueURI, currentWorkers, secondsToProcessOneJob, ) case WokerPodAutoScalerEventDelete: err = c.Queues.Delete(namespace, name) } if err != nil { utilruntime.HandleError(fmt.Errorf("unable to sync queue: %s", err.Error())) return err } queueName, queueMessages, messagesSentPerMinute, idleWorkers := c.Queues.GetQueueInfo( namespace, name) if queueName == "" { return nil } if queueMessages == queue.UnsyncedQueueMessageCount { klog.Warningf( "%s qMsgs: %d, q not initialized, waiting for init to complete", queueName, queueMessages, ) return nil } desiredWorkers := GetDesiredWorkers( queueName, queueMessages, messagesSentPerMinute, secondsToProcessOneJob, *workerPodAutoScaler.Spec.TargetMessagesPerWorker, currentWorkers, idleWorkers, *workerPodAutoScaler.Spec.MinReplicas, *workerPodAutoScaler.Spec.MaxReplicas, workerPodAutoScaler.GetMaxDisruption(c.defaultMaxDisruption), ) klog.V(2).Infof("%s current: %d", queueName, currentWorkers) klog.V(2).Infof("%s qMsgs: %d, desired: %d", queueName, queueMessages, desiredWorkers) // set metrics qMsgs.WithLabelValues( name, namespace, queueName, ).Set(float64(queueMessages)) qMsgsSPM.WithLabelValues( name, namespace, queueName, ).Set(messagesSentPerMinute) workersIdle.WithLabelValues( name, namespace, queueName,
).Set(float64(currentWorkers)) workersDesired.WithLabelValues( name, namespace, queueName, ).Set(float64(desiredWorkers)) workersAvailable.WithLabelValues( name, namespace, queueName, ).Set(float64(availableWorkers)) lastScaleTime := workerPodAutoScaler.Status.LastScaleTime.DeepCopy() op := GetScaleOperation( queueName, desiredWorkers, currentWorkers, lastScaleTime, c.scaleDownDelay, ) if op == ScaleUp || op == ScaleDown { if deploymentName != "" { c.updateDeployment( ctx, workerPodAutoScaler.Namespace, deploymentName, &desiredWorkers) } else { c.updateReplicaSet( ctx, workerPodAutoScaler.Namespace, replicaSetName, &desiredWorkers) } now := metav1.Now() lastScaleTime = &now } klog.V(2).Infof("%s scaleOp: %v", queueName, scaleOpString(op)) // Finally, we update the status block of the WorkerPodAutoScaler resource to reflect the // current state of the world updateWorkerPodAutoScalerStatus( ctx, name, namespace, c.customclientset, desiredWorkers, workerPodAutoScaler, currentWorkers, availableWorkers, queueMessages, lastScaleTime, ) loopDurationSeconds.WithLabelValues( name, namespace, ).Set(time.Since(now).Seconds()) loopCountSuccess.WithLabelValues( name, namespace, ).Inc() // TODO: organize and log events // c.recorder.Event(workerPodAutoScaler, corev1.EventTypeNormal, SuccessSynced, MessageResourceSynced) return nil } // updateDeployment updates the Deployment with the desired number of replicas func (c *Controller) updateDeployment(ctx context.Context, namespace string, deploymentName string, replicas *int32) { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { // Retrieve the latest version of the Deployment before attempting update deployment, getErr := c.deploymentLister.Deployments(namespace).Get(deploymentName) if errors.IsNotFound(getErr) { return fmt.Errorf("deployment %s was not found in namespace %s", deploymentName, namespace) } if getErr != nil { klog.Fatalf("Failed to get deployment: %v", getErr) } deployment.Spec.Replicas = replicas _, updateErr := c.kubeclientset.AppsV1().Deployments(namespace).Update(ctx, deployment, metav1.UpdateOptions{}) if updateErr != nil { klog.Errorf("Failed to update deployment: %v", updateErr) } return updateErr }) if retryErr != nil { klog.Fatalf("Failed to update deployment (retry failed): %v", retryErr) } } // updateReplicaSet updates the ReplicaSet with the desired number of replicas func (c *Controller) updateReplicaSet(ctx context.Context, namespace string, replicaSetName string, replicas *int32) { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { // Retrieve the latest version of the ReplicaSet before attempting update replicaSet, getErr := c.replicaSetLister.ReplicaSets(namespace).Get(replicaSetName) if errors.IsNotFound(getErr) { return fmt.Errorf("ReplicaSet %s was not found in namespace %s", replicaSetName, namespace) } if getErr != nil { klog.Fatalf("Failed to get ReplicaSet: %v", getErr) } replicaSet.Spec.Replicas = replicas _, updateErr := c.kubeclientset.AppsV1().ReplicaSets(namespace).Update(ctx, replicaSet, metav1.UpdateOptions{}) if updateErr != nil { klog.Errorf("Failed to update ReplicaSet: %v", updateErr) } return updateErr }) if retryErr != nil { klog.Fatalf("Failed to update ReplicaSet (retry failed): %v", retryErr) } } // getMaxDisruptableWorkers gets the maximum number of workers that can // be scaled down in the single scale down activity. 
func getMaxDisruptableWorkers( maxDisruption *string, currentWorkers int32) int32 { if maxDisruption == nil { klog.Fatalf("maxDisruption default is not being set. Exiting") } maxDisruptionIntOrStr := intstr.Parse(*maxDisruption) maxDisruptableWorkers, err := intstr.GetValueFromIntOrPercent( &maxDisruptionIntOrStr, int(currentWorkers), true, ) if err != nil { klog.Fatalf("Error calculating maxDisruptable workers, err: %v", err) } return int32(maxDisruptableWorkers) } // getMinWorkers gets the min workers based on the // velocity metric: messagesSentPerMinute func getMinWorkers( messagesSentPerMinute float64, minWorkers int32, secondsToProcessOneJob float64) int32 { // disable this feature for WPA queues which have not specified // processing time if secondsToProcessOneJob == 0.0 { return minWorkers } workersBasedOnMessagesSent := int32(math.Ceil((secondsToProcessOneJob * messagesSentPerMinute) / 60)) klog.V(4).Infof("%v, workersBasedOnMessagesSent=%v\n", secondsToProcessOneJob, workersBasedOnMessagesSent) if workersBasedOnMessagesSent > minWorkers { return workersBasedOnMessagesSent } return minWorkers } func isChangeTooSmall(desired int32, current int32, tolerance float64) bool { return math.Abs(float64(desired-current))/float64(current) <= tolerance } // GetDesiredWorkers finds the desired number of workers which are required func GetDesiredWorkers( queueName string, queueMessages int32, messagesSentPerMinute float64, secondsToProcessOneJob float64, targetMessagesPerWorker int32, currentWorkers int32, idleWorkers int32, minWorkers int32, maxWorkers int32, maxDisruption *string) int32 { klog.V(4).Infof("%s min=%v, max=%v, targetBacklog=%v \n", queueName, minWorkers, maxWorkers, targetMessagesPerWorker) // overwrite the minimum workers needed based on // messagesSentPerMinute and secondsToProcessOneJob // this feature is disabled if secondsToProcessOneJob is not set or is 0.0 minWorkers = getMinWorkers( messagesSentPerMinute, minWorkers, secondsToProcessOneJob, ) // gets the maximum number of workers that can be scaled down in a // single scale down activity. maxDisruptableWorkers := getMaxDisruptableWorkers( maxDisruption, currentWorkers, ) tolerance := 0.1 desiredWorkers := int32(math.Ceil( float64(queueMessages) / float64(targetMessagesPerWorker)), ) klog.V(4).Infof("%s qMsgs=%v, qMsgsPerMin=%v \n", queueName, queueMessages, messagesSentPerMinute) klog.V(4).Infof("%s secToProcessJob=%v, maxDisruption=%v \n", queueName, secondsToProcessOneJob, *maxDisruption) klog.V(4).Infof("%s current=%v, idle=%v \n", queueName, currentWorkers, idleWorkers) klog.V(3).Infof("%s minComputed=%v, maxDisruptable=%v\n", queueName, minWorkers, maxDisruptableWorkers) if currentWorkers == 0 { return convertDesiredReplicasWithRules( currentWorkers, desiredWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } if queueMessages > 0 { if isChangeTooSmall(desiredWorkers, currentWorkers, tolerance) { // desired is same as current in this scenario return convertDesiredReplicasWithRules( currentWorkers, currentWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } return convertDesiredReplicasWithRules( currentWorkers, desiredWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } else if messagesSentPerMinute > 0 && secondsToProcessOneJob > 0.0 { // this is the case in which there is no backlog visible. // (mostly because the workers picks up jobs very quickly) // But the queue has throughput, so we return the minWorkers. 
// Note: minWorkers is updated based on // messagesSentPerMinute and secondsToProcessOneJob // desried is the minReplicas in this scenario return convertDesiredReplicasWithRules( currentWorkers, minWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } // Attempt for massive scale down if currentWorkers == idleWorkers { desiredWorkers := int32(0) // for massive scale down to happen maxDisruptableWorkers // should be ignored return convertDesiredReplicasWithRules( currentWorkers, desiredWorkers, minWorkers, maxWorkers, currentWorkers, ) } // Attempt partial scale down since there is no backlog or in-processing // messages. return convertDesiredReplicasWithRules( currentWorkers, minWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } func convertDesiredReplicasWithRules( current int32, desired int32, min int32, max int32, maxDisruptable int32) int32 { if min >= max { return max } if (current - desired) > maxDisruptable { desired = current - maxDisruptable } if desired > max { return max } if desired < min { return min } return desired } func updateWorkerPodAutoScalerStatus( ctx context.Context, name string, namespace string, customclientset clientset.Interface, desiredWorkers int32, workerPodAutoScaler *v1.WorkerPodAutoScaler, currentWorkers int32, availableWorkers int32, queueMessages int32, lastScaleTime *metav1.Time) { if workerPodAutoScaler.Status.CurrentReplicas == currentWorkers && workerPodAutoScaler.Status.AvailableReplicas == availableWorkers && workerPodAutoScaler.Status.DesiredReplicas == desiredWorkers && workerPodAutoScaler.Status.CurrentMessages == queueMessages && workerPodAutoScaler.Status.LastScaleTime.Equal(lastScaleTime) { klog.V(4).Infof("%s/%s: WPA status is already up to date\n", namespace, name) return } else { klog.V(4).Infof("%s/%s: Updating wpa status\n", namespace, name) } // NEVER modify objects from the store. It's a read-only, local cache. // You can use DeepCopy() to make a deep copy of original object and modify this copy // Or create a copy manually for better performance workerPodAutoScalerCopy := workerPodAutoScaler.DeepCopy() workerPodAutoScalerCopy.Status.CurrentReplicas = currentWorkers workerPodAutoScalerCopy.Status.AvailableReplicas = availableWorkers workerPodAutoScalerCopy.Status.DesiredReplicas = desiredWorkers workerPodAutoScalerCopy.Status.CurrentMessages = queueMessages workerPodAutoScalerCopy.Status.LastScaleTime = lastScaleTime // If the CustomResourceSubresources feature gate is not enabled, // we must use Update instead of UpdateStatus to update the Status block of the WorkerPodAutoScaler resource. // UpdateStatus will not allow changes to the Spec of the resource, // which is ideal for ensuring nothing other than resource status has been updated. _, err := customclientset.K8sV1().WorkerPodAutoScalers(workerPodAutoScaler.Namespace).UpdateStatus(ctx, workerPodAutoScalerCopy, metav1.UpdateOptions{}) if err != nil { klog.Errorf("Error updating wpa status, err: %v", err) return } klog.V(4).Infof("%s/%s: Updated wpa status\n", namespace, name) } // getKeyForWorkerPodAutoScaler takes a WorkerPodAutoScaler resource and converts it into a namespace/name // string which is then put onto the work queue. This method should *not* be // passed resources of any type other than WorkerPodAutoScaler. 
func (c *Controller) getKeyForWorkerPodAutoScaler(obj interface{}) string { var key string var err error if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { utilruntime.HandleError(err) return "" } return key } func (c *Controller) enqueueAddWorkerPodAutoScaler(obj interface{}) { c.workqueue.Add(WokerPodAutoScalerEvent{ key: c.getKeyForWorkerPodAutoScaler(obj), name: WokerPodAutoScalerEventAdd, }) } func (c *Controller) enqueueUpdateWorkerPodAutoScaler(obj interface{}) { c.workqueue.Add(WokerPodAutoScalerEvent{ key: c.getKeyForWorkerPodAutoScaler(obj), name: WokerPodAutoScalerEventUpdate, }) } func (c *Controller) enqueueDeleteWorkerPodAutoScaler(obj interface{}) { c.workqueue.Add(WokerPodAutoScalerEvent{ key: c.getKeyForWorkerPodAutoScaler(obj), name: WokerPodAutoScalerEventDelete, }) }
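To make the scaling math in GetDesiredWorkers easier to follow, here is a small, hypothetical usage sketch (test-style, package-internal). The queue name and every number below are invented for illustration and are not taken from the source; it shows how targetMessagesPerWorker sets the raw desired count and how the maxDisruption cap in convertDesiredReplicasWithRules limits a single scale-down step.

package controller

import "fmt"

// ExampleGetDesiredWorkers uses hypothetical values to illustrate the
// backlog-based calculation plus the maxDisruption cap.
func ExampleGetDesiredWorkers() {
	maxDisruption := "50%" // at most half of current workers may be removed per scale-down

	desired := GetDesiredWorkers(
		"example-queue", // queueName (hypothetical)
		300,             // queueMessages: visible backlog
		0,               // messagesSentPerMinute
		0,               // secondsToProcessOneJob (velocity feature disabled)
		100,             // targetMessagesPerWorker
		10,              // currentWorkers
		0,               // idleWorkers
		1,               // minReplicas
		20,              // maxReplicas
		&maxDisruption,
	)

	// Raw desired = ceil(300/100) = 3, but dropping from 10 to 3 would
	// disrupt 7 workers; 50% of 10 caps the step at 5, so this prints 5.
	fmt.Println(desired)
}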
).Set(float64(idleWorkers)) workersCurrent.WithLabelValues( name, namespace, queueName,
random_line_split
controller.go
package controller import ( "context" "fmt" "math" "time" "github.com/practo/klog/v2" "github.com/prometheus/client_golang/prometheus" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" appsinformers "k8s.io/client-go/informers/apps/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" appslisters "k8s.io/client-go/listers/apps/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" v1 "github.com/practo/k8s-worker-pod-autoscaler/pkg/apis/workerpodautoscaler/v1" clientset "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/clientset/versioned" samplescheme "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/clientset/versioned/scheme" informers "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/informers/externalversions/workerpodautoscaler/v1" listers "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/listers/workerpodautoscaler/v1" queue "github.com/practo/k8s-worker-pod-autoscaler/pkg/queue" ) const controllerAgentName = "workerpodautoscaler-controller" const ( // SuccessSynced is used as part of the Event 'reason' when a WorkerPodAutoScaler is synced SuccessSynced = "Synced" // ErrResourceExists is used as part of the Event 'reason' when a WorkerPodAutoScaler fails // to sync due to a Deployment of the same name already existing. ErrResourceExists = "ErrResourceExists" // MessageResourceExists is the message used for Events when a resource // fails to sync due to a Deployment already existing MessageResourceExists = "Resource %q already exists and is not managed by WorkerPodAutoScaler" // MessageResourceSynced is the message used for an Event fired when a WorkerPodAutoScaler // is synced successfully MessageResourceSynced = "WorkerPodAutoScaler synced successfully" // WokerPodAutoScalerEventAdd stores the add event name WokerPodAutoScalerEventAdd = "add" // WokerPodAutoScalerEventUpdate stores the add event name WokerPodAutoScalerEventUpdate = "update" // WokerPodAutoScalerEventDelete stores the add event name WokerPodAutoScalerEventDelete = "delete" ) var ( loopDurationSeconds = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "controller", Name: "loop_duration_seconds", Help: "Number of seconds to complete the control loop successfully, partitioned by wpa name and namespace", }, []string{"workerpodautoscaler", "namespace"}, ) loopCountSuccess = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "wpa", Subsystem: "controller", Name: "loop_count_success", Help: "How many times the control loop executed successfully, partitioned by wpa name and namespace", }, []string{"workerpodautoscaler", "namespace"}, ) qMsgs = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "queue", Name: "messages", Help: "Number of unprocessed messages in the queue", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) qMsgsSPM = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "queue", Name: "messages_sent_per_minute", Help: "Number of messages sent to the queue per minute", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersIdle = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "idle", Help: 
"Number of idle workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersCurrent = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "current", Help: "Number of current workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersDesired = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "desired", Help: "Number of desired workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersAvailable = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "available", Help: "Number of available workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) ) func init() { prometheus.MustRegister(loopDurationSeconds) prometheus.MustRegister(loopCountSuccess) prometheus.MustRegister(qMsgs) prometheus.MustRegister(qMsgsSPM) prometheus.MustRegister(workersIdle) prometheus.MustRegister(workersCurrent) prometheus.MustRegister(workersDesired) prometheus.MustRegister(workersAvailable) } type WokerPodAutoScalerEvent struct { key string name string } // Controller is the controller implementation for WorkerPodAutoScaler resources type Controller struct { ctx context.Context // kubeclientset is a standard kubernetes clientset kubeclientset kubernetes.Interface // customclientset is a clientset for our own API group customclientset clientset.Interface deploymentLister appslisters.DeploymentLister deploymentsSynced cache.InformerSynced replicaSetLister appslisters.ReplicaSetLister replicaSetsSynced cache.InformerSynced workerPodAutoScalersLister listers.WorkerPodAutoScalerLister workerPodAutoScalersSynced cache.InformerSynced // workqueue is a rate limited work queue. This is used to queue work to be // processed instead of performing it as soon as a change happens. This // means we can ensure we only process a fixed amount of resources at a // time, and makes it easy to ensure we are never processing the same item // simultaneously in two different workers. workqueue workqueue.RateLimitingInterface // recorder is an event recorder for recording Event resources to the // Kubernetes API. recorder record.EventRecorder // defaultMaxDisruption // it is the default value for the maxDisruption in the WPA spec. // This specifies how much percentage of pods can be disrupted in a // single scale down acitivity. // Can be expressed as integers or as a percentage. defaultMaxDisruption string // QueueList keeps the list of all the queues in memeory // which is used by the core controller and the sqs exporter // scaleDownDelay after last scale up // the no of seconds to wait after the last scale up before scaling down scaleDownDelay time.Duration Queues *queue.Queues } // NewController returns a new sample controller func NewController( ctx context.Context, kubeclientset kubernetes.Interface, customclientset clientset.Interface, deploymentInformer appsinformers.DeploymentInformer, replicaSetInformer appsinformers.ReplicaSetInformer, workerPodAutoScalerInformer informers.WorkerPodAutoScalerInformer, defaultMaxDisruption string, resyncPeriod time.Duration, scaleDownDelay time.Duration, queues *queue.Queues) *Controller { // Create event broadcaster // Add sample-controller types to the default Kubernetes Scheme so Events can be // logged for sample-controller types. 
utilruntime.Must(samplescheme.AddToScheme(scheme.Scheme)) klog.V(4).Info("Creating event broadcaster") eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) controller := &Controller{ ctx: ctx, kubeclientset: kubeclientset, customclientset: customclientset, deploymentLister: deploymentInformer.Lister(), deploymentsSynced: deploymentInformer.Informer().HasSynced, replicaSetLister: replicaSetInformer.Lister(), replicaSetsSynced: replicaSetInformer.Informer().HasSynced, workerPodAutoScalersLister: workerPodAutoScalerInformer.Lister(), workerPodAutoScalersSynced: workerPodAutoScalerInformer.Informer().HasSynced, workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkerPodAutoScalers"), recorder: recorder, defaultMaxDisruption: defaultMaxDisruption, scaleDownDelay: scaleDownDelay, Queues: queues, } klog.V(4).Info("Setting up event handlers") // Set up an event handler for when WorkerPodAutoScaler resources change workerPodAutoScalerInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ AddFunc: controller.enqueueAddWorkerPodAutoScaler, UpdateFunc: func(old, new interface{}) { controller.enqueueUpdateWorkerPodAutoScaler(new) }, DeleteFunc: controller.enqueueDeleteWorkerPodAutoScaler, }, resyncPeriod) return controller } // Run will set up the event handlers for types we are interested in, as well // as syncing informer caches and starting workers. It will block until stopCh // is closed, at which point it will shutdown the workqueue and wait for // workers to finish processing their current work items. func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { defer utilruntime.HandleCrash() defer c.workqueue.ShutDown() // Start the informer factories to begin populating the informer caches klog.V(1).Info("Starting WorkerPodAutoScaler controller") // Wait for the caches to be synced before starting workers klog.V(1).Info("Waiting for informer caches to sync") if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.workerPodAutoScalersSynced); !ok { return fmt.Errorf("failed to wait for caches to sync") } klog.V(1).Info("Starting workers") // Launch two workers to process WorkerPodAutoScaler resources for i := 0; i < threadiness; i++ { // TOOD: move from stopCh to context, use: UntilWithContext() go wait.Until(c.runWorker, time.Second, stopCh) } <-stopCh klog.V(1).Info("Shutting down workers") return nil } // runWorker is a long-running function that will continually call the // processNextWorkItem function in order to read and process a message on the // workqueue. func (c *Controller) runWorker() { for c.processNextWorkItem(c.ctx) { } } // processNextWorkItem will read a single work item off the workqueue and // attempt to process it, by calling the syncHandler. func (c *Controller) processNextWorkItem(ctx context.Context) bool { obj, shutdown := c.workqueue.Get() if shutdown { return false } // We wrap this block in a func so we can defer c.workqueue.Done. err := func(obj interface{}) error { // We call Done here so the workqueue knows we have finished // processing this item. We also must remember to call Forget if we // do not want this work item being re-queued. 
For example, we do // not call Forget if a transient error occurs, instead the item is // put back on the workqueue and attempted again after a back-off // period. defer c.workqueue.Done(obj) var ok bool // We expect strings to come off the workqueue. These are of the // form namespace/name. We do this as the delayed nature of the // workqueue means the items in the informer cache may actually be // more up to date that when the item was initially put onto the // workqueue.(PS: not anymore, its an WPA event) event, ok := obj.(WokerPodAutoScalerEvent) if !ok { // As the item in the workqueue is actually invalid, we call // Forget here else we'd go into a loop of attempting to // process a work item that is invalid. c.workqueue.Forget(obj) utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) return nil } // Run the syncHandler, passing it the namespace/name string of the // WorkerPodAutoScaler resource to be synced. if err := c.syncHandler(ctx, event); err != nil { // Put the item back on the workqueue to handle any transient errors. c.workqueue.AddRateLimited(event) return fmt.Errorf("error syncing '%s': %s, requeuing", event, err.Error()) } // Finally, if no error occurs we Forget this item so it does not // get queued again until another change happens. c.workqueue.Forget(obj) return nil }(obj) if err != nil { utilruntime.HandleError(err) return true } return true } // syncHandler compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the WorkerPodAutoScaler resource // with the current status of the resource. func (c *Controller) syncHandler(ctx context.Context, event WokerPodAutoScalerEvent) error { now := time.Now() key := event.key // Convert the namespace/name string into a distinct namespace and name namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key)) return nil } // Get the WorkerPodAutoScaler resource with this namespace/name workerPodAutoScaler, err := c.workerPodAutoScalersLister.WorkerPodAutoScalers(namespace).Get(name) if err != nil { // The WorkerPodAutoScaler resource may no longer exist, in which case we stop processing. if errors.IsNotFound(err) { utilruntime.HandleError(fmt.Errorf("workerPodAutoScaler '%s' in work queue no longer exists", key)) c.Queues.Delete(namespace, name) return nil } return err } var currentWorkers, availableWorkers int32 deploymentName := workerPodAutoScaler.Spec.DeploymentName replicaSetName := workerPodAutoScaler.Spec.ReplicaSetName if deploymentName != "" { // Get the Deployment with the name specified in WorkerPodAutoScaler.spec deployment, err := c.deploymentLister.Deployments(workerPodAutoScaler.Namespace).Get(deploymentName) if errors.IsNotFound(err)
else if err != nil { return err } currentWorkers = *deployment.Spec.Replicas availableWorkers = deployment.Status.AvailableReplicas } else if replicaSetName != "" { // Get the ReplicaSet with the name specified in WorkerPodAutoScaler.spec replicaSet, err := c.replicaSetLister.ReplicaSets(workerPodAutoScaler.Namespace).Get(replicaSetName) if errors.IsNotFound(err) { return fmt.Errorf("ReplicaSet %s not found in namespace %s", replicaSetName, workerPodAutoScaler.Namespace) } else if err != nil { return err } currentWorkers = *replicaSet.Spec.Replicas availableWorkers = replicaSet.Status.AvailableReplicas } else { // We choose to absorb the error here as the worker would requeue the // resource otherwise. Instead, the next time the resource is updated // the resource will be queued again. utilruntime.HandleError(fmt.Errorf("%s: deployment or replicaset name must be specified", key)) return nil } var secondsToProcessOneJob float64 if workerPodAutoScaler.Spec.SecondsToProcessOneJob != nil { secondsToProcessOneJob = *workerPodAutoScaler.Spec.SecondsToProcessOneJob } switch event.name { case WokerPodAutoScalerEventAdd: err = c.Queues.Add( namespace, name, workerPodAutoScaler.Spec.QueueURI, currentWorkers, secondsToProcessOneJob, ) case WokerPodAutoScalerEventUpdate: err = c.Queues.Add( namespace, name, workerPodAutoScaler.Spec.QueueURI, currentWorkers, secondsToProcessOneJob, ) case WokerPodAutoScalerEventDelete: err = c.Queues.Delete(namespace, name) } if err != nil { utilruntime.HandleError(fmt.Errorf("unable to sync queue: %s", err.Error())) return err } queueName, queueMessages, messagesSentPerMinute, idleWorkers := c.Queues.GetQueueInfo( namespace, name) if queueName == "" { return nil } if queueMessages == queue.UnsyncedQueueMessageCount { klog.Warningf( "%s qMsgs: %d, q not initialized, waiting for init to complete", queueName, queueMessages, ) return nil } desiredWorkers := GetDesiredWorkers( queueName, queueMessages, messagesSentPerMinute, secondsToProcessOneJob, *workerPodAutoScaler.Spec.TargetMessagesPerWorker, currentWorkers, idleWorkers, *workerPodAutoScaler.Spec.MinReplicas, *workerPodAutoScaler.Spec.MaxReplicas, workerPodAutoScaler.GetMaxDisruption(c.defaultMaxDisruption), ) klog.V(2).Infof("%s current: %d", queueName, currentWorkers) klog.V(2).Infof("%s qMsgs: %d, desired: %d", queueName, queueMessages, desiredWorkers) // set metrics qMsgs.WithLabelValues( name, namespace, queueName, ).Set(float64(queueMessages)) qMsgsSPM.WithLabelValues( name, namespace, queueName, ).Set(messagesSentPerMinute) workersIdle.WithLabelValues( name, namespace, queueName, ).Set(float64(idleWorkers)) workersCurrent.WithLabelValues( name, namespace, queueName, ).Set(float64(currentWorkers)) workersDesired.WithLabelValues( name, namespace, queueName, ).Set(float64(desiredWorkers)) workersAvailable.WithLabelValues( name, namespace, queueName, ).Set(float64(availableWorkers)) lastScaleTime := workerPodAutoScaler.Status.LastScaleTime.DeepCopy() op := GetScaleOperation( queueName, desiredWorkers, currentWorkers, lastScaleTime, c.scaleDownDelay, ) if op == ScaleUp || op == ScaleDown { if deploymentName != "" { c.updateDeployment( ctx, workerPodAutoScaler.Namespace, deploymentName, &desiredWorkers) } else { c.updateReplicaSet( ctx, workerPodAutoScaler.Namespace, replicaSetName, &desiredWorkers) } now := metav1.Now() lastScaleTime = &now } klog.V(2).Infof("%s scaleOp: %v", queueName, scaleOpString(op)) // Finally, we update the status block of the WorkerPodAutoScaler resource to reflect the // current state of 
the world updateWorkerPodAutoScalerStatus( ctx, name, namespace, c.customclientset, desiredWorkers, workerPodAutoScaler, currentWorkers, availableWorkers, queueMessages, lastScaleTime, ) loopDurationSeconds.WithLabelValues( name, namespace, ).Set(time.Since(now).Seconds()) loopCountSuccess.WithLabelValues( name, namespace, ).Inc() // TODO: organize and log events // c.recorder.Event(workerPodAutoScaler, corev1.EventTypeNormal, SuccessSynced, MessageResourceSynced) return nil } // updateDeployment updates the Deployment with the desired number of replicas func (c *Controller) updateDeployment(ctx context.Context, namespace string, deploymentName string, replicas *int32) { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { // Retrieve the latest version of the Deployment before attempting update deployment, getErr := c.deploymentLister.Deployments(namespace).Get(deploymentName) if errors.IsNotFound(getErr) { return fmt.Errorf("deployment %s was not found in namespace %s", deploymentName, namespace) } if getErr != nil { klog.Fatalf("Failed to get deployment: %v", getErr) } deployment.Spec.Replicas = replicas _, updateErr := c.kubeclientset.AppsV1().Deployments(namespace).Update(ctx, deployment, metav1.UpdateOptions{}) if updateErr != nil { klog.Errorf("Failed to update deployment: %v", updateErr) } return updateErr }) if retryErr != nil { klog.Fatalf("Failed to update deployment (retry failed): %v", retryErr) } } // updateReplicaSet updates the ReplicaSet with the desired number of replicas func (c *Controller) updateReplicaSet(ctx context.Context, namespace string, replicaSetName string, replicas *int32) { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { // Retrieve the latest version of the ReplicaSet before attempting update replicaSet, getErr := c.replicaSetLister.ReplicaSets(namespace).Get(replicaSetName) if errors.IsNotFound(getErr) { return fmt.Errorf("ReplicaSet %s was not found in namespace %s", replicaSetName, namespace) } if getErr != nil { klog.Fatalf("Failed to get ReplicaSet: %v", getErr) } replicaSet.Spec.Replicas = replicas _, updateErr := c.kubeclientset.AppsV1().ReplicaSets(namespace).Update(ctx, replicaSet, metav1.UpdateOptions{}) if updateErr != nil { klog.Errorf("Failed to update ReplicaSet: %v", updateErr) } return updateErr }) if retryErr != nil { klog.Fatalf("Failed to update ReplicaSet (retry failed): %v", retryErr) } } // getMaxDisruptableWorkers gets the maximum number of workers that can // be scaled down in the single scale down activity. func getMaxDisruptableWorkers( maxDisruption *string, currentWorkers int32) int32 { if maxDisruption == nil { klog.Fatalf("maxDisruption default is not being set. 
Exiting") } maxDisruptionIntOrStr := intstr.Parse(*maxDisruption) maxDisruptableWorkers, err := intstr.GetValueFromIntOrPercent( &maxDisruptionIntOrStr, int(currentWorkers), true, ) if err != nil { klog.Fatalf("Error calculating maxDisruptable workers, err: %v", err) } return int32(maxDisruptableWorkers) } // getMinWorkers gets the min workers based on the // velocity metric: messagesSentPerMinute func getMinWorkers( messagesSentPerMinute float64, minWorkers int32, secondsToProcessOneJob float64) int32 { // disable this feature for WPA queues which have not specified // processing time if secondsToProcessOneJob == 0.0 { return minWorkers } workersBasedOnMessagesSent := int32(math.Ceil((secondsToProcessOneJob * messagesSentPerMinute) / 60)) klog.V(4).Infof("%v, workersBasedOnMessagesSent=%v\n", secondsToProcessOneJob, workersBasedOnMessagesSent) if workersBasedOnMessagesSent > minWorkers { return workersBasedOnMessagesSent } return minWorkers } func isChangeTooSmall(desired int32, current int32, tolerance float64) bool { return math.Abs(float64(desired-current))/float64(current) <= tolerance } // GetDesiredWorkers finds the desired number of workers which are required func GetDesiredWorkers( queueName string, queueMessages int32, messagesSentPerMinute float64, secondsToProcessOneJob float64, targetMessagesPerWorker int32, currentWorkers int32, idleWorkers int32, minWorkers int32, maxWorkers int32, maxDisruption *string) int32 { klog.V(4).Infof("%s min=%v, max=%v, targetBacklog=%v \n", queueName, minWorkers, maxWorkers, targetMessagesPerWorker) // overwrite the minimum workers needed based on // messagesSentPerMinute and secondsToProcessOneJob // this feature is disabled if secondsToProcessOneJob is not set or is 0.0 minWorkers = getMinWorkers( messagesSentPerMinute, minWorkers, secondsToProcessOneJob, ) // gets the maximum number of workers that can be scaled down in a // single scale down activity. maxDisruptableWorkers := getMaxDisruptableWorkers( maxDisruption, currentWorkers, ) tolerance := 0.1 desiredWorkers := int32(math.Ceil( float64(queueMessages) / float64(targetMessagesPerWorker)), ) klog.V(4).Infof("%s qMsgs=%v, qMsgsPerMin=%v \n", queueName, queueMessages, messagesSentPerMinute) klog.V(4).Infof("%s secToProcessJob=%v, maxDisruption=%v \n", queueName, secondsToProcessOneJob, *maxDisruption) klog.V(4).Infof("%s current=%v, idle=%v \n", queueName, currentWorkers, idleWorkers) klog.V(3).Infof("%s minComputed=%v, maxDisruptable=%v\n", queueName, minWorkers, maxDisruptableWorkers) if currentWorkers == 0 { return convertDesiredReplicasWithRules( currentWorkers, desiredWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } if queueMessages > 0 { if isChangeTooSmall(desiredWorkers, currentWorkers, tolerance) { // desired is same as current in this scenario return convertDesiredReplicasWithRules( currentWorkers, currentWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } return convertDesiredReplicasWithRules( currentWorkers, desiredWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } else if messagesSentPerMinute > 0 && secondsToProcessOneJob > 0.0 { // this is the case in which there is no backlog visible. // (mostly because the workers picks up jobs very quickly) // But the queue has throughput, so we return the minWorkers. 
// Note: minWorkers is updated based on // messagesSentPerMinute and secondsToProcessOneJob // desried is the minReplicas in this scenario return convertDesiredReplicasWithRules( currentWorkers, minWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } // Attempt for massive scale down if currentWorkers == idleWorkers { desiredWorkers := int32(0) // for massive scale down to happen maxDisruptableWorkers // should be ignored return convertDesiredReplicasWithRules( currentWorkers, desiredWorkers, minWorkers, maxWorkers, currentWorkers, ) } // Attempt partial scale down since there is no backlog or in-processing // messages. return convertDesiredReplicasWithRules( currentWorkers, minWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } func convertDesiredReplicasWithRules( current int32, desired int32, min int32, max int32, maxDisruptable int32) int32 { if min >= max { return max } if (current - desired) > maxDisruptable { desired = current - maxDisruptable } if desired > max { return max } if desired < min { return min } return desired } func updateWorkerPodAutoScalerStatus( ctx context.Context, name string, namespace string, customclientset clientset.Interface, desiredWorkers int32, workerPodAutoScaler *v1.WorkerPodAutoScaler, currentWorkers int32, availableWorkers int32, queueMessages int32, lastScaleTime *metav1.Time) { if workerPodAutoScaler.Status.CurrentReplicas == currentWorkers && workerPodAutoScaler.Status.AvailableReplicas == availableWorkers && workerPodAutoScaler.Status.DesiredReplicas == desiredWorkers && workerPodAutoScaler.Status.CurrentMessages == queueMessages && workerPodAutoScaler.Status.LastScaleTime.Equal(lastScaleTime) { klog.V(4).Infof("%s/%s: WPA status is already up to date\n", namespace, name) return } else { klog.V(4).Infof("%s/%s: Updating wpa status\n", namespace, name) } // NEVER modify objects from the store. It's a read-only, local cache. // You can use DeepCopy() to make a deep copy of original object and modify this copy // Or create a copy manually for better performance workerPodAutoScalerCopy := workerPodAutoScaler.DeepCopy() workerPodAutoScalerCopy.Status.CurrentReplicas = currentWorkers workerPodAutoScalerCopy.Status.AvailableReplicas = availableWorkers workerPodAutoScalerCopy.Status.DesiredReplicas = desiredWorkers workerPodAutoScalerCopy.Status.CurrentMessages = queueMessages workerPodAutoScalerCopy.Status.LastScaleTime = lastScaleTime // If the CustomResourceSubresources feature gate is not enabled, // we must use Update instead of UpdateStatus to update the Status block of the WorkerPodAutoScaler resource. // UpdateStatus will not allow changes to the Spec of the resource, // which is ideal for ensuring nothing other than resource status has been updated. _, err := customclientset.K8sV1().WorkerPodAutoScalers(workerPodAutoScaler.Namespace).UpdateStatus(ctx, workerPodAutoScalerCopy, metav1.UpdateOptions{}) if err != nil { klog.Errorf("Error updating wpa status, err: %v", err) return } klog.V(4).Infof("%s/%s: Updated wpa status\n", namespace, name) } // getKeyForWorkerPodAutoScaler takes a WorkerPodAutoScaler resource and converts it into a namespace/name // string which is then put onto the work queue. This method should *not* be // passed resources of any type other than WorkerPodAutoScaler. 
func (c *Controller) getKeyForWorkerPodAutoScaler(obj interface{}) string { var key string var err error if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { utilruntime.HandleError(err) return "" } return key } func (c *Controller) enqueueAddWorkerPodAutoScaler(obj interface{}) { c.workqueue.Add(WokerPodAutoScalerEvent{ key: c.getKeyForWorkerPodAutoScaler(obj), name: WokerPodAutoScalerEventAdd, }) } func (c *Controller) enqueueUpdateWorkerPodAutoScaler(obj interface{}) { c.workqueue.Add(WokerPodAutoScalerEvent{ key: c.getKeyForWorkerPodAutoScaler(obj), name: WokerPodAutoScalerEventUpdate, }) } func (c *Controller) enqueueDeleteWorkerPodAutoScaler(obj interface{}) { c.workqueue.Add(WokerPodAutoScalerEvent{ key: c.getKeyForWorkerPodAutoScaler(obj), name: WokerPodAutoScalerEventDelete, }) }
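A side note on getMaxDisruptableWorkers: maxDisruption is parsed with the k8s.io/apimachinery intstr helpers, so it can be an absolute count ("3") or a percentage of the current workers ("20%"). The sketch below mirrors that parsing under assumed inputs; the helper name and values are illustrative only and do not appear in the source.

package controller

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// sketchMaxDisruptable is a hypothetical helper showing how a maxDisruption
// string resolves against the current worker count.
func sketchMaxDisruptable(maxDisruption string, currentWorkers int) {
	v := intstr.Parse(maxDisruption)
	// roundUp=true matches the controller's call above
	n, err := intstr.GetValueFromIntOrPercent(&v, currentWorkers, true)
	if err != nil {
		fmt.Println("invalid maxDisruption:", err)
		return
	}
	fmt.Printf("maxDisruption=%q currentWorkers=%d -> maxDisruptable=%d\n",
		maxDisruption, currentWorkers, n)
}

// sketchMaxDisruptable("20%", 10) -> 2
// sketchMaxDisruptable("3", 10)   -> 3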
{ return fmt.Errorf("deployment %s not found in namespace %s", deploymentName, workerPodAutoScaler.Namespace) }
conditional_block
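The velocity-based floor computed by getMinWorkers is simply workers = ceil(secondsToProcessOneJob * messagesSentPerMinute / 60). A quick check under invented numbers (purely illustrative, not from the source):

package controller

import "fmt"

// sketchMinWorkers exercises getMinWorkers from this package with
// hypothetical inputs.
func sketchMinWorkers() {
	// 600 messages/min at 2s per job -> ceil(1200/60) = 20, which overrides minWorkers=5
	fmt.Println(getMinWorkers(600, 5, 2.0))
	// secondsToProcessOneJob == 0 disables the feature -> stays at minWorkers=5
	fmt.Println(getMinWorkers(600, 5, 0))
}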
controller.go
package controller import ( "context" "fmt" "math" "time" "github.com/practo/klog/v2" "github.com/prometheus/client_golang/prometheus" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" appsinformers "k8s.io/client-go/informers/apps/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" appslisters "k8s.io/client-go/listers/apps/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" v1 "github.com/practo/k8s-worker-pod-autoscaler/pkg/apis/workerpodautoscaler/v1" clientset "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/clientset/versioned" samplescheme "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/clientset/versioned/scheme" informers "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/informers/externalversions/workerpodautoscaler/v1" listers "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/listers/workerpodautoscaler/v1" queue "github.com/practo/k8s-worker-pod-autoscaler/pkg/queue" ) const controllerAgentName = "workerpodautoscaler-controller" const ( // SuccessSynced is used as part of the Event 'reason' when a WorkerPodAutoScaler is synced SuccessSynced = "Synced" // ErrResourceExists is used as part of the Event 'reason' when a WorkerPodAutoScaler fails // to sync due to a Deployment of the same name already existing. ErrResourceExists = "ErrResourceExists" // MessageResourceExists is the message used for Events when a resource // fails to sync due to a Deployment already existing MessageResourceExists = "Resource %q already exists and is not managed by WorkerPodAutoScaler" // MessageResourceSynced is the message used for an Event fired when a WorkerPodAutoScaler // is synced successfully MessageResourceSynced = "WorkerPodAutoScaler synced successfully" // WokerPodAutoScalerEventAdd stores the add event name WokerPodAutoScalerEventAdd = "add" // WokerPodAutoScalerEventUpdate stores the add event name WokerPodAutoScalerEventUpdate = "update" // WokerPodAutoScalerEventDelete stores the add event name WokerPodAutoScalerEventDelete = "delete" ) var ( loopDurationSeconds = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "controller", Name: "loop_duration_seconds", Help: "Number of seconds to complete the control loop successfully, partitioned by wpa name and namespace", }, []string{"workerpodautoscaler", "namespace"}, ) loopCountSuccess = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "wpa", Subsystem: "controller", Name: "loop_count_success", Help: "How many times the control loop executed successfully, partitioned by wpa name and namespace", }, []string{"workerpodautoscaler", "namespace"}, ) qMsgs = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "queue", Name: "messages", Help: "Number of unprocessed messages in the queue", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) qMsgsSPM = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "queue", Name: "messages_sent_per_minute", Help: "Number of messages sent to the queue per minute", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersIdle = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "idle", Help: 
"Number of idle workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersCurrent = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "current", Help: "Number of current workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersDesired = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "desired", Help: "Number of desired workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersAvailable = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "available", Help: "Number of available workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) ) func init() { prometheus.MustRegister(loopDurationSeconds) prometheus.MustRegister(loopCountSuccess) prometheus.MustRegister(qMsgs) prometheus.MustRegister(qMsgsSPM) prometheus.MustRegister(workersIdle) prometheus.MustRegister(workersCurrent) prometheus.MustRegister(workersDesired) prometheus.MustRegister(workersAvailable) } type WokerPodAutoScalerEvent struct { key string name string } // Controller is the controller implementation for WorkerPodAutoScaler resources type Controller struct { ctx context.Context // kubeclientset is a standard kubernetes clientset kubeclientset kubernetes.Interface // customclientset is a clientset for our own API group customclientset clientset.Interface deploymentLister appslisters.DeploymentLister deploymentsSynced cache.InformerSynced replicaSetLister appslisters.ReplicaSetLister replicaSetsSynced cache.InformerSynced workerPodAutoScalersLister listers.WorkerPodAutoScalerLister workerPodAutoScalersSynced cache.InformerSynced // workqueue is a rate limited work queue. This is used to queue work to be // processed instead of performing it as soon as a change happens. This // means we can ensure we only process a fixed amount of resources at a // time, and makes it easy to ensure we are never processing the same item // simultaneously in two different workers. workqueue workqueue.RateLimitingInterface // recorder is an event recorder for recording Event resources to the // Kubernetes API. recorder record.EventRecorder // defaultMaxDisruption // it is the default value for the maxDisruption in the WPA spec. // This specifies how much percentage of pods can be disrupted in a // single scale down acitivity. // Can be expressed as integers or as a percentage. defaultMaxDisruption string // QueueList keeps the list of all the queues in memeory // which is used by the core controller and the sqs exporter // scaleDownDelay after last scale up // the no of seconds to wait after the last scale up before scaling down scaleDownDelay time.Duration Queues *queue.Queues } // NewController returns a new sample controller func NewController( ctx context.Context, kubeclientset kubernetes.Interface, customclientset clientset.Interface, deploymentInformer appsinformers.DeploymentInformer, replicaSetInformer appsinformers.ReplicaSetInformer, workerPodAutoScalerInformer informers.WorkerPodAutoScalerInformer, defaultMaxDisruption string, resyncPeriod time.Duration, scaleDownDelay time.Duration, queues *queue.Queues) *Controller { // Create event broadcaster // Add sample-controller types to the default Kubernetes Scheme so Events can be // logged for sample-controller types. 
utilruntime.Must(samplescheme.AddToScheme(scheme.Scheme)) klog.V(4).Info("Creating event broadcaster") eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) controller := &Controller{ ctx: ctx, kubeclientset: kubeclientset, customclientset: customclientset, deploymentLister: deploymentInformer.Lister(), deploymentsSynced: deploymentInformer.Informer().HasSynced, replicaSetLister: replicaSetInformer.Lister(), replicaSetsSynced: replicaSetInformer.Informer().HasSynced, workerPodAutoScalersLister: workerPodAutoScalerInformer.Lister(), workerPodAutoScalersSynced: workerPodAutoScalerInformer.Informer().HasSynced, workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkerPodAutoScalers"), recorder: recorder, defaultMaxDisruption: defaultMaxDisruption, scaleDownDelay: scaleDownDelay, Queues: queues, } klog.V(4).Info("Setting up event handlers") // Set up an event handler for when WorkerPodAutoScaler resources change workerPodAutoScalerInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ AddFunc: controller.enqueueAddWorkerPodAutoScaler, UpdateFunc: func(old, new interface{}) { controller.enqueueUpdateWorkerPodAutoScaler(new) }, DeleteFunc: controller.enqueueDeleteWorkerPodAutoScaler, }, resyncPeriod) return controller } // Run will set up the event handlers for types we are interested in, as well // as syncing informer caches and starting workers. It will block until stopCh // is closed, at which point it will shutdown the workqueue and wait for // workers to finish processing their current work items. func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { defer utilruntime.HandleCrash() defer c.workqueue.ShutDown() // Start the informer factories to begin populating the informer caches klog.V(1).Info("Starting WorkerPodAutoScaler controller") // Wait for the caches to be synced before starting workers klog.V(1).Info("Waiting for informer caches to sync") if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.workerPodAutoScalersSynced); !ok { return fmt.Errorf("failed to wait for caches to sync") } klog.V(1).Info("Starting workers") // Launch two workers to process WorkerPodAutoScaler resources for i := 0; i < threadiness; i++ { // TOOD: move from stopCh to context, use: UntilWithContext() go wait.Until(c.runWorker, time.Second, stopCh) } <-stopCh klog.V(1).Info("Shutting down workers") return nil } // runWorker is a long-running function that will continually call the // processNextWorkItem function in order to read and process a message on the // workqueue. func (c *Controller) runWorker() { for c.processNextWorkItem(c.ctx) { } } // processNextWorkItem will read a single work item off the workqueue and // attempt to process it, by calling the syncHandler. func (c *Controller)
(ctx context.Context) bool { obj, shutdown := c.workqueue.Get() if shutdown { return false } // We wrap this block in a func so we can defer c.workqueue.Done. err := func(obj interface{}) error { // We call Done here so the workqueue knows we have finished // processing this item. We also must remember to call Forget if we // do not want this work item being re-queued. For example, we do // not call Forget if a transient error occurs, instead the item is // put back on the workqueue and attempted again after a back-off // period. defer c.workqueue.Done(obj) var ok bool // We expect strings to come off the workqueue. These are of the // form namespace/name. We do this as the delayed nature of the // workqueue means the items in the informer cache may actually be // more up to date that when the item was initially put onto the // workqueue.(PS: not anymore, its an WPA event) event, ok := obj.(WokerPodAutoScalerEvent) if !ok { // As the item in the workqueue is actually invalid, we call // Forget here else we'd go into a loop of attempting to // process a work item that is invalid. c.workqueue.Forget(obj) utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) return nil } // Run the syncHandler, passing it the namespace/name string of the // WorkerPodAutoScaler resource to be synced. if err := c.syncHandler(ctx, event); err != nil { // Put the item back on the workqueue to handle any transient errors. c.workqueue.AddRateLimited(event) return fmt.Errorf("error syncing '%s': %s, requeuing", event, err.Error()) } // Finally, if no error occurs we Forget this item so it does not // get queued again until another change happens. c.workqueue.Forget(obj) return nil }(obj) if err != nil { utilruntime.HandleError(err) return true } return true } // syncHandler compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the WorkerPodAutoScaler resource // with the current status of the resource. func (c *Controller) syncHandler(ctx context.Context, event WokerPodAutoScalerEvent) error { now := time.Now() key := event.key // Convert the namespace/name string into a distinct namespace and name namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key)) return nil } // Get the WorkerPodAutoScaler resource with this namespace/name workerPodAutoScaler, err := c.workerPodAutoScalersLister.WorkerPodAutoScalers(namespace).Get(name) if err != nil { // The WorkerPodAutoScaler resource may no longer exist, in which case we stop processing. 
if errors.IsNotFound(err) { utilruntime.HandleError(fmt.Errorf("workerPodAutoScaler '%s' in work queue no longer exists", key)) c.Queues.Delete(namespace, name) return nil } return err } var currentWorkers, availableWorkers int32 deploymentName := workerPodAutoScaler.Spec.DeploymentName replicaSetName := workerPodAutoScaler.Spec.ReplicaSetName if deploymentName != "" { // Get the Deployment with the name specified in WorkerPodAutoScaler.spec deployment, err := c.deploymentLister.Deployments(workerPodAutoScaler.Namespace).Get(deploymentName) if errors.IsNotFound(err) { return fmt.Errorf("deployment %s not found in namespace %s", deploymentName, workerPodAutoScaler.Namespace) } else if err != nil { return err } currentWorkers = *deployment.Spec.Replicas availableWorkers = deployment.Status.AvailableReplicas } else if replicaSetName != "" { // Get the ReplicaSet with the name specified in WorkerPodAutoScaler.spec replicaSet, err := c.replicaSetLister.ReplicaSets(workerPodAutoScaler.Namespace).Get(replicaSetName) if errors.IsNotFound(err) { return fmt.Errorf("ReplicaSet %s not found in namespace %s", replicaSetName, workerPodAutoScaler.Namespace) } else if err != nil { return err } currentWorkers = *replicaSet.Spec.Replicas availableWorkers = replicaSet.Status.AvailableReplicas } else { // We choose to absorb the error here as the worker would requeue the // resource otherwise. Instead, the next time the resource is updated // the resource will be queued again. utilruntime.HandleError(fmt.Errorf("%s: deployment or replicaset name must be specified", key)) return nil } var secondsToProcessOneJob float64 if workerPodAutoScaler.Spec.SecondsToProcessOneJob != nil { secondsToProcessOneJob = *workerPodAutoScaler.Spec.SecondsToProcessOneJob } switch event.name { case WokerPodAutoScalerEventAdd: err = c.Queues.Add( namespace, name, workerPodAutoScaler.Spec.QueueURI, currentWorkers, secondsToProcessOneJob, ) case WokerPodAutoScalerEventUpdate: err = c.Queues.Add( namespace, name, workerPodAutoScaler.Spec.QueueURI, currentWorkers, secondsToProcessOneJob, ) case WokerPodAutoScalerEventDelete: err = c.Queues.Delete(namespace, name) } if err != nil { utilruntime.HandleError(fmt.Errorf("unable to sync queue: %s", err.Error())) return err } queueName, queueMessages, messagesSentPerMinute, idleWorkers := c.Queues.GetQueueInfo( namespace, name) if queueName == "" { return nil } if queueMessages == queue.UnsyncedQueueMessageCount { klog.Warningf( "%s qMsgs: %d, q not initialized, waiting for init to complete", queueName, queueMessages, ) return nil } desiredWorkers := GetDesiredWorkers( queueName, queueMessages, messagesSentPerMinute, secondsToProcessOneJob, *workerPodAutoScaler.Spec.TargetMessagesPerWorker, currentWorkers, idleWorkers, *workerPodAutoScaler.Spec.MinReplicas, *workerPodAutoScaler.Spec.MaxReplicas, workerPodAutoScaler.GetMaxDisruption(c.defaultMaxDisruption), ) klog.V(2).Infof("%s current: %d", queueName, currentWorkers) klog.V(2).Infof("%s qMsgs: %d, desired: %d", queueName, queueMessages, desiredWorkers) // set metrics qMsgs.WithLabelValues( name, namespace, queueName, ).Set(float64(queueMessages)) qMsgsSPM.WithLabelValues( name, namespace, queueName, ).Set(messagesSentPerMinute) workersIdle.WithLabelValues( name, namespace, queueName, ).Set(float64(idleWorkers)) workersCurrent.WithLabelValues( name, namespace, queueName, ).Set(float64(currentWorkers)) workersDesired.WithLabelValues( name, namespace, queueName, ).Set(float64(desiredWorkers)) workersAvailable.WithLabelValues( name, namespace, 
queueName, ).Set(float64(availableWorkers)) lastScaleTime := workerPodAutoScaler.Status.LastScaleTime.DeepCopy() op := GetScaleOperation( queueName, desiredWorkers, currentWorkers, lastScaleTime, c.scaleDownDelay, ) if op == ScaleUp || op == ScaleDown { if deploymentName != "" { c.updateDeployment( ctx, workerPodAutoScaler.Namespace, deploymentName, &desiredWorkers) } else { c.updateReplicaSet( ctx, workerPodAutoScaler.Namespace, replicaSetName, &desiredWorkers) } now := metav1.Now() lastScaleTime = &now } klog.V(2).Infof("%s scaleOp: %v", queueName, scaleOpString(op)) // Finally, we update the status block of the WorkerPodAutoScaler resource to reflect the // current state of the world updateWorkerPodAutoScalerStatus( ctx, name, namespace, c.customclientset, desiredWorkers, workerPodAutoScaler, currentWorkers, availableWorkers, queueMessages, lastScaleTime, ) loopDurationSeconds.WithLabelValues( name, namespace, ).Set(time.Since(now).Seconds()) loopCountSuccess.WithLabelValues( name, namespace, ).Inc() // TODO: organize and log events // c.recorder.Event(workerPodAutoScaler, corev1.EventTypeNormal, SuccessSynced, MessageResourceSynced) return nil } // updateDeployment updates the Deployment with the desired number of replicas func (c *Controller) updateDeployment(ctx context.Context, namespace string, deploymentName string, replicas *int32) { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { // Retrieve the latest version of the Deployment before attempting update deployment, getErr := c.deploymentLister.Deployments(namespace).Get(deploymentName) if errors.IsNotFound(getErr) { return fmt.Errorf("deployment %s was not found in namespace %s", deploymentName, namespace) } if getErr != nil { klog.Fatalf("Failed to get deployment: %v", getErr) } deployment.Spec.Replicas = replicas _, updateErr := c.kubeclientset.AppsV1().Deployments(namespace).Update(ctx, deployment, metav1.UpdateOptions{}) if updateErr != nil { klog.Errorf("Failed to update deployment: %v", updateErr) } return updateErr }) if retryErr != nil { klog.Fatalf("Failed to update deployment (retry failed): %v", retryErr) } } // updateReplicaSet updates the ReplicaSet with the desired number of replicas func (c *Controller) updateReplicaSet(ctx context.Context, namespace string, replicaSetName string, replicas *int32) { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { // Retrieve the latest version of the ReplicaSet before attempting update replicaSet, getErr := c.replicaSetLister.ReplicaSets(namespace).Get(replicaSetName) if errors.IsNotFound(getErr) { return fmt.Errorf("ReplicaSet %s was not found in namespace %s", replicaSetName, namespace) } if getErr != nil { klog.Fatalf("Failed to get ReplicaSet: %v", getErr) } replicaSet.Spec.Replicas = replicas _, updateErr := c.kubeclientset.AppsV1().ReplicaSets(namespace).Update(ctx, replicaSet, metav1.UpdateOptions{}) if updateErr != nil { klog.Errorf("Failed to update ReplicaSet: %v", updateErr) } return updateErr }) if retryErr != nil { klog.Fatalf("Failed to update ReplicaSet (retry failed): %v", retryErr) } } // getMaxDisruptableWorkers gets the maximum number of workers that can // be scaled down in the single scale down activity. func getMaxDisruptableWorkers( maxDisruption *string, currentWorkers int32) int32 { if maxDisruption == nil { klog.Fatalf("maxDisruption default is not being set. 
Exiting") } maxDisruptionIntOrStr := intstr.Parse(*maxDisruption) maxDisruptableWorkers, err := intstr.GetValueFromIntOrPercent( &maxDisruptionIntOrStr, int(currentWorkers), true, ) if err != nil { klog.Fatalf("Error calculating maxDisruptable workers, err: %v", err) } return int32(maxDisruptableWorkers) } // getMinWorkers gets the min workers based on the // velocity metric: messagesSentPerMinute func getMinWorkers( messagesSentPerMinute float64, minWorkers int32, secondsToProcessOneJob float64) int32 { // disable this feature for WPA queues which have not specified // processing time if secondsToProcessOneJob == 0.0 { return minWorkers } workersBasedOnMessagesSent := int32(math.Ceil((secondsToProcessOneJob * messagesSentPerMinute) / 60)) klog.V(4).Infof("%v, workersBasedOnMessagesSent=%v\n", secondsToProcessOneJob, workersBasedOnMessagesSent) if workersBasedOnMessagesSent > minWorkers { return workersBasedOnMessagesSent } return minWorkers } func isChangeTooSmall(desired int32, current int32, tolerance float64) bool { return math.Abs(float64(desired-current))/float64(current) <= tolerance } // GetDesiredWorkers finds the desired number of workers which are required func GetDesiredWorkers( queueName string, queueMessages int32, messagesSentPerMinute float64, secondsToProcessOneJob float64, targetMessagesPerWorker int32, currentWorkers int32, idleWorkers int32, minWorkers int32, maxWorkers int32, maxDisruption *string) int32 { klog.V(4).Infof("%s min=%v, max=%v, targetBacklog=%v \n", queueName, minWorkers, maxWorkers, targetMessagesPerWorker) // overwrite the minimum workers needed based on // messagesSentPerMinute and secondsToProcessOneJob // this feature is disabled if secondsToProcessOneJob is not set or is 0.0 minWorkers = getMinWorkers( messagesSentPerMinute, minWorkers, secondsToProcessOneJob, ) // gets the maximum number of workers that can be scaled down in a // single scale down activity. maxDisruptableWorkers := getMaxDisruptableWorkers( maxDisruption, currentWorkers, ) tolerance := 0.1 desiredWorkers := int32(math.Ceil( float64(queueMessages) / float64(targetMessagesPerWorker)), ) klog.V(4).Infof("%s qMsgs=%v, qMsgsPerMin=%v \n", queueName, queueMessages, messagesSentPerMinute) klog.V(4).Infof("%s secToProcessJob=%v, maxDisruption=%v \n", queueName, secondsToProcessOneJob, *maxDisruption) klog.V(4).Infof("%s current=%v, idle=%v \n", queueName, currentWorkers, idleWorkers) klog.V(3).Infof("%s minComputed=%v, maxDisruptable=%v\n", queueName, minWorkers, maxDisruptableWorkers) if currentWorkers == 0 { return convertDesiredReplicasWithRules( currentWorkers, desiredWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } if queueMessages > 0 { if isChangeTooSmall(desiredWorkers, currentWorkers, tolerance) { // desired is same as current in this scenario return convertDesiredReplicasWithRules( currentWorkers, currentWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } return convertDesiredReplicasWithRules( currentWorkers, desiredWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } else if messagesSentPerMinute > 0 && secondsToProcessOneJob > 0.0 { // this is the case in which there is no backlog visible. // (mostly because the workers picks up jobs very quickly) // But the queue has throughput, so we return the minWorkers. 
// Note: minWorkers is updated based on // messagesSentPerMinute and secondsToProcessOneJob // desried is the minReplicas in this scenario return convertDesiredReplicasWithRules( currentWorkers, minWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } // Attempt for massive scale down if currentWorkers == idleWorkers { desiredWorkers := int32(0) // for massive scale down to happen maxDisruptableWorkers // should be ignored return convertDesiredReplicasWithRules( currentWorkers, desiredWorkers, minWorkers, maxWorkers, currentWorkers, ) } // Attempt partial scale down since there is no backlog or in-processing // messages. return convertDesiredReplicasWithRules( currentWorkers, minWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } func convertDesiredReplicasWithRules( current int32, desired int32, min int32, max int32, maxDisruptable int32) int32 { if min >= max { return max } if (current - desired) > maxDisruptable { desired = current - maxDisruptable } if desired > max { return max } if desired < min { return min } return desired } func updateWorkerPodAutoScalerStatus( ctx context.Context, name string, namespace string, customclientset clientset.Interface, desiredWorkers int32, workerPodAutoScaler *v1.WorkerPodAutoScaler, currentWorkers int32, availableWorkers int32, queueMessages int32, lastScaleTime *metav1.Time) { if workerPodAutoScaler.Status.CurrentReplicas == currentWorkers && workerPodAutoScaler.Status.AvailableReplicas == availableWorkers && workerPodAutoScaler.Status.DesiredReplicas == desiredWorkers && workerPodAutoScaler.Status.CurrentMessages == queueMessages && workerPodAutoScaler.Status.LastScaleTime.Equal(lastScaleTime) { klog.V(4).Infof("%s/%s: WPA status is already up to date\n", namespace, name) return } else { klog.V(4).Infof("%s/%s: Updating wpa status\n", namespace, name) } // NEVER modify objects from the store. It's a read-only, local cache. // You can use DeepCopy() to make a deep copy of original object and modify this copy // Or create a copy manually for better performance workerPodAutoScalerCopy := workerPodAutoScaler.DeepCopy() workerPodAutoScalerCopy.Status.CurrentReplicas = currentWorkers workerPodAutoScalerCopy.Status.AvailableReplicas = availableWorkers workerPodAutoScalerCopy.Status.DesiredReplicas = desiredWorkers workerPodAutoScalerCopy.Status.CurrentMessages = queueMessages workerPodAutoScalerCopy.Status.LastScaleTime = lastScaleTime // If the CustomResourceSubresources feature gate is not enabled, // we must use Update instead of UpdateStatus to update the Status block of the WorkerPodAutoScaler resource. // UpdateStatus will not allow changes to the Spec of the resource, // which is ideal for ensuring nothing other than resource status has been updated. _, err := customclientset.K8sV1().WorkerPodAutoScalers(workerPodAutoScaler.Namespace).UpdateStatus(ctx, workerPodAutoScalerCopy, metav1.UpdateOptions{}) if err != nil { klog.Errorf("Error updating wpa status, err: %v", err) return } klog.V(4).Infof("%s/%s: Updated wpa status\n", namespace, name) } // getKeyForWorkerPodAutoScaler takes a WorkerPodAutoScaler resource and converts it into a namespace/name // string which is then put onto the work queue. This method should *not* be // passed resources of any type other than WorkerPodAutoScaler. 
func (c *Controller) getKeyForWorkerPodAutoScaler(obj interface{}) string { var key string var err error if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { utilruntime.HandleError(err) return "" } return key } func (c *Controller) enqueueAddWorkerPodAutoScaler(obj interface{}) { c.workqueue.Add(WokerPodAutoScalerEvent{ key: c.getKeyForWorkerPodAutoScaler(obj), name: WokerPodAutoScalerEventAdd, }) } func (c *Controller) enqueueUpdateWorkerPodAutoScaler(obj interface{}) { c.workqueue.Add(WokerPodAutoScalerEvent{ key: c.getKeyForWorkerPodAutoScaler(obj), name: WokerPodAutoScalerEventUpdate, }) } func (c *Controller) enqueueDeleteWorkerPodAutoScaler(obj interface{}) { c.workqueue.Add(WokerPodAutoScalerEvent{ key: c.getKeyForWorkerPodAutoScaler(obj), name: WokerPodAutoScalerEventDelete, }) }
processNextWorkItem
identifier_name
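The scale decision computed by GetDesiredWorkers in the controller code above is always passed through convertDesiredReplicasWithRules, which caps the result at maxReplicas, floors it at minReplicas, and limits how many workers a single loop may remove. Below is a minimal standalone Go sketch of that clamping behaviour; the function name clampDesired and the sample numbers are illustrative only and not part of the controller.

package main

import "fmt"

// clampDesired mirrors the rules applied by convertDesiredReplicasWithRules:
// the result never exceeds max, never drops below min, and a single loop
// never removes more than maxDisruptable workers.
func clampDesired(current, desired, min, max, maxDisruptable int32) int32 {
	if min >= max {
		return max
	}
	if current-desired > maxDisruptable {
		desired = current - maxDisruptable
	}
	if desired > max {
		return max
	}
	if desired < min {
		return min
	}
	return desired
}

func main() {
	// 20 running workers with a backlog that only needs 2: with
	// maxDisruptable=5 the controller steps down to 15 this loop
	// and converges over later loops.
	fmt.Println(clampDesired(20, 2, 1, 50, 5)) // 15
	// Scale up is not limited by maxDisruptable, only by max.
	fmt.Println(clampDesired(3, 40, 1, 30, 5)) // 30
}

Note the one exception visible in GetDesiredWorkers above: when every current worker is idle, the controller passes currentWorkers as the disruptable count so the deployment can drop straight to zero.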
controller.go
package controller import ( "context" "fmt" "math" "time" "github.com/practo/klog/v2" "github.com/prometheus/client_golang/prometheus" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" appsinformers "k8s.io/client-go/informers/apps/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" appslisters "k8s.io/client-go/listers/apps/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" v1 "github.com/practo/k8s-worker-pod-autoscaler/pkg/apis/workerpodautoscaler/v1" clientset "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/clientset/versioned" samplescheme "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/clientset/versioned/scheme" informers "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/informers/externalversions/workerpodautoscaler/v1" listers "github.com/practo/k8s-worker-pod-autoscaler/pkg/generated/listers/workerpodautoscaler/v1" queue "github.com/practo/k8s-worker-pod-autoscaler/pkg/queue" ) const controllerAgentName = "workerpodautoscaler-controller" const ( // SuccessSynced is used as part of the Event 'reason' when a WorkerPodAutoScaler is synced SuccessSynced = "Synced" // ErrResourceExists is used as part of the Event 'reason' when a WorkerPodAutoScaler fails // to sync due to a Deployment of the same name already existing. ErrResourceExists = "ErrResourceExists" // MessageResourceExists is the message used for Events when a resource // fails to sync due to a Deployment already existing MessageResourceExists = "Resource %q already exists and is not managed by WorkerPodAutoScaler" // MessageResourceSynced is the message used for an Event fired when a WorkerPodAutoScaler // is synced successfully MessageResourceSynced = "WorkerPodAutoScaler synced successfully" // WokerPodAutoScalerEventAdd stores the add event name WokerPodAutoScalerEventAdd = "add" // WokerPodAutoScalerEventUpdate stores the add event name WokerPodAutoScalerEventUpdate = "update" // WokerPodAutoScalerEventDelete stores the add event name WokerPodAutoScalerEventDelete = "delete" ) var ( loopDurationSeconds = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "controller", Name: "loop_duration_seconds", Help: "Number of seconds to complete the control loop successfully, partitioned by wpa name and namespace", }, []string{"workerpodautoscaler", "namespace"}, ) loopCountSuccess = prometheus.NewCounterVec( prometheus.CounterOpts{ Namespace: "wpa", Subsystem: "controller", Name: "loop_count_success", Help: "How many times the control loop executed successfully, partitioned by wpa name and namespace", }, []string{"workerpodautoscaler", "namespace"}, ) qMsgs = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "queue", Name: "messages", Help: "Number of unprocessed messages in the queue", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) qMsgsSPM = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "queue", Name: "messages_sent_per_minute", Help: "Number of messages sent to the queue per minute", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersIdle = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "idle", Help: 
"Number of idle workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersCurrent = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "current", Help: "Number of current workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersDesired = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "desired", Help: "Number of desired workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) workersAvailable = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Namespace: "wpa", Subsystem: "worker", Name: "available", Help: "Number of available workers", }, []string{"workerpodautoscaler", "namespace", "queueName"}, ) ) func init() { prometheus.MustRegister(loopDurationSeconds) prometheus.MustRegister(loopCountSuccess) prometheus.MustRegister(qMsgs) prometheus.MustRegister(qMsgsSPM) prometheus.MustRegister(workersIdle) prometheus.MustRegister(workersCurrent) prometheus.MustRegister(workersDesired) prometheus.MustRegister(workersAvailable) } type WokerPodAutoScalerEvent struct { key string name string } // Controller is the controller implementation for WorkerPodAutoScaler resources type Controller struct { ctx context.Context // kubeclientset is a standard kubernetes clientset kubeclientset kubernetes.Interface // customclientset is a clientset for our own API group customclientset clientset.Interface deploymentLister appslisters.DeploymentLister deploymentsSynced cache.InformerSynced replicaSetLister appslisters.ReplicaSetLister replicaSetsSynced cache.InformerSynced workerPodAutoScalersLister listers.WorkerPodAutoScalerLister workerPodAutoScalersSynced cache.InformerSynced // workqueue is a rate limited work queue. This is used to queue work to be // processed instead of performing it as soon as a change happens. This // means we can ensure we only process a fixed amount of resources at a // time, and makes it easy to ensure we are never processing the same item // simultaneously in two different workers. workqueue workqueue.RateLimitingInterface // recorder is an event recorder for recording Event resources to the // Kubernetes API. recorder record.EventRecorder // defaultMaxDisruption // it is the default value for the maxDisruption in the WPA spec. // This specifies how much percentage of pods can be disrupted in a // single scale down acitivity. // Can be expressed as integers or as a percentage. defaultMaxDisruption string // QueueList keeps the list of all the queues in memeory // which is used by the core controller and the sqs exporter // scaleDownDelay after last scale up // the no of seconds to wait after the last scale up before scaling down scaleDownDelay time.Duration Queues *queue.Queues } // NewController returns a new sample controller func NewController( ctx context.Context, kubeclientset kubernetes.Interface, customclientset clientset.Interface, deploymentInformer appsinformers.DeploymentInformer, replicaSetInformer appsinformers.ReplicaSetInformer, workerPodAutoScalerInformer informers.WorkerPodAutoScalerInformer, defaultMaxDisruption string, resyncPeriod time.Duration, scaleDownDelay time.Duration, queues *queue.Queues) *Controller
// Run will set up the event handlers for types we are interested in, as well // as syncing informer caches and starting workers. It will block until stopCh // is closed, at which point it will shutdown the workqueue and wait for // workers to finish processing their current work items. func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error { defer utilruntime.HandleCrash() defer c.workqueue.ShutDown() // Start the informer factories to begin populating the informer caches klog.V(1).Info("Starting WorkerPodAutoScaler controller") // Wait for the caches to be synced before starting workers klog.V(1).Info("Waiting for informer caches to sync") if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.workerPodAutoScalersSynced); !ok { return fmt.Errorf("failed to wait for caches to sync") } klog.V(1).Info("Starting workers") // Launch two workers to process WorkerPodAutoScaler resources for i := 0; i < threadiness; i++ { // TOOD: move from stopCh to context, use: UntilWithContext() go wait.Until(c.runWorker, time.Second, stopCh) } <-stopCh klog.V(1).Info("Shutting down workers") return nil } // runWorker is a long-running function that will continually call the // processNextWorkItem function in order to read and process a message on the // workqueue. func (c *Controller) runWorker() { for c.processNextWorkItem(c.ctx) { } } // processNextWorkItem will read a single work item off the workqueue and // attempt to process it, by calling the syncHandler. func (c *Controller) processNextWorkItem(ctx context.Context) bool { obj, shutdown := c.workqueue.Get() if shutdown { return false } // We wrap this block in a func so we can defer c.workqueue.Done. err := func(obj interface{}) error { // We call Done here so the workqueue knows we have finished // processing this item. We also must remember to call Forget if we // do not want this work item being re-queued. For example, we do // not call Forget if a transient error occurs, instead the item is // put back on the workqueue and attempted again after a back-off // period. defer c.workqueue.Done(obj) var ok bool // We expect strings to come off the workqueue. These are of the // form namespace/name. We do this as the delayed nature of the // workqueue means the items in the informer cache may actually be // more up to date that when the item was initially put onto the // workqueue.(PS: not anymore, its an WPA event) event, ok := obj.(WokerPodAutoScalerEvent) if !ok { // As the item in the workqueue is actually invalid, we call // Forget here else we'd go into a loop of attempting to // process a work item that is invalid. c.workqueue.Forget(obj) utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj)) return nil } // Run the syncHandler, passing it the namespace/name string of the // WorkerPodAutoScaler resource to be synced. if err := c.syncHandler(ctx, event); err != nil { // Put the item back on the workqueue to handle any transient errors. c.workqueue.AddRateLimited(event) return fmt.Errorf("error syncing '%s': %s, requeuing", event, err.Error()) } // Finally, if no error occurs we Forget this item so it does not // get queued again until another change happens. c.workqueue.Forget(obj) return nil }(obj) if err != nil { utilruntime.HandleError(err) return true } return true } // syncHandler compares the actual state with the desired, and attempts to // converge the two. It then updates the Status block of the WorkerPodAutoScaler resource // with the current status of the resource. 
func (c *Controller) syncHandler(ctx context.Context, event WokerPodAutoScalerEvent) error { now := time.Now() key := event.key // Convert the namespace/name string into a distinct namespace and name namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key)) return nil } // Get the WorkerPodAutoScaler resource with this namespace/name workerPodAutoScaler, err := c.workerPodAutoScalersLister.WorkerPodAutoScalers(namespace).Get(name) if err != nil { // The WorkerPodAutoScaler resource may no longer exist, in which case we stop processing. if errors.IsNotFound(err) { utilruntime.HandleError(fmt.Errorf("workerPodAutoScaler '%s' in work queue no longer exists", key)) c.Queues.Delete(namespace, name) return nil } return err } var currentWorkers, availableWorkers int32 deploymentName := workerPodAutoScaler.Spec.DeploymentName replicaSetName := workerPodAutoScaler.Spec.ReplicaSetName if deploymentName != "" { // Get the Deployment with the name specified in WorkerPodAutoScaler.spec deployment, err := c.deploymentLister.Deployments(workerPodAutoScaler.Namespace).Get(deploymentName) if errors.IsNotFound(err) { return fmt.Errorf("deployment %s not found in namespace %s", deploymentName, workerPodAutoScaler.Namespace) } else if err != nil { return err } currentWorkers = *deployment.Spec.Replicas availableWorkers = deployment.Status.AvailableReplicas } else if replicaSetName != "" { // Get the ReplicaSet with the name specified in WorkerPodAutoScaler.spec replicaSet, err := c.replicaSetLister.ReplicaSets(workerPodAutoScaler.Namespace).Get(replicaSetName) if errors.IsNotFound(err) { return fmt.Errorf("ReplicaSet %s not found in namespace %s", replicaSetName, workerPodAutoScaler.Namespace) } else if err != nil { return err } currentWorkers = *replicaSet.Spec.Replicas availableWorkers = replicaSet.Status.AvailableReplicas } else { // We choose to absorb the error here as the worker would requeue the // resource otherwise. Instead, the next time the resource is updated // the resource will be queued again. 
utilruntime.HandleError(fmt.Errorf("%s: deployment or replicaset name must be specified", key)) return nil } var secondsToProcessOneJob float64 if workerPodAutoScaler.Spec.SecondsToProcessOneJob != nil { secondsToProcessOneJob = *workerPodAutoScaler.Spec.SecondsToProcessOneJob } switch event.name { case WokerPodAutoScalerEventAdd: err = c.Queues.Add( namespace, name, workerPodAutoScaler.Spec.QueueURI, currentWorkers, secondsToProcessOneJob, ) case WokerPodAutoScalerEventUpdate: err = c.Queues.Add( namespace, name, workerPodAutoScaler.Spec.QueueURI, currentWorkers, secondsToProcessOneJob, ) case WokerPodAutoScalerEventDelete: err = c.Queues.Delete(namespace, name) } if err != nil { utilruntime.HandleError(fmt.Errorf("unable to sync queue: %s", err.Error())) return err } queueName, queueMessages, messagesSentPerMinute, idleWorkers := c.Queues.GetQueueInfo( namespace, name) if queueName == "" { return nil } if queueMessages == queue.UnsyncedQueueMessageCount { klog.Warningf( "%s qMsgs: %d, q not initialized, waiting for init to complete", queueName, queueMessages, ) return nil } desiredWorkers := GetDesiredWorkers( queueName, queueMessages, messagesSentPerMinute, secondsToProcessOneJob, *workerPodAutoScaler.Spec.TargetMessagesPerWorker, currentWorkers, idleWorkers, *workerPodAutoScaler.Spec.MinReplicas, *workerPodAutoScaler.Spec.MaxReplicas, workerPodAutoScaler.GetMaxDisruption(c.defaultMaxDisruption), ) klog.V(2).Infof("%s current: %d", queueName, currentWorkers) klog.V(2).Infof("%s qMsgs: %d, desired: %d", queueName, queueMessages, desiredWorkers) // set metrics qMsgs.WithLabelValues( name, namespace, queueName, ).Set(float64(queueMessages)) qMsgsSPM.WithLabelValues( name, namespace, queueName, ).Set(messagesSentPerMinute) workersIdle.WithLabelValues( name, namespace, queueName, ).Set(float64(idleWorkers)) workersCurrent.WithLabelValues( name, namespace, queueName, ).Set(float64(currentWorkers)) workersDesired.WithLabelValues( name, namespace, queueName, ).Set(float64(desiredWorkers)) workersAvailable.WithLabelValues( name, namespace, queueName, ).Set(float64(availableWorkers)) lastScaleTime := workerPodAutoScaler.Status.LastScaleTime.DeepCopy() op := GetScaleOperation( queueName, desiredWorkers, currentWorkers, lastScaleTime, c.scaleDownDelay, ) if op == ScaleUp || op == ScaleDown { if deploymentName != "" { c.updateDeployment( ctx, workerPodAutoScaler.Namespace, deploymentName, &desiredWorkers) } else { c.updateReplicaSet( ctx, workerPodAutoScaler.Namespace, replicaSetName, &desiredWorkers) } now := metav1.Now() lastScaleTime = &now } klog.V(2).Infof("%s scaleOp: %v", queueName, scaleOpString(op)) // Finally, we update the status block of the WorkerPodAutoScaler resource to reflect the // current state of the world updateWorkerPodAutoScalerStatus( ctx, name, namespace, c.customclientset, desiredWorkers, workerPodAutoScaler, currentWorkers, availableWorkers, queueMessages, lastScaleTime, ) loopDurationSeconds.WithLabelValues( name, namespace, ).Set(time.Since(now).Seconds()) loopCountSuccess.WithLabelValues( name, namespace, ).Inc() // TODO: organize and log events // c.recorder.Event(workerPodAutoScaler, corev1.EventTypeNormal, SuccessSynced, MessageResourceSynced) return nil } // updateDeployment updates the Deployment with the desired number of replicas func (c *Controller) updateDeployment(ctx context.Context, namespace string, deploymentName string, replicas *int32) { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { // Retrieve the latest version of the Deployment 
before attempting update deployment, getErr := c.deploymentLister.Deployments(namespace).Get(deploymentName) if errors.IsNotFound(getErr) { return fmt.Errorf("deployment %s was not found in namespace %s", deploymentName, namespace) } if getErr != nil { klog.Fatalf("Failed to get deployment: %v", getErr) } deployment.Spec.Replicas = replicas _, updateErr := c.kubeclientset.AppsV1().Deployments(namespace).Update(ctx, deployment, metav1.UpdateOptions{}) if updateErr != nil { klog.Errorf("Failed to update deployment: %v", updateErr) } return updateErr }) if retryErr != nil { klog.Fatalf("Failed to update deployment (retry failed): %v", retryErr) } } // updateReplicaSet updates the ReplicaSet with the desired number of replicas func (c *Controller) updateReplicaSet(ctx context.Context, namespace string, replicaSetName string, replicas *int32) { retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { // Retrieve the latest version of the ReplicaSet before attempting update replicaSet, getErr := c.replicaSetLister.ReplicaSets(namespace).Get(replicaSetName) if errors.IsNotFound(getErr) { return fmt.Errorf("ReplicaSet %s was not found in namespace %s", replicaSetName, namespace) } if getErr != nil { klog.Fatalf("Failed to get ReplicaSet: %v", getErr) } replicaSet.Spec.Replicas = replicas _, updateErr := c.kubeclientset.AppsV1().ReplicaSets(namespace).Update(ctx, replicaSet, metav1.UpdateOptions{}) if updateErr != nil { klog.Errorf("Failed to update ReplicaSet: %v", updateErr) } return updateErr }) if retryErr != nil { klog.Fatalf("Failed to update ReplicaSet (retry failed): %v", retryErr) } } // getMaxDisruptableWorkers gets the maximum number of workers that can // be scaled down in the single scale down activity. func getMaxDisruptableWorkers( maxDisruption *string, currentWorkers int32) int32 { if maxDisruption == nil { klog.Fatalf("maxDisruption default is not being set. 
Exiting") } maxDisruptionIntOrStr := intstr.Parse(*maxDisruption) maxDisruptableWorkers, err := intstr.GetValueFromIntOrPercent( &maxDisruptionIntOrStr, int(currentWorkers), true, ) if err != nil { klog.Fatalf("Error calculating maxDisruptable workers, err: %v", err) } return int32(maxDisruptableWorkers) } // getMinWorkers gets the min workers based on the // velocity metric: messagesSentPerMinute func getMinWorkers( messagesSentPerMinute float64, minWorkers int32, secondsToProcessOneJob float64) int32 { // disable this feature for WPA queues which have not specified // processing time if secondsToProcessOneJob == 0.0 { return minWorkers } workersBasedOnMessagesSent := int32(math.Ceil((secondsToProcessOneJob * messagesSentPerMinute) / 60)) klog.V(4).Infof("%v, workersBasedOnMessagesSent=%v\n", secondsToProcessOneJob, workersBasedOnMessagesSent) if workersBasedOnMessagesSent > minWorkers { return workersBasedOnMessagesSent } return minWorkers } func isChangeTooSmall(desired int32, current int32, tolerance float64) bool { return math.Abs(float64(desired-current))/float64(current) <= tolerance } // GetDesiredWorkers finds the desired number of workers which are required func GetDesiredWorkers( queueName string, queueMessages int32, messagesSentPerMinute float64, secondsToProcessOneJob float64, targetMessagesPerWorker int32, currentWorkers int32, idleWorkers int32, minWorkers int32, maxWorkers int32, maxDisruption *string) int32 { klog.V(4).Infof("%s min=%v, max=%v, targetBacklog=%v \n", queueName, minWorkers, maxWorkers, targetMessagesPerWorker) // overwrite the minimum workers needed based on // messagesSentPerMinute and secondsToProcessOneJob // this feature is disabled if secondsToProcessOneJob is not set or is 0.0 minWorkers = getMinWorkers( messagesSentPerMinute, minWorkers, secondsToProcessOneJob, ) // gets the maximum number of workers that can be scaled down in a // single scale down activity. maxDisruptableWorkers := getMaxDisruptableWorkers( maxDisruption, currentWorkers, ) tolerance := 0.1 desiredWorkers := int32(math.Ceil( float64(queueMessages) / float64(targetMessagesPerWorker)), ) klog.V(4).Infof("%s qMsgs=%v, qMsgsPerMin=%v \n", queueName, queueMessages, messagesSentPerMinute) klog.V(4).Infof("%s secToProcessJob=%v, maxDisruption=%v \n", queueName, secondsToProcessOneJob, *maxDisruption) klog.V(4).Infof("%s current=%v, idle=%v \n", queueName, currentWorkers, idleWorkers) klog.V(3).Infof("%s minComputed=%v, maxDisruptable=%v\n", queueName, minWorkers, maxDisruptableWorkers) if currentWorkers == 0 { return convertDesiredReplicasWithRules( currentWorkers, desiredWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } if queueMessages > 0 { if isChangeTooSmall(desiredWorkers, currentWorkers, tolerance) { // desired is same as current in this scenario return convertDesiredReplicasWithRules( currentWorkers, currentWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } return convertDesiredReplicasWithRules( currentWorkers, desiredWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } else if messagesSentPerMinute > 0 && secondsToProcessOneJob > 0.0 { // this is the case in which there is no backlog visible. // (mostly because the workers picks up jobs very quickly) // But the queue has throughput, so we return the minWorkers. 
// Note: minWorkers is updated based on // messagesSentPerMinute and secondsToProcessOneJob // desried is the minReplicas in this scenario return convertDesiredReplicasWithRules( currentWorkers, minWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } // Attempt for massive scale down if currentWorkers == idleWorkers { desiredWorkers := int32(0) // for massive scale down to happen maxDisruptableWorkers // should be ignored return convertDesiredReplicasWithRules( currentWorkers, desiredWorkers, minWorkers, maxWorkers, currentWorkers, ) } // Attempt partial scale down since there is no backlog or in-processing // messages. return convertDesiredReplicasWithRules( currentWorkers, minWorkers, minWorkers, maxWorkers, maxDisruptableWorkers, ) } func convertDesiredReplicasWithRules( current int32, desired int32, min int32, max int32, maxDisruptable int32) int32 { if min >= max { return max } if (current - desired) > maxDisruptable { desired = current - maxDisruptable } if desired > max { return max } if desired < min { return min } return desired } func updateWorkerPodAutoScalerStatus( ctx context.Context, name string, namespace string, customclientset clientset.Interface, desiredWorkers int32, workerPodAutoScaler *v1.WorkerPodAutoScaler, currentWorkers int32, availableWorkers int32, queueMessages int32, lastScaleTime *metav1.Time) { if workerPodAutoScaler.Status.CurrentReplicas == currentWorkers && workerPodAutoScaler.Status.AvailableReplicas == availableWorkers && workerPodAutoScaler.Status.DesiredReplicas == desiredWorkers && workerPodAutoScaler.Status.CurrentMessages == queueMessages && workerPodAutoScaler.Status.LastScaleTime.Equal(lastScaleTime) { klog.V(4).Infof("%s/%s: WPA status is already up to date\n", namespace, name) return } else { klog.V(4).Infof("%s/%s: Updating wpa status\n", namespace, name) } // NEVER modify objects from the store. It's a read-only, local cache. // You can use DeepCopy() to make a deep copy of original object and modify this copy // Or create a copy manually for better performance workerPodAutoScalerCopy := workerPodAutoScaler.DeepCopy() workerPodAutoScalerCopy.Status.CurrentReplicas = currentWorkers workerPodAutoScalerCopy.Status.AvailableReplicas = availableWorkers workerPodAutoScalerCopy.Status.DesiredReplicas = desiredWorkers workerPodAutoScalerCopy.Status.CurrentMessages = queueMessages workerPodAutoScalerCopy.Status.LastScaleTime = lastScaleTime // If the CustomResourceSubresources feature gate is not enabled, // we must use Update instead of UpdateStatus to update the Status block of the WorkerPodAutoScaler resource. // UpdateStatus will not allow changes to the Spec of the resource, // which is ideal for ensuring nothing other than resource status has been updated. _, err := customclientset.K8sV1().WorkerPodAutoScalers(workerPodAutoScaler.Namespace).UpdateStatus(ctx, workerPodAutoScalerCopy, metav1.UpdateOptions{}) if err != nil { klog.Errorf("Error updating wpa status, err: %v", err) return } klog.V(4).Infof("%s/%s: Updated wpa status\n", namespace, name) } // getKeyForWorkerPodAutoScaler takes a WorkerPodAutoScaler resource and converts it into a namespace/name // string which is then put onto the work queue. This method should *not* be // passed resources of any type other than WorkerPodAutoScaler. 
func (c *Controller) getKeyForWorkerPodAutoScaler(obj interface{}) string { var key string var err error if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { utilruntime.HandleError(err) return "" } return key } func (c *Controller) enqueueAddWorkerPodAutoScaler(obj interface{}) { c.workqueue.Add(WokerPodAutoScalerEvent{ key: c.getKeyForWorkerPodAutoScaler(obj), name: WokerPodAutoScalerEventAdd, }) } func (c *Controller) enqueueUpdateWorkerPodAutoScaler(obj interface{}) { c.workqueue.Add(WokerPodAutoScalerEvent{ key: c.getKeyForWorkerPodAutoScaler(obj), name: WokerPodAutoScalerEventUpdate, }) } func (c *Controller) enqueueDeleteWorkerPodAutoScaler(obj interface{}) { c.workqueue.Add(WokerPodAutoScalerEvent{ key: c.getKeyForWorkerPodAutoScaler(obj), name: WokerPodAutoScalerEventDelete, }) }
{ // Create event broadcaster // Add sample-controller types to the default Kubernetes Scheme so Events can be // logged for sample-controller types. utilruntime.Must(samplescheme.AddToScheme(scheme.Scheme)) klog.V(4).Info("Creating event broadcaster") eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(klog.Infof) eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")}) recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName}) controller := &Controller{ ctx: ctx, kubeclientset: kubeclientset, customclientset: customclientset, deploymentLister: deploymentInformer.Lister(), deploymentsSynced: deploymentInformer.Informer().HasSynced, replicaSetLister: replicaSetInformer.Lister(), replicaSetsSynced: replicaSetInformer.Informer().HasSynced, workerPodAutoScalersLister: workerPodAutoScalerInformer.Lister(), workerPodAutoScalersSynced: workerPodAutoScalerInformer.Informer().HasSynced, workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "WorkerPodAutoScalers"), recorder: recorder, defaultMaxDisruption: defaultMaxDisruption, scaleDownDelay: scaleDownDelay, Queues: queues, } klog.V(4).Info("Setting up event handlers") // Set up an event handler for when WorkerPodAutoScaler resources change workerPodAutoScalerInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{ AddFunc: controller.enqueueAddWorkerPodAutoScaler, UpdateFunc: func(old, new interface{}) { controller.enqueueUpdateWorkerPodAutoScaler(new) }, DeleteFunc: controller.enqueueDeleteWorkerPodAutoScaler, }, resyncPeriod) return controller }
identifier_body
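Two of the inputs to that clamping step are derived elsewhere in the controller shown above: getMaxDisruptableWorkers resolves the maxDisruption spec value (an integer or a percentage string) into an absolute worker count with intstr.GetValueFromIntOrPercent, and getMinWorkers raises the configured minimum when messagesSentPerMinute and secondsToProcessOneJob imply more throughput is needed. The sketch below shows both calculations in isolation, assuming the same apimachinery intstr package; resolveMaxDisruption and minWorkersFromThroughput are hypothetical names introduced here for illustration.

package main

import (
	"fmt"
	"math"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// resolveMaxDisruption turns a maxDisruption value such as "10%" or "2" into
// an absolute number of workers, rounded up, the way getMaxDisruptableWorkers
// does via intstr.GetValueFromIntOrPercent.
func resolveMaxDisruption(maxDisruption string, currentWorkers int32) (int32, error) {
	v := intstr.Parse(maxDisruption)
	n, err := intstr.GetValueFromIntOrPercent(&v, int(currentWorkers), true)
	if err != nil {
		return 0, err
	}
	return int32(n), nil
}

// minWorkersFromThroughput mirrors getMinWorkers: if one job takes
// secondsToProcessOneJob seconds and messagesSentPerMinute messages arrive
// per minute, at least ceil(seconds*rate/60) workers are needed to keep up.
func minWorkersFromThroughput(messagesSentPerMinute, secondsToProcessOneJob float64, minWorkers int32) int32 {
	if secondsToProcessOneJob == 0.0 {
		return minWorkers
	}
	needed := int32(math.Ceil(secondsToProcessOneJob * messagesSentPerMinute / 60))
	if needed > minWorkers {
		return needed
	}
	return minWorkers
}

func main() {
	// With 40 current workers, "10%" allows 4 workers to be removed per loop.
	if n, err := resolveMaxDisruption("10%", 40); err == nil {
		fmt.Println("maxDisruptable:", n) // 4
	}
	// 120 msgs/min at 10s per job needs ceil(1200/60) = 20 workers,
	// which overrides a configured minimum of 2.
	fmt.Println("minWorkers:", minWorkersFromThroughput(120, 10, 2)) // 20
}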
generate.py
import pickle import numpy as np import copy import json import argparse from model import get_language_model folder = "../YouTube2Text/youtubeclips-dataset/" fname = folder + "test.txt" with open(fname) as f: content = f.readlines() test = [x.strip() for x in content] # no_clean_captions = set(['vid1690', 'vid1458', 'vid1657', 'vid1772', 'vid1515', 'vid1445', 'vid1446', 'vid1797', 'vid1855', 'vid1724', 'vid1787', 'vid1605', 'vid1455', 'vid1722', 'vid1746', 'vid1912', 'vid1301', 'vid1868', 'vid1887']) # test = list(set(test) - no_clean_captions) parser = argparse.ArgumentParser() parser.add_argument('-p', action='store', dest='tag_type', help='(predicted/groundtruth) Type of Tags to use in predictions') parser.add_argument('-t', action='store', dest='tag_threshold', type= float, help='Threshold for tag binarization') parser.add_argument('-s', action='store', dest='lstm_size', type= int, help='Number of hidden units in LSTM model') parser.add_argument('-m', action='store', dest='model_file', help='Model File Name') # parser.add_argument('-d', action='store', dest='gpu', help='GPU to use') results = parser.parse_args() TAG_TYPE = results.tag_type THRESHOLD = results.tag_threshold LSTM_SIZE = results.lstm_size # Load single frame feature vectors and attribute/entity/action vectors if TAG_TYPE == 'predicted': video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_simple_predicted_tags.pickle", "rb")) video_action_vectors = pickle.load(open("../advanced_tag_models/action_simple_predicted_tags.pickle", "rb")) video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_simple_predicted_tags.pickle", "rb")) #video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_vectors_predicted.p", "rb")) # video_action_vectors = pickle.load(open("../advanced_tag_models/action_vectors_predicted.p", "rb")) #video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_vectors_predicted.p", "rb")) else: video_entity_vectors = pickle.load(open("../entity_classifier/entity_vectors_long.pickle", "rb")) video_action_vectors = pickle.load(open("../action_classifier/action_vectors_long.pickle", "rb")) video_attribute_vectors = pickle.load(open("../attribute_classifier/attribute_vectors_long.pickle", "rb")) video_frame_features = pickle.load(open("../frame_features/average_frame_features.pickle", "rb")) # Remove videos for which clean captions aren't available # available_vids = set(video_entity_vectors.keys()).intersection(set(video_action_vectors.keys()).intersection(set(video_attribute_vectors.keys()).intersection(set(video_frame_features.keys())))) # test = list(set(test).intersection(available_vids)) # Read feature sizes from data NUM_ENTITIES = video_entity_vectors[video_entity_vectors.keys()[0]].shape[0] NUM_ACTIONS = video_action_vectors[video_action_vectors.keys()[0]].shape[0] NUM_ATTRIBUTES = video_attribute_vectors[video_attribute_vectors.keys()[0]].shape[0] # NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[1] NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[0] X_ent_test = [] X_act_test = [] X_att_test = [] X_vgg_test = [] X_prev_words_begin = [] vocabulary = pickle.load(open("vocabulary_10.p", "rb")) # Turn vocabulary into list of words vocabulary_words = [x[1] for x in vocabulary] #Load the model with pre-trained weights TRUNCATED_CAPTION_LEN = 15 + 2 NUM_PREV_WORDS = TRUNCATED_CAPTION_LEN - 1 EMBEDDING_DIM = 256 VOCABULARY_SIZE = len(vocabulary_words) # Load the video features for video in 
test: X_ent_test.append(np.where(np.array(video_entity_vectors.get(video, np.zeros(NUM_ENTITIES))) > THRESHOLD, 1, 0)) X_act_test.append(np.where(np.array(video_action_vectors.get(video, np.zeros(NUM_ACTIONS))) > THRESHOLD, 1, 0)) X_att_test.append(np.where(np.array(video_attribute_vectors.get(video, np.zeros(NUM_ATTRIBUTES))) > THRESHOLD, 1, 0)) # X_vgg_test.append(np.array(video_frame_features[video][0])) X_vgg_test.append(np.array(video_frame_features[video])) X_prev_words_begin.append([vocabulary_words.index("<bos>")] + [0]*(NUM_PREV_WORDS - 1)) X_ent_test = np.array(X_ent_test) X_act_test = np.array(X_act_test) X_att_test = np.array(X_att_test) X_vgg_test = np.array(X_vgg_test) X_prev_words_begin = np.array(X_prev_words_begin) beam_model = get_language_model(NUM_PREV_WORDS, VOCABULARY_SIZE, EMBEDDING_DIM, NUM_FEATURES, NUM_ENTITIES, NUM_ACTIONS, NUM_ATTRIBUTES, LSTM_SIZE, 0.0, 0.0) # Dropout is inactive during inference beam_model.load_weights("../models/"+results.model_file+".h5") #preds = beam_model.predict([X_ent_test, X_act_test, X_att_test, X_vgg_test, X_prev_words_begin]) # helper function to sample an index from a probability array def sample(preds, temperature=1.0): preds = np.asarray(preds).astype('float64') preds = np.log(preds) / temperature exp_preds = np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) results_folder = "./" # # Load All Captions fname = folder + "cleaned_descriptions.csv" with open(fname) as f: content = f.readlines() all_captions = [(x.strip().split(",")[0],x.strip().split(",")[1]) for x in content] # # Write correct caption file correct_captions = open(results_folder + "annotations/correct_captions_ref.json","w") correct_annotations = {} correct_annotations['info'] = {'description': 'YouTube2Text', 'url': 'http://upplysingaoflun.ecn.purdue.edu/~yu239/datasets/youtubeclips.zip', 'version': '1.0', 'year': 2013, 'contributor': 'Guadarrama et al', u'date_created': u'2013-01-27 09:11:52.357475'} correct_annotations['images'] = [] correct_annotations['licenses'] = [{'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/', 'id': 1, 'name': 'Attribution-NonCommercial-ShareAlike License'}] correct_annotations['type'] = "captions" correct_annotations['annotations'] = [] for video in test: correct_annotations['images'].append({'license': 1, 'url': 'https://www.youtube.com/watch?v=' + video, 'file_name': video+".avi", 'height': 360, 'width': 640, 'date_captured': u'2013-11-14 11:18:45', 'id': test.index(video)}) count = 0 for video,caption in all_captions: if video in test: correct_annotations['annotations'].append({"caption": caption, "id": count, "image_id": test.index(video), "vid_id": video}) count +=1 correct_captions.write(json.dumps(correct_annotations, indent=4, sort_keys=True)) correct_captions.close() def indices(k): combos = [] for x in range(k): for y in range(k): combos.append((x,y)) return combos def greedy_search(captioning_model, prev_words_input, other_inputs): for itr in range(NUM_PREV_WORDS-1):
return prev_words_input def beam_search(captioning_model, prev_words_input, other_inputs, k): top_k_predictions = [copy.deepcopy(prev_words_input) for _ in range(k)] top_k_score = np.array([[0.0]*top_k_predictions[0].shape[0]]*k) # First Iteration predictions = captioning_model.predict(other_inputs + [prev_words_input]) for idx,video in enumerate(test): for version in range(k): top_k_predictions[version][idx][1] = np.argsort(predictions[idx])[-(version+1)] top_k_score[version][idx] = np.sort(predictions[idx])[-(version+1)] for itr in range(2,NUM_PREV_WORDS): top_k_copy = copy.deepcopy(top_k_predictions) print top_k_predictions[0][0] print top_k_predictions[1][0] print top_k_predictions[2][0] predictions = [captioning_model.predict(other_inputs + [top_k_predictions[version]]) for version in range(k)] for idx,video in enumerate(test): scores = [] for version,lookahead in indices(k): scores.append(np.sort(predictions[version][idx])[-(lookahead+1)]*top_k_score[version][idx]) scores = np.array(scores) top_score_indices = np.argsort(scores)[-k:] for num, top_id in enumerate(top_score_indices): version, lookahead = indices(k)[top_id] top_k_predictions[num][idx][itr] = np.argsort(predictions[version][idx])[-(lookahead+1)] top_k_predictions[num][idx][:itr] = top_k_copy[version][idx][:itr] top_k_score[num][idx] = scores[top_id] return top_k_predictions, top_k_score preds, scores = beam_search(beam_model, X_prev_words_begin, [X_ent_test, X_act_test, X_att_test, X_vgg_test], 3) print len(preds), "x", preds[0].shape print len(scores),"x", scores[0].shape preds = preds[-1] print scores[:,0] #scores = scores[-1] # replace <unk>, <BOS>, <EOS> with nothing vocabulary[0] = (0,"") vocabulary[-1] = (0,"") vocabulary[-2] = (0,"") vocabulary[-3] = (0,"") beam_captions_file = results_folder + "results/beam_search_"+results.model_file+results.tag_type+".json" beam_captions = open(beam_captions_file,"w") beam_annotations = [] for idx,video in enumerate(test): sentence = [] for word in preds[idx]: sentence.append(vocabulary[word][1]) beam_annotations.append({"image_id": test.index(video), "caption": " ".join((" ".join(sentence)).split()), "vid_id": video}) beam_captions.write(json.dumps(beam_annotations, indent=4, sort_keys=True)) beam_captions.close() preds = greedy_search(beam_model, X_prev_words_begin, [X_ent_test, X_act_test, X_att_test, X_vgg_test]) greedy_captions_file = results_folder + "results/greedy_search_"+results.model_file+results.tag_type+".json" greedy_captions = open(greedy_captions_file,"w") greedy_annotations = [] for idx,video in enumerate(test): sentence = [] for word in preds[idx]: sentence.append(vocabulary[word][1]) greedy_annotations.append({"image_id": test.index(video), "caption": " ".join((" ".join(sentence)).split()), "vid_id": video}) greedy_captions.write(json.dumps(greedy_annotations, indent=4, sort_keys=True)) greedy_captions.close() # print "python score.py -r annotations/correct_captions_ref.json -t", "results/greedy_search_"+results.model_file+results.tag_type+".json" # print "python score.py -r annotations/correct_captions_ref.json -t", "results/beam_search_"+results.model_file+results.tag_type+".json" # hot_captions = open("scoring_results/hot_captions_batched.sgm","w") # print >>hot_captions, '<tstset trglang="en" setid="y2txt" srclang="any">' # print >>hot_captions, '<doc sysid="langmodel" docid="y2txt" genre="vidcap" origlang="en">' # print >>hot_captions, '<p>' # for idx,video in enumerate(test): # hot_sentence = [] # for word in preds[idx]: # 
hot_sentence.append(vocabulary[sample(word, 0.1)][1]) # print >>hot_captions, '<seg id="'+str(test.index(video))+'">' + " ".join(hot_sentence) + '</seg>' # print >>hot_captions, '</p>' # print >>hot_captions, '</doc>' # print >>hot_captions, '</tstset>' # hot_captions.close()
predictions = captioning_model.predict(other_inputs + [prev_words_input]) for idx,video in enumerate(test): prev_words_input[idx][itr+1] = np.argmax(predictions[idx])
conditional_block
generate.py
import pickle import numpy as np import copy import json import argparse from model import get_language_model folder = "../YouTube2Text/youtubeclips-dataset/" fname = folder + "test.txt" with open(fname) as f: content = f.readlines() test = [x.strip() for x in content] # no_clean_captions = set(['vid1690', 'vid1458', 'vid1657', 'vid1772', 'vid1515', 'vid1445', 'vid1446', 'vid1797', 'vid1855', 'vid1724', 'vid1787', 'vid1605', 'vid1455', 'vid1722', 'vid1746', 'vid1912', 'vid1301', 'vid1868', 'vid1887']) # test = list(set(test) - no_clean_captions) parser = argparse.ArgumentParser() parser.add_argument('-p', action='store', dest='tag_type', help='(predicted/groundtruth) Type of Tags to use in predictions') parser.add_argument('-t', action='store', dest='tag_threshold', type= float, help='Threshold for tag binarization') parser.add_argument('-s', action='store', dest='lstm_size', type= int, help='Number of hidden units in LSTM model') parser.add_argument('-m', action='store', dest='model_file', help='Model File Name') # parser.add_argument('-d', action='store', dest='gpu', help='GPU to use') results = parser.parse_args()
TAG_TYPE = results.tag_type THRESHOLD = results.tag_threshold LSTM_SIZE = results.lstm_size # Load single frame feature vectors and attribute/entity/action vectors if TAG_TYPE == 'predicted': video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_simple_predicted_tags.pickle", "rb")) video_action_vectors = pickle.load(open("../advanced_tag_models/action_simple_predicted_tags.pickle", "rb")) video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_simple_predicted_tags.pickle", "rb")) #video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_vectors_predicted.p", "rb")) # video_action_vectors = pickle.load(open("../advanced_tag_models/action_vectors_predicted.p", "rb")) #video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_vectors_predicted.p", "rb")) else: video_entity_vectors = pickle.load(open("../entity_classifier/entity_vectors_long.pickle", "rb")) video_action_vectors = pickle.load(open("../action_classifier/action_vectors_long.pickle", "rb")) video_attribute_vectors = pickle.load(open("../attribute_classifier/attribute_vectors_long.pickle", "rb")) video_frame_features = pickle.load(open("../frame_features/average_frame_features.pickle", "rb")) # Remove videos for which clean captions aren't available # available_vids = set(video_entity_vectors.keys()).intersection(set(video_action_vectors.keys()).intersection(set(video_attribute_vectors.keys()).intersection(set(video_frame_features.keys())))) # test = list(set(test).intersection(available_vids)) # Read feature sizes from data NUM_ENTITIES = video_entity_vectors[video_entity_vectors.keys()[0]].shape[0] NUM_ACTIONS = video_action_vectors[video_action_vectors.keys()[0]].shape[0] NUM_ATTRIBUTES = video_attribute_vectors[video_attribute_vectors.keys()[0]].shape[0] # NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[1] NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[0] X_ent_test = [] X_act_test = [] X_att_test = [] X_vgg_test = [] X_prev_words_begin = [] vocabulary = pickle.load(open("vocabulary_10.p", "rb")) # Turn vocabulary into list of words vocabulary_words = [x[1] for x in vocabulary] #Load the model with pre-trained weights TRUNCATED_CAPTION_LEN = 15 + 2 NUM_PREV_WORDS = TRUNCATED_CAPTION_LEN - 1 EMBEDDING_DIM = 256 VOCABULARY_SIZE = len(vocabulary_words) # Load the video features for video in test: X_ent_test.append(np.where(np.array(video_entity_vectors.get(video, np.zeros(NUM_ENTITIES))) > THRESHOLD, 1, 0)) X_act_test.append(np.where(np.array(video_action_vectors.get(video, np.zeros(NUM_ACTIONS))) > THRESHOLD, 1, 0)) X_att_test.append(np.where(np.array(video_attribute_vectors.get(video, np.zeros(NUM_ATTRIBUTES))) > THRESHOLD, 1, 0)) # X_vgg_test.append(np.array(video_frame_features[video][0])) X_vgg_test.append(np.array(video_frame_features[video])) X_prev_words_begin.append([vocabulary_words.index("<bos>")] + [0]*(NUM_PREV_WORDS - 1)) X_ent_test = np.array(X_ent_test) X_act_test = np.array(X_act_test) X_att_test = np.array(X_att_test) X_vgg_test = np.array(X_vgg_test) X_prev_words_begin = np.array(X_prev_words_begin) beam_model = get_language_model(NUM_PREV_WORDS, VOCABULARY_SIZE, EMBEDDING_DIM, NUM_FEATURES, NUM_ENTITIES, NUM_ACTIONS, NUM_ATTRIBUTES, LSTM_SIZE, 0.0, 0.0) # Dropout is inactive during inference beam_model.load_weights("../models/"+results.model_file+".h5") #preds = beam_model.predict([X_ent_test, X_act_test, X_att_test, X_vgg_test, X_prev_words_begin]) # helper function to sample an index 
from a probability array def sample(preds, temperature=1.0): preds = np.asarray(preds).astype('float64') preds = np.log(preds) / temperature exp_preds = np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) results_folder = "./" # # Load All Captions fname = folder + "cleaned_descriptions.csv" with open(fname) as f: content = f.readlines() all_captions = [(x.strip().split(",")[0],x.strip().split(",")[1]) for x in content] # # Write correct caption file correct_captions = open(results_folder + "annotations/correct_captions_ref.json","w") correct_annotations = {} correct_annotations['info'] = {'description': 'YouTube2Text', 'url': 'http://upplysingaoflun.ecn.purdue.edu/~yu239/datasets/youtubeclips.zip', 'version': '1.0', 'year': 2013, 'contributor': 'Guadarrama et al', u'date_created': u'2013-01-27 09:11:52.357475'} correct_annotations['images'] = [] correct_annotations['licenses'] = [{'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/', 'id': 1, 'name': 'Attribution-NonCommercial-ShareAlike License'}] correct_annotations['type'] = "captions" correct_annotations['annotations'] = [] for video in test: correct_annotations['images'].append({'license': 1, 'url': 'https://www.youtube.com/watch?v=' + video, 'file_name': video+".avi", 'height': 360, 'width': 640, 'date_captured': u'2013-11-14 11:18:45', 'id': test.index(video)}) count = 0 for video,caption in all_captions: if video in test: correct_annotations['annotations'].append({"caption": caption, "id": count, "image_id": test.index(video), "vid_id": video}) count +=1 correct_captions.write(json.dumps(correct_annotations, indent=4, sort_keys=True)) correct_captions.close() def indices(k): combos = [] for x in range(k): for y in range(k): combos.append((x,y)) return combos def greedy_search(captioning_model, prev_words_input, other_inputs): for itr in range(NUM_PREV_WORDS-1): predictions = captioning_model.predict(other_inputs + [prev_words_input]) for idx,video in enumerate(test): prev_words_input[idx][itr+1] = np.argmax(predictions[idx]) return prev_words_input def beam_search(captioning_model, prev_words_input, other_inputs, k): top_k_predictions = [copy.deepcopy(prev_words_input) for _ in range(k)] top_k_score = np.array([[0.0]*top_k_predictions[0].shape[0]]*k) # First Iteration predictions = captioning_model.predict(other_inputs + [prev_words_input]) for idx,video in enumerate(test): for version in range(k): top_k_predictions[version][idx][1] = np.argsort(predictions[idx])[-(version+1)] top_k_score[version][idx] = np.sort(predictions[idx])[-(version+1)] for itr in range(2,NUM_PREV_WORDS): top_k_copy = copy.deepcopy(top_k_predictions) print top_k_predictions[0][0] print top_k_predictions[1][0] print top_k_predictions[2][0] predictions = [captioning_model.predict(other_inputs + [top_k_predictions[version]]) for version in range(k)] for idx,video in enumerate(test): scores = [] for version,lookahead in indices(k): scores.append(np.sort(predictions[version][idx])[-(lookahead+1)]*top_k_score[version][idx]) scores = np.array(scores) top_score_indices = np.argsort(scores)[-k:] for num, top_id in enumerate(top_score_indices): version, lookahead = indices(k)[top_id] top_k_predictions[num][idx][itr] = np.argsort(predictions[version][idx])[-(lookahead+1)] top_k_predictions[num][idx][:itr] = top_k_copy[version][idx][:itr] top_k_score[num][idx] = scores[top_id] return top_k_predictions, top_k_score preds, scores = beam_search(beam_model, X_prev_words_begin, [X_ent_test, 
X_act_test, X_att_test, X_vgg_test], 3) print len(preds), "x", preds[0].shape print len(scores),"x", scores[0].shape preds = preds[-1] print scores[:,0] #scores = scores[-1] # replace <unk>, <BOS>, <EOS> with nothing vocabulary[0] = (0,"") vocabulary[-1] = (0,"") vocabulary[-2] = (0,"") vocabulary[-3] = (0,"") beam_captions_file = results_folder + "results/beam_search_"+results.model_file+results.tag_type+".json" beam_captions = open(beam_captions_file,"w") beam_annotations = [] for idx,video in enumerate(test): sentence = [] for word in preds[idx]: sentence.append(vocabulary[word][1]) beam_annotations.append({"image_id": test.index(video), "caption": " ".join((" ".join(sentence)).split()), "vid_id": video}) beam_captions.write(json.dumps(beam_annotations, indent=4, sort_keys=True)) beam_captions.close() preds = greedy_search(beam_model, X_prev_words_begin, [X_ent_test, X_act_test, X_att_test, X_vgg_test]) greedy_captions_file = results_folder + "results/greedy_search_"+results.model_file+results.tag_type+".json" greedy_captions = open(greedy_captions_file,"w") greedy_annotations = [] for idx,video in enumerate(test): sentence = [] for word in preds[idx]: sentence.append(vocabulary[word][1]) greedy_annotations.append({"image_id": test.index(video), "caption": " ".join((" ".join(sentence)).split()), "vid_id": video}) greedy_captions.write(json.dumps(greedy_annotations, indent=4, sort_keys=True)) greedy_captions.close() # print "python score.py -r annotations/correct_captions_ref.json -t", "results/greedy_search_"+results.model_file+results.tag_type+".json" # print "python score.py -r annotations/correct_captions_ref.json -t", "results/beam_search_"+results.model_file+results.tag_type+".json" # hot_captions = open("scoring_results/hot_captions_batched.sgm","w") # print >>hot_captions, '<tstset trglang="en" setid="y2txt" srclang="any">' # print >>hot_captions, '<doc sysid="langmodel" docid="y2txt" genre="vidcap" origlang="en">' # print >>hot_captions, '<p>' # for idx,video in enumerate(test): # hot_sentence = [] # for word in preds[idx]: # hot_sentence.append(vocabulary[sample(word, 0.1)][1]) # print >>hot_captions, '<seg id="'+str(test.index(video))+'">' + " ".join(hot_sentence) + '</seg>' # print >>hot_captions, '</p>' # print >>hot_captions, '</doc>' # print >>hot_captions, '</tstset>' # hot_captions.close()
random_line_split
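The sample() helper in the generate.py code above re-scales a predicted probability vector by a temperature before drawing one token index from it. A minimal standalone sketch of that temperature-scaled sampling (the probabilities and temperature values below are illustrative, not taken from the dataset):

import numpy as np

def sample_index(probs, temperature=1.0):
    # Scale the log-probabilities by 1/temperature, then re-normalise.
    probs = np.asarray(probs, dtype="float64")
    logits = np.log(probs + 1e-12) / temperature   # epsilon avoids log(0)
    scaled = np.exp(logits)
    scaled /= scaled.sum()
    # Draw a single multinomial sample and return the index that was drawn.
    return int(np.argmax(np.random.multinomial(1, scaled)))

# Low temperature concentrates mass on the most likely token; high temperature flattens it.
print(sample_index([0.1, 0.2, 0.7], temperature=0.1))   # almost always 2
print(sample_index([0.1, 0.2, 0.7], temperature=10.0))  # close to uniform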
generate.py
import pickle import numpy as np import copy import json import argparse from model import get_language_model folder = "../YouTube2Text/youtubeclips-dataset/" fname = folder + "test.txt" with open(fname) as f: content = f.readlines() test = [x.strip() for x in content] # no_clean_captions = set(['vid1690', 'vid1458', 'vid1657', 'vid1772', 'vid1515', 'vid1445', 'vid1446', 'vid1797', 'vid1855', 'vid1724', 'vid1787', 'vid1605', 'vid1455', 'vid1722', 'vid1746', 'vid1912', 'vid1301', 'vid1868', 'vid1887']) # test = list(set(test) - no_clean_captions) parser = argparse.ArgumentParser() parser.add_argument('-p', action='store', dest='tag_type', help='(predicted/groundtruth) Type of Tags to use in predictions') parser.add_argument('-t', action='store', dest='tag_threshold', type= float, help='Threshold for tag binarization') parser.add_argument('-s', action='store', dest='lstm_size', type= int, help='Number of hidden units in LSTM model') parser.add_argument('-m', action='store', dest='model_file', help='Model File Name') # parser.add_argument('-d', action='store', dest='gpu', help='GPU to use') results = parser.parse_args() TAG_TYPE = results.tag_type THRESHOLD = results.tag_threshold LSTM_SIZE = results.lstm_size # Load single frame feature vectors and attribute/entity/action vectors if TAG_TYPE == 'predicted': video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_simple_predicted_tags.pickle", "rb")) video_action_vectors = pickle.load(open("../advanced_tag_models/action_simple_predicted_tags.pickle", "rb")) video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_simple_predicted_tags.pickle", "rb")) #video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_vectors_predicted.p", "rb")) # video_action_vectors = pickle.load(open("../advanced_tag_models/action_vectors_predicted.p", "rb")) #video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_vectors_predicted.p", "rb")) else: video_entity_vectors = pickle.load(open("../entity_classifier/entity_vectors_long.pickle", "rb")) video_action_vectors = pickle.load(open("../action_classifier/action_vectors_long.pickle", "rb")) video_attribute_vectors = pickle.load(open("../attribute_classifier/attribute_vectors_long.pickle", "rb")) video_frame_features = pickle.load(open("../frame_features/average_frame_features.pickle", "rb")) # Remove videos for which clean captions aren't available # available_vids = set(video_entity_vectors.keys()).intersection(set(video_action_vectors.keys()).intersection(set(video_attribute_vectors.keys()).intersection(set(video_frame_features.keys())))) # test = list(set(test).intersection(available_vids)) # Read feature sizes from data NUM_ENTITIES = video_entity_vectors[video_entity_vectors.keys()[0]].shape[0] NUM_ACTIONS = video_action_vectors[video_action_vectors.keys()[0]].shape[0] NUM_ATTRIBUTES = video_attribute_vectors[video_attribute_vectors.keys()[0]].shape[0] # NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[1] NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[0] X_ent_test = [] X_act_test = [] X_att_test = [] X_vgg_test = [] X_prev_words_begin = [] vocabulary = pickle.load(open("vocabulary_10.p", "rb")) # Turn vocabulary into list of words vocabulary_words = [x[1] for x in vocabulary] #Load the model with pre-trained weights TRUNCATED_CAPTION_LEN = 15 + 2 NUM_PREV_WORDS = TRUNCATED_CAPTION_LEN - 1 EMBEDDING_DIM = 256 VOCABULARY_SIZE = len(vocabulary_words) # Load the video features for video in 
test: X_ent_test.append(np.where(np.array(video_entity_vectors.get(video, np.zeros(NUM_ENTITIES))) > THRESHOLD, 1, 0)) X_act_test.append(np.where(np.array(video_action_vectors.get(video, np.zeros(NUM_ACTIONS))) > THRESHOLD, 1, 0)) X_att_test.append(np.where(np.array(video_attribute_vectors.get(video, np.zeros(NUM_ATTRIBUTES))) > THRESHOLD, 1, 0)) # X_vgg_test.append(np.array(video_frame_features[video][0])) X_vgg_test.append(np.array(video_frame_features[video])) X_prev_words_begin.append([vocabulary_words.index("<bos>")] + [0]*(NUM_PREV_WORDS - 1)) X_ent_test = np.array(X_ent_test) X_act_test = np.array(X_act_test) X_att_test = np.array(X_att_test) X_vgg_test = np.array(X_vgg_test) X_prev_words_begin = np.array(X_prev_words_begin) beam_model = get_language_model(NUM_PREV_WORDS, VOCABULARY_SIZE, EMBEDDING_DIM, NUM_FEATURES, NUM_ENTITIES, NUM_ACTIONS, NUM_ATTRIBUTES, LSTM_SIZE, 0.0, 0.0) # Dropout is inactive during inference beam_model.load_weights("../models/"+results.model_file+".h5") #preds = beam_model.predict([X_ent_test, X_act_test, X_att_test, X_vgg_test, X_prev_words_begin]) # helper function to sample an index from a probability array def sample(preds, temperature=1.0): preds = np.asarray(preds).astype('float64') preds = np.log(preds) / temperature exp_preds = np.exp(preds) preds = exp_preds / np.sum(exp_preds) probas = np.random.multinomial(1, preds, 1) return np.argmax(probas) results_folder = "./" # # Load All Captions fname = folder + "cleaned_descriptions.csv" with open(fname) as f: content = f.readlines() all_captions = [(x.strip().split(",")[0],x.strip().split(",")[1]) for x in content] # # Write correct caption file correct_captions = open(results_folder + "annotations/correct_captions_ref.json","w") correct_annotations = {} correct_annotations['info'] = {'description': 'YouTube2Text', 'url': 'http://upplysingaoflun.ecn.purdue.edu/~yu239/datasets/youtubeclips.zip', 'version': '1.0', 'year': 2013, 'contributor': 'Guadarrama et al', u'date_created': u'2013-01-27 09:11:52.357475'} correct_annotations['images'] = [] correct_annotations['licenses'] = [{'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/', 'id': 1, 'name': 'Attribution-NonCommercial-ShareAlike License'}] correct_annotations['type'] = "captions" correct_annotations['annotations'] = [] for video in test: correct_annotations['images'].append({'license': 1, 'url': 'https://www.youtube.com/watch?v=' + video, 'file_name': video+".avi", 'height': 360, 'width': 640, 'date_captured': u'2013-11-14 11:18:45', 'id': test.index(video)}) count = 0 for video,caption in all_captions: if video in test: correct_annotations['annotations'].append({"caption": caption, "id": count, "image_id": test.index(video), "vid_id": video}) count +=1 correct_captions.write(json.dumps(correct_annotations, indent=4, sort_keys=True)) correct_captions.close() def indices(k): combos = [] for x in range(k): for y in range(k): combos.append((x,y)) return combos def
(captioning_model, prev_words_input, other_inputs): for itr in range(NUM_PREV_WORDS-1): predictions = captioning_model.predict(other_inputs + [prev_words_input]) for idx,video in enumerate(test): prev_words_input[idx][itr+1] = np.argmax(predictions[idx]) return prev_words_input def beam_search(captioning_model, prev_words_input, other_inputs, k): top_k_predictions = [copy.deepcopy(prev_words_input) for _ in range(k)] top_k_score = np.array([[0.0]*top_k_predictions[0].shape[0]]*k) # First Iteration predictions = captioning_model.predict(other_inputs + [prev_words_input]) for idx,video in enumerate(test): for version in range(k): top_k_predictions[version][idx][1] = np.argsort(predictions[idx])[-(version+1)] top_k_score[version][idx] = np.sort(predictions[idx])[-(version+1)] for itr in range(2,NUM_PREV_WORDS): top_k_copy = copy.deepcopy(top_k_predictions) print top_k_predictions[0][0] print top_k_predictions[1][0] print top_k_predictions[2][0] predictions = [captioning_model.predict(other_inputs + [top_k_predictions[version]]) for version in range(k)] for idx,video in enumerate(test): scores = [] for version,lookahead in indices(k): scores.append(np.sort(predictions[version][idx])[-(lookahead+1)]*top_k_score[version][idx]) scores = np.array(scores) top_score_indices = np.argsort(scores)[-k:] for num, top_id in enumerate(top_score_indices): version, lookahead = indices(k)[top_id] top_k_predictions[num][idx][itr] = np.argsort(predictions[version][idx])[-(lookahead+1)] top_k_predictions[num][idx][:itr] = top_k_copy[version][idx][:itr] top_k_score[num][idx] = scores[top_id] return top_k_predictions, top_k_score preds, scores = beam_search(beam_model, X_prev_words_begin, [X_ent_test, X_act_test, X_att_test, X_vgg_test], 3) print len(preds), "x", preds[0].shape print len(scores),"x", scores[0].shape preds = preds[-1] print scores[:,0] #scores = scores[-1] # replace <unk>, <BOS>, <EOS> with nothing vocabulary[0] = (0,"") vocabulary[-1] = (0,"") vocabulary[-2] = (0,"") vocabulary[-3] = (0,"") beam_captions_file = results_folder + "results/beam_search_"+results.model_file+results.tag_type+".json" beam_captions = open(beam_captions_file,"w") beam_annotations = [] for idx,video in enumerate(test): sentence = [] for word in preds[idx]: sentence.append(vocabulary[word][1]) beam_annotations.append({"image_id": test.index(video), "caption": " ".join((" ".join(sentence)).split()), "vid_id": video}) beam_captions.write(json.dumps(beam_annotations, indent=4, sort_keys=True)) beam_captions.close() preds = greedy_search(beam_model, X_prev_words_begin, [X_ent_test, X_act_test, X_att_test, X_vgg_test]) greedy_captions_file = results_folder + "results/greedy_search_"+results.model_file+results.tag_type+".json" greedy_captions = open(greedy_captions_file,"w") greedy_annotations = [] for idx,video in enumerate(test): sentence = [] for word in preds[idx]: sentence.append(vocabulary[word][1]) greedy_annotations.append({"image_id": test.index(video), "caption": " ".join((" ".join(sentence)).split()), "vid_id": video}) greedy_captions.write(json.dumps(greedy_annotations, indent=4, sort_keys=True)) greedy_captions.close() # print "python score.py -r annotations/correct_captions_ref.json -t", "results/greedy_search_"+results.model_file+results.tag_type+".json" # print "python score.py -r annotations/correct_captions_ref.json -t", "results/beam_search_"+results.model_file+results.tag_type+".json" # hot_captions = open("scoring_results/hot_captions_batched.sgm","w") # print >>hot_captions, '<tstset trglang="en" 
setid="y2txt" srclang="any">' # print >>hot_captions, '<doc sysid="langmodel" docid="y2txt" genre="vidcap" origlang="en">' # print >>hot_captions, '<p>' # for idx,video in enumerate(test): # hot_sentence = [] # for word in preds[idx]: # hot_sentence.append(vocabulary[sample(word, 0.1)][1]) # print >>hot_captions, '<seg id="'+str(test.index(video))+'">' + " ".join(hot_sentence) + '</seg>' # print >>hot_captions, '</p>' # print >>hot_captions, '</doc>' # print >>hot_captions, '</tstset>' # hot_captions.close()
greedy_search
identifier_name
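The identifier blanked out in the record above is greedy_search, which fills each caption position with the argmax of the model's next-word distribution. A self-contained sketch of that greedy decoding loop against a generic step(tokens) callable (step is a stand-in for the Keras captioning model, not the project's API):

import numpy as np

def greedy_decode(step, bos_id, max_len):
    # step(tokens) returns a probability distribution over the vocabulary
    # for the next position, given the tokens generated so far.
    tokens = [bos_id]
    for _ in range(max_len - 1):
        tokens.append(int(np.argmax(step(tokens))))
    return tokens

# Toy usage with a fake 5-word vocabulary.
rng = np.random.default_rng(0)
fake_step = lambda tokens: rng.dirichlet(np.ones(5))
print(greedy_decode(fake_step, bos_id=0, max_len=6))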
generate.py
import pickle import numpy as np import copy import json import argparse from model import get_language_model folder = "../YouTube2Text/youtubeclips-dataset/" fname = folder + "test.txt" with open(fname) as f: content = f.readlines() test = [x.strip() for x in content] # no_clean_captions = set(['vid1690', 'vid1458', 'vid1657', 'vid1772', 'vid1515', 'vid1445', 'vid1446', 'vid1797', 'vid1855', 'vid1724', 'vid1787', 'vid1605', 'vid1455', 'vid1722', 'vid1746', 'vid1912', 'vid1301', 'vid1868', 'vid1887']) # test = list(set(test) - no_clean_captions) parser = argparse.ArgumentParser() parser.add_argument('-p', action='store', dest='tag_type', help='(predicted/groundtruth) Type of Tags to use in predictions') parser.add_argument('-t', action='store', dest='tag_threshold', type= float, help='Threshold for tag binarization') parser.add_argument('-s', action='store', dest='lstm_size', type= int, help='Number of hidden units in LSTM model') parser.add_argument('-m', action='store', dest='model_file', help='Model File Name') # parser.add_argument('-d', action='store', dest='gpu', help='GPU to use') results = parser.parse_args() TAG_TYPE = results.tag_type THRESHOLD = results.tag_threshold LSTM_SIZE = results.lstm_size # Load single frame feature vectors and attribute/entity/action vectors if TAG_TYPE == 'predicted': video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_simple_predicted_tags.pickle", "rb")) video_action_vectors = pickle.load(open("../advanced_tag_models/action_simple_predicted_tags.pickle", "rb")) video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_simple_predicted_tags.pickle", "rb")) #video_entity_vectors = pickle.load(open("../advanced_tag_models/entity_vectors_predicted.p", "rb")) # video_action_vectors = pickle.load(open("../advanced_tag_models/action_vectors_predicted.p", "rb")) #video_attribute_vectors = pickle.load(open("../advanced_tag_models/attribute_vectors_predicted.p", "rb")) else: video_entity_vectors = pickle.load(open("../entity_classifier/entity_vectors_long.pickle", "rb")) video_action_vectors = pickle.load(open("../action_classifier/action_vectors_long.pickle", "rb")) video_attribute_vectors = pickle.load(open("../attribute_classifier/attribute_vectors_long.pickle", "rb")) video_frame_features = pickle.load(open("../frame_features/average_frame_features.pickle", "rb")) # Remove videos for which clean captions aren't available # available_vids = set(video_entity_vectors.keys()).intersection(set(video_action_vectors.keys()).intersection(set(video_attribute_vectors.keys()).intersection(set(video_frame_features.keys())))) # test = list(set(test).intersection(available_vids)) # Read feature sizes from data NUM_ENTITIES = video_entity_vectors[video_entity_vectors.keys()[0]].shape[0] NUM_ACTIONS = video_action_vectors[video_action_vectors.keys()[0]].shape[0] NUM_ATTRIBUTES = video_attribute_vectors[video_attribute_vectors.keys()[0]].shape[0] # NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[1] NUM_FEATURES = video_frame_features[video_frame_features.keys()[0]].shape[0] X_ent_test = [] X_act_test = [] X_att_test = [] X_vgg_test = [] X_prev_words_begin = [] vocabulary = pickle.load(open("vocabulary_10.p", "rb")) # Turn vocabulary into list of words vocabulary_words = [x[1] for x in vocabulary] #Load the model with pre-trained weights TRUNCATED_CAPTION_LEN = 15 + 2 NUM_PREV_WORDS = TRUNCATED_CAPTION_LEN - 1 EMBEDDING_DIM = 256 VOCABULARY_SIZE = len(vocabulary_words) # Load the video features for video in 
test: X_ent_test.append(np.where(np.array(video_entity_vectors.get(video, np.zeros(NUM_ENTITIES))) > THRESHOLD, 1, 0)) X_act_test.append(np.where(np.array(video_action_vectors.get(video, np.zeros(NUM_ACTIONS))) > THRESHOLD, 1, 0)) X_att_test.append(np.where(np.array(video_attribute_vectors.get(video, np.zeros(NUM_ATTRIBUTES))) > THRESHOLD, 1, 0)) # X_vgg_test.append(np.array(video_frame_features[video][0])) X_vgg_test.append(np.array(video_frame_features[video])) X_prev_words_begin.append([vocabulary_words.index("<bos>")] + [0]*(NUM_PREV_WORDS - 1)) X_ent_test = np.array(X_ent_test) X_act_test = np.array(X_act_test) X_att_test = np.array(X_att_test) X_vgg_test = np.array(X_vgg_test) X_prev_words_begin = np.array(X_prev_words_begin) beam_model = get_language_model(NUM_PREV_WORDS, VOCABULARY_SIZE, EMBEDDING_DIM, NUM_FEATURES, NUM_ENTITIES, NUM_ACTIONS, NUM_ATTRIBUTES, LSTM_SIZE, 0.0, 0.0) # Dropout is inactive during inference beam_model.load_weights("../models/"+results.model_file+".h5") #preds = beam_model.predict([X_ent_test, X_act_test, X_att_test, X_vgg_test, X_prev_words_begin]) # helper function to sample an index from a probability array def sample(preds, temperature=1.0):
results_folder = "./" # # Load All Captions fname = folder + "cleaned_descriptions.csv" with open(fname) as f: content = f.readlines() all_captions = [(x.strip().split(",")[0],x.strip().split(",")[1]) for x in content] # # Write correct caption file correct_captions = open(results_folder + "annotations/correct_captions_ref.json","w") correct_annotations = {} correct_annotations['info'] = {'description': 'YouTube2Text', 'url': 'http://upplysingaoflun.ecn.purdue.edu/~yu239/datasets/youtubeclips.zip', 'version': '1.0', 'year': 2013, 'contributor': 'Guadarrama et al', u'date_created': u'2013-01-27 09:11:52.357475'} correct_annotations['images'] = [] correct_annotations['licenses'] = [{'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/', 'id': 1, 'name': 'Attribution-NonCommercial-ShareAlike License'}] correct_annotations['type'] = "captions" correct_annotations['annotations'] = [] for video in test: correct_annotations['images'].append({'license': 1, 'url': 'https://www.youtube.com/watch?v=' + video, 'file_name': video+".avi", 'height': 360, 'width': 640, 'date_captured': u'2013-11-14 11:18:45', 'id': test.index(video)}) count = 0 for video,caption in all_captions: if video in test: correct_annotations['annotations'].append({"caption": caption, "id": count, "image_id": test.index(video), "vid_id": video}) count +=1 correct_captions.write(json.dumps(correct_annotations, indent=4, sort_keys=True)) correct_captions.close() def indices(k): combos = [] for x in range(k): for y in range(k): combos.append((x,y)) return combos def greedy_search(captioning_model, prev_words_input, other_inputs): for itr in range(NUM_PREV_WORDS-1): predictions = captioning_model.predict(other_inputs + [prev_words_input]) for idx,video in enumerate(test): prev_words_input[idx][itr+1] = np.argmax(predictions[idx]) return prev_words_input def beam_search(captioning_model, prev_words_input, other_inputs, k): top_k_predictions = [copy.deepcopy(prev_words_input) for _ in range(k)] top_k_score = np.array([[0.0]*top_k_predictions[0].shape[0]]*k) # First Iteration predictions = captioning_model.predict(other_inputs + [prev_words_input]) for idx,video in enumerate(test): for version in range(k): top_k_predictions[version][idx][1] = np.argsort(predictions[idx])[-(version+1)] top_k_score[version][idx] = np.sort(predictions[idx])[-(version+1)] for itr in range(2,NUM_PREV_WORDS): top_k_copy = copy.deepcopy(top_k_predictions) print top_k_predictions[0][0] print top_k_predictions[1][0] print top_k_predictions[2][0] predictions = [captioning_model.predict(other_inputs + [top_k_predictions[version]]) for version in range(k)] for idx,video in enumerate(test): scores = [] for version,lookahead in indices(k): scores.append(np.sort(predictions[version][idx])[-(lookahead+1)]*top_k_score[version][idx]) scores = np.array(scores) top_score_indices = np.argsort(scores)[-k:] for num, top_id in enumerate(top_score_indices): version, lookahead = indices(k)[top_id] top_k_predictions[num][idx][itr] = np.argsort(predictions[version][idx])[-(lookahead+1)] top_k_predictions[num][idx][:itr] = top_k_copy[version][idx][:itr] top_k_score[num][idx] = scores[top_id] return top_k_predictions, top_k_score preds, scores = beam_search(beam_model, X_prev_words_begin, [X_ent_test, X_act_test, X_att_test, X_vgg_test], 3) print len(preds), "x", preds[0].shape print len(scores),"x", scores[0].shape preds = preds[-1] print scores[:,0] #scores = scores[-1] # replace <unk>, <BOS>, <EOS> with nothing vocabulary[0] = (0,"") vocabulary[-1] = (0,"") vocabulary[-2] = 
(0,"") vocabulary[-3] = (0,"") beam_captions_file = results_folder + "results/beam_search_"+results.model_file+results.tag_type+".json" beam_captions = open(beam_captions_file,"w") beam_annotations = [] for idx,video in enumerate(test): sentence = [] for word in preds[idx]: sentence.append(vocabulary[word][1]) beam_annotations.append({"image_id": test.index(video), "caption": " ".join((" ".join(sentence)).split()), "vid_id": video}) beam_captions.write(json.dumps(beam_annotations, indent=4, sort_keys=True)) beam_captions.close() preds = greedy_search(beam_model, X_prev_words_begin, [X_ent_test, X_act_test, X_att_test, X_vgg_test]) greedy_captions_file = results_folder + "results/greedy_search_"+results.model_file+results.tag_type+".json" greedy_captions = open(greedy_captions_file,"w") greedy_annotations = [] for idx,video in enumerate(test): sentence = [] for word in preds[idx]: sentence.append(vocabulary[word][1]) greedy_annotations.append({"image_id": test.index(video), "caption": " ".join((" ".join(sentence)).split()), "vid_id": video}) greedy_captions.write(json.dumps(greedy_annotations, indent=4, sort_keys=True)) greedy_captions.close() # print "python score.py -r annotations/correct_captions_ref.json -t", "results/greedy_search_"+results.model_file+results.tag_type+".json" # print "python score.py -r annotations/correct_captions_ref.json -t", "results/beam_search_"+results.model_file+results.tag_type+".json" # hot_captions = open("scoring_results/hot_captions_batched.sgm","w") # print >>hot_captions, '<tstset trglang="en" setid="y2txt" srclang="any">' # print >>hot_captions, '<doc sysid="langmodel" docid="y2txt" genre="vidcap" origlang="en">' # print >>hot_captions, '<p>' # for idx,video in enumerate(test): # hot_sentence = [] # for word in preds[idx]: # hot_sentence.append(vocabulary[sample(word, 0.1)][1]) # print >>hot_captions, '<seg id="'+str(test.index(video))+'">' + " ".join(hot_sentence) + '</seg>' # print >>hot_captions, '</p>' # print >>hot_captions, '</doc>' # print >>hot_captions, '</tstset>' # hot_captions.close()
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)
identifier_body
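beam_search() in the chunks above keeps k candidate captions per video and, at every step, rescores the k best continuations of each candidate by multiplying in their probabilities. A simplified single-sequence sketch of the same idea; it sums log-probabilities instead of multiplying raw probabilities for numerical stability, and step is again a stand-in model:

import numpy as np

def beam_decode(step, bos_id, max_len, k=3):
    # Each beam entry is (token list, cumulative log-probability).
    beams = [([bos_id], 0.0)]
    for _ in range(max_len - 1):
        candidates = []
        for tokens, score in beams:
            probs = step(tokens)
            # Expand every live beam with its k most probable next tokens.
            for tok in np.argsort(probs)[-k:]:
                candidates.append((tokens + [int(tok)],
                                   score + np.log(probs[tok] + 1e-12)))
        # Keep only the k best-scoring candidates overall.
        candidates.sort(key=lambda c: c[1], reverse=True)
        beams = candidates[:k]
    return beams

rng = np.random.default_rng(1)
fake_step = lambda tokens: rng.dirichlet(np.ones(5))
for tokens, score in beam_decode(fake_step, bos_id=0, max_len=5, k=3):
    print(tokens, round(float(score), 3))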
jiayuan.py
# -*- coding:utf-8 -*- ''' Created on 2018年2月28日 @author: ning.lin ''' ''' 大图地址class或id有big字样 的 <div class="pho_big" id="phoBig" style="height: 640px;"> <div class="big_pic fn-clear" id="bigImg"> 小图地址 <div class="pho_small_box fn-clear mt25 " id="phoSmallPic"> ''' import json import time from scrapy import log from scrapy import cmdline import scrapy from scrapy.http import Request from scrapy.http.request.form import FormRequest from scrapy_redis.spiders import RedisSpider from selenium import webdriver from jiayuan.settings import IMAGES_STORE,USER_NAME,PASSWD from jiayuan.items import JiayuanItem,MainItem import redis class jiayuan_data(RedisSpider): pool=redis.ConnectionPool(host='127.0.0.1',port=6379,db=0,decode_responses=True) #427条记录 r = redis.StrictRedis(connection_pool=pool) name = "jiayuan_main" redis_key = 'jiayuan_main:start_urls' url_base = 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=163649&ft=off&f=select&mt=d' redis_key = "sinaspider:start_urls" login_url = 'http://login.jiayuan.com/'#登录时的url start_urls = [] pre_page_num = 25#每个搜索业面有25条记录 #head less模拟登录 option = webdriver.ChromeOptions() option.add_argument('--headless') option.add_argument("--window-size=1920,1080") prefs={"profile.managed_default_content_settings.images":2}#禁止加载图片 option.add_experimental_option("prefs",prefs) try: driver = webdriver.Chrome(chrome_options=option) except Exception as e: driver.close() print("spider出现了异常,关闭",str(e)) driver.get(login_url) time.sleep(3) driver.find_element_by_id("login_btn").click() driver.find_element_by_id("login_email").clear() driver.find_element_by_id("login_email").send_keys(USER_NAME) #修改为自己的用户名 driver.find_element_by_id("login_password").clear() driver.find_element_by_id("login_password").send_keys(PASSWD) #修改为自己的密码 #登录url #url="http://login.jiayuan.com/" driver.find_element_by_id("login_btn").click()#点击登录按钮 cookies = driver.get_cookies()#获取cookies for p in range(1,173649): search_url = "http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d" %(p) start_urls.append(search_url) #print("start_urls",len(start_urls)) # start_urls = [ # "http://search.jiayuan.com/v2/search_v2.php",#直接搜索结果,获取个人主页的url(先不登录) #"https://passport.jiayuan.com/dologin.php?pre_url=http://www.jiayuan.com/usercp",#登录页面post数据 # ] ''' 下载器中间件在下载器和Scrapy引擎之间,每一个request和response都会通过中间件进行处理。 在中间件中,对request进行处理的函数是process_request(request, spider) ''' def start_requests(self):# for url in self.start_urls: yield Request(url=url,callback=self.get_main_info) # yield scrapy.Request(url=search_url,callback=self.get_main_info) # return Request(url=url,callback=self.get_main_info) def get_main_info(self,response):#解析搜索业面的url #info = response.body.decode("utf-8") #登录后可以查看一下登录响应信息json.loads( # for url in self.start_urls: time.sleep(1) print("当前的url",response.url) print('重新加载url') self.driver.get(response.url) self.driver.implicitly_wait(3) user_list = self.driver.find_elements_by_xpath('/html//ul[@id="normal_user_container"]/li//div[@class="user_name"]/a[@class="os_stat"]')#得到多个li标签 if user_list==[]: print("user_list为空了,解析有问题") #print("user_list",type(user_list),user_list) url_details = []#详情页面的url for user in user_list: main_url_main = user.get_attribute("href") print("人员主页url",main_url_main) url_details.append(main_url_main) # self.redis_pipe.rpush("p",main_url_main)#详情页额外写入redis,也可以不写 # self.redis_pipe.execute() print("人员详情url2",len(url_details)) if url_details!=[]: for url in url_details: yield 
Request(url=url,cookies=self.cookies,callback=self.get_details)#解析人员详细信息 # yield item def get_details(self,response): ''' <class 'str'> 年 龄: 26-29岁之间 身 高: 169-185厘米 民 族: 汉族 学 历: 不限 相 册: 有照片 婚姻状况: 未婚 居 住 地: 湖北十堰 诚 信: 不限 将这种类型的文字全部转成{'学历': '不限', '婚姻状况': '未婚', '居住地': '湖北十堰', '相册': '有照片', '身高': '169-185厘米', '民族': '汉族', '诚信': '不限', '年龄': '26-29岁之间'}这种dict方便入库 ''' pass def parse(str1): temp_list = str1.split('\n') result={} result_str='' # temp_dict=[]#result_dict这是因为有些项目下面有多个标签,多个标签就需要合并起来 # result_dict = {}#多个dict合并后的结果 if len(temp_list)>1:#大于1说明该项下有值,否则此项未填信息 for i in range(len(temp_list)): if i%2==0: result[temp_list[i].replace(" ", "").replace(":", '')] = temp_list[i+1] return result #其他则返回str else: result_str = str1 return result_str item = JiayuanItem() self.driver.get(response.url) self.driver.implicitly_wait(3) print('打开浏览器') print("当前的url",response.url) age_info = self.driver.find_element_by_xpath('/html//h6[@class="member_name"]').text person_id = response.url[response.url.rfind('/')+1:response.url.index('?')] print("年龄地址信息",type(age_info),age_info) address = self.driver.find_elements_by_xpath('/html//h6[@class="member_name"]/a')#得到多个a标签的text str_address='' str_sheng=address[0].get_attribute("text") str_shi=address[1].get_attribute("text") print("人员地址",str_sheng+'sssss'+str_shi) ''' 人个信息 ''' person_info = self.driver.find_elements_by_xpath('/html//ul[@class="member_info_list fn-clear"]') person_dict={} for i in person_info: person_dict = parse(i.text) print("个人信息",person_dict) ''' 处理item,对应mysql的person_info表 ''' item['person_id'] = person_id item['province'] = str_sheng item['municipal'] = str_shi nick_name_info = self.driver.find_elements_by_xpath('/html//div[@class="member_info_r yh"]/h4') nick_name = nick_name_info[0].text[0:nick_name_info[0].text.index("I")] print("昵称", nick_name) item['nike_name'] = nick_name item['education'] = person_dict['学历'] item['height'] = person_dict['身高'] item['buy_car'] = person_dict['购车'] item['salary'] = person_dict['月薪'] item['housing'] = person_dict['住房'] item['weight'] = person_dict['体重'] item['constellation'] = person_dict['星座'] item['nation'] = person_dict['民族'] item['zodiac'] = person_dict['属相'] item['blood_type'] = person_dict['血型'] item['age'] = age_info[0:age_info.index(',')] print("年龄",age_info[0:age_info.index(',')]) item['address'] = str_sheng+str_shi item['age_info'] = age_info item['image_dir'] = nick_name+'_'+item['age']+'_'+person_id#下载的相片归类 item['url'] = response.url #个人短语 item['introduce_oneself'] = self.driver.find_element_by_xpath('/html//div[@class="main_1000 mt15 fn-clear"]//div[@class="js_text"]').text print("个性短语",item['introduce_oneself']) #个性标签,有些人是没有个性标签的 #需要点击”更多“才能全部显示出来,否则只有4个 item['interest_label']='' item['personality_label']='' try: #link_a = self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a') #link_a.click() self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a').click() time.sleep(1) gexing_info = self.driver.find_elements_by_xpath('/html//div[@class="test4"]//div[@class="list_a fn-clear"]') print("aaa",type(gexing_info),gexing_info) gexing_tag='' for i in gexing_info: gexing_tag += i.text # a = item.find_element_by_xpath('div[@class="pag_list_grey_c"]').text item['personality_label'] = "".join(gexing_tag) except Exception as e: item['personality_label'] = '还没有填写个性元素' print("个性",item['personality_label']) #她的兴趣爱好有可能也是找不到的 try: #link_a = self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a') 
#link_a.click() self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more"]/a').click() # self.driver.find_element_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[2]/a').click self.driver.implicitly_wait(1) aihao_info = self.driver.find_elements_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[1]/ul') print("bbb",type(aihao_info),aihao_info) aihao_tag='' for i in aihao_info: aihao_tag += i.text # a = item.find_element_by_xpath('div[@class="pag_list_grey_c"]').text item['interest_label'] = "".join(aihao_tag) except Exception as e: item['interest_label'] = '还没有填写兴趣爱好' print("她的兴趣爱好",item['interest_label']) find_mate = self.driver.find_elements_by_xpath('/html//div[@class="bg_white mt15"]') ''' 择偶要求 ''' mate = find_mate[1].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') mate_dict={} for i in mate:
item['nation_mate'] = mate_dict['民族'] item['education_mate'] = mate_dict['学历'] item['image_mate'] = mate_dict['相册'] item['marital_status'] = mate_dict['婚姻状况'] item['address_mate'] = mate_dict['居住地'] item['sincerity_mate'] = mate_dict['诚信']#诚信 print("择偶要求",mate_dict) ''' 生活方式 ''' life = find_mate[2].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') life_style={} for i in life: temp = parse(i.text) if isinstance(temp,dict): life_style.update(parse(i.text))#update就合并两个dict else: life_style['吸烟'] = '未填写生活方式' life_style['饮酒'] = '未填写生活方式' life_style['锻炼习惯'] = '未填写生活方式' life_style['饮食习惯'] = '未填写生活方式' life_style['逛街购物'] = '未填写生活方式' life_style['宗教信仰'] = '未填写生活方式' life_style['作息时间'] = '未填写生活方式' life_style['交际圈子'] = '未填写生活方式' life_style['最大消费'] = '未填写生活方式' try: housework = [] pet = [] jiawu1 = find_mate[2].find_elements_by_xpath('div[@class="js_box"]//div[@class="pt25 fn-clear"]//dd[@class="cur"]') for i in jiawu1: housework.append(i.text)#0为家务水平,1为宠物喜欢程度 print("家务1 ",i.text) jiawu2 = find_mate[2].find_elements_by_xpath('div[@class="js_box"]//div[@class="fl pr"]/em') for i in jiawu2: pet.append(i.text)#0为家务分配,1为关于宠物 print("家务2 ",i.text) except Exception as e: housework.append('家务水平程度未填写') housework.append('宠物喜欢程度未填写') pet.append('家务分配未填写') pet.append ('关于宠物未填写') item['person_id_life'] = person_id item['smoke'] = life_style['吸烟'] item['drink_wine'] = life_style['饮酒'] item['exercise_habits'] = life_style['锻炼习惯'] item['eating_habits'] = life_style['饮食习惯'] item['shopping'] = life_style['逛街购物'] item['religious_belief'] = life_style['宗教信仰'] item['time_table'] = life_style['作息时间'] item['circle_of_communication'] = life_style['交际圈子'] item['maximum_consumption'] = life_style['最大消费'] item['housework'] = housework[0] item['household_assignment'] = pet[0] item['pet'] = housework[1] item['about_pets'] = pet[1] print("生活方式",life_style) print("家务",housework[0],pet[0]) print("宠物",housework[1],pet[1]) ''' 经济实力 ''' economic_dict={} economic = find_mate[3].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') for i in economic: economic_dict = parse(i.text) item['person_id_economic'] = person_id item['salary_economic'] = economic_dict['月薪'] item['buy_house_economic'] = economic_dict['购房'] item['buy_car_economic'] = economic_dict['购车'] item['economic_concept'] = economic_dict['经济观念'] item['investment_financing'] = economic_dict['投资理财'] item['foreign_debt'] = economic_dict['外债贷款'] print("经济实力",economic_dict) ''' 工作学习 ''' work = find_mate[4].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') work_study = {}# for i in work: if i.text: temp = parse(i.text) if isinstance(temp,dict): work_study.update(parse(i.text))#update就合并两个dict else: work_study['职业职位'] = '未填写工作学习方式' work_study['公司行业'] = '未填写工作学习方式' work_study['公司类型'] = '未填写工作学习方式' work_study['福利待遇'] = '未填写工作学习方式' work_study['工作状态'] = '未填写工作学习方式' work_study['调动工作可能性'] = '未填写工作学习方式' work_study['事业与家庭'] = '未填写工作学习方式' work_study['海外工作可能性'] = '未填写工作学习方式' work_study['毕业院校'] = '未填写工作学习方式' work_study['专业类型'] = '未填写工作学习方式' work_study['语言能力'] = '未填写工作学习方式' item['person_id_study'] = person_id item['position'] = work_study['职业职位'] item['company'] = work_study['公司行业'] item['company_type'] = work_study['公司类型'] item['welfare'] = work_study['福利待遇'] item['working'] = work_study['工作状态'] item['transfer_work'] = work_study['调动工作可能性'] item['work_family'] = work_study['事业与家庭'] item['overseas_job'] = work_study['海外工作可能性'] item['university'] = work_study['毕业院校'] item['major'] = work_study['专业类型'] item['language'] = 
work_study['语言能力'] print("工作学习",work_study) ''' 婚姻观念 ''' marriage = find_mate[5].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') marriage_family={} for i in marriage: if i.text: temp = parse(i.text) if isinstance(temp,dict): marriage_family.update(parse(i.text))#update就合并两个dict else: marriage_family['籍贯'] = '未填写婚姻观念' marriage_family['户口'] = '未填写婚姻观念' marriage_family['国籍'] = '未填写婚姻观念' marriage_family['个性待征'] = '未填写婚姻观念' marriage_family['幽默感'] = '未填写婚姻观念' marriage_family['脾气'] = '未填写婚姻观念' marriage_family['对待感情'] = '未填写婚姻观念' marriage_family['是否要小孩'] = '未填写婚姻观念' marriage_family['何时结婚'] = '未填写婚姻观念' marriage_family['是否能接受异地恋'] = '未填写婚姻观念' marriage_family['理想婚姻'] = '未填写婚姻观念' marriage_family['愿与对方父母同住'] = '未填写婚姻观念' marriage_family['家中排行'] = '未填写婚姻观念' marriage_family['父母情况'] = '未填写婚姻观念' marriage_family['兄弟姐妹'] = '未填写婚姻观念' marriage_family['父母经济情况'] = '未填写婚姻观念' marriage_family['父母医保情况'] = '未填写婚姻观念' marriage_family['父母的工作'] = '未填写婚姻观念' item['person_id_marriage'] = person_id item['address_marriage'] = marriage_family['籍贯'] item['registered_residence'] = marriage_family['户口'] item['nationality'] = marriage_family['国籍'] item['personality'] = marriage_family['个性待征'] item['humor'] = marriage_family['幽默感'] item['temper'] = marriage_family['脾气'] item['feelings'] = marriage_family['对待感情'] item['want_child'] = marriage_family['是否要小孩'] item['when_mary'] = marriage_family['何时结婚'] item['strange_love'] = marriage_family['是否能接受异地恋'] item['ideal_marriage'] = marriage_family['理想婚姻'] item['live_parents'] = marriage_family['愿与对方父母同住'] item['rankings_home'] = marriage_family['家中排行'] item['parents_situation'] = marriage_family['父母情况'] item['brothers'] = marriage_family['兄弟姐妹'] item['parents_economic'] = marriage_family['父母经济情况'] item['parents_medical'] = marriage_family['父母医保情况'] item['parents_working'] = marriage_family['父母的工作'] print("婚姻观念",marriage_family) ''' 相片列表 ''' #获取图片 print("相片url",response.url) list_images = self.driver.find_elements_by_xpath('/html//div[@id="bigImg"]//a') print("相片列表",type(list_images),list_images) images= [] for i in list_images: image = i.find_element_by_xpath('img').get_attribute("src") images.append(image) print("相片地址",image) item['img_urls'] = images#保存相片地址,在person_info表中的text print("执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后") yield item cmdline.execute("scrapy crawl jiayuan_main".split())
mate_dict = parse(i.text) item['person_id_mate'] = person_id item['age_mate'] = mate_dict['年龄'] item['height_mate'] = mate_dict['身高']
random_line_split
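The nested parse() helper in the jiayuan.py code above converts a profile panel of alternating label/value lines (for example the partner-requirement block quoted in its docstring) into a dict, and returns the raw string when nothing was filled in. A standalone sketch of that conversion with an illustrative input block:

def parse_label_block(text):
    # The panel text alternates label lines and value lines; strip spaces and
    # both ASCII and fullwidth colons from the label, keep the value as-is.
    lines = text.split("\n")
    if len(lines) <= 1:                     # nothing filled in -> return the raw string
        return text
    result = {}
    for i in range(0, len(lines) - 1, 2):
        key = lines[i].replace(" ", "").replace(":", "").replace(":", "")
        result[key] = lines[i + 1]
    return result

block = "年 龄:\n26-29岁之间\n身 高:\n169-185厘米"
print(parse_label_block(block))   # {'年龄': '26-29岁之间', '身高': '169-185厘米'}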
jiayuan.py
# -*- coding:utf-8 -*- ''' Created on 2018年2月28日 @author: ning.lin ''' ''' 大图地址class或id有big字样 的 <div class="pho_big" id="phoBig" style="height: 640px;"> <div class="big_pic fn-clear" id="bigImg"> 小图地址 <div class="pho_small_box fn-clear mt25 " id="phoSmallPic"> ''' import json import time from scrapy import log from scrapy import cmdline import scrapy from scrapy.http import Request from scrapy.http.request.form import FormRequest from scrapy_redis.spiders import RedisSpider from selenium import webdriver from jiayuan.settings import IMAGES_STORE,USER_NAME,PASSWD from jiayuan.items import JiayuanItem,MainItem import redis class jiayuan_data(RedisSpider):
onnectionPool(host='127.0.0.1',port=6379,db=0,decode_responses=True) #427条记录 r = redis.StrictRedis(connection_pool=pool) name = "jiayuan_main" redis_key = 'jiayuan_main:start_urls' url_base = 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=163649&ft=off&f=select&mt=d' redis_key = "sinaspider:start_urls" login_url = 'http://login.jiayuan.com/'#登录时的url start_urls = [] pre_page_num = 25#每个搜索业面有25条记录 #head less模拟登录 option = webdriver.ChromeOptions() option.add_argument('--headless') option.add_argument("--window-size=1920,1080") prefs={"profile.managed_default_content_settings.images":2}#禁止加载图片 option.add_experimental_option("prefs",prefs) try: driver = webdriver.Chrome(chrome_options=option) except Exception as e: driver.close() print("spider出现了异常,关闭",str(e)) driver.get(login_url) time.sleep(3) driver.find_element_by_id("login_btn").click() driver.find_element_by_id("login_email").clear() driver.find_element_by_id("login_email").send_keys(USER_NAME) #修改为自己的用户名 driver.find_element_by_id("login_password").clear() driver.find_element_by_id("login_password").send_keys(PASSWD) #修改为自己的密码 #登录url #url="http://login.jiayuan.com/" driver.find_element_by_id("login_btn").click()#点击登录按钮 cookies = driver.get_cookies()#获取cookies for p in range(1,173649): search_url = "http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d" %(p) start_urls.append(search_url) #print("start_urls",len(start_urls)) # start_urls = [ # "http://search.jiayuan.com/v2/search_v2.php",#直接搜索结果,获取个人主页的url(先不登录) #"https://passport.jiayuan.com/dologin.php?pre_url=http://www.jiayuan.com/usercp",#登录页面post数据 # ] ''' 下载器中间件在下载器和Scrapy引擎之间,每一个request和response都会通过中间件进行处理。 在中间件中,对request进行处理的函数是process_request(request, spider) ''' def start_requests(self):# for url in self.start_urls: yield Request(url=url,callback=self.get_main_info) # yield scrapy.Request(url=search_url,callback=self.get_main_info) # return Request(url=url,callback=self.get_main_info) def get_main_info(self,response):#解析搜索业面的url #info = response.body.decode("utf-8") #登录后可以查看一下登录响应信息json.loads( # for url in self.start_urls: time.sleep(1) print("当前的url",response.url) print('重新加载url') self.driver.get(response.url) self.driver.implicitly_wait(3) user_list = self.driver.find_elements_by_xpath('/html//ul[@id="normal_user_container"]/li//div[@class="user_name"]/a[@class="os_stat"]')#得到多个li标签 if user_list==[]: print("user_list为空了,解析有问题") #print("user_list",type(user_list),user_list) url_details = []#详情页面的url for user in user_list: main_url_main = user.get_attribute("href") print("人员主页url",main_url_main) url_details.append(main_url_main) # self.redis_pipe.rpush("p",main_url_main)#详情页额外写入redis,也可以不写 # self.redis_pipe.execute() print("人员详情url2",len(url_details)) if url_details!=[]: for url in url_details: yield Request(url=url,cookies=self.cookies,callback=self.get_details)#解析人员详细信息 # yield item def get_details(self,response): ''' <class 'str'> 年 龄: 26-29岁之间 身 高: 169-185厘米 民 族: 汉族 学 历: 不限 相 册: 有照片 婚姻状况: 未婚 居 住 地: 湖北十堰 诚 信: 不限 将这种类型的文字全部转成{'学历': '不限', '婚姻状况': '未婚', '居住地': '湖北十堰', '相册': '有照片', '身高': '169-185厘米', '民族': '汉族', '诚信': '不限', '年龄': '26-29岁之间'}这种dict方便入库 ''' pass def parse(str1): temp_list = str1.split('\n') result={} result_str='' # temp_dict=[]#result_dict这是因为有些项目下面有多个标签,多个标签就需要合并起来 # result_dict = {}#多个dict合并后的结果 if len(temp_list)>1:#大于1说明该项下有值,否则此项未填信息 for i in range(len(temp_list)): if i%2==0: result[temp_list[i].replace(" ", "").replace(":", '')] = temp_list[i+1] return result #其他则返回str else: 
result_str = str1 return result_str item = JiayuanItem() self.driver.get(response.url) self.driver.implicitly_wait(3) print('打开浏览器') print("当前的url",response.url) age_info = self.driver.find_element_by_xpath('/html//h6[@class="member_name"]').text person_id = response.url[response.url.rfind('/')+1:response.url.index('?')] print("年龄地址信息",type(age_info),age_info) address = self.driver.find_elements_by_xpath('/html//h6[@class="member_name"]/a')#得到多个a标签的text str_address='' str_sheng=address[0].get_attribute("text") str_shi=address[1].get_attribute("text") print("人员地址",str_sheng+'sssss'+str_shi) ''' 人个信息 ''' person_info = self.driver.find_elements_by_xpath('/html//ul[@class="member_info_list fn-clear"]') person_dict={} for i in person_info: person_dict = parse(i.text) print("个人信息",person_dict) ''' 处理item,对应mysql的person_info表 ''' item['person_id'] = person_id item['province'] = str_sheng item['municipal'] = str_shi nick_name_info = self.driver.find_elements_by_xpath('/html//div[@class="member_info_r yh"]/h4') nick_name = nick_name_info[0].text[0:nick_name_info[0].text.index("I")] print("昵称", nick_name) item['nike_name'] = nick_name item['education'] = person_dict['学历'] item['height'] = person_dict['身高'] item['buy_car'] = person_dict['购车'] item['salary'] = person_dict['月薪'] item['housing'] = person_dict['住房'] item['weight'] = person_dict['体重'] item['constellation'] = person_dict['星座'] item['nation'] = person_dict['民族'] item['zodiac'] = person_dict['属相'] item['blood_type'] = person_dict['血型'] item['age'] = age_info[0:age_info.index(',')] print("年龄",age_info[0:age_info.index(',')]) item['address'] = str_sheng+str_shi item['age_info'] = age_info item['image_dir'] = nick_name+'_'+item['age']+'_'+person_id#下载的相片归类 item['url'] = response.url #个人短语 item['introduce_oneself'] = self.driver.find_element_by_xpath('/html//div[@class="main_1000 mt15 fn-clear"]//div[@class="js_text"]').text print("个性短语",item['introduce_oneself']) #个性标签,有些人是没有个性标签的 #需要点击”更多“才能全部显示出来,否则只有4个 item['interest_label']='' item['personality_label']='' try: #link_a = self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a') #link_a.click() self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a').click() time.sleep(1) gexing_info = self.driver.find_elements_by_xpath('/html//div[@class="test4"]//div[@class="list_a fn-clear"]') print("aaa",type(gexing_info),gexing_info) gexing_tag='' for i in gexing_info: gexing_tag += i.text # a = item.find_element_by_xpath('div[@class="pag_list_grey_c"]').text item['personality_label'] = "".join(gexing_tag) except Exception as e: item['personality_label'] = '还没有填写个性元素' print("个性",item['personality_label']) #她的兴趣爱好有可能也是找不到的 try: #link_a = self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a') #link_a.click() self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more"]/a').click() # self.driver.find_element_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[2]/a').click self.driver.implicitly_wait(1) aihao_info = self.driver.find_elements_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[1]/ul') print("bbb",type(aihao_info),aihao_info) aihao_tag='' for i in aihao_info: aihao_tag += i.text # a = item.find_element_by_xpath('div[@class="pag_list_grey_c"]').text item['interest_label'] = "".join(aihao_tag) except Exception as e: item['interest_label'] = '还没有填写兴趣爱好' print("她的兴趣爱好",item['interest_label']) find_mate = self.driver.find_elements_by_xpath('/html//div[@class="bg_white 
mt15"]') ''' 择偶要求 ''' mate = find_mate[1].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') mate_dict={} for i in mate: mate_dict = parse(i.text) item['person_id_mate'] = person_id item['age_mate'] = mate_dict['年龄'] item['height_mate'] = mate_dict['身高'] item['nation_mate'] = mate_dict['民族'] item['education_mate'] = mate_dict['学历'] item['image_mate'] = mate_dict['相册'] item['marital_status'] = mate_dict['婚姻状况'] item['address_mate'] = mate_dict['居住地'] item['sincerity_mate'] = mate_dict['诚信']#诚信 print("择偶要求",mate_dict) ''' 生活方式 ''' life = find_mate[2].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') life_style={} for i in life: temp = parse(i.text) if isinstance(temp,dict): life_style.update(parse(i.text))#update就合并两个dict else: life_style['吸烟'] = '未填写生活方式' life_style['饮酒'] = '未填写生活方式' life_style['锻炼习惯'] = '未填写生活方式' life_style['饮食习惯'] = '未填写生活方式' life_style['逛街购物'] = '未填写生活方式' life_style['宗教信仰'] = '未填写生活方式' life_style['作息时间'] = '未填写生活方式' life_style['交际圈子'] = '未填写生活方式' life_style['最大消费'] = '未填写生活方式' try: housework = [] pet = [] jiawu1 = find_mate[2].find_elements_by_xpath('div[@class="js_box"]//div[@class="pt25 fn-clear"]//dd[@class="cur"]') for i in jiawu1: housework.append(i.text)#0为家务水平,1为宠物喜欢程度 print("家务1 ",i.text) jiawu2 = find_mate[2].find_elements_by_xpath('div[@class="js_box"]//div[@class="fl pr"]/em') for i in jiawu2: pet.append(i.text)#0为家务分配,1为关于宠物 print("家务2 ",i.text) except Exception as e: housework.append('家务水平程度未填写') housework.append('宠物喜欢程度未填写') pet.append('家务分配未填写') pet.append ('关于宠物未填写') item['person_id_life'] = person_id item['smoke'] = life_style['吸烟'] item['drink_wine'] = life_style['饮酒'] item['exercise_habits'] = life_style['锻炼习惯'] item['eating_habits'] = life_style['饮食习惯'] item['shopping'] = life_style['逛街购物'] item['religious_belief'] = life_style['宗教信仰'] item['time_table'] = life_style['作息时间'] item['circle_of_communication'] = life_style['交际圈子'] item['maximum_consumption'] = life_style['最大消费'] item['housework'] = housework[0] item['household_assignment'] = pet[0] item['pet'] = housework[1] item['about_pets'] = pet[1] print("生活方式",life_style) print("家务",housework[0],pet[0]) print("宠物",housework[1],pet[1]) ''' 经济实力 ''' economic_dict={} economic = find_mate[3].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') for i in economic: economic_dict = parse(i.text) item['person_id_economic'] = person_id item['salary_economic'] = economic_dict['月薪'] item['buy_house_economic'] = economic_dict['购房'] item['buy_car_economic'] = economic_dict['购车'] item['economic_concept'] = economic_dict['经济观念'] item['investment_financing'] = economic_dict['投资理财'] item['foreign_debt'] = economic_dict['外债贷款'] print("经济实力",economic_dict) ''' 工作学习 ''' work = find_mate[4].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') work_study = {}# for i in work: if i.text: temp = parse(i.text) if isinstance(temp,dict): work_study.update(parse(i.text))#update就合并两个dict else: work_study['职业职位'] = '未填写工作学习方式' work_study['公司行业'] = '未填写工作学习方式' work_study['公司类型'] = '未填写工作学习方式' work_study['福利待遇'] = '未填写工作学习方式' work_study['工作状态'] = '未填写工作学习方式' work_study['调动工作可能性'] = '未填写工作学习方式' work_study['事业与家庭'] = '未填写工作学习方式' work_study['海外工作可能性'] = '未填写工作学习方式' work_study['毕业院校'] = '未填写工作学习方式' work_study['专业类型'] = '未填写工作学习方式' work_study['语言能力'] = '未填写工作学习方式' item['person_id_study'] = person_id item['position'] = work_study['职业职位'] item['company'] = work_study['公司行业'] item['company_type'] = work_study['公司类型'] item['welfare'] = 
work_study['福利待遇'] item['working'] = work_study['工作状态'] item['transfer_work'] = work_study['调动工作可能性'] item['work_family'] = work_study['事业与家庭'] item['overseas_job'] = work_study['海外工作可能性'] item['university'] = work_study['毕业院校'] item['major'] = work_study['专业类型'] item['language'] = work_study['语言能力'] print("工作学习",work_study) ''' 婚姻观念 ''' marriage = find_mate[5].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') marriage_family={} for i in marriage: if i.text: temp = parse(i.text) if isinstance(temp,dict): marriage_family.update(parse(i.text))#update就合并两个dict else: marriage_family['籍贯'] = '未填写婚姻观念' marriage_family['户口'] = '未填写婚姻观念' marriage_family['国籍'] = '未填写婚姻观念' marriage_family['个性待征'] = '未填写婚姻观念' marriage_family['幽默感'] = '未填写婚姻观念' marriage_family['脾气'] = '未填写婚姻观念' marriage_family['对待感情'] = '未填写婚姻观念' marriage_family['是否要小孩'] = '未填写婚姻观念' marriage_family['何时结婚'] = '未填写婚姻观念' marriage_family['是否能接受异地恋'] = '未填写婚姻观念' marriage_family['理想婚姻'] = '未填写婚姻观念' marriage_family['愿与对方父母同住'] = '未填写婚姻观念' marriage_family['家中排行'] = '未填写婚姻观念' marriage_family['父母情况'] = '未填写婚姻观念' marriage_family['兄弟姐妹'] = '未填写婚姻观念' marriage_family['父母经济情况'] = '未填写婚姻观念' marriage_family['父母医保情况'] = '未填写婚姻观念' marriage_family['父母的工作'] = '未填写婚姻观念' item['person_id_marriage'] = person_id item['address_marriage'] = marriage_family['籍贯'] item['registered_residence'] = marriage_family['户口'] item['nationality'] = marriage_family['国籍'] item['personality'] = marriage_family['个性待征'] item['humor'] = marriage_family['幽默感'] item['temper'] = marriage_family['脾气'] item['feelings'] = marriage_family['对待感情'] item['want_child'] = marriage_family['是否要小孩'] item['when_mary'] = marriage_family['何时结婚'] item['strange_love'] = marriage_family['是否能接受异地恋'] item['ideal_marriage'] = marriage_family['理想婚姻'] item['live_parents'] = marriage_family['愿与对方父母同住'] item['rankings_home'] = marriage_family['家中排行'] item['parents_situation'] = marriage_family['父母情况'] item['brothers'] = marriage_family['兄弟姐妹'] item['parents_economic'] = marriage_family['父母经济情况'] item['parents_medical'] = marriage_family['父母医保情况'] item['parents_working'] = marriage_family['父母的工作'] print("婚姻观念",marriage_family) ''' 相片列表 ''' #获取图片 print("相片url",response.url) list_images = self.driver.find_elements_by_xpath('/html//div[@id="bigImg"]//a') print("相片列表",type(list_images),list_images) images= [] for i in list_images: image = i.find_element_by_xpath('img').get_attribute("src") images.append(image) print("相片地址",image) item['img_urls'] = images#保存相片地址,在person_info表中的text print("执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后") yield item cmdline.execute("scrapy crawl jiayuan_main".split())
pool=redis.C
identifier_name
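The spider above performs its login once with headless Chrome at class-definition time and then replays the Selenium cookies on every scrapy Request. A trimmed sketch of that handoff, assuming the same Selenium 3 style API the spider uses; element ids, credentials and URLs are placeholders (the real spider reads USER_NAME and PASSWD from its settings module):

import scrapy
from selenium import webdriver

def login_and_get_cookies(login_url, email, password):
    options = webdriver.ChromeOptions()
    options.add_argument("--headless")
    driver = webdriver.Chrome(chrome_options=options)   # Selenium 3 keyword, as in the spider
    try:
        driver.get(login_url)
        driver.find_element_by_id("login_email").send_keys(email)
        driver.find_element_by_id("login_password").send_keys(password)
        driver.find_element_by_id("login_btn").click()
        return driver.get_cookies()          # list of {'name': ..., 'value': ...} dicts
    finally:
        driver.quit()

class LoggedInSpider(scrapy.Spider):
    name = "logged_in_demo"

    def start_requests(self):
        cookies = login_and_get_cookies("http://login.jiayuan.com/", "user", "password")
        yield scrapy.Request("http://search.jiayuan.com/v2/index.php?p=1",
                             cookies=cookies, callback=self.parse)

    def parse(self, response):
        self.logger.info("logged-in page length: %d", len(response.body))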
jiayuan.py
# -*- coding:utf-8 -*- ''' Created on 2018年2月28日 @author: ning.lin ''' ''' 大图地址class或id有big字样 的 <div class="pho_big" id="phoBig" style="height: 640px;"> <div class="big_pic fn-clear" id="bigImg"> 小图地址 <div class="pho_small_box fn-clear mt25 " id="phoSmallPic"> ''' import json import time from scrapy import log from scrapy import cmdline import scrapy from scrapy.http import Request from scrapy.http.request.form import FormRequest from scrapy_redis.spiders import RedisSpider from selenium import webdriver from jiayuan.settings import IMAGES_STORE,USER_NAME,PASSWD from jiayuan.items import JiayuanItem,MainItem import redis class jiayuan_data(RedisSpider): pool=redis.ConnectionPool(host='
127.0.0.1',port=6379,db=0,decode_responses=True) #427条记录 r = redis.StrictRedis(connection_pool=pool) name = "jiayuan_main" redis_key = 'jiayuan_main:start_urls' url_base = 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=163649&ft=off&f=select&mt=d' redis_key = "sinaspider:start_urls" login_url = 'http://login.jiayuan.com/'#登录时的url start_urls = [] pre_page_num = 25#每个搜索业面有25条记录 #head less模拟登录 option = webdriver.ChromeOptions() option.add_argument('--headless') option.add_argument("--window-size=1920,1080") prefs={"profile.managed_default_content_settings.images":2}#禁止加载图片 option.add_experimental_option("prefs",prefs) try: driver = webdriver.Chrome(chrome_options=option) except Exception as e: driver.close() print("spider出现了异常,关闭",str(e)) driver.get(login_url) time.sleep(3) driver.find_element_by_id("login_btn").click() driver.find_element_by_id("login_email").clear() driver.find_element_by_id("login_email").send_keys(USER_NAME) #修改为自己的用户名 driver.find_element_by_id("login_password").clear() driver.find_element_by_id("login_password").send_keys(PASSWD) #修改为自己的密码 #登录url #url="http://login.jiayuan.com/" driver.find_element_by_id("login_btn").click()#点击登录按钮 cookies = driver.get_cookies()#获取cookies for p in range(1,173649): search_url = "http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d" %(p) start_urls.append(search_url) #print("start_urls",len(start_urls)) # start_urls = [ # "http://search.jiayuan.com/v2/search_v2.php",#直接搜索结果,获取个人主页的url(先不登录) #"https://passport.jiayuan.com/dologin.php?pre_url=http://www.jiayuan.com/usercp",#登录页面post数据 # ] ''' 下载器中间件在下载器和Scrapy引擎之间,每一个request和response都会通过中间件进行处理。 在中间件中,对request进行处理的函数是process_request(request, spider) ''' def start_requests(self):# for url in self.start_urls: yield Request(url=url,callback=self.get_main_info) # yield scrapy.Request(url=search_url,callback=self.get_main_info) # return Request(url=url,callback=self.get_main_info) def get_main_info(self,response):#解析搜索业面的url #info = response.body.decode("utf-8") #登录后可以查看一下登录响应信息json.loads( # for url in self.start_urls: time.sleep(1) print("当前的url",response.url) print('重新加载url') self.driver.get(response.url) self.driver.implicitly_wait(3) user_list = self.driver.find_elements_by_xpath('/html//ul[@id="normal_user_container"]/li//div[@class="user_name"]/a[@class="os_stat"]')#得到多个li标签 if user_list==[]: print("user_list为空了,解析有问题") #print("user_list",type(user_list),user_list) url_details = []#详情页面的url for user in user_list: main_url_main = user.get_attribute("href") print("人员主页url",main_url_main) url_details.append(main_url_main) # self.redis_pipe.rpush("p",main_url_main)#详情页额外写入redis,也可以不写 # self.redis_pipe.execute() print("人员详情url2",len(url_details)) if url_details!=[]: for url in url_details: yield Request(url=url,cookies=self.cookies,callback=self.get_details)#解析人员详细信息 # yield item def get_details(self,response): ''' <class 'str'> 年 龄: 26-29岁之间 身 高: 169-185厘米 民 族: 汉族 学 历: 不限 相 册: 有照片 婚姻状况: 未婚 居 住 地: 湖北十堰 诚 信: 不限 将这种类型的文字全部转成{'学历': '不限', '婚姻状况': '未婚', '居住地': '湖北十堰', '相册': '有照片', '身高': '169-185厘米', '民族': '汉族', '诚信': '不限', '年龄': '26-29岁之间'}这种dict方便入库 ''' pass def parse(str1): temp_list = str1.split('\n') result={} result_str='' # temp_dict=[]#result_dict这是因为有些项目下面有多个标签,多个标签就需要合并起来 # result_dict = {}#多个dict合并后的结果 if len(temp_list)>1:#大于1说明该项下有值,否则此项未填信息 for i in range(len(temp_list)): if i%2==0: result[temp_list[i].replace(" ", "").replace(":", '')] = temp_list[i+1] return result #其他则返回str else: result_str = str1 
return result_str item = JiayuanItem() self.driver.get(response.url) self.driver.implicitly_wait(3) print('打开浏览器') print("当前的url",response.url) age_info = self.driver.find_element_by_xpath('/html//h6[@class="member_name"]').text person_id = response.url[response.url.rfind('/')+1:response.url.index('?')] print("年龄地址信息",type(age_info),age_info) address = self.driver.find_elements_by_xpath('/html//h6[@class="member_name"]/a')#得到多个a标签的text str_address='' str_sheng=address[0].get_attribute("text") str_shi=address[1].get_attribute("text") print("人员地址",str_sheng+'sssss'+str_shi) ''' 人个信息 ''' person_info = self.driver.find_elements_by_xpath('/html//ul[@class="member_info_list fn-clear"]') person_dict={} for i in person_info: person_dict = parse(i.text) print("个人信息",person_dict) ''' 处理item,对应mysql的person_info表 ''' item['person_id'] = person_id item['province'] = str_sheng item['municipal'] = str_shi nick_name_info = self.driver.find_elements_by_xpath('/html//div[@class="member_info_r yh"]/h4') nick_name = nick_name_info[0].text[0:nick_name_info[0].text.index("I")] print("昵称", nick_name) item['nike_name'] = nick_name item['education'] = person_dict['学历'] item['height'] = person_dict['身高'] item['buy_car'] = person_dict['购车'] item['salary'] = person_dict['月薪'] item['housing'] = person_dict['住房'] item['weight'] = person_dict['体重'] item['constellation'] = person_dict['星座'] item['nation'] = person_dict['民族'] item['zodiac'] = person_dict['属相'] item['blood_type'] = person_dict['血型'] item['age'] = age_info[0:age_info.index(',')] print("年龄",age_info[0:age_info.index(',')]) item['address'] = str_sheng+str_shi item['age_info'] = age_info item['image_dir'] = nick_name+'_'+item['age']+'_'+person_id#下载的相片归类 item['url'] = response.url #个人短语 item['introduce_oneself'] = self.driver.find_element_by_xpath('/html//div[@class="main_1000 mt15 fn-clear"]//div[@class="js_text"]').text print("个性短语",item['introduce_oneself']) #个性标签,有些人是没有个性标签的 #需要点击”更多“才能全部显示出来,否则只有4个 item['interest_label']='' item['personality_label']='' try: #link_a = self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a') #link_a.click() self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a').click() time.sleep(1) gexing_info = self.driver.find_elements_by_xpath('/html//div[@class="test4"]//div[@class="list_a fn-clear"]') print("aaa",type(gexing_info),gexing_info) gexing_tag='' for i in gexing_info: gexing_tag += i.text # a = item.find_element_by_xpath('div[@class="pag_list_grey_c"]').text item['personality_label'] = "".join(gexing_tag) except Exception as e: item['personality_label'] = '还没有填写个性元素' print("个性",item['personality_label']) #她的兴趣爱好有可能也是找不到的 try: #link_a = self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a') #link_a.click() self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more"]/a').click() # self.driver.find_element_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[2]/a').click self.driver.implicitly_wait(1) aihao_info = self.driver.find_elements_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[1]/ul') print("bbb",type(aihao_info),aihao_info) aihao_tag='' for i in aihao_info: aihao_tag += i.text # a = item.find_element_by_xpath('div[@class="pag_list_grey_c"]').text item['interest_label'] = "".join(aihao_tag) except Exception as e: item['interest_label'] = '还没有填写兴趣爱好' print("她的兴趣爱好",item['interest_label']) find_mate = self.driver.find_elements_by_xpath('/html//div[@class="bg_white mt15"]') ''' 择偶要求 
''' mate = find_mate[1].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') mate_dict={} for i in mate: mate_dict = parse(i.text) item['person_id_mate'] = person_id item['age_mate'] = mate_dict['年龄'] item['height_mate'] = mate_dict['身高'] item['nation_mate'] = mate_dict['民族'] item['education_mate'] = mate_dict['学历'] item['image_mate'] = mate_dict['相册'] item['marital_status'] = mate_dict['婚姻状况'] item['address_mate'] = mate_dict['居住地'] item['sincerity_mate'] = mate_dict['诚信']#诚信 print("择偶要求",mate_dict) ''' 生活方式 ''' life = find_mate[2].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') life_style={} for i in life: temp = parse(i.text) if isinstance(temp,dict): life_style.update(parse(i.text))#update就合并两个dict else: life_style['吸烟'] = '未填写生活方式' life_style['饮酒'] = '未填写生活方式' life_style['锻炼习惯'] = '未填写生活方式' life_style['饮食习惯'] = '未填写生活方式' life_style['逛街购物'] = '未填写生活方式' life_style['宗教信仰'] = '未填写生活方式' life_style['作息时间'] = '未填写生活方式' life_style['交际圈子'] = '未填写生活方式' life_style['最大消费'] = '未填写生活方式' try: housework = [] pet = [] jiawu1 = find_mate[2].find_elements_by_xpath('div[@class="js_box"]//div[@class="pt25 fn-clear"]//dd[@class="cur"]') for i in jiawu1: housework.append(i.text)#0为家务水平,1为宠物喜欢程度 print("家务1 ",i.text) jiawu2 = find_mate[2].find_elements_by_xpath('div[@class="js_box"]//div[@class="fl pr"]/em') for i in jiawu2: pet.append(i.text)#0为家务分配,1为关于宠物 print("家务2 ",i.text) except Exception as e: housework.append('家务水平程度未填写') housework.append('宠物喜欢程度未填写') pet.append('家务分配未填写') pet.append ('关于宠物未填写') item['person_id_life'] = person_id item['smoke'] = life_style['吸烟'] item['drink_wine'] = life_style['饮酒'] item['exercise_habits'] = life_style['锻炼习惯'] item['eating_habits'] = life_style['饮食习惯'] item['shopping'] = life_style['逛街购物'] item['religious_belief'] = life_style['宗教信仰'] item['time_table'] = life_style['作息时间'] item['circle_of_communication'] = life_style['交际圈子'] item['maximum_consumption'] = life_style['最大消费'] item['housework'] = housework[0] item['household_assignment'] = pet[0] item['pet'] = housework[1] item['about_pets'] = pet[1] print("生活方式",life_style) print("家务",housework[0],pet[0]) print("宠物",housework[1],pet[1]) ''' 经济实力 ''' economic_dict={} economic = find_mate[3].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') for i in economic: economic_dict = parse(i.text) item['person_id_economic'] = person_id item['salary_economic'] = economic_dict['月薪'] item['buy_house_economic'] = economic_dict['购房'] item['buy_car_economic'] = economic_dict['购车'] item['economic_concept'] = economic_dict['经济观念'] item['investment_financing'] = economic_dict['投资理财'] item['foreign_debt'] = economic_dict['外债贷款'] print("经济实力",economic_dict) ''' 工作学习 ''' work = find_mate[4].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') work_study = {}# for i in work: if i.text: temp = parse(i.text) if isinstance(temp,dict): work_study.update(parse(i.text))#update就合并两个dict else: work_study['职业职位'] = '未填写工作学习方式' work_study['公司行业'] = '未填写工作学习方式' work_study['公司类型'] = '未填写工作学习方式' work_study['福利待遇'] = '未填写工作学习方式' work_study['工作状态'] = '未填写工作学习方式' work_study['调动工作可能性'] = '未填写工作学习方式' work_study['事业与家庭'] = '未填写工作学习方式' work_study['海外工作可能性'] = '未填写工作学习方式' work_study['毕业院校'] = '未填写工作学习方式' work_study['专业类型'] = '未填写工作学习方式' work_study['语言能力'] = '未填写工作学习方式' item['person_id_study'] = person_id item['position'] = work_study['职业职位'] item['company'] = work_study['公司行业'] item['company_type'] = work_study['公司类型'] item['welfare'] = work_study['福利待遇'] 
item['working'] = work_study['工作状态'] item['transfer_work'] = work_study['调动工作可能性'] item['work_family'] = work_study['事业与家庭'] item['overseas_job'] = work_study['海外工作可能性'] item['university'] = work_study['毕业院校'] item['major'] = work_study['专业类型'] item['language'] = work_study['语言能力'] print("工作学习",work_study) ''' 婚姻观念 ''' marriage = find_mate[5].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') marriage_family={} for i in marriage: if i.text: temp = parse(i.text) if isinstance(temp,dict): marriage_family.update(parse(i.text))#update就合并两个dict else: marriage_family['籍贯'] = '未填写婚姻观念' marriage_family['户口'] = '未填写婚姻观念' marriage_family['国籍'] = '未填写婚姻观念' marriage_family['个性待征'] = '未填写婚姻观念' marriage_family['幽默感'] = '未填写婚姻观念' marriage_family['脾气'] = '未填写婚姻观念' marriage_family['对待感情'] = '未填写婚姻观念' marriage_family['是否要小孩'] = '未填写婚姻观念' marriage_family['何时结婚'] = '未填写婚姻观念' marriage_family['是否能接受异地恋'] = '未填写婚姻观念' marriage_family['理想婚姻'] = '未填写婚姻观念' marriage_family['愿与对方父母同住'] = '未填写婚姻观念' marriage_family['家中排行'] = '未填写婚姻观念' marriage_family['父母情况'] = '未填写婚姻观念' marriage_family['兄弟姐妹'] = '未填写婚姻观念' marriage_family['父母经济情况'] = '未填写婚姻观念' marriage_family['父母医保情况'] = '未填写婚姻观念' marriage_family['父母的工作'] = '未填写婚姻观念' item['person_id_marriage'] = person_id item['address_marriage'] = marriage_family['籍贯'] item['registered_residence'] = marriage_family['户口'] item['nationality'] = marriage_family['国籍'] item['personality'] = marriage_family['个性待征'] item['humor'] = marriage_family['幽默感'] item['temper'] = marriage_family['脾气'] item['feelings'] = marriage_family['对待感情'] item['want_child'] = marriage_family['是否要小孩'] item['when_mary'] = marriage_family['何时结婚'] item['strange_love'] = marriage_family['是否能接受异地恋'] item['ideal_marriage'] = marriage_family['理想婚姻'] item['live_parents'] = marriage_family['愿与对方父母同住'] item['rankings_home'] = marriage_family['家中排行'] item['parents_situation'] = marriage_family['父母情况'] item['brothers'] = marriage_family['兄弟姐妹'] item['parents_economic'] = marriage_family['父母经济情况'] item['parents_medical'] = marriage_family['父母医保情况'] item['parents_working'] = marriage_family['父母的工作'] print("婚姻观念",marriage_family) ''' 相片列表 ''' #获取图片 print("相片url",response.url) list_images = self.driver.find_elements_by_xpath('/html//div[@id="bigImg"]//a') print("相片列表",type(list_images),list_images) images= [] for i in list_images: image = i.find_element_by_xpath('img').get_attribute("src") images.append(image) print("相片地址",image) item['img_urls'] = images#保存相片地址,在person_info表中的text print("执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后") yield item cmdline.execute("scrapy crawl jiayuan_main".split())
identifier_body
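The item assembly in the record above relies on the spider's parse() helper (its definition appears in the next record's prefix) to turn the newline-separated "label / value" text that Selenium returns into a dict before the fields are copied into JiayuanItem. Below is a minimal standalone sketch of that conversion, assuming the same alternating label/value layout; the function name and the sample input are illustrative, not taken verbatim from the source.

# Minimal sketch of the "label / value" text -> dict conversion performed by the spider's parse() helper.
# Assumes element.text yields alternating label and value lines, as the profile lists on the page do.
def parse_profile_text(text):
    lines = text.split('\n')
    if len(lines) <= 1:
        # A section with a single line was left blank by the user; hand the raw string back.
        return text
    result = {}
    for label, value in zip(lines[0::2], lines[1::2]):
        # Drop spaces and both colon variants so "学 历:" becomes "学历".
        result[label.replace(' ', '').replace(':', '').replace('：', '')] = value
    return result

# Illustrative input, shaped like the text quoted in the docstring of the next record:
print(parse_profile_text('学历\n不限\n婚姻状况\n未婚'))   # {'学历': '不限', '婚姻状况': '未婚'}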
jiayuan.py
# -*- coding:utf-8 -*- ''' Created on 2018年2月28日 @author: ning.lin ''' ''' 大图地址class或id有big字样 的 <div class="pho_big" id="phoBig" style="height: 640px;"> <div class="big_pic fn-clear" id="bigImg"> 小图地址 <div class="pho_small_box fn-clear mt25 " id="phoSmallPic"> ''' import json import time from scrapy import log from scrapy import cmdline import scrapy from scrapy.http import Request from scrapy.http.request.form import FormRequest from scrapy_redis.spiders import RedisSpider from selenium import webdriver from jiayuan.settings import IMAGES_STORE,USER_NAME,PASSWD from jiayuan.items import JiayuanItem,MainItem import redis class jiayuan_data(RedisSpider): pool=redis.ConnectionPool(host='127.0.0.1',port=6379,db=0,decode_responses=True) #427条记录 r = redis.StrictRedis(connection_pool=pool) name = "jiayuan_main" redis_key = 'jiayuan_main:start_urls' url_base = 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=163649&ft=off&f=select&mt=d' redis_key = "sinaspider:start_urls" login_url = 'http://login.jiayuan.com/'#登录时的url start_urls = [] pre_page_num = 25#每个搜索业面有25条记录 #head less模拟登录 option = webdriver.ChromeOptions() option.add_argument('--headless') option.add_argument("--window-size=1920,1080") prefs={"profile.managed_default_content_settings.images":2}#禁止加载图片 option.add_experimental_option("prefs",prefs) try: driver = webdriver.Chrome(chrome_options=option) except Exception as e: driver.close() print("spider出现了异常,关闭",str(e)) driver.get(login_url) time.sleep(3) driver.find_element_by_id("login_btn").click() driver.find_element_by_id("login_email").clear() driver.find_element_by_id("login_email").send_keys(USER_NAME) #修改为自己的用户名 driver.find_element_by_id("login_password").clear() driver.find_element_by_id("login_password").send_keys(PASSWD) #修改为自己的密码 #登录url #url="http://login.jiayuan.com/" driver.find_element_by_id("login_btn").click()#点击登录按钮 cookies = driver.get_cookies()#获取cookies for p in range(1,173649): search_url = "http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d" %(p) start_urls.append(search_url) #print("start_urls",len(start_urls)) # start_urls = [ # "http://search.jiayuan.com/v2/search_v2.php",#直接搜索结果,获取个人主页的url(先不登录) #"https://passport.jiayuan.com/dologin.php?pre_url=http://www.jiayuan.com/usercp",#登录页面post数据 # ] ''' 下载器中间件在下载器和Scrapy引擎之间,每一个request和response都会通过中间件进行处理。 在中间件中,对request进行处理的函数是process_request(request, spider) ''' def start_requests(self):# for url in self.start_urls: yield Request(url=url,callback=self.get_main_info) # yield scrapy.Request(url=search_url,callback=self.get_main_info) # return Request(url=url,callback=self.get_main_info) def get_main_info(self,response):#解析搜索业面的url #info = response.body.decode("utf-8") #登录后可以查看一下登录响应信息json.loads( # for url in self.start_urls: time.sleep(1) print("当前的url",response.url) print('重新加载url') self.driver.get(response.url) self.driver.implicitly_wait(3) user_list = self.driver.find_elements_by_xpath('/html//ul[@id="normal_user_container"]/li//div[@class="user_name"]/a[@class="os_stat"]')#得到多个li标签 if user_list==[]: print("user_list为空了,解析有问题") #print("user_list",type(user_list),user_list) url_details = []#详情页面的url for user in user_list: main_url_main = user.get_attribute("href") print("人员主页url",main_url_main) url_details.append(main_url_main) # self.redis_pipe.rpush("p",main_url_main)#详情页额外写入redis,也可以不写 # self.redis_pipe.execute() print("人员详情url2",len(url_details)) if url_details!=[]: for url in url_details: yield Request(url
<class 'str'> 年 龄: 26-29岁之间 身 高: 169-185厘米 民 族: 汉族 学 历: 不限 相 册: 有照片 婚姻状况: 未婚 居 住 地: 湖北十堰 诚 信: 不限 将这种类型的文字全部转成{'学历': '不限', '婚姻状况': '未婚', '居住地': '湖北十堰', '相册': '有照片', '身高': '169-185厘米', '民族': '汉族', '诚信': '不限', '年龄': '26-29岁之间'}这种dict方便入库 ''' pass def parse(str1): temp_list = str1.split('\n') result={} result_str='' # temp_dict=[]#result_dict这是因为有些项目下面有多个标签,多个标签就需要合并起来 # result_dict = {}#多个dict合并后的结果 if len(temp_list)>1:#大于1说明该项下有值,否则此项未填信息 for i in range(len(temp_list)): if i%2==0: result[temp_list[i].replace(" ", "").replace(":", '')] = temp_list[i+1] return result #其他则返回str else: result_str = str1 return result_str item = JiayuanItem() self.driver.get(response.url) self.driver.implicitly_wait(3) print('打开浏览器') print("当前的url",response.url) age_info = self.driver.find_element_by_xpath('/html//h6[@class="member_name"]').text person_id = response.url[response.url.rfind('/')+1:response.url.index('?')] print("年龄地址信息",type(age_info),age_info) address = self.driver.find_elements_by_xpath('/html//h6[@class="member_name"]/a')#得到多个a标签的text str_address='' str_sheng=address[0].get_attribute("text") str_shi=address[1].get_attribute("text") print("人员地址",str_sheng+'sssss'+str_shi) ''' 人个信息 ''' person_info = self.driver.find_elements_by_xpath('/html//ul[@class="member_info_list fn-clear"]') person_dict={} for i in person_info: person_dict = parse(i.text) print("个人信息",person_dict) ''' 处理item,对应mysql的person_info表 ''' item['person_id'] = person_id item['province'] = str_sheng item['municipal'] = str_shi nick_name_info = self.driver.find_elements_by_xpath('/html//div[@class="member_info_r yh"]/h4') nick_name = nick_name_info[0].text[0:nick_name_info[0].text.index("I")] print("昵称", nick_name) item['nike_name'] = nick_name item['education'] = person_dict['学历'] item['height'] = person_dict['身高'] item['buy_car'] = person_dict['购车'] item['salary'] = person_dict['月薪'] item['housing'] = person_dict['住房'] item['weight'] = person_dict['体重'] item['constellation'] = person_dict['星座'] item['nation'] = person_dict['民族'] item['zodiac'] = person_dict['属相'] item['blood_type'] = person_dict['血型'] item['age'] = age_info[0:age_info.index(',')] print("年龄",age_info[0:age_info.index(',')]) item['address'] = str_sheng+str_shi item['age_info'] = age_info item['image_dir'] = nick_name+'_'+item['age']+'_'+person_id#下载的相片归类 item['url'] = response.url #个人短语 item['introduce_oneself'] = self.driver.find_element_by_xpath('/html//div[@class="main_1000 mt15 fn-clear"]//div[@class="js_text"]').text print("个性短语",item['introduce_oneself']) #个性标签,有些人是没有个性标签的 #需要点击”更多“才能全部显示出来,否则只有4个 item['interest_label']='' item['personality_label']='' try: #link_a = self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a') #link_a.click() self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a').click() time.sleep(1) gexing_info = self.driver.find_elements_by_xpath('/html//div[@class="test4"]//div[@class="list_a fn-clear"]') print("aaa",type(gexing_info),gexing_info) gexing_tag='' for i in gexing_info: gexing_tag += i.text # a = item.find_element_by_xpath('div[@class="pag_list_grey_c"]').text item['personality_label'] = "".join(gexing_tag) except Exception as e: item['personality_label'] = '还没有填写个性元素' print("个性",item['personality_label']) #她的兴趣爱好有可能也是找不到的 try: #link_a = self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a') #link_a.click() self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more"]/a').click() # 
self.driver.find_element_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[2]/a').click self.driver.implicitly_wait(1) aihao_info = self.driver.find_elements_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[1]/ul') print("bbb",type(aihao_info),aihao_info) aihao_tag='' for i in aihao_info: aihao_tag += i.text # a = item.find_element_by_xpath('div[@class="pag_list_grey_c"]').text item['interest_label'] = "".join(aihao_tag) except Exception as e: item['interest_label'] = '还没有填写兴趣爱好' print("她的兴趣爱好",item['interest_label']) find_mate = self.driver.find_elements_by_xpath('/html//div[@class="bg_white mt15"]') ''' 择偶要求 ''' mate = find_mate[1].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') mate_dict={} for i in mate: mate_dict = parse(i.text) item['person_id_mate'] = person_id item['age_mate'] = mate_dict['年龄'] item['height_mate'] = mate_dict['身高'] item['nation_mate'] = mate_dict['民族'] item['education_mate'] = mate_dict['学历'] item['image_mate'] = mate_dict['相册'] item['marital_status'] = mate_dict['婚姻状况'] item['address_mate'] = mate_dict['居住地'] item['sincerity_mate'] = mate_dict['诚信']#诚信 print("择偶要求",mate_dict) ''' 生活方式 ''' life = find_mate[2].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') life_style={} for i in life: temp = parse(i.text) if isinstance(temp,dict): life_style.update(parse(i.text))#update就合并两个dict else: life_style['吸烟'] = '未填写生活方式' life_style['饮酒'] = '未填写生活方式' life_style['锻炼习惯'] = '未填写生活方式' life_style['饮食习惯'] = '未填写生活方式' life_style['逛街购物'] = '未填写生活方式' life_style['宗教信仰'] = '未填写生活方式' life_style['作息时间'] = '未填写生活方式' life_style['交际圈子'] = '未填写生活方式' life_style['最大消费'] = '未填写生活方式' try: housework = [] pet = [] jiawu1 = find_mate[2].find_elements_by_xpath('div[@class="js_box"]//div[@class="pt25 fn-clear"]//dd[@class="cur"]') for i in jiawu1: housework.append(i.text)#0为家务水平,1为宠物喜欢程度 print("家务1 ",i.text) jiawu2 = find_mate[2].find_elements_by_xpath('div[@class="js_box"]//div[@class="fl pr"]/em') for i in jiawu2: pet.append(i.text)#0为家务分配,1为关于宠物 print("家务2 ",i.text) except Exception as e: housework.append('家务水平程度未填写') housework.append('宠物喜欢程度未填写') pet.append('家务分配未填写') pet.append ('关于宠物未填写') item['person_id_life'] = person_id item['smoke'] = life_style['吸烟'] item['drink_wine'] = life_style['饮酒'] item['exercise_habits'] = life_style['锻炼习惯'] item['eating_habits'] = life_style['饮食习惯'] item['shopping'] = life_style['逛街购物'] item['religious_belief'] = life_style['宗教信仰'] item['time_table'] = life_style['作息时间'] item['circle_of_communication'] = life_style['交际圈子'] item['maximum_consumption'] = life_style['最大消费'] item['housework'] = housework[0] item['household_assignment'] = pet[0] item['pet'] = housework[1] item['about_pets'] = pet[1] print("生活方式",life_style) print("家务",housework[0],pet[0]) print("宠物",housework[1],pet[1]) ''' 经济实力 ''' economic_dict={} economic = find_mate[3].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') for i in economic: economic_dict = parse(i.text) item['person_id_economic'] = person_id item['salary_economic'] = economic_dict['月薪'] item['buy_house_economic'] = economic_dict['购房'] item['buy_car_economic'] = economic_dict['购车'] item['economic_concept'] = economic_dict['经济观念'] item['investment_financing'] = economic_dict['投资理财'] item['foreign_debt'] = economic_dict['外债贷款'] print("经济实力",economic_dict) ''' 工作学习 ''' work = find_mate[4].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') work_study = {}# for i in work: if i.text: temp = parse(i.text) if 
isinstance(temp,dict): work_study.update(parse(i.text))#update就合并两个dict else: work_study['职业职位'] = '未填写工作学习方式' work_study['公司行业'] = '未填写工作学习方式' work_study['公司类型'] = '未填写工作学习方式' work_study['福利待遇'] = '未填写工作学习方式' work_study['工作状态'] = '未填写工作学习方式' work_study['调动工作可能性'] = '未填写工作学习方式' work_study['事业与家庭'] = '未填写工作学习方式' work_study['海外工作可能性'] = '未填写工作学习方式' work_study['毕业院校'] = '未填写工作学习方式' work_study['专业类型'] = '未填写工作学习方式' work_study['语言能力'] = '未填写工作学习方式' item['person_id_study'] = person_id item['position'] = work_study['职业职位'] item['company'] = work_study['公司行业'] item['company_type'] = work_study['公司类型'] item['welfare'] = work_study['福利待遇'] item['working'] = work_study['工作状态'] item['transfer_work'] = work_study['调动工作可能性'] item['work_family'] = work_study['事业与家庭'] item['overseas_job'] = work_study['海外工作可能性'] item['university'] = work_study['毕业院校'] item['major'] = work_study['专业类型'] item['language'] = work_study['语言能力'] print("工作学习",work_study) ''' 婚姻观念 ''' marriage = find_mate[5].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]') marriage_family={} for i in marriage: if i.text: temp = parse(i.text) if isinstance(temp,dict): marriage_family.update(parse(i.text))#update就合并两个dict else: marriage_family['籍贯'] = '未填写婚姻观念' marriage_family['户口'] = '未填写婚姻观念' marriage_family['国籍'] = '未填写婚姻观念' marriage_family['个性待征'] = '未填写婚姻观念' marriage_family['幽默感'] = '未填写婚姻观念' marriage_family['脾气'] = '未填写婚姻观念' marriage_family['对待感情'] = '未填写婚姻观念' marriage_family['是否要小孩'] = '未填写婚姻观念' marriage_family['何时结婚'] = '未填写婚姻观念' marriage_family['是否能接受异地恋'] = '未填写婚姻观念' marriage_family['理想婚姻'] = '未填写婚姻观念' marriage_family['愿与对方父母同住'] = '未填写婚姻观念' marriage_family['家中排行'] = '未填写婚姻观念' marriage_family['父母情况'] = '未填写婚姻观念' marriage_family['兄弟姐妹'] = '未填写婚姻观念' marriage_family['父母经济情况'] = '未填写婚姻观念' marriage_family['父母医保情况'] = '未填写婚姻观念' marriage_family['父母的工作'] = '未填写婚姻观念' item['person_id_marriage'] = person_id item['address_marriage'] = marriage_family['籍贯'] item['registered_residence'] = marriage_family['户口'] item['nationality'] = marriage_family['国籍'] item['personality'] = marriage_family['个性待征'] item['humor'] = marriage_family['幽默感'] item['temper'] = marriage_family['脾气'] item['feelings'] = marriage_family['对待感情'] item['want_child'] = marriage_family['是否要小孩'] item['when_mary'] = marriage_family['何时结婚'] item['strange_love'] = marriage_family['是否能接受异地恋'] item['ideal_marriage'] = marriage_family['理想婚姻'] item['live_parents'] = marriage_family['愿与对方父母同住'] item['rankings_home'] = marriage_family['家中排行'] item['parents_situation'] = marriage_family['父母情况'] item['brothers'] = marriage_family['兄弟姐妹'] item['parents_economic'] = marriage_family['父母经济情况'] item['parents_medical'] = marriage_family['父母医保情况'] item['parents_working'] = marriage_family['父母的工作'] print("婚姻观念",marriage_family) ''' 相片列表 ''' #获取图片 print("相片url",response.url) list_images = self.driver.find_elements_by_xpath('/html//div[@id="bigImg"]//a') print("相片列表",type(list_images),list_images) images= [] for i in list_images: image = i.find_element_by_xpath('img').get_attribute("src") images.append(image) print("相片地址",image) item['img_urls'] = images#保存相片地址,在person_info表中的text print("执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后") yield item cmdline.execute("scrapy crawl jiayuan_main".split())
=url,cookies=self.cookies,callback=self.get_details)# parse the person's detail page # yield item def get_details(self,response): '''
conditional_block
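The prefix of the record above logs into jiayuan.com with a headless Chrome driver at class-definition time and reuses the resulting cookies for every detail request. The sketch below mirrors that setup, assuming the older Selenium API the file itself uses (find_element_by_id, chrome_options=); LOGIN_URL, USER_NAME and PASSWD are placeholders for the values the spider imports from jiayuan.settings.

import time
from selenium import webdriver

# Placeholder stand-ins for the values imported from jiayuan.settings in the spider.
LOGIN_URL = 'http://login.jiayuan.com/'
USER_NAME = 'user@example.com'
PASSWD = 'secret'

def make_logged_in_driver():
    # Headless Chrome with image loading disabled, mirroring the options used in the spider.
    option = webdriver.ChromeOptions()
    option.add_argument('--headless')
    option.add_argument('--window-size=1920,1080')
    option.add_experimental_option('prefs', {'profile.managed_default_content_settings.images': 2})
    driver = webdriver.Chrome(chrome_options=option)  # newer Selenium versions expect options=option

    # Fill in the login form and submit, as the spider does before building its start_urls.
    driver.get(LOGIN_URL)
    time.sleep(3)
    driver.find_element_by_id('login_btn').click()
    driver.find_element_by_id('login_email').clear()
    driver.find_element_by_id('login_email').send_keys(USER_NAME)
    driver.find_element_by_id('login_password').clear()
    driver.find_element_by_id('login_password').send_keys(PASSWD)
    driver.find_element_by_id('login_btn').click()
    return driver, driver.get_cookies()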
wxkfmanager.go
package wxapi import ( "encoding/xml" "fmt" "github.com/gin-gonic/gin" "github.com/mitchellh/mapstructure" "github.com/robfig/cron" "io/ioutil" "net/http" "net/url" ) type WXKfManager interface { //小程序或者公众号授权给第三方平台 HandleCompentAuthEventPush(context * gin.Context,responsehandler ...func(appmsg APPAuthMsg)) //授权事件推送,根据信息更新授权状态,对应授权事件接收URL HanleCompentAuth(context * gin.Context, responsehandler func(authinfo APPAuthInfoResp)(redicturl string)) //授权回调并得到授权方的账号信息 //授权方信息管理 GetCompentAuthOptionInfo(authorizer_appid,option_name string,responsehandler ...func(APPOptionResp)) //获取授权方的选项设置信息 SetCompentAuthOption(authorizer_appid,option_name,option_value string,responsehandler ...func(BaseResp)) //设置授权方的选项设置信息 //代公众号实现业务API //对应到消息与事件接收url中 HandleAppEventPush(ctx * gin.Context, handler func(msg ReqMsg)(usedefult bool,replymsg interface{})) //接收处处公众号事件和消息 //用户网页登录 //对应用户授权后回调 HanledAppAuth(context * gin.Context,completeHandler func(resp AuthResp,authuser AuthuserResp,state string)(redicturl string)) //获取第三方用户信息,并跳转到登录后的页面 GetAppAuthurl(appid,scope,redirect_uri,state string) string //用户登录发起页面 //用户信息 } type WXKFManager struct { Compenttoken string //第三方平台的token 消息校验token CompentAppid string //第三方平台的APPid CompentAeskey string //第三方平台的秘钥 Componentsecret string //第三方平台的appsecret Component_access_token string //第三方平台的component_access_token ComponentVerifyTicket string //微信服务器传输的第三方平台ComponentVerifyTicket Pre_auth_code string //预授权码 Redircturl string //公众号授权完成后的回调URL用来接收授权码auth_code Compentauthurl string //当前第三方平台授权移动端连接 AppAuthinfos []APPAuthInfoResp //第三方公众号令牌数组,自动刷新 } //初始化开放平台管理器 func InitWXKFManager(token,appid,EncodingAESKey,appseceret,Redircturl string,refreshAppAuthHanlder func(appauth ...APPAuthInfoResp),AppAuthinfos...[]APPAuthInfoResp) *WXKFManager { var wx WXKFManager wx.Compenttoken = token wx.CompentAeskey = EncodingAESKey wx.CompentAppid = appid wx.Componentsecret = appseceret wx.Redircturl=Redircturl //每小时刷新一次component_accesstoken cr := cron.New() spec := "0 0 */1 * * ?" err:=cr.AddFunc(spec,wx.getComponent_access_token) err= cr.AddFunc(spec, func() { if len(AppAuthinfos)>0 { wx.AppAuthinfos=AppAuthinfos[0] for _,value := range wx.AppAuthinfos{ wx.RefreshCompentAuthAccessToken(value.AuthorizationInfo.AuthorizerAppid,value.AuthorizationInfo.AuthorizerRefreshToken, func(APPAuthInfo APPAuthInfoResp) { refreshAppAuthHanlder(APPAuthInfo) }) } }else { refreshAppAuthHanlder() } }) fmt.Println("开房平台定时任务初始化",err,spec) spe :="0 */10 * * * ?" 
err=cr.AddFunc(spe,wx.getCompent_pre_auth_code) fmt.Println("开房平台定时任务初始化",err,spe) cr.Start() return &wx } //授权流程 //授权回调 获取授权码并根据授权码获取授权信息 func (wx *WXKFManager) HanleCompentAuth(context * gin.Context, responsehandler func(authinfo APPAuthInfoResp)(redicturl string)){ authcode := context.Query("auth_code") //查询公众号授权第三方平台的权限 wx.getCompentAuthAccesstoken(authcode, func(appAuthInfo APPAuthInfoResp) { wx.AppAuthinfos=append(wx.AppAuthinfos, appAuthInfo) redicturl := responsehandler(appAuthInfo) context.Redirect(http.StatusMovedPermanently,redicturl) }) } //获取授权方公众号账号基本信息 func (wx *WXKFManager) GetCompentAuthorizerInfo(authorizer_appid string,response ...func(resp APPUserInfoResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid}, func(resp JsonResponse) { var result APPUserInfoResp mapstructure.Decode(resp.Dic,&result) result.JsonResponse=&resp if len(response)>0 { response[0](result) } }) } //获取授权方选项设置信息 func (wx *WXKFManager) GetCompentAuthOptionInfo(authorizer_appid,option_name string,responsehandler ...func(APPOptionResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"option_name":option_name}, func(resp JsonResponse) { var result APPOptionResp mapstructure.Decode(resp.Dic,&result) result.JsonResponse=&resp if len(responsehandler)>0 { responsehandler[0](result) } }) } //设置授权方选项信息 func (wx *WXKFManager) SetCompentAuthOption(authorizer_appid,option_name,option_value string,responsehandler ...func(BaseResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"option_name":option_name, "option_value":option_value}, func(resp JsonResponse) { var result BaseResp mapstructure.Decode(resp.Dic,&result) result.JsonResponse=&resp if len(responsehandler)>0 { responsehandler[0](result) } }) } //授权通知处理 func (wx *WXKFManager) HandleCompentAuthEventPush(context * gin.Context,responsehandler ...func(appmsg APPAuthMsg)){ wx.parsereqToAPPAuthMsg(context, func(CheckSign boo
Decrptmsg APPAuthMsg, safe bool) { if CheckSign { if len(responsehandler)>0 { responsehandler[0](Decrptmsg) } } }) context.String(http.StatusOK,"success") } //代公众号实现网页授权 /****************代公众号实现业务*******************/ //1.代公众号调用接口 //获取用户信息 func (wx *WXKFManager)GetUserInfo( authorizer_access_token,authorizer_appid string ,hanlder ...func(user JsonResponse) ){ if len(hanlder)>0 { getuserInfo(authorizer_access_token,authorizer_appid,hanlder[0]) }else { getuserInfo(authorizer_access_token,authorizer_appid) } } //获取用户列表 func (wx *WXKFManager)GetUserList( authorizer_access_token string,hanlder func(user JsonResponse),nestopid ...string ){ if len(nestopid)>0 { getuserlist(authorizer_access_token,hanlder,nestopid[0] ) }else { getuserlist(authorizer_access_token,hanlder) } } //2.代公众号处理消息和事件 func (wx *WXKFManager) HandleAppEventPush(ctx * gin.Context, handler func(msg ReqMsg)(usedefult bool,replymsg interface{})){ wx.parsereqToReqMsg(ctx, func(CheckSign bool, Orignmsg ReqMsg, Decrptmsg ReqMsg, safe bool) { if !CheckSign { ctx.String(http.StatusForbidden, "验证签名错误") return } if ctx.Request.Method!= http.MethodPost { echostr := ctx.Query("echostr") ctx.String(http.StatusOK,echostr) return } def,replymsg := handler(Decrptmsg) if def { ctx.String(http.StatusOK,"success") return } if safe { // ctx.String(http.StatusOK,string(ReplyMsgData(wx.msgEncrept(replymsg)))) }else { ctx.String(http.StatusOK, string(ReplyMsgData(replymsg))) } }) } //3.代公众号发起网页授权 //获取代公众号发起网页授权url func (wx *WXKFManager) GetAppAuthurl(appid,scope,redirect_uri,state string) string { str := url.QueryEscape(redirect_uri) url:="https://open.weixin.qq.com/connect/oauth2/authorize?appid=" url=url+appid+"&redirect_uri="+str url =url+"&response_type=code&scope="+scope+"&state="+state+"&component_appid="+wx.CompentAppid url =url+"#wechat_redirect" return url } //网页授权后回调 func (wx *WXKFManager) HanledAppAuth(context * gin.Context,completeHandler func(resp AuthResp,authuser JsonResponse,state string)(redicturl string)) { code := context.Query("code") appid := context.Query("appid") state := context.Query("state") fmt.Println(code,appid,"接收到的微信信息时") wx.getAppAuthUserAccesstoken(code,appid, func(authresp AuthResp) { wx.getAppAuthuserInfo(authresp, func(authuser JsonResponse) { redicturl:=completeHandler(authresp,authuser,state) context.Redirect(http.StatusMovedPermanently,redicturl) }) }) } //4代公众号调用jssdk /*...........私有方法.................*/ //加密XML结构体消息体 func (wx *WXKFManager)msgEncrept(msg interface{})EncryptMsg { return CreatEncryptMsg(ReplyMsgData(msg),DecodeAESKey(wx.CompentAeskey),wx.CompentAppid,wx.Compenttoken) } /*授权流程使用*/ //1.获取第三方平台access_token func (wx *WXKFManager) getComponent_access_token() { if len(wx.ComponentVerifyTicket)>0 { wx.Component_access_token = getcomponent_token(wx.CompentAppid,wx.Componentsecret,wx.ComponentVerifyTicket) }else { wx.Component_access_token="" } } //2.获取预授权码 func (wx *WXKFManager) getCompent_pre_auth_code() { if len(wx.Component_access_token)>0 { wx.Pre_auth_code= getCompent_pre_authcode(wx.Component_access_token,gin.H{"component_appid": wx.CompentAppid}) wx.Compentauthurl= getCompentAuthUrl(wx.CompentAppid,wx.Pre_auth_code,wx.Redircturl) }else { wx.Pre_auth_code="" } } //3.使用授权码换取公众号或小程序的接口调用凭据和授权信息 func (wx *WXKFManager) getCompentAuthAccesstoken(authorization_code string,handler ...func(appAuthInfo APPAuthInfoResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": 
wx.CompentAppid,"authorization_code":authorization_code}, func(response JsonResponse) { var result APPAuthInfoResp mapstructure.Decode(response.Dic,&result) result.JsonResponse=&response if len(handler)>0 { handler[0](result) } }) } //4.(刷新)授权公众号或小程序的接口调用凭据 func (wx *WXKFManager) RefreshCompentAuthAccessToken(authorizer_appid,authorizer_refresh_token string,response ...func(APPAuthInfo APPAuthInfoResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"authorizer_refresh_token":authorizer_refresh_token}, func(res JsonResponse) { var result APPAuthInfoResp mapstructure.Decode(res.Dic,&result) result.JsonResponse=&res if len(response)>0 { response[0](result) } }) } /*代公众号实现业务使用*/ //代公众号获取useraccess_token func (wx *WXKFManager) getAppAuthUserAccesstoken(code ,appid string,handler ...func(authresp AuthResp)) { url :="https://api.weixin.qq.com/sns/oauth2/component/access_token?appid="+appid url = url +"&code="+code+"&grant_type=authorization_code&component_appid="+wx.CompentAppid+"&component_access_token=" url = url + wx.Component_access_token resp := Get(url) var result AuthResp mapstructure.Decode(resp,&result) fmt.Println("代公众号获取usertoken",resp,result,appid,url) if len(handler)>0 { handler[0](result) } } //代公众号获取网页登录用户信息 func (wx *WXKFManager) getAppAuthuserInfo(auth AuthResp,handler ...func(authuser JsonResponse)) { url :="https://api.weixin.qq.com/sns/userinfo?access_token=" url =url +auth.Access_token+"&openid="+auth.Openid+"&lang=zh_CN" resp := Get(url) fmt.Println("代公众号获取网页用户信息",resp.Dic) if len(handler)>0 { handler[0](resp) } } /*解析回电xml使用*/ //解析推送授权事件的XML func (wx *WXKFManager) parsereqToAPPAuthMsg(context2 * gin.Context,f func(CheckSign bool,Orignmsg ReqMsg,Decrptmsg APPAuthMsg,safe bool)){ parsereqMsg(context2,wx.Compenttoken,wx.CompentAeskey, func(CheckSign bool, Orignmsg ReqMsg, safe bool) { var decreptMsg APPAuthMsg if safe{ decreptMsg,_ = decryptAPPAuthMsg(Orignmsg.Encrypt,wx.CompentAeskey) wx.ComponentVerifyTicket = decreptMsg.ComponentVerifyTicket if wx.Component_access_token=="" { wx.getComponent_access_token() wx.getCompent_pre_auth_code() } } f(CheckSign,Orignmsg,decreptMsg,safe) }) } //解析代公众号实现事件消息XML func (wx *WXKFManager) parsereqToReqMsg(context2 * gin.Context,f func(CheckSign bool,Orignmsg ReqMsg,Decrptmsg ReqMsg,safe bool)){ parsereqMsg(context2,wx.Compenttoken,wx.CompentAeskey, func(CheckSign bool, Orignmsg ReqMsg, safe bool) { var decreptMsg ReqMsg if safe{ decreptMsg,_ = decryptReqmsg(Orignmsg.Encrypt,wx.CompentAeskey) } f(CheckSign,Orignmsg,decreptMsg,safe) }) } //解析请求 func parsereqMsg(context2 * gin.Context,token string,aeskey string,handler func(CheckSign bool,Orignmsg ReqMsg,safe bool)) { sign := context2.Query("signature") timestamp := context2.Query("timestamp") nonce := context2.Query("nonce") encrypt_type := context2.Query("encrypt_type") msgsign := context2.Query("msg_signature") var event ReqMsg s,_:=ioutil.ReadAll(context2.Request.Body) xml.Unmarshal(s,&event) safe := encrypt_type=="aes" checksign :=false if safe{ checksign = SignMsg(token,timestamp,nonce,event.Encrypt)==msgsign }else { checksign =SignMsg(token,timestamp,nonce)==sign } handler(checksign,event,safe) }
l, Orignmsg ReqMsg,
identifier_name
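GetAppAuthurl in the wxkfmanager.go record above assembles the third-party-platform OAuth page URL by string concatenation, percent-encoding only the redirect_uri and appending the platform's component_appid before the #wechat_redirect fragment. The following is a minimal Python sketch of the same assembly with placeholder argument values; it mirrors the query layout shown in that function rather than any official SDK call.

from urllib.parse import quote

def build_app_auth_url(appid, scope, redirect_uri, state, component_appid):
    # Mirrors GetAppAuthurl: only redirect_uri is percent-encoded, and the third-party
    # platform's component_appid is appended before the mandatory #wechat_redirect fragment.
    return (
        'https://open.weixin.qq.com/connect/oauth2/authorize'
        '?appid=' + appid +
        '&redirect_uri=' + quote(redirect_uri, safe='') +
        '&response_type=code&scope=' + scope +
        '&state=' + state +
        '&component_appid=' + component_appid +
        '#wechat_redirect'
    )

# Placeholder values, not taken from the source:
print(build_app_auth_url('wx_app', 'snsapi_userinfo', 'https://example.com/cb', 'login', 'wx_component'))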
wxkfmanager.go
package wxapi import ( "encoding/xml" "fmt" "github.com/gin-gonic/gin" "github.com/mitchellh/mapstructure" "github.com/robfig/cron" "io/ioutil" "net/http" "net/url" ) type WXKfManager interface { //小程序或者公众号授权给第三方平台 HandleCompentAuthEventPush(context * gin.Context,responsehandler ...func(appmsg APPAuthMsg)) //授权事件推送,根据信息更新授权状态,对应授权事件接收URL HanleCompentAuth(context * gin.Context, responsehandler func(authinfo APPAuthInfoResp)(redicturl string)) //授权回调并得到授权方的账号信息 //授权方信息管理 GetCompentAuthOptionInfo(authorizer_appid,option_name string,responsehandler ...func(APPOptionResp)) //获取授权方的选项设置信息 SetCompentAuthOption(authorizer_appid,option_name,option_value string,responsehandler ...func(BaseResp)) //设置授权方的选项设置信息 //代公众号实现业务API //对应到消息与事件接收url中 HandleAppEventPush(ctx * gin.Context, handler func(msg ReqMsg)(usedefult bool,replymsg interface{})) //接收处处公众号事件和消息 //用户网页登录 //对应用户授权后回调 HanledAppAuth(context * gin.Context,completeHandler func(resp AuthResp,authuser AuthuserResp,state string)(redicturl string)) //获取第三方用户信息,并跳转到登录后的页面 GetAppAuthurl(appid,scope,redirect_uri,state string) string //用户登录发起页面 //用户信息 } type WXKFManager struct { Compenttoken string //第三方平台的token 消息校验token CompentAppid string //第三方平台的APPid CompentAeskey string //第三方平台的秘钥 Componentsecret string //第三方平台的appsecret Component_access_token string //第三方平台的component_access_token ComponentVerifyTicket string //微信服务器传输的第三方平台ComponentVerifyTicket Pre_auth_code string //预授权码 Redircturl string //公众号授权完成后的回调URL用来接收授权码auth_code Compentauthurl string //当前第三方平台授权移动端连接 AppAuthinfos []APPAuthInfoResp //第三方公众号令牌数组,自动刷新 } //初始化开放平台管理器 func InitWXKFManager(token,appid,EncodingAESKey,appseceret,Redircturl string,refreshAppAuthHanlder func(appauth ...APPAuthInfoResp),AppAuthinfos...[]APPAuthInfoResp) *WXKFManager { var wx WXKFManager wx.Compenttoken = token wx.CompentAeskey = EncodingAESKey wx.CompentAppid = appid wx.Componentsecret = appseceret wx.Redircturl=Redircturl //每小时刷新一次component_accesstoken cr := cron.New() spec := "0 0 */1 * * ?" err:=cr.AddFunc(spec,wx.getComponent_access_token) err= cr.AddFunc(spec, func() { if len(AppAuthinfos)>0 { wx.AppAuthinfos=AppAuthinfos[0] for _,value := range wx.AppAuthinfos{ wx.RefreshCompentAuthAccessToken(value.AuthorizationInfo.AuthorizerAppid,value.AuthorizationInfo.AuthorizerRefreshToken, func(APPAuthInfo APPAuthInfoResp) { refreshAppAuthHanlder(APPAuthInfo) }) } }else { refreshAppAuthHanlder() } }) fmt.Println("开房平台定时任务初始化",err,spec) spe :="0 */10 * * * ?" 
err=cr.AddFunc(spe,wx.getCompent_pre_auth_code) fmt.Println("开房平台定时任务初始化",err,spe) cr.Start() return &wx } //授权流程 //授权回调 获取授权码并根据授权码获取授权信息 func (wx *WXKFManager) HanleCompentAuth(context * gin.Context, responsehandler func(authinfo APPAuthInfoResp)(redicturl string)){ authcode := context.Query("auth_code") //查询公众号授权第三方平台的权限 wx.getCompentAuthAccesstoken(authcode, func(appAuthInfo APPAuthInfoResp) { wx.AppAuthinfos=append(wx.AppAuthinfos, appAuthInfo) redicturl := responsehandler(appAuthInfo) context.Redirect(http.StatusMovedPermanently,redicturl) }) } //获取授权方公众号账号基本信息 func (wx *WXKFManager) GetCompentAuthorizerInfo(authorizer_appid string,response ...func(resp APPUserInfoResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid}, func(resp JsonResponse) { var result APPUserInfoResp mapstructure.Decode(resp.Dic,&result) result.JsonResponse=&resp if len(response)>0 { response[0](result) } }) } //获取授权方选项设置信息 func (wx *WXKFManager) GetCompentAuthOptionInfo(authorizer_appid,option_name string,responsehandler ...func(APPOptionResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"option_name":option_name}, func(resp JsonResponse) { var result APPOptionResp mapstructure.Decode(resp.Dic,&result) result.JsonResponse=&resp if len(responsehandler)>0 { responsehandler[0](result) } }) } //设置授权方选项信息 func (wx *WXKFManager) SetCompentAuthOption(authorizer_appid,option_name,option_value string,responsehandler ...func(BaseResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"option_name":option_name, "option_value":option_value}, func(resp JsonResponse) { var result BaseResp mapstructure.Decode(resp.Dic,&result) result.JsonResponse=&resp if len(responsehandler)>0 { responsehandler[0](result) } }) } //授权通知处理 func (wx *WXKFManager) HandleCompentAuthEventPush(context * gin.Context,responsehandler ...func(appmsg APPAuthMsg)){ wx.parsereqToAPPAuthMsg(context, func(CheckSign bool, Orignmsg ReqMsg, Decrptmsg APPAuthMsg, safe bool) { if CheckSign { if len(responsehandler)>0 { responsehandler[0](Decrptmsg) } } }) context.String(http.StatusOK,"success") } //代公众号实现网页授权 /****************代公众号实现业务*******************/ //1.代公众号调用接口 //获取用户信息 func (wx *WXKFManager)GetUserInfo( authorizer_access_token,authorizer_appid string ,hanlder ...func(user JsonResponse) ){ if len(hanlder)>0 { getuserInfo(authorizer_access_token,authorizer_appid,hanlder[0]) }else { getuserInfo(authorizer_access_token,authorizer_appid) } } //获取用户列表 func (wx *WXKFManager)GetUserList( authorizer_access_token string,hanlder func(user JsonResponse),nestopid ...string ){ if len(nestopid)>0 { getuserlist(authorizer_access_token,hanlder,nestopid[0] ) }else { getuserlist(authorizer_access_token,hanlder) } } //2.代公众号处理消息和事件 func (wx *WXKFManager) HandleAppEventPush(ctx * gin.Context, handler func(msg ReqMsg)(usedefult bool,replymsg interface{})){ wx.parsereqToReqMsg(ctx, func(CheckSign bool, Orignmsg ReqMsg, Decrptmsg ReqMsg, safe bool) { if !CheckSign { ctx.String(http.StatusForbidden, "验证签名错误") return } if ctx.Request.Method!= http.MethodPost { echostr := 
ctx.Query("echostr") ctx.String(http.StatusOK,echostr) return } def,replymsg := handler(Decrptmsg) if def { ctx.String(http.StatusOK,"success") return } if safe { // ctx.String(http.StatusOK,string(ReplyMsgData(wx.msgEncrept(replymsg)))) }else { ctx.String(http.StatusOK, string(ReplyMsgData(replymsg))) } }) } //3.代公众号发起网页授权 //获取代公众号发起网页授权url func (wx *WXKFManager) GetAppAuthurl(appid,scope,redirect_uri,state string) string { str := url.QueryEscape(redirect_uri) url:="https://open.weixin.qq.com/connect/oauth2/authorize?appid=" url=url+appid+"&redirect_uri="+str url =url+"&response_type=code&scope="+scope+"&state="+state+"&component_appid="+wx.CompentAppid url =url+"#wechat_redirect" return url } //网页授权后回调 func (wx *WXKFManager) HanledAppAuth(context * gin.Context,completeHandler func(resp AuthResp,authuser JsonResponse,state string)(redicturl string)) { code := context.Query("code") appid := context.Query("appid") state := context.Query("state") fmt.Println(code,appid,"接收到的微信信息时") wx.getAppAuthUserAccesstoken(code,appid, func(authresp AuthResp) { wx.getAppAuthuserInfo(authresp, func(authuser JsonResponse) { redicturl:=completeHandler(authresp,authuser,state) context.Redirect(http.StatusMovedPermanently,redicturl) }) }) } //4代公众号调用jssdk /*...........私有方法.................*/ //加密XML结构体消息体 func (wx *WXKFManager)msgEncrept(msg interface{})EncryptMsg { return CreatEncryptMsg(ReplyMsgData(msg),DecodeAESKey(wx.CompentAeskey),wx.CompentAppid,wx.Compenttoken) } /*授权流程使用*/ //1.获取第三方平台access_token func (wx *WXKFManager) getComponent_access_token() { if len(wx.ComponentVerifyTicket)>0 { wx.Component_access_token = getcomponent_token(wx.CompentAppid,wx.Componentsecret,wx.ComponentVerifyTicket) }else { wx.Component_access_token="" } } //2.获取预授权码 func (wx *WXKFManager) getCompent_pre_auth_code() { if len(wx.Component_access_token)>0 { wx.Pre_auth_code= getCompent_pre_authcode(wx.Component_access_token,gin.H{"component_appid": wx.CompentAppid}) wx.Compentauthurl= getCompentAuthUrl(wx.CompentAppid,wx.Pre_auth_code,wx.Redircturl) }else { wx.Pre_auth_code="" } } //3.使用授权码换取公众号或小程序的接口调用凭据和授权信息 func (wx *WXKFManager) getCompentAuthAccesstoken(authorization_code string,handler ...func(appAuthInfo APPAuthInfoResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorization_code":authorization_code}, func(response JsonResponse) { var result APPAuthInfoResp mapstructure.Decode(response.Dic,&result) result.JsonResponse=&response if len(handler)>0 { handler[0](result) } }) } //4.(刷新)授权公众号或小程序的接口调用凭据 func (wx *WXKFManager) RefreshCompentAuthAccessToken(authorizer_appid,authorizer_refresh_token string,response ...func(APPAuthInfo APPAuthInfoResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"authorizer_refresh_token":authorizer_refresh_token}, func(res JsonResponse) { var result APPAuthInfoResp mapstructure.Decode(res.Dic,&result) result.JsonResponse=&res if len(response)>0 { response[0](result) } }) } /*代公众号实现业务使用*/ //代公众号获取useraccess_token func (wx *WXKFManager) getAppAuthUserAccesstoken(code ,appid string,handler ...func(authresp AuthResp)) { url :="https://api.weixin.qq.com/sns/oauth2/component/access_token?appid="+appid url = url 
+"&code="+code+"&grant_type=authorization_code&component_appid="+wx.CompentAppid+"&component_access_token=" url = url + wx.Component_access_token resp := Get(url) var result
ode(resp,&result) fmt.Println("代公众号获取usertoken",resp,result,appid,url) if len(handler)>0 { handler[0](result) } } //代公众号获取网页登录用户信息 func (wx *WXKFManager) getAppAuthuserInfo(auth AuthResp,handler ...func(authuser JsonResponse)) { url :="https://api.weixin.qq.com/sns/userinfo?access_token=" url =url +auth.Access_token+"&openid="+auth.Openid+"&lang=zh_CN" resp := Get(url) fmt.Println("代公众号获取网页用户信息",resp.Dic) if len(handler)>0 { handler[0](resp) } } /*解析回电xml使用*/ //解析推送授权事件的XML func (wx *WXKFManager) parsereqToAPPAuthMsg(context2 * gin.Context,f func(CheckSign bool,Orignmsg ReqMsg,Decrptmsg APPAuthMsg,safe bool)){ parsereqMsg(context2,wx.Compenttoken,wx.CompentAeskey, func(CheckSign bool, Orignmsg ReqMsg, safe bool) { var decreptMsg APPAuthMsg if safe{ decreptMsg,_ = decryptAPPAuthMsg(Orignmsg.Encrypt,wx.CompentAeskey) wx.ComponentVerifyTicket = decreptMsg.ComponentVerifyTicket if wx.Component_access_token=="" { wx.getComponent_access_token() wx.getCompent_pre_auth_code() } } f(CheckSign,Orignmsg,decreptMsg,safe) }) } //解析代公众号实现事件消息XML func (wx *WXKFManager) parsereqToReqMsg(context2 * gin.Context,f func(CheckSign bool,Orignmsg ReqMsg,Decrptmsg ReqMsg,safe bool)){ parsereqMsg(context2,wx.Compenttoken,wx.CompentAeskey, func(CheckSign bool, Orignmsg ReqMsg, safe bool) { var decreptMsg ReqMsg if safe{ decreptMsg,_ = decryptReqmsg(Orignmsg.Encrypt,wx.CompentAeskey) } f(CheckSign,Orignmsg,decreptMsg,safe) }) } //解析请求 func parsereqMsg(context2 * gin.Context,token string,aeskey string,handler func(CheckSign bool,Orignmsg ReqMsg,safe bool)) { sign := context2.Query("signature") timestamp := context2.Query("timestamp") nonce := context2.Query("nonce") encrypt_type := context2.Query("encrypt_type") msgsign := context2.Query("msg_signature") var event ReqMsg s,_:=ioutil.ReadAll(context2.Request.Body) xml.Unmarshal(s,&event) safe := encrypt_type=="aes" checksign :=false if safe{ checksign = SignMsg(token,timestamp,nonce,event.Encrypt)==msgsign }else { checksign =SignMsg(token,timestamp,nonce)==sign } handler(checksign,event,safe) }
AuthResp mapstructure.Dec
conditional_block
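parsereqMsg in the records above verifies every push against either signature (plaintext) or msg_signature (aes mode, which also covers the Encrypt body) through a SignMsg helper that is not shown in this file. The sketch below assumes SignMsg follows WeChat's usual scheme of sorting the strings, concatenating them and hashing with SHA-1; treat the helper names as illustrative.

import hashlib

def sign_msg(*parts):
    # Assumed behaviour of the SignMsg helper referenced in parsereqMsg:
    # sort the strings, join them and take the SHA-1 hex digest.
    return hashlib.sha1(''.join(sorted(parts)).encode('utf-8')).hexdigest()

def check_signature(token, timestamp, nonce, signature, msg_signature=None, encrypt=None):
    # Encrypted (aes) pushes are checked against msg_signature and include the Encrypt body;
    # plaintext pushes are checked against signature, matching the branch in parsereqMsg.
    if encrypt is not None:
        return sign_msg(token, timestamp, nonce, encrypt) == msg_signature
    return sign_msg(token, timestamp, nonce) == signature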
wxkfmanager.go
package wxapi import ( "encoding/xml" "fmt" "github.com/gin-gonic/gin" "github.com/mitchellh/mapstructure" "github.com/robfig/cron" "io/ioutil" "net/http" "net/url" ) type WXKfManager interface { //小程序或者公众号授权给第三方平台 HandleCompentAuthEventPush(context * gin.Context,responsehandler ...func(appmsg APPAuthMsg)) //授权事件推送,根据信息更新授权状态,对应授权事件接收URL HanleCompentAuth(context * gin.Context, responsehandler func(authinfo APPAuthInfoResp)(redicturl string)) //授权回调并得到授权方的账号信息 //授权方信息管理 GetCompentAuthOptionInfo(authorizer_appid,option_name string,responsehandler ...func(APPOptionResp)) //获取授权方的选项设置信息 SetCompentAuthOption(authorizer_appid,option_name,option_value string,responsehandler ...func(BaseResp)) //设置授权方的选项设置信息 //代公众号实现业务API //对应到消息与事件接收url中 HandleAppEventPush(ctx * gin.Context, handler func(msg ReqMsg)(usedefult bool,replymsg interface{})) //接收处处公众号事件和消息 //用户网页登录 //对应用户授权后回调 HanledAppAuth(context * gin.Context,completeHandler func(resp AuthResp,authuser AuthuserResp,state string)(redicturl string)) //获取第三方用户信息,并跳转到登录后的页面 GetAppAuthurl(appid,scope,redirect_uri,state string) string //用户登录发起页面 //用户信息 } type WXKFManager struct { Compenttoken string //第三方平台的token 消息校验token CompentAppid string //第三方平台的APPid CompentAeskey string //第三方平台的秘钥 Componentsecret string //第三方平台的appsecret Component_access_token string //第三方平台的component_access_token ComponentVerifyTicket string //微信服务器传输的第三方平台ComponentVerifyTicket Pre_auth_code string //预授权码 Redircturl string //公众号授权完成后的回调URL用来接收授权码auth_code Compentauthurl string //当前第三方平台授权移动端连接 AppAuthinfos []APPAuthInfoResp //第三方公众号令牌数组,自动刷新 } //初始化开放平台管理器 func InitWXKFManager(token,appid,EncodingAESKey,appseceret,Redircturl string,refreshAppAuthHanlder func(appauth ...APPAuthInfoResp),AppAuthinfos...[]APPAuthInfoResp) *WXKFManager { var wx WXKFManager wx.Compenttoken = token wx.CompentAeskey = EncodingAESKey wx.CompentAppid = appid wx.Componentsecret = appseceret wx.Redircturl=Redircturl //每小时刷新一次component_accesstoken cr := cron.New() spec := "0 0 */1 * * ?" err:=cr.AddFunc(spec,wx.getComponent_access_token) err= cr.AddFunc(spec, func() { if len(AppAuthinfos)>0 { wx.AppAuthinfos=AppAuthinfos[0] for _,value := range wx.AppAuthinfos{ wx.RefreshCompentAuthAccessToken(value.AuthorizationInfo.AuthorizerAppid,value.AuthorizationInfo.AuthorizerRefreshToken, func(APPAuthInfo APPAuthInfoResp) { refreshAppAuthHanlder(APPAuthInfo) }) } }else { refreshAppAuthHanlder() } }) fmt.Println("开房平台定时任务初始化",err,spec) spe :="0 */10 * * * ?" 
err=cr.AddFunc(spe,wx.getCompent_pre_auth_code) fmt.Println("开房平台定时任务初始化",err,spe) cr.Start() return &wx } //授权流程 //授权回调 获取授权码并根据授权码获取授权信息 func (wx *WXKFManager) HanleCompentAuth(context * gin.Context, responsehandler func(authinfo APPAuthInfoResp)(redicturl string)){ authcode := context.Query("auth_code") //查询公众号授权第三方平台的权限 wx.getCompentAuthAccesstoken(authcode, func(appAuthInfo APPAuthInfoResp) { wx.AppAuthinfos=append(wx.AppAuthinfos, appAuthInfo) redicturl := responsehandler(appAuthInfo) context.Redirect(http.StatusMovedPermanently,redicturl) }) } //获取授权方公众号账号基本信息 func (wx *WXKFManager) GetCompentAuthorizerInfo(authorizer_appid string,response ...func(resp APPUserInfoResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid}, func(resp JsonResponse) { var result APPUserInfoResp mapstructure.Decode(resp.Dic,&result) result.JsonResponse=&resp if len(response)>0 { response[0](result) } }) } //获取授权方选项设置信息 func (wx *WXKFManager) GetCompentAuthOptionInfo(authorizer_appid,option_name string,responsehandler ...func(APPOptionResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"option_name":option_name}, func(resp JsonResponse) { var result APPOptionResp mapstructure.Decode(resp.Dic,&result) result.JsonResponse=&resp if len(responsehandler)>0 { responsehandler[0](result) } }) } //设置授权方选项信息 func (wx *WXKFManager) SetCompentAuthOption(authorizer_appid,option_name,option_value string,responsehandler ...func(BaseResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"option_name":option_name, "option_value":option_value}, func(resp JsonResponse) { var result BaseResp mapstructure.Decode(resp.Dic,&result) result.JsonResponse=&resp if len(responsehandler)>0 { responsehandler[0](result) } }) } //授权通知处理 func (wx *WXKFManager) HandleCompentAuthEventPush(context * gin.Context,responsehandler ...func(appmsg APPAuthMsg)){ wx.parsereqToAPPAuthMsg(context, func(CheckSign bool, Orignmsg ReqMsg, Decrptmsg APPAuthMsg, safe bool) { if CheckSign { if len(responsehandler)>0 { responsehandler[0](Decrptmsg) } } }) context.String(http.StatusOK,"success") } //代公众号实现网页授权 /****************代公众号实现业务*******************/ //1.代公众号调用接口 //获取用户信息 func (wx *WXKFManager)GetUserInfo( authorizer_access_token,authorizer_appid string ,hanlder ...func(user JsonResponse) ){ if len(hanlder)>0 { getuserInfo(authorizer_access_token,authorizer_appid,hanlder[0]) }else { getuserInfo(authorizer_access_token,authorizer_appid) } } //获取用户列表 func (wx *WXKFManager)GetUserList( authorizer_access_token string,hanlder func(user JsonResponse),nestopid ...string ){ if len(nestopid)>0 { getuserlist(authorizer_access_token,hanlder,nestopid[0] ) }else { getuserlist(authorizer_access_token,hanlder) } } //2.代公众号处理消息和事件 func (wx *WXKFManager) HandleAppEventPush(ctx * gin.Context, handler func(msg ReqMsg)(usedefult bool,replymsg interface{})){ wx.parsereqToReqMsg(ctx, func(CheckSign bool, Orignmsg ReqMsg, Decrptmsg ReqMsg, safe bool) { if !CheckSign { ctx.String(http.StatusForbidden, "验证签名错误") return } if ctx.Request.Method!= http.MethodPost { echostr := 
ctx.Query("echostr") ctx.String(http.StatusOK,echostr) return } def,replymsg := handler(Decrptmsg) if def { ctx.String(http.StatusOK,"success") return } if safe { // ctx.String(http.StatusOK,string(ReplyMsgData(wx.msgEncrept(replymsg)))) }else { ctx.String(http.StatusOK, string(ReplyMsgData(replymsg))) } }) } //3.代公众号发起网页授权 //获取代公众号发起网页授权url func (wx *WXKFManager) GetAppAuthurl(appid,scope,redirect_uri,state string) string { str := url.QueryEscape(redirect_uri) url:="https://open.weixin.qq.com/connect/oauth2/authorize?appid=" url=url+appid+"&redirect_uri="+str url =url+"&response_type=code&scope="+scope+"&state="+state+"&component_appid="+wx.CompentAppid url =url+"#wechat_redirect" return url } //网页授权后回调 func (wx *WXKFManager) HanledAppAuth(context * gin.Context,completeHandler func(resp AuthResp,authuser JsonResponse,state string)(redicturl string)) { code := context.Query("code") appid := context.Query("appid") state := context.Query("state") fmt.Println(code,appid,"接收到的微信信息时") wx.getAppAuthUserAccesstoken(code,appid, func(authresp AuthResp) { wx.getAppAuthuserInfo(authresp, func(authuser JsonResponse) { redicturl:=completeHandler(authresp,authuser,state) context.Redirect(http.StatusMovedPermanently,redicturl) }) }) } //4代公众号调用jssdk /*...........私有方法.................*/ //加密XML结构体消息体 func (wx *WXKFManager)msgEncrept(msg interface{})EncryptMsg { return CreatEncryptMsg(ReplyMsgData(msg),DecodeAESKey(wx.CompentAeskey),wx.CompentAppid,wx.Compenttoken) } /*授权流程使用*/ //1.获取第三方平台access_token func (wx *WXKFManager) getComponent_access_token() { if len(wx.ComponentVerifyTicket)>0 { wx.Component_access_token = getcomponent_token(wx.CompentAppid,wx.Componentsecret,wx.ComponentVerifyTicket) }else { wx.Component_access_token="" } } //2.获取预授权码 func (wx *WXKFManager) getCompent_pre_auth_code() { if len(wx.Component_access_token)>0 { wx.Pre_auth_code= getCompent_pre_authcode(wx.Component_access_token,gin.H{"component_appid": wx.CompentAppid}) wx.Compentauthurl= getCompentAuthUrl(wx.CompentAppid,wx.Pre_auth_code,wx.Redircturl) }else { wx.Pre_auth_code="" } } //3.使用授权码换取公众号或小程序的接口调用凭据和授权信息 func (wx *WXKFManager) getCompentAuthAccesstoken(authorization_code string,handler ...func(appAuthInfo APPAuthInfoResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorization_code":authorization_code}, func(response JsonResponse) { var result APPAuthInfoResp mapstructure.Decode(response.Dic,&result) result.JsonResponse=&response if len(handler)>0 { handler[0](result) } }) } //4.(刷新)授权公众号或小程序的接口调用凭据 func (wx *WXKFManager) RefreshCompentAuthAccessToken(authorizer_appid,authorizer_refresh_token string,response ...func(APPAuthInfo APPAuthInfoResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid,"authorizer_refresh_token":authorizer_refresh_token}, func(res JsonResponse) { var result APPAuthInfoResp mapstructure.Decode(res.Dic,&result) result.JsonResponse=&res if len(response)>0 { response[0](result) } }) } /*代公众号实现业务使用*/ //代公众号获取useraccess_token func (wx *WXKFManager) getAppAuthUserAccesstoken(code ,appid string,handler ...func(authresp AuthResp)) { url :="https://api.weixin.qq.com/sns/oauth2/component/access_token?appid="+appid url = url 
+"&code="+code+"&grant_type=authorization_code&component_appid="+wx.CompentAppid+"&component_access_token=" url = url + wx.Component_access_token resp := Get(url) var result AuthResp mapstructure.Decode(resp,&result) fmt.Println("代公众号获取usertoken",resp,result,appid,url) if len(handler)>0 { handler[0](result) } } //代公众号获取网页登录用户信息 func (wx *WXKFManager) getAppAuthuserInfo(auth AuthResp,handler ...func(authuser JsonResponse)) { url :="https://api.weixin.qq.com/sns/userinfo?access_token=" url =url +auth.Access_token+"&openid="+auth.Openid+"&lang=zh_CN" resp := Get(url) fmt.Println("代公众号获取网页用户信息",resp.Dic) if len(handler)>0 { handler[0](resp) } } /*解析回电xml使用*/ //解析推送授权事件的XML func (wx *WXKFManager) parsereqToAPPAuthMsg(context2 * gin.Context,f func(CheckSign bool,Orignmsg ReqMsg,Decrptmsg APPAuthMsg,safe bool)){ parsereqMsg(context2,wx.Compenttoken,wx.CompentAeskey, func(CheckSign bool, Orignmsg ReqMsg, safe bool) { var decreptMsg APPAuthMsg if safe{ decreptMsg,_ = decryptAPPAuthMsg(Orignmsg.Encrypt,wx.CompentAeskey) wx.ComponentVerifyTicket = decreptMsg.ComponentVerifyTicket if wx.Component_access_token=="" { wx.getComponent_access_token() wx.getCompent_pre_auth_code() } } f(CheckSign,Orignmsg,decreptMsg,safe) }) } //解析代公众号实现事件消息XML func (wx *WXKFManager) parsereqToReqMsg(context2 * gin.Context,f func(CheckSign bool,Orignmsg ReqMsg,Decrptmsg ReqMsg,safe bool)){ parsereqMsg(context2,wx.Compenttoken,wx.CompentAeskey, func(CheckSign bool, Orignmsg ReqMsg, safe bool) { var decreptMsg ReqMsg if safe{ decreptMsg,_ = decryptReqmsg(Orignmsg.Encrypt,wx.CompentAeskey) } f(CheckSign,Orignmsg,decreptMsg,safe) }) } //解析请求 func parsereqMsg(context2 * gin.Context,token string,aeskey string,handler func(CheckSign bool,Orignmsg ReqMsg,safe bool)) { sign := context2.Query("signature") timestamp := context2.Query("timestamp") nonce := context2.Query("nonce") encrypt_type := context2.Query("encrypt_type") msgsign := context2.Query("msg_signature") var event ReqMsg s,_:=ioutil.ReadAll(context2.Request.Body) xml.Unmarshal(s,&event) safe := encrypt_type=="aes" checksign :=false if safe{ checksign = SignMsg(token,timestamp,nonce,event.Encrypt)==msgsign }else { checksign =SignMsg(token,timestamp,nonce)==sign } handler(checksign,event,safe) }
identifier_body
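The record above also shows the credential chain the manager keeps warm with cron: the component verify ticket pushed by WeChat feeds component_access_token (refreshed hourly), which in turn is required for the pre-auth code (refreshed every ten minutes). Below is a rough Python sketch of that dependency only, with hypothetical fetch_* callables standing in for the getcomponent_token and getCompent_pre_authcode requests made in the Go code.

import threading

class ComponentCredentials:
    # Keeps component_access_token and pre_auth_code fresh, mirroring the two cron jobs in wxkfmanager.go.
    def __init__(self, fetch_access_token, fetch_pre_auth_code):
        # fetch_* are hypothetical callables standing in for the WeChat API requests made in Go.
        self.fetch_access_token = fetch_access_token
        self.fetch_pre_auth_code = fetch_pre_auth_code
        self.verify_ticket = ''   # updated whenever WeChat pushes component_verify_ticket to the event URL
        self.access_token = ''
        self.pre_auth_code = ''

    def refresh_access_token(self):
        # Nothing can be exchanged without a verify ticket, as in getComponent_access_token.
        self.access_token = self.fetch_access_token(self.verify_ticket) if self.verify_ticket else ''
        threading.Timer(3600, self.refresh_access_token).start()   # hourly, like "0 0 */1 * * ?"

    def refresh_pre_auth_code(self):
        # The pre-auth code requires a valid access token, as in getCompent_pre_auth_code.
        self.pre_auth_code = self.fetch_pre_auth_code(self.access_token) if self.access_token else ''
        threading.Timer(600, self.refresh_pre_auth_code).start()   # every 10 minutes, like "0 */10 * * * ?"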
wxkfmanager.go
package wxapi import ( "encoding/xml" "fmt" "github.com/gin-gonic/gin" "github.com/mitchellh/mapstructure" "github.com/robfig/cron" "io/ioutil" "net/http" "net/url" ) type WXKfManager interface { //小程序或者公众号授权给第三方平台 HandleCompentAuthEventPush(context * gin.Context,responsehandler ...func(appmsg APPAuthMsg)) //授权事件推送,根据信息更新授权状态,对应授权事件接收URL HanleCompentAuth(context * gin.Context, responsehandler func(authinfo APPAuthInfoResp)(redicturl string)) //授权回调并得到授权方的账号信息 //授权方信息管理 GetCompentAuthOptionInfo(authorizer_appid,option_name string,responsehandler ...func(APPOptionResp)) //获取授权方的选项设置信息 SetCompentAuthOption(authorizer_appid,option_name,option_value string,responsehandler ...func(BaseResp)) //设置授权方的选项设置信息 //代公众号实现业务API //对应到消息与事件接收url中 HandleAppEventPush(ctx * gin.Context, handler func(msg ReqMsg)(usedefult bool,replymsg interface{})) //接收处处公众号事件和消息 //用户网页登录 //对应用户授权后回调 HanledAppAuth(context * gin.Context,completeHandler func(resp AuthResp,authuser AuthuserResp,state string)(redicturl string)) //获取第三方用户信息,并跳转到登录后的页面 GetAppAuthurl(appid,scope,redirect_uri,state string) string //用户登录发起页面 //用户信息 } type WXKFManager struct { Compenttoken string //第三方平台的token 消息校验token CompentAppid string //第三方平台的APPid CompentAeskey string //第三方平台的秘钥 Componentsecret string //第三方平台的appsecret Component_access_token string //第三方平台的component_access_token ComponentVerifyTicket string //微信服务器传输的第三方平台ComponentVerifyTicket Pre_auth_code string //预授权码 Redircturl string //公众号授权完成后的回调URL用来接收授权码auth_code Compentauthurl string //当前第三方平台授权移动端连接 AppAuthinfos []APPAuthInfoResp //第三方公众号令牌数组,自动刷新 } //初始化开放平台管理器 func InitWXKFManager(token,appid,EncodingAESKey,appseceret,Redircturl string,refreshAppAuthHanlder func(appauth ...APPAuthInfoResp),AppAuthinfos...[]APPAuthInfoResp) *WXKFManager { var wx WXKFManager wx.Compenttoken = token wx.CompentAeskey = EncodingAESKey wx.CompentAppid = appid wx.Componentsecret = appseceret wx.Redircturl=Redircturl //每小时刷新一次component_accesstoken cr := cron.New() spec := "0 0 */1 * * ?" err:=cr.AddFunc(spec,wx.getComponent_access_token) err= cr.AddFunc(spec, func() { if len(AppAuthinfos)>0 { wx.AppAuthinfos=AppAuthinfos[0] for _,value := range wx.AppAuthinfos{ wx.RefreshCompentAuthAccessToken(value.AuthorizationInfo.AuthorizerAppid,value.AuthorizationInfo.AuthorizerRefreshToken, func(APPAuthInfo APPAuthInfoResp) { refreshAppAuthHanlder(APPAuthInfo) }) } }else { refreshAppAuthHanlder() } }) fmt.Println("开房平台定时任务初始化",err,spec) spe :="0 */10 * * * ?" err=cr.AddFunc(spe,wx.getCompent_pre_auth_code) fmt.Println("开房平台定时任务初始化",err,spe) cr.Start() return &wx } //授权流程 //授权回调 获取授权码并根据授权码获取授权信息 func (wx *WXKFManager) HanleCompentAuth(context * gin.Context, responsehandler func(authinfo APPAuthInfoResp)(redicturl string)){ authcode := context.Query("auth_code") //查询公众号授权第三方平台的权限 wx.getCompentAuthAccesstoken(authcode, func(appAuthInfo APPAuthInfoResp) { wx.AppAuthinfos=append(wx.AppAuthinfos, appAuthInfo) redicturl := responsehandler(appAuthInfo) context.Redirect(http.StatusMovedPermanently,redicturl) }) } //获取授权方公众号账号基本信息 func (wx *WXKFManager) GetCompentAuthorizerInfo(authorizer_appid string,response ...func(resp APPUserInfoResp)) { url :="https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token=" url =url +wx.Component_access_token POSTJson(url,gin.H{"component_appid": wx.CompentAppid,"authorizer_appid":authorizer_appid}, func(resp JsonResponse) { var result APPUserInfoResp mapstructure.Decode(resp.Dic,&result) result.JsonResponse=&resp if len(response)>0 { response[0](result) } }) }
// Get the authorizer's option settings
func (wx *WXKFManager) GetCompentAuthOptionInfo(authorizer_appid, option_name string, responsehandler ...func(APPOptionResp)) {
    url := "https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
    url = url + wx.Component_access_token
    POSTJson(url, gin.H{"component_appid": wx.CompentAppid, "authorizer_appid": authorizer_appid, "option_name": option_name}, func(resp JsonResponse) {
        var result APPOptionResp
        mapstructure.Decode(resp.Dic, &result)
        result.JsonResponse = &resp
        if len(responsehandler) > 0 {
            responsehandler[0](result)
        }
    })
}

// Set the authorizer's option settings
func (wx *WXKFManager) SetCompentAuthOption(authorizer_appid, option_name, option_value string, responsehandler ...func(BaseResp)) {
    url := "https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
    url = url + wx.Component_access_token
    POSTJson(url, gin.H{"component_appid": wx.CompentAppid, "authorizer_appid": authorizer_appid, "option_name": option_name, "option_value": option_value}, func(resp JsonResponse) {
        var result BaseResp
        mapstructure.Decode(resp.Dic, &result)
        result.JsonResponse = &resp
        if len(responsehandler) > 0 {
            responsehandler[0](result)
        }
    })
}

// Handle authorization event notifications
func (wx *WXKFManager) HandleCompentAuthEventPush(context *gin.Context, responsehandler ...func(appmsg APPAuthMsg)) {
    wx.parsereqToAPPAuthMsg(context, func(CheckSign bool, Orignmsg ReqMsg, Decrptmsg APPAuthMsg, safe bool) {
        if CheckSign {
            if len(responsehandler) > 0 {
                responsehandler[0](Decrptmsg)
            }
        }
    })
    context.String(http.StatusOK, "success")
}

// Web authorization on behalf of the official account
/**************** business on behalf of the official account *******************/
// 1. Call APIs on behalf of the official account
// Get user information
func (wx *WXKFManager) GetUserInfo(authorizer_access_token, authorizer_appid string, hanlder ...func(user JsonResponse)) {
    if len(hanlder) > 0 {
        getuserInfo(authorizer_access_token, authorizer_appid, hanlder[0])
    } else {
        getuserInfo(authorizer_access_token, authorizer_appid)
    }
}

// Get the user list
func (wx *WXKFManager) GetUserList(authorizer_access_token string, hanlder func(user JsonResponse), nestopid ...string) {
    if len(nestopid) > 0 {
        getuserlist(authorizer_access_token, hanlder, nestopid[0])
    } else {
        getuserlist(authorizer_access_token, hanlder)
    }
}

// 2. Handle messages and events on behalf of the official account
func (wx *WXKFManager) HandleAppEventPush(ctx *gin.Context, handler func(msg ReqMsg) (usedefult bool, replymsg interface{})) {
    wx.parsereqToReqMsg(ctx, func(CheckSign bool, Orignmsg ReqMsg, Decrptmsg ReqMsg, safe bool) {
        if !CheckSign {
            ctx.String(http.StatusForbidden, "signature verification failed")
            return
        }
        if ctx.Request.Method != http.MethodPost {
            echostr := ctx.Query("echostr")
            ctx.String(http.StatusOK, echostr)
            return
        }
        def, replymsg := handler(Decrptmsg)
        if def {
            ctx.String(http.StatusOK, "success")
            return
        }
        if safe {
            // ctx.String(http.StatusOK, string(ReplyMsgData(wx.msgEncrept(replymsg))))
        } else {
            ctx.String(http.StatusOK, string(ReplyMsgData(replymsg)))
        }
    })
}

// 3. Start web authorization on behalf of the official account
// Build the web-authorization URL used on behalf of the official account
func (wx *WXKFManager) GetAppAuthurl(appid, scope, redirect_uri, state string) string {
    str := url.QueryEscape(redirect_uri)
    url := "https://open.weixin.qq.com/connect/oauth2/authorize?appid="
    url = url + appid + "&redirect_uri=" + str
    url = url + "&response_type=code&scope=" + scope + "&state=" + state + "&component_appid=" + wx.CompentAppid
    url = url + "#wechat_redirect"
    return url
}

// Callback hit after web authorization
func (wx *WXKFManager) HanledAppAuth(context *gin.Context, completeHandler func(resp AuthResp, authuser JsonResponse, state string) (redicturl string)) {
    code := context.Query("code")
    appid := context.Query("appid")
    state := context.Query("state")
    fmt.Println(code, appid, "parameters received from WeChat")
    wx.getAppAuthUserAccesstoken(code, appid, func(authresp AuthResp) {
        wx.getAppAuthuserInfo(authresp, func(authuser JsonResponse) {
            redicturl := completeHandler(authresp, authuser, state)
            context.Redirect(http.StatusMovedPermanently, redicturl)
        })
    })
}

// 4. Call the JSSDK on behalf of the official account

/*........... private methods .................*/

// Encrypt an XML message struct
func (wx *WXKFManager) msgEncrept(msg interface{}) EncryptMsg {
    return CreatEncryptMsg(ReplyMsgData(msg), DecodeAESKey(wx.CompentAeskey), wx.CompentAppid, wx.Compenttoken)
}

/* used by the authorization flow */
// 1. Get the third-party platform access_token
func (wx *WXKFManager) getComponent_access_token() {
    if len(wx.ComponentVerifyTicket) > 0 {
        wx.Component_access_token = getcomponent_token(wx.CompentAppid, wx.Componentsecret, wx.ComponentVerifyTicket)
    } else {
        wx.Component_access_token = ""
    }
}

// 2. Get the pre-authorization code
func (wx *WXKFManager) getCompent_pre_auth_code() {
    if len(wx.Component_access_token) > 0 {
        wx.Pre_auth_code = getCompent_pre_authcode(wx.Component_access_token, gin.H{"component_appid": wx.CompentAppid})
        wx.Compentauthurl = getCompentAuthUrl(wx.CompentAppid, wx.Pre_auth_code, wx.Redircturl)
    } else {
        wx.Pre_auth_code = ""
    }
}

// 3. Exchange the authorization code for the official account's or mini program's API credentials and authorization info
func (wx *WXKFManager) getCompentAuthAccesstoken(authorization_code string, handler ...func(appAuthInfo APPAuthInfoResp)) {
    url := "https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
    url = url + wx.Component_access_token
    POSTJson(url, gin.H{"component_appid": wx.CompentAppid, "authorization_code": authorization_code}, func(response JsonResponse) {
        var result APPAuthInfoResp
        mapstructure.Decode(response.Dic, &result)
        result.JsonResponse = &response
        if len(handler) > 0 {
            handler[0](result)
        }
    })
}

// 4. Refresh the authorized official account's or mini program's API credentials
func (wx *WXKFManager) RefreshCompentAuthAccessToken(authorizer_appid, authorizer_refresh_token string, response ...func(APPAuthInfo APPAuthInfoResp)) {
    url := "https://api.weixin.qq.com/cgi-bin/component/api_query_auth?component_access_token="
    url = url + wx.Component_access_token
    POSTJson(url, gin.H{"component_appid": wx.CompentAppid, "authorizer_appid": authorizer_appid, "authorizer_refresh_token": authorizer_refresh_token}, func(res JsonResponse) {
        var result APPAuthInfoResp
        mapstructure.Decode(res.Dic, &result)
        result.JsonResponse = &res
        if len(response) > 0 {
            response[0](result)
        }
    })
}

/* used by business on behalf of the official account */
// Get the user access_token on behalf of the official account
func (wx *WXKFManager) getAppAuthUserAccesstoken(code, appid string, handler ...func(authresp AuthResp)) {
    url := "https://api.weixin.qq.com/sns/oauth2/component/access_token?appid=" + appid
    url = url + "&code=" + code + "&grant_type=authorization_code&component_appid=" + wx.CompentAppid + "&component_access_token="
    url = url + wx.Component_access_token
    resp := Get(url)
    var result AuthResp
    mapstructure.Decode(resp, &result)
    fmt.Println("user access_token fetched on behalf of the official account", resp, result, appid, url)
    if len(handler) > 0 {
        handler[0](result)
    }
}

// Get the web-login user information on behalf of the official account
func (wx *WXKFManager) getAppAuthuserInfo(auth AuthResp, handler ...func(authuser JsonResponse)) {
    url := "https://api.weixin.qq.com/sns/userinfo?access_token="
    url = url + auth.Access_token + "&openid=" + auth.Openid + "&lang=zh_CN"
    resp := Get(url)
    fmt.Println("web user info fetched on behalf of the official account", resp.Dic)
    if len(handler) > 0 {
        handler[0](resp)
    }
}

/* used for parsing callback XML */
// Parse the XML of pushed authorization events
func (wx *WXKFManager) parsereqToAPPAuthMsg(context2 *gin.Context, f func(CheckSign bool, Orignmsg ReqMsg, Decrptmsg APPAuthMsg, safe bool)) {
    parsereqMsg(context2, wx.Compenttoken, wx.CompentAeskey, func(CheckSign bool, Orignmsg ReqMsg, safe bool) {
        var decreptMsg APPAuthMsg
        if safe {
            decreptMsg, _ = decryptAPPAuthMsg(Orignmsg.Encrypt, wx.CompentAeskey)
            wx.ComponentVerifyTicket = decreptMsg.ComponentVerifyTicket
            if wx.Component_access_token == "" {
                wx.getComponent_access_token()
                wx.getCompent_pre_auth_code()
            }
        }
        f(CheckSign, Orignmsg, decreptMsg, safe)
    })
}

// Parse event and message XML handled on behalf of the official account
func (wx *WXKFManager) parsereqToReqMsg(context2 *gin.Context, f func(CheckSign bool, Orignmsg ReqMsg, Decrptmsg ReqMsg, safe bool)) {
    parsereqMsg(context2, wx.Compenttoken, wx.CompentAeskey, func(CheckSign bool, Orignmsg ReqMsg, safe bool) {
        var decreptMsg ReqMsg
        if safe {
            decreptMsg, _ = decryptReqmsg(Orignmsg.Encrypt, wx.CompentAeskey)
        }
        f(CheckSign, Orignmsg, decreptMsg, safe)
    })
}

// Parse the incoming request
func parsereqMsg(context2 *gin.Context, token string, aeskey string, handler func(CheckSign bool, Orignmsg ReqMsg, safe bool)) {
    sign := context2.Query("signature")
    timestamp := context2.Query("timestamp")
    nonce := context2.Query("nonce")
    encrypt_type := context2.Query("encrypt_type")
    msgsign := context2.Query("msg_signature")
    var event ReqMsg
    s, _ := ioutil.ReadAll(context2.Request.Body)
    xml.Unmarshal(s, &event)
    safe := encrypt_type == "aes"
    checksign := false
    if safe {
        checksign = SignMsg(token, timestamp, nonce, event.Encrypt) == msgsign
    } else {
        checksign = SignMsg(token, timestamp, nonce) == sign
    }
    handler(checksign, event, safe)
}
random_line_split
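The wxkfmanager.go record above only defines the manager and its handlers; it never shows how they are reached. As a quick orientation, here is a minimal, hypothetical wiring sketch that mounts the record's exported entry points on gin routes. The module path yourmodule/wxapi, the route paths, and every credential string are placeholder assumptions, and error handling is omitted; this is a sketch of how the handlers could be exposed, not part of the original package.

package main

import (
    "fmt"

    "github.com/gin-gonic/gin"

    "yourmodule/wxapi" // hypothetical import path for the package shown above
)

func main() {
    // All credentials and URLs below are placeholders.
    mgr := wxapi.InitWXKFManager(
        "component-token", "component-appid", "encoding-aes-key", "component-secret",
        "https://example.com/component/auth/callback",
        func(infos ...wxapi.APPAuthInfoResp) {
            // Invoked by the hourly cron job with refreshed authorizer tokens; persist them here.
            fmt.Println("refreshed authorizer tokens:", len(infos))
        },
    )

    r := gin.Default()

    // Authorization event push URL (component_verify_ticket and authorization events).
    r.POST("/component/event", func(c *gin.Context) {
        mgr.HandleCompentAuthEventPush(c, func(msg wxapi.APPAuthMsg) {
            fmt.Printf("auth event: %+v\n", msg)
        })
    })

    // Callback hit after an official account finishes authorizing the platform.
    r.GET("/component/auth/callback", func(c *gin.Context) {
        mgr.HanleCompentAuth(c, func(info wxapi.APPAuthInfoResp) (redicturl string) {
            return "https://example.com/authorized" // where to send the account administrator next
        })
    })

    // Message and event URL served on behalf of the authorized official accounts.
    r.Any("/app/event", func(c *gin.Context) {
        mgr.HandleAppEventPush(c, func(msg wxapi.ReqMsg) (usedefult bool, replymsg interface{}) {
            return true, nil // fall back to the default "success" reply
        })
    })

    // Redirect target of the web-authorization URL built with GetAppAuthurl.
    r.GET("/app/oauth/callback", func(c *gin.Context) {
        mgr.HanledAppAuth(c, func(resp wxapi.AuthResp, user wxapi.JsonResponse, state string) (redicturl string) {
            return "https://example.com/logged-in"
        })
    })

    r.Run(":8080")
}

Because several of the record's handlers take variadic callbacks, a route can also invoke them with no callback at all when only the built-in behaviour (signature check plus the plain "success" reply) is wanted.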
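The record's parsereqMsg relies on a SignMsg helper whose implementation is not included, so its exact signature and behaviour are unknown. For reference only, here is a self-contained sketch of the standard WeChat signature scheme such a helper normally implements: sort the parameters lexicographically, concatenate them, SHA-1 the result, and hex-encode. The function name signParams is invented for this sketch and is not the package's SignMsg.

package main

import (
    "crypto/sha1"
    "fmt"
    "sort"
    "strings"
)

// signParams computes the usual WeChat signature / msg_signature value:
// pass token, timestamp and nonce for the plain "signature" check,
// and additionally the Encrypt payload for the encrypted "msg_signature" check.
func signParams(params ...string) string {
    sort.Strings(params)
    sum := sha1.Sum([]byte(strings.Join(params, "")))
    return fmt.Sprintf("%x", sum)
}

func main() {
    fmt.Println(signParams("token", "1700000000", "nonce"))
    fmt.Println(signParams("token", "1700000000", "nonce", "encrypted-xml-payload"))
}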
technique.rs
use serde::{Deserialize, Serialize}; use serde_json::Value; use crate::error::*; use std::fs; use std::path::Path; use regex::Regex; use lazy_static::lazy_static; use toml; use nom::combinator::*; use nom::sequence::*; use nom::bytes::complete::*; use nom::character::complete::*; use nom::branch::alt; use nom::multi::many1; use nom::IResult; use std::str; #[derive(Serialize, Deserialize)] struct Technique { name: String, description: String, version: String, bundle_name: String, parameter: Vec<Value>, bundle_args: Vec<String>, method_calls: Vec<MethodCall>, } #[derive(Serialize, Deserialize)] struct MethodCall { method_name: String, class_context: String, args: Vec<String>, component: String, } pub fn
(json_file: &Path, rl_file: &Path) -> Result<()> { let config_data = fs::read_to_string("data/config.toml").expect("Cannot read config.toml file"); let config: toml::Value = toml::from_str(&config_data).expect("Invalig config.toml file"); // we use if let for error conversion // we don't use match for better linear reading let json_data = fs::read_to_string(&json_file); if json_data.is_err() { return Err(Error::User(format!("Cannot read file {}", json_file.to_string_lossy()))) } let technique = serde_json::from_str::<Technique>(&json_data.unwrap()); if technique.is_err() { return Err(Error::User(format!("Invalid technique in file {}", json_file.to_string_lossy()))) } let rl_technique = translate(&config, &technique.unwrap())?; if fs::write(&rl_file, rl_technique).is_err() { return Err(Error::User(format!("Cannot write file {}", rl_file.to_string_lossy()))) } Ok(()) } fn translate(config: &toml::Value, technique: &Technique) -> Result<String> { let parameters_meta = serde_json::to_string(&technique.parameter); if parameters_meta.is_err() { return Err(Error::User("Unable to parse technique file".to_string())) } let parameters = technique.bundle_args.join(","); let calls = map_strings_results( technique.method_calls.iter(), |c| translate_call(config, c), "\n" )?; let out = format!(r#"@format=0 # This file has been generated with rltranslate @name="{name}" @description="{description}" @version="{version}" @parameters={parameters_meta} resource {bundle_name}({parameters}) {bundle_name} state technique() {{ {calls} }} "#, description=technique.description, version=technique.version, name=technique.name, bundle_name=technique.bundle_name, parameters_meta=parameters_meta.unwrap(), parameters=parameters, calls=calls); Ok(out) } fn translate_call(config: &toml::Value, call: &MethodCall) -> Result<String> { lazy_static! 
{ static ref RE:Regex = Regex::new(r"^([a-z]+)_(\w+)$").unwrap(); } // separate resource and state let (resource,state) = match RE.captures(&call.method_name) { Some(caps) => (caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()), None => return Err(Error::User(format!("Invalid method name '{}'", call.method_name))), }; // split argument list let rconf = match config.get("resources") { None => return Err(Error::User("No resources section in config.toml".into())), Some(m) => m, }; let res_arg_v = match rconf.get(resource) { None => toml::value::Value::Integer(1), Some(r) => r.clone(), }; let res_arg_count: usize = match res_arg_v.as_integer() { None => return Err(Error::User(format!("Resource prefix '{}' must have a number as its parameter count",resource))), Some(v) => v as usize, }; let it = &mut call.args.iter(); let res_args = map_strings_results(it.take(res_arg_count), |x| translate_arg(config,x), ",")?; let st_args = map_strings_results(it, |x| translate_arg(config,x), ",")?; // call formating let call_str = format!("{}({}).{}({})", resource, res_args, state, st_args); let out_state = if call.class_context == "any" { format!(" {}", call_str) } else { let condition = translate_condition(config, &call.class_context)?; format!(" if {} => {}", condition, call_str) }; // outcome detection and formating let mconf = match config.get("methods") { None => return Err(Error::User("No methods section in config.toml".into())), Some(m) => m, }; let method = match mconf.get(&call.method_name) { None => return Err(Error::User(format!("Unknown generic method call: {}",&call.method_name))), Some(m) => m, }; let class_prefix = match method.get("class_prefix") { None => return Err(Error::User(format!("Undefined class_prefix for {}",&call.method_name))), Some(m) => m.as_str().unwrap(), }; let class_parameter_id = match method.get("class_parameter_id") { None => return Err(Error::User(format!("Undefined class_parameter_id for {}",&call.method_name))), Some(m) => m.as_integer().unwrap(), }; let class_parameter_value = &call.args[class_parameter_id as usize]; let canonic_parameter = canonify(class_parameter_value); let outcome = format!(" as {}_{}",class_prefix,canonic_parameter); // TODO remove outcome if there is no usage Ok(format!(" @component = \"{}\"\n{}{}", &call.component, out_state, outcome)) } fn canonify(input: &str) -> String { let s = input.as_bytes().iter() .map(|x| if x.is_ascii_alphanumeric() || *x == b'_' { *x } else { b'_' } ) .collect::<Vec<u8>>(); str::from_utf8(&s).expect(&format!("Canonify failed on {}",input)).to_owned() } #[derive(Clone)] struct CFVariable { ns: Option<String>, name: String, } fn parse_cfvariable(i: &str) -> IResult<&str,CFVariable> { map(tuple(( opt(map(terminated(take_while1(|c: char| c.is_alphanumeric() || (c == '_')),tag(".")),|x: &str| x.into())), map(take_while1(|c: char| c.is_alphanumeric() || (c == '_')),|x: &str| x.into()), )), |(ns, name)| CFVariable { ns, name })(i) } #[derive(Clone)] enum CFStringElt { Static(String), // static content Variable(CFVariable), // variable name } impl CFStringElt { fn to_string(&self) -> Result<String> { Ok(match self { CFStringElt::Static(s) => s.to_string(), CFStringElt::Variable(v) => { match &v.ns { None => v.name.clone(), // a parameter Some(ns) => match ns.as_ref() { "const" => (match v.name.as_ref() { "dollar" => "$", "dirsep" => "/", "endl" => "\\n", "n" => "\\n", "r" => "\\r", "t" => "\\t", _ => return Err(Error::User(format!("Unknown constant '{}.{}'", ns, v.name))), }).into(), "sys" => return 
Err(Error::User(format!("Not implemented variable namespace sys '{}.{}'", ns, v.name))), "this" => return Err(Error::User(format!("Unsupported variable namespace this '{}.{}'", ns, v.name))), ns => format!("${{{}.{}}}",ns,v.name), }, } // TODO // - array -> ? // - list -> ? }, }) } } fn parse_cfstring(i: &str) -> IResult<&str,Vec<CFStringElt>> { // There is a rest inside so this just serve as a guard all_consuming( alt(( many1(alt(( // variable ${} map( delimited(tag("${"), parse_cfvariable, tag("}")), CFStringElt::Variable), // variable $() map( delimited(tag("$("), parse_cfvariable, tag(")")), CFStringElt::Variable), // constant map(take_until("$"), |s: &str| CFStringElt::Static(s.into())), // end of string map(preceded( peek(anychar), // do no take rest if we are already at the end rest), |s: &str| CFStringElt::Static(s.into())), ))), // empty string value(vec![CFStringElt::Static("".into())], not(anychar)), )) )(i) } fn translate_arg(config: &toml::Value, arg: &str) -> Result<String> { let var = match parse_cfstring(arg) { Err(_) => return Err(Error::User(format!("Invalid variable syntax in '{}'", arg))), Ok((_,o)) => o }; map_strings_results(var.iter(), |x| Ok(format!("\"{}\"",x.to_string()?)), ",") } fn translate_condition(config: &toml::Value, cond: &str) -> Result<String> { lazy_static! { static ref METHOD_RE:Regex = Regex::new(r"^(\w+)_(\w+)$").unwrap(); static ref OS_RE:Regex = Regex::new(r"^([a-zA-Z]+)(_(\d+))*$").unwrap(); } // detect method outcome class if let Some(caps) = METHOD_RE.captures(cond) { let (method, status) = (caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()); if vec![ "kept", "success" ].iter().any(|x| x == &status) { return Ok(format!("{} =~ success", method)); } else if vec![ "error", "not_ok", "failed", "denied", "timeout" ].iter().any(|x| x == &status) { return Ok(format!("{} =~ error", method)); } else if vec![ "repaired", "ok", "reached" ].iter().any(|x| x == &status) { return Ok(format!("{} =~ {}", method, status)); } }; // detect system classes if let Some(caps) = OS_RE.captures(cond) { // TODO here we consider any match is an os match, should we have an OS whitelist ? 
// OS are global enum so we don't have to say which enum to match return Ok(cond.into()); } // TODO detect condition expressions Err(Error::User(format!("Don't know how to handle class '{}'", cond))) } //#[cfg(test)] //mod tests { // use super::*; // // #[test] // fn test_json() { // let data = r#" //{ // "name": "variable", // "description": "", // "version": "1.0", // "bundle_name": "variable", // "parameter": [ // { // "constraints": { // "allow_whitespace_string": false, // "allow_empty_string": false, // "max_length": 16384 // }, // "name": "iname", // "id": "53042794-4d2a-41c7-a690-b0d760a78a51" // }, // { // "constraints": { // "allow_whitespace_string": false, // "allow_empty_string": false, // "max_length": 16384 // }, // "name": "ip", // "id": "aa74f824-6085-46b4-94b4-42803760fd61" // } // ], // "bundle_args": [ // "iname", // "ip" // ], // "method_calls": [ // { // "method_name": "variable_string", // "class_context": "any", // "args": [ // "foo", // "bar", // "vim" // ], // "component": "Variable string" // }, // { // "method_name": "package_state", // "class_context": "any", // "args": [ // "${foo.bar}", // "", // "", // "", // "present" // ], // "component": "Package state" // } // ] //} //"#; // let p: Result<Technique> = serde_json::from_str(data); // assert!(p.is_ok()); // //assert_eq!(p.unwrap().name, "variable".to_string()); // let s = translate(&p.unwrap()); // assert!(s.is_ok()); // print!("{}",s.unwrap()); // } //}
translate_file
identifier_name
technique.rs
use serde::{Deserialize, Serialize}; use serde_json::Value; use crate::error::*; use std::fs; use std::path::Path; use regex::Regex; use lazy_static::lazy_static; use toml; use nom::combinator::*; use nom::sequence::*; use nom::bytes::complete::*; use nom::character::complete::*; use nom::branch::alt; use nom::multi::many1; use nom::IResult; use std::str; #[derive(Serialize, Deserialize)] struct Technique { name: String, description: String, version: String, bundle_name: String, parameter: Vec<Value>, bundle_args: Vec<String>, method_calls: Vec<MethodCall>, } #[derive(Serialize, Deserialize)] struct MethodCall { method_name: String, class_context: String, args: Vec<String>, component: String, } pub fn translate_file(json_file: &Path, rl_file: &Path) -> Result<()> { let config_data = fs::read_to_string("data/config.toml").expect("Cannot read config.toml file"); let config: toml::Value = toml::from_str(&config_data).expect("Invalig config.toml file"); // we use if let for error conversion // we don't use match for better linear reading let json_data = fs::read_to_string(&json_file); if json_data.is_err() { return Err(Error::User(format!("Cannot read file {}", json_file.to_string_lossy()))) } let technique = serde_json::from_str::<Technique>(&json_data.unwrap()); if technique.is_err() { return Err(Error::User(format!("Invalid technique in file {}", json_file.to_string_lossy()))) } let rl_technique = translate(&config, &technique.unwrap())?; if fs::write(&rl_file, rl_technique).is_err() { return Err(Error::User(format!("Cannot write file {}", rl_file.to_string_lossy()))) } Ok(()) } fn translate(config: &toml::Value, technique: &Technique) -> Result<String> { let parameters_meta = serde_json::to_string(&technique.parameter); if parameters_meta.is_err() { return Err(Error::User("Unable to parse technique file".to_string())) } let parameters = technique.bundle_args.join(","); let calls = map_strings_results( technique.method_calls.iter(), |c| translate_call(config, c), "\n" )?; let out = format!(r#"@format=0 # This file has been generated with rltranslate @name="{name}" @description="{description}" @version="{version}" @parameters={parameters_meta} resource {bundle_name}({parameters}) {bundle_name} state technique() {{ {calls} }} "#, description=technique.description, version=technique.version, name=technique.name, bundle_name=technique.bundle_name, parameters_meta=parameters_meta.unwrap(), parameters=parameters, calls=calls); Ok(out) } fn translate_call(config: &toml::Value, call: &MethodCall) -> Result<String> { lazy_static! 
{ static ref RE:Regex = Regex::new(r"^([a-z]+)_(\w+)$").unwrap(); } // separate resource and state let (resource,state) = match RE.captures(&call.method_name) { Some(caps) => (caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()), None => return Err(Error::User(format!("Invalid method name '{}'", call.method_name))), }; // split argument list let rconf = match config.get("resources") { None => return Err(Error::User("No resources section in config.toml".into())), Some(m) => m, }; let res_arg_v = match rconf.get(resource) { None => toml::value::Value::Integer(1), Some(r) => r.clone(), }; let res_arg_count: usize = match res_arg_v.as_integer() { None => return Err(Error::User(format!("Resource prefix '{}' must have a number as its parameter count",resource))), Some(v) => v as usize, }; let it = &mut call.args.iter(); let res_args = map_strings_results(it.take(res_arg_count), |x| translate_arg(config,x), ",")?; let st_args = map_strings_results(it, |x| translate_arg(config,x), ",")?; // call formating let call_str = format!("{}({}).{}({})", resource, res_args, state, st_args); let out_state = if call.class_context == "any" { format!(" {}", call_str) } else { let condition = translate_condition(config, &call.class_context)?; format!(" if {} => {}", condition, call_str) }; // outcome detection and formating let mconf = match config.get("methods") { None => return Err(Error::User("No methods section in config.toml".into())), Some(m) => m, }; let method = match mconf.get(&call.method_name) { None => return Err(Error::User(format!("Unknown generic method call: {}",&call.method_name))), Some(m) => m, }; let class_prefix = match method.get("class_prefix") { None => return Err(Error::User(format!("Undefined class_prefix for {}",&call.method_name))), Some(m) => m.as_str().unwrap(), }; let class_parameter_id = match method.get("class_parameter_id") { None => return Err(Error::User(format!("Undefined class_parameter_id for {}",&call.method_name))), Some(m) => m.as_integer().unwrap(), }; let class_parameter_value = &call.args[class_parameter_id as usize]; let canonic_parameter = canonify(class_parameter_value); let outcome = format!(" as {}_{}",class_prefix,canonic_parameter); // TODO remove outcome if there is no usage Ok(format!(" @component = \"{}\"\n{}{}", &call.component, out_state, outcome)) } fn canonify(input: &str) -> String { let s = input.as_bytes().iter() .map(|x| if x.is_ascii_alphanumeric() || *x == b'_' { *x } else { b'_' } ) .collect::<Vec<u8>>(); str::from_utf8(&s).expect(&format!("Canonify failed on {}",input)).to_owned() } #[derive(Clone)] struct CFVariable { ns: Option<String>, name: String, } fn parse_cfvariable(i: &str) -> IResult<&str,CFVariable> { map(tuple(( opt(map(terminated(take_while1(|c: char| c.is_alphanumeric() || (c == '_')),tag(".")),|x: &str| x.into())), map(take_while1(|c: char| c.is_alphanumeric() || (c == '_')),|x: &str| x.into()), )), |(ns, name)| CFVariable { ns, name })(i) } #[derive(Clone)] enum CFStringElt { Static(String), // static content Variable(CFVariable), // variable name } impl CFStringElt { fn to_string(&self) -> Result<String> { Ok(match self { CFStringElt::Static(s) => s.to_string(), CFStringElt::Variable(v) => { match &v.ns { None => v.name.clone(), // a parameter Some(ns) => match ns.as_ref() { "const" => (match v.name.as_ref() { "dollar" => "$", "dirsep" => "/", "endl" => "\\n", "n" => "\\n", "r" => "\\r", "t" => "\\t", _ => return Err(Error::User(format!("Unknown constant '{}.{}'", ns, v.name))), }).into(), "sys" => return 
Err(Error::User(format!("Not implemented variable namespace sys '{}.{}'", ns, v.name))), "this" => return Err(Error::User(format!("Unsupported variable namespace this '{}.{}'", ns, v.name))), ns => format!("${{{}.{}}}",ns,v.name), }, } // TODO // - array -> ? // - list -> ? }, }) } } fn parse_cfstring(i: &str) -> IResult<&str,Vec<CFStringElt>> { // There is a rest inside so this just serve as a guard all_consuming( alt(( many1(alt(( // variable ${} map( delimited(tag("${"), parse_cfvariable, tag("}")), CFStringElt::Variable), // variable $() map( delimited(tag("$("), parse_cfvariable, tag(")")), CFStringElt::Variable), // constant map(take_until("$"), |s: &str| CFStringElt::Static(s.into())), // end of string map(preceded( peek(anychar), // do no take rest if we are already at the end rest), |s: &str| CFStringElt::Static(s.into())), ))),
fn translate_arg(config: &toml::Value, arg: &str) -> Result<String> { let var = match parse_cfstring(arg) { Err(_) => return Err(Error::User(format!("Invalid variable syntax in '{}'", arg))), Ok((_,o)) => o }; map_strings_results(var.iter(), |x| Ok(format!("\"{}\"",x.to_string()?)), ",") } fn translate_condition(config: &toml::Value, cond: &str) -> Result<String> { lazy_static! { static ref METHOD_RE:Regex = Regex::new(r"^(\w+)_(\w+)$").unwrap(); static ref OS_RE:Regex = Regex::new(r"^([a-zA-Z]+)(_(\d+))*$").unwrap(); } // detect method outcome class if let Some(caps) = METHOD_RE.captures(cond) { let (method, status) = (caps.get(1).unwrap().as_str(), caps.get(2).unwrap().as_str()); if vec![ "kept", "success" ].iter().any(|x| x == &status) { return Ok(format!("{} =~ success", method)); } else if vec![ "error", "not_ok", "failed", "denied", "timeout" ].iter().any(|x| x == &status) { return Ok(format!("{} =~ error", method)); } else if vec![ "repaired", "ok", "reached" ].iter().any(|x| x == &status) { return Ok(format!("{} =~ {}", method, status)); } }; // detect system classes if let Some(caps) = OS_RE.captures(cond) { // TODO here we consider any match is an os match, should we have an OS whitelist ? // OS are global enum so we don't have to say which enum to match return Ok(cond.into()); } // TODO detect condition expressions Err(Error::User(format!("Don't know how to handle class '{}'", cond))) } //#[cfg(test)] //mod tests { // use super::*; // // #[test] // fn test_json() { // let data = r#" //{ // "name": "variable", // "description": "", // "version": "1.0", // "bundle_name": "variable", // "parameter": [ // { // "constraints": { // "allow_whitespace_string": false, // "allow_empty_string": false, // "max_length": 16384 // }, // "name": "iname", // "id": "53042794-4d2a-41c7-a690-b0d760a78a51" // }, // { // "constraints": { // "allow_whitespace_string": false, // "allow_empty_string": false, // "max_length": 16384 // }, // "name": "ip", // "id": "aa74f824-6085-46b4-94b4-42803760fd61" // } // ], // "bundle_args": [ // "iname", // "ip" // ], // "method_calls": [ // { // "method_name": "variable_string", // "class_context": "any", // "args": [ // "foo", // "bar", // "vim" // ], // "component": "Variable string" // }, // { // "method_name": "package_state", // "class_context": "any", // "args": [ // "${foo.bar}", // "", // "", // "", // "present" // ], // "component": "Package state" // } // ] //} //"#; // let p: Result<Technique> = serde_json::from_str(data); // assert!(p.is_ok()); // //assert_eq!(p.unwrap().name, "variable".to_string()); // let s = translate(&p.unwrap()); // assert!(s.is_ok()); // print!("{}",s.unwrap()); // } //}
// empty string value(vec![CFStringElt::Static("".into())], not(anychar)), )) )(i) }
random_line_split
html5_player.js
isFullScreen = false; isButtonFullscreen = false; $(document).ready(function() { //INITIALIZE var video = $('#myVideo'); var container = $('.cont'); var test = $(".cont,.myVideo"); var loadmetadata = true; var _pause = false; //showing pause icon by before video load $('.btnPlay').addClass('paused'); attachEvents(); $('.loading').fadeIn(500); // setting player controls when video gets ready :) var ready_state_interval = setInterval(function(){ var vid = document.getElementById("myVideo"); var rdy = vid.readyState; if (rdy >= '2') { clearTimeout(ready_state_interval); //var source = get_current_video_source("myVideo"); $('.fduration').text(timeFormat(video[0].duration)); $('.fcurrent').text(timeFormat(0)); $('.loading').fadeOut(500); $('.caption').fadeIn(500); if( autoplay == 'true'){ setTimeout(startBuffer, 10); $('.btnPlay').addClass('paused'); } else { $('.init').fadeIn(2500); $('.btnPlay').removeClass('paused'); } updateVolume(0, 0.7); } },1000); // initializing the player click function $('.cont').on("click",function(){ if( autoplay == '') { $('.init').hide(); $('.btnPlay').addClass('paused'); $(this).unbind('click'); video[0].play(); $('.buffer').show(); //start to get video buffering data setTimeout(startBuffer, 10); } }); //setting up flag variable true for controls hover var cb_controls = false; $('.control').on("mouseenter",function(){ cb_controls = true; }); //setting up flag variable false for controls $('.control').on("mouseleave",function(){ cb_controls = false; }); // on mouse stop on container, controls get hide after 5 secs var lastTimeMouseMoved = ""; $('.cont').mousemove(function(e){ caption_show(); lastTimeMouseMoved = new Date().getTime(); var t=setInterval(function(){ var currentTime_new = new Date().getTime(); if(currentTime_new - lastTimeMouseMoved > 5000 && !_pause && !cb_controls) { caption_hide(); } },1000); }); // on hover controils and captions get showed var on_player = false; $('.cont').on("mouseenter",function(){ on_player = true; caption_show(); }); // on hover controils and captions get showed $('.cont').on("mouseleave",function(){ on_player = false; if (!_pause) caption_hide(); }); //display video buffering bar var startBuffer = function() { var currentBuffer = video[0].buffered.end(0); var maxduration = video[0].duration; var perc = 100 * currentBuffer / maxduration; $('.bufferBar').css('width',perc+'%'); if(currentBuffer < maxduration) { setTimeout(startBuffer, 2000); } }; //display current video play time video.on('timeupdate', function() { var currentPos = video[0].currentTime; var maxduration = video[0].duration; var perc = 100 * currentPos / maxduration; $('.timeBar').css('width',perc+'%'); $('.fcurrent').text(timeFormat(currentPos)); }); /* * Changes made by *Saqib Razzaq* * checks if windows size is less than 400 * if it is, hides volumebar and shows it when * you hover over vol icon hiding video time once * you move away from sound icon it hides itself showing video time */ if ($( window ).width() < 400) { $("#volSec").hide(); $("#soundIcon").mouseenter(function(){ $("#volSec").show(); $(".fcurrent").hide(); }); $(".volume").mouseleave(function(){ $("#volSec").hide(); $(".fcurrent").show(); }); } /* * ================== * volume changes end * ================== */ //CONTROLS EVENTS //video screen and play button clicked video.on('click', function() { playpause(); } ); $('.btnPlay').on('click', function() { playpause(); } ); $('.caption').on('click', function() { playpause(); } ); $('.init').on('click', function() { playpause(); } ); var playpause 
= function() { if(video[0].paused || video[0].ended) { $('.init').hide(); $('.btnPlay').addClass('paused'); video[0].play(); _pause = false; } else { $('.init').show(); $('.btnPlay').removeClass('paused'); video[0].pause(); _pause = true; } }; $( "#replay_v" ).click(function() { video[0].play(); $('#opacity').hide(); $('#related_1').hide(); $('.control').show(); $('.caption').show(); $('#web').show(); //showing pause icon by before video load $('.btnPlay').addClass('paused'); }); $( "#cancel_v" ).click(function() { $('#opacity').hide(); $('#related_1').hide(); $('.control').show(); $('.caption').show(); $('.init').show(); $('#web').show(); }); //speed text clicked $('.btnx1').on('click', function() { fastfowrd(this, 1); }); $('.btnx3').on('click', function() { fastfowrd(this, 3); }); var fastfowrd = function(obj, spd) { $('.text').removeClass('selected'); $(obj).addClass('selected'); video[0].playbackRate = spd; video[0].play(); }; //stop button clicked $('.btnStop').on('click', function() { $('.btnPlay').removeClass('paused'); updatebar($('.progress').offset().left); video[0].pause(); }); $('.btnFS').on('click', function() { $(this).toggleClass('enterbtnFS'); isButtonFullscreen = true; if($.isFunction(container[0].webkitRequestFullScreen)) { if(isFullScreen) { isFullScreen = false; document.webkitCancelFullScreen(); } else { isFullScreen = true; container[0].webkitRequestFullScreen(); } } else if($.isFunction(container[0].mozRequestFullScreen)) { if(isFullScreen) { isFullScreen = false; document.mozCancelFullScreen(); } else { isFullScreen = true; container[0].mozRequestFullScreen(); } } }); var _HD_flag = false; //HD on/off button clicked $(".hdon").on('click', function() { $(this).toggleClass('hdoff'); $('.myVideo').removeClass('init'); $('source', '#myVideo').eq(1).prependTo('#myVideo'); $('#myVideo')[0].load(); $('#myVideo')[0].play(); $('.init').hide(); $('.btnPlay').addClass('paused'); _HD_flag = true; }); //sound button clicked $('.sound').click(function() { video[0].muted = !video[0].muted; $(this).toggleClass('muted'); if(video[0].muted) { $('.volumeBar').css('width',0); } else{ $('.volumeBar').css('width', video[0].volume*100+'%'); } }); //VIDEO EVENTS //video canplay event video.on('canplay', function() { $('.loading').fadeOut(100); }); //video canplaythrough event //solve Chrome cache issue var completeloaded = false; video.on('canplaythrough', function() { completeloaded = true; }); //video ended event video.on('ended', function() { $('.btnPlay').removeClass('paused'); video[0].pause(); $('#opacity').show(); $('#related_1').show(); $('.control').hide(); $('.caption').hide(); $('#web').hide(); }); //video seeking event video.on('seeking', function() { //if video fully loaded, ignore loading screen if(!completeloaded) { $('.loading').fadeIn(200); } }); //video seeked event video.on('seeked', function() { $('.loading').fadeOut(200); }); //video waiting for more data event video.on('waiting', function() { $('.loading').fadeIn(200); }); //VIDEO PROGRESS BAR //when video timebar clicked var timeDrag = false; /* check for drag event */ $('.progress').on('mousedown', function(e) { timeDrag = true; updatebar(e.pageX); }); $(document).on('mouseup', function(e) { if(timeDrag) { timeDrag = false; updatebar(e.pageX); } }); $(document).on('mousemove', function(e) { if(timeDrag) { updatebar(e.pageX); } }); var updatebar = function(x) { var progress = $('.progress'); //calculate drag position //and update video currenttime //as well as progress bar var maxduration = video[0].duration; var 
position = x - progress.offset().left; var percentage = 100 * position / progress.width(); if(percentage > 100) { percentage = 100; } if(percentage < 0) { percentage = 0; } $('.timeBar').css('width',percentage+'%'); video[0].currentTime = maxduration * percentage / 100; }; //VOLUME BAR //volume bar event var volumeDrag = false; $('.volume').on('mousedown', function(e) { volumeDrag = true; video[0].muted = false; $('.sound').removeClass('muted'); updateVolume(e.pageX); }); $(document).on('mouseup', function(e) { if(volumeDrag) { volumeDrag = false; updateVolume(e.pageX); } }); $(document).on('mousemove', function(e) { if(volumeDrag) { updateVolume(e.pageX); } }); var updateVolume = function(x, vol) { var volume = $('.volume'); var percentage; //if only volume have specificed //then direct update volume if(vol) { percentage = vol * 100; } else { var position = x - volume.offset().left; percentage = 100 * position / volume.width(); } if(percentage > 100) { percentage = 100; } if(percentage < 0) { percentage = 0; } //update volume bar and video volume $('.volumeBar').css('width',percentage+'%'); video[0].volume = percentage / 100; //change sound icon based on volume if(video[0].volume == 0){ $('.sound').removeClass('sound2').addClass('muted'); } else if(video[0].volume > 0.5){ $('.sound').removeClass('muted').addClass('sound2'); } else{ $('.sound').removeClass('muted').removeClass('sound2'); } }; //getting large screen button only for watch video page if (enlarge_small == 'true') { $('.btmControl').append('<div class="smallscr largescr hbtn" id="largescr" title="Enlarge/Small Size"></div>'); $('#largescr').insertBefore("#fs"); } //Large screen function $(".largescr").click(function() { $(this).toggleClass('smallscr'); if(!$(this).hasClass('smallscr')) { $(".cb_player").animate({height:'+=220px',width:'+=390px'},"fast"); $('.html5_player_enlarge').addClass('col-lg-12').removeClass('col-lg-8'); } else { $(".cb_player").animate({height:'-=220px',width:'-=390px'},"fast"); $('.html5_player_enlarge').removeClass('col-lg-12').addClass('col-lg-8'); } }); //Right Click Menu $('#cont').append('<div id="rightcmenu"></div>'); //$('#rightcmenu').append('<span id="op">CB Html5 menu</span>'); $('#rightcmenu').append('<ul id="ritems"></ul>'); $('#ritems').append('<li id="copy" class="rlist copy">Show Video link</li>'); $('#ritems').append('<li class="rlist about">About</li>'); $('#ritems').append('<li class="rlist clip">Powered by Clipbucket</li>'); $('.cont').bind("contextmenu", function (e) { e.preventDefault(); // To prevent the default context menu. $("#rightcmenu").css("left", e.pageX); // For updating the menu position. $("#rightcmenu").css("top", e.pageY); // $("#rightcmenu").fadeIn(500, startFocusOut()); // For bringing the context menu in picture. 
}); function startFocusOut() { $(document).on("click", function () { $("#rightcmenu").hide(500); // To hide the context menu $('.cont').off("click"); }); } $(".clip").click(function(event) { window.open(homepath, '_blank'); }); $(".about").click(function(event) { window.open(homepath, '_blank'); }); $('.copy').click(function() { alert(document.URL); }); //Logo $('.cb-playerLogo').append('<div id="path" class="path hbtn" > </div>'); $('#path').prop("href",product_link); // $("#path").insertAfter("#hd"); $('#path').css({ 'backgroundImage': 'url(data:image/png;base64,' + webpath + ')', 'margin-right':'7px', 'margin-top':'0px', 'background-repeat':'no-repeat', 'background-position' : '100% 50%', }); $("#path").click(function(event) { window.open(product_link, '_blank'); }); $('#name_v,#thumb_v').mouseover(function() { $(this).css({'opacity':'1', 'border': '0px solid #000', 'box-shadow':'1px 0 5px #fff', '-moz-box-shadow':'1px 0 5px #fff', '-webkit-box-shadow':'1px 0 5px #fff',}); }); $('#name_v,#thumb_v').mouseout(function() { $(this).css({'opacity':'.9', 'border': '0px solid #000', 'box-shadow':'0px 0 0px #fff', '-moz-box-shadow':'0px 0 0px #fff', '-webkit-box-shadow':'0px 0 0px #fff',}); }); // Setting in-video logo for player if( iv_logo_enable == 'yes') { $('.cont').append('<img id="web" src=data:image/png;base64,'+ web +'> '); $('#web').css({ 'top' : $top, 'left' : $left, 'bottom' : $bottom, 'right' : $right , 'position': 'absolute', 'width': '100px', 'height': '30px', 'z-index' : '-1' }); } //For multiserver plugin videos :) if(files) { var toggle = false; var time_var = false; var start_time = 0; $('#res').on('click',function(event){ if(toggle == false) { $('.video_files').show(10); toggle = true; } else { $('.video_files').hide(10); toggle = false; } event.stopPropagation(); }); $('html').click(function() { $('.video_files').hide(100); toggle =false; }); // All multiserver Video Files (Json) var jsonData = JSON.parse(files); // cheking for if 360 resolution is not available if(!jsonData["360"]) { video.attr('src',jsonData["240"]); $('#li_240').addClass('selected_player'); } else $('#li_360').addClass('selected_player'); $.each(jsonData, function (key, data) { $('#li_' + key).on('mouseenter', function(){ $(this).css({'background-color':'#000'}); }); $('#li_' + key).on('mouseleave', function(){ $(this).css({'background-color':'#1D1D1D'}); }); $('#li_' + key).on('click', function(){ //getting current time variable for video to play .. on change resolution and passing to loadmetadata start_time = video[0].currentTime; //Changing source attribute for the required resolution .. console.log("current_video=>"+jsonData[key]); video.attr('src',jsonData[key]); load_meta_data(start_time,video); time_var = true; }); }); $('#ul_files .list_player').click(function() { $('#ul_files .list_player.selected_player').removeClass('selected_player'); $(this).closest('li').addClass('selected_player'); }); } /** * For multiserver plugin videos <<--END-->> :) */ //Time format converter - 00:00 var timeFormat = function(seconds){ var m = Math.floor(seconds/60)<10 ? "0"+Math.floor(seconds/60) : Math.floor(seconds/60); var s = Math.floor(seconds-(m*60))<10 ? 
"0"+Math.floor(seconds-(m*60)) : Math.floor(seconds-(m*60)); return m+":"+s; }; /** * Following function is used to attach fullscreen events to document */ function attachEvents (){ $( document ).on( 'fullscreenchange', toggleFullScreen ); $( document ).on( 'webkitfullscreenchange', toggleFullScreen ); $( document ).on( 'mozfullscreenchange', toggleFullScreen ); $( document ).on( 'MSFullscreenchange', toggleFullScreen ); } /** * Following events are used to check fullscreen events */ document.addEventListener("fullscreenchange", function (e) { toggleFullScreen(e); }, false); document.addEventListener("mozfullscreenchange", function (e) { toggleFullScreen(e); }, false); document.addEventListener("webkitfullscreenchange", function (e) { toggleFullScreen(e); }, false); /** * Following function is used to show controls on exit fullscreen */ function showControl(){ $('.caption').hide(); $(".largescr").hide(); $(".control").hover( function() { $('.control').stop().animate({'bottom':0}, 100); }, function() { $('.control').stop().animate({'bottom':-40}, 1000); }); } /** * Following function is used to show controls on enter fullscreen */ function hideControl() { $('.caption').show(); $(".largescr").show(); $('.cb-item-title-container').css({'margin-top':0}); $(".control").hover( function() { $(this).unbind('mouseenter').unbind('mouseleave'); }); } /** * Following function is used to call events on toggle screen */ function toggleFullScreen (e) { isFullScreen = ( document.fullscreenElement || document.webkitFullscreenElement || document.mozFullScreenElement || document.msFullscreenElement ); if ( isFullScreen ) { showControl(); } else { hideControl(); } } }); /** * caption hide */ function caption_hide() { $('.control').stop().animate({'bottom':-51}, 500); $('.caption').stop().animate({'top':-200}, 500); } /** * caption hide */ function caption_show() { $('.control').stop().animate({'bottom':0}, 100); $('.caption').stop().animate({'top':-7}, 100); } function load_meta_data(start_time,video)
function get_current_video_source(object_id) { var video = document.getElementById(object_id); var src = video.currentSrc; return src; } /******CAUTION*****\ DO NOT REMOVE THIS COMMENTED CODE /***website logo***\ /*$('.cont').append('<div><img id="web" src=data:image/png;base64,'+ web +'> </div>'); $('#web').css({ 'top' : $top, 'left' : $left, 'bottom' : $bottom, 'right' : $right , 'position': 'absolute', 'width': '100px', 'height': '30px', }); */ //before everything get started /* video.on('loadedmetadata', function() { if (time_var == true){ video[0].currentTime = start_time; video[0].play(); } //set video properties $('.fcurrent').text(timeFormat(0)); $('.fduration').text(timeFormat(video[0].duration)); $('.caption').fadeIn(500); updateVolume(0, 0.7); $('.loading').fadeOut(500); if( autoplay == 'true' && !_HD_flag){ setTimeout(startBuffer, 10); $('.btnPlay').addClass('paused'); } if(time_var == true){ $('.init').hide(); } else{ if( autoplay == '' && !_HD_flag){ $('.init').fadeIn(2500); $('.btnPlay').removeClass('paused'); } } loadmetadata = false; });*/ // On press space vidoe play/pasue /*var play = false; $(window).keypress(function(e) { e.preventDefault(); if (e.keyCode == 0) { console.log(e.keyCode); if (!play) { play = true; $('.init').show(); $('.btnPlay').removeClass('paused'); video[0].pause(); _pause = true; } else { play = false; $('.init').hide(); $('.btnPlay').addClass('paused'); video[0].play(); _pause = false; } } });*/
{ //before everything get started video.on('loadedmetadata', function() { video[0].currentTime = start_time; video[0].play(); //set video properties $('.loading').fadeOut(500); $('.init').hide(); }); }
identifier_body
html5_player.js
isFullScreen = false; isButtonFullscreen = false; $(document).ready(function() { //INITIALIZE var video = $('#myVideo'); var container = $('.cont'); var test = $(".cont,.myVideo"); var loadmetadata = true; var _pause = false; //showing pause icon by before video load $('.btnPlay').addClass('paused'); attachEvents(); $('.loading').fadeIn(500); // setting player controls when video gets ready :) var ready_state_interval = setInterval(function(){ var vid = document.getElementById("myVideo"); var rdy = vid.readyState; if (rdy >= '2') { clearTimeout(ready_state_interval); //var source = get_current_video_source("myVideo"); $('.fduration').text(timeFormat(video[0].duration)); $('.fcurrent').text(timeFormat(0)); $('.loading').fadeOut(500); $('.caption').fadeIn(500); if( autoplay == 'true'){ setTimeout(startBuffer, 10); $('.btnPlay').addClass('paused'); } else { $('.init').fadeIn(2500); $('.btnPlay').removeClass('paused'); } updateVolume(0, 0.7); } },1000); // initializing the player click function $('.cont').on("click",function(){ if( autoplay == '') { $('.init').hide(); $('.btnPlay').addClass('paused'); $(this).unbind('click'); video[0].play(); $('.buffer').show(); //start to get video buffering data setTimeout(startBuffer, 10); } }); //setting up flag variable true for controls hover var cb_controls = false; $('.control').on("mouseenter",function(){ cb_controls = true; }); //setting up flag variable false for controls $('.control').on("mouseleave",function(){ cb_controls = false; }); // on mouse stop on container, controls get hide after 5 secs var lastTimeMouseMoved = ""; $('.cont').mousemove(function(e){ caption_show(); lastTimeMouseMoved = new Date().getTime(); var t=setInterval(function(){ var currentTime_new = new Date().getTime(); if(currentTime_new - lastTimeMouseMoved > 5000 && !_pause && !cb_controls) { caption_hide(); } },1000); }); // on hover controils and captions get showed var on_player = false; $('.cont').on("mouseenter",function(){ on_player = true; caption_show(); }); // on hover controils and captions get showed $('.cont').on("mouseleave",function(){ on_player = false; if (!_pause) caption_hide(); }); //display video buffering bar var startBuffer = function() { var currentBuffer = video[0].buffered.end(0); var maxduration = video[0].duration; var perc = 100 * currentBuffer / maxduration; $('.bufferBar').css('width',perc+'%'); if(currentBuffer < maxduration) { setTimeout(startBuffer, 2000); } }; //display current video play time video.on('timeupdate', function() { var currentPos = video[0].currentTime; var maxduration = video[0].duration; var perc = 100 * currentPos / maxduration; $('.timeBar').css('width',perc+'%'); $('.fcurrent').text(timeFormat(currentPos)); }); /* * Changes made by *Saqib Razzaq* * checks if windows size is less than 400 * if it is, hides volumebar and shows it when * you hover over vol icon hiding video time once * you move away from sound icon it hides itself showing video time */ if ($( window ).width() < 400) { $("#volSec").hide(); $("#soundIcon").mouseenter(function(){ $("#volSec").show(); $(".fcurrent").hide(); }); $(".volume").mouseleave(function(){ $("#volSec").hide(); $(".fcurrent").show(); }); } /* * ================== * volume changes end * ================== */ //CONTROLS EVENTS //video screen and play button clicked video.on('click', function() { playpause(); } ); $('.btnPlay').on('click', function() { playpause(); } ); $('.caption').on('click', function() { playpause(); } ); $('.init').on('click', function() { playpause(); } ); var playpause 
= function() { if(video[0].paused || video[0].ended) { $('.init').hide(); $('.btnPlay').addClass('paused'); video[0].play(); _pause = false; } else
}; $( "#replay_v" ).click(function() { video[0].play(); $('#opacity').hide(); $('#related_1').hide(); $('.control').show(); $('.caption').show(); $('#web').show(); //showing pause icon by before video load $('.btnPlay').addClass('paused'); }); $( "#cancel_v" ).click(function() { $('#opacity').hide(); $('#related_1').hide(); $('.control').show(); $('.caption').show(); $('.init').show(); $('#web').show(); }); //speed text clicked $('.btnx1').on('click', function() { fastfowrd(this, 1); }); $('.btnx3').on('click', function() { fastfowrd(this, 3); }); var fastfowrd = function(obj, spd) { $('.text').removeClass('selected'); $(obj).addClass('selected'); video[0].playbackRate = spd; video[0].play(); }; //stop button clicked $('.btnStop').on('click', function() { $('.btnPlay').removeClass('paused'); updatebar($('.progress').offset().left); video[0].pause(); }); $('.btnFS').on('click', function() { $(this).toggleClass('enterbtnFS'); isButtonFullscreen = true; if($.isFunction(container[0].webkitRequestFullScreen)) { if(isFullScreen) { isFullScreen = false; document.webkitCancelFullScreen(); } else { isFullScreen = true; container[0].webkitRequestFullScreen(); } } else if($.isFunction(container[0].mozRequestFullScreen)) { if(isFullScreen) { isFullScreen = false; document.mozCancelFullScreen(); } else { isFullScreen = true; container[0].mozRequestFullScreen(); } } }); var _HD_flag = false; //HD on/off button clicked $(".hdon").on('click', function() { $(this).toggleClass('hdoff'); $('.myVideo').removeClass('init'); $('source', '#myVideo').eq(1).prependTo('#myVideo'); $('#myVideo')[0].load(); $('#myVideo')[0].play(); $('.init').hide(); $('.btnPlay').addClass('paused'); _HD_flag = true; }); //sound button clicked $('.sound').click(function() { video[0].muted = !video[0].muted; $(this).toggleClass('muted'); if(video[0].muted) { $('.volumeBar').css('width',0); } else{ $('.volumeBar').css('width', video[0].volume*100+'%'); } }); //VIDEO EVENTS //video canplay event video.on('canplay', function() { $('.loading').fadeOut(100); }); //video canplaythrough event //solve Chrome cache issue var completeloaded = false; video.on('canplaythrough', function() { completeloaded = true; }); //video ended event video.on('ended', function() { $('.btnPlay').removeClass('paused'); video[0].pause(); $('#opacity').show(); $('#related_1').show(); $('.control').hide(); $('.caption').hide(); $('#web').hide(); }); //video seeking event video.on('seeking', function() { //if video fully loaded, ignore loading screen if(!completeloaded) { $('.loading').fadeIn(200); } }); //video seeked event video.on('seeked', function() { $('.loading').fadeOut(200); }); //video waiting for more data event video.on('waiting', function() { $('.loading').fadeIn(200); }); //VIDEO PROGRESS BAR //when video timebar clicked var timeDrag = false; /* check for drag event */ $('.progress').on('mousedown', function(e) { timeDrag = true; updatebar(e.pageX); }); $(document).on('mouseup', function(e) { if(timeDrag) { timeDrag = false; updatebar(e.pageX); } }); $(document).on('mousemove', function(e) { if(timeDrag) { updatebar(e.pageX); } }); var updatebar = function(x) { var progress = $('.progress'); //calculate drag position //and update video currenttime //as well as progress bar var maxduration = video[0].duration; var position = x - progress.offset().left; var percentage = 100 * position / progress.width(); if(percentage > 100) { percentage = 100; } if(percentage < 0) { percentage = 0; } $('.timeBar').css('width',percentage+'%'); video[0].currentTime = 
maxduration * percentage / 100; }; //VOLUME BAR //volume bar event var volumeDrag = false; $('.volume').on('mousedown', function(e) { volumeDrag = true; video[0].muted = false; $('.sound').removeClass('muted'); updateVolume(e.pageX); }); $(document).on('mouseup', function(e) { if(volumeDrag) { volumeDrag = false; updateVolume(e.pageX); } }); $(document).on('mousemove', function(e) { if(volumeDrag) { updateVolume(e.pageX); } }); var updateVolume = function(x, vol) { var volume = $('.volume'); var percentage; //if only volume have specificed //then direct update volume if(vol) { percentage = vol * 100; } else { var position = x - volume.offset().left; percentage = 100 * position / volume.width(); } if(percentage > 100) { percentage = 100; } if(percentage < 0) { percentage = 0; } //update volume bar and video volume $('.volumeBar').css('width',percentage+'%'); video[0].volume = percentage / 100; //change sound icon based on volume if(video[0].volume == 0){ $('.sound').removeClass('sound2').addClass('muted'); } else if(video[0].volume > 0.5){ $('.sound').removeClass('muted').addClass('sound2'); } else{ $('.sound').removeClass('muted').removeClass('sound2'); } }; //getting large screen button only for watch video page if (enlarge_small == 'true') { $('.btmControl').append('<div class="smallscr largescr hbtn" id="largescr" title="Enlarge/Small Size"></div>'); $('#largescr').insertBefore("#fs"); } //Large screen function $(".largescr").click(function() { $(this).toggleClass('smallscr'); if(!$(this).hasClass('smallscr')) { $(".cb_player").animate({height:'+=220px',width:'+=390px'},"fast"); $('.html5_player_enlarge').addClass('col-lg-12').removeClass('col-lg-8'); } else { $(".cb_player").animate({height:'-=220px',width:'-=390px'},"fast"); $('.html5_player_enlarge').removeClass('col-lg-12').addClass('col-lg-8'); } }); //Right Click Menu $('#cont').append('<div id="rightcmenu"></div>'); //$('#rightcmenu').append('<span id="op">CB Html5 menu</span>'); $('#rightcmenu').append('<ul id="ritems"></ul>'); $('#ritems').append('<li id="copy" class="rlist copy">Show Video link</li>'); $('#ritems').append('<li class="rlist about">About</li>'); $('#ritems').append('<li class="rlist clip">Powered by Clipbucket</li>'); $('.cont').bind("contextmenu", function (e) { e.preventDefault(); // To prevent the default context menu. $("#rightcmenu").css("left", e.pageX); // For updating the menu position. $("#rightcmenu").css("top", e.pageY); // $("#rightcmenu").fadeIn(500, startFocusOut()); // For bringing the context menu in picture. 
}); function startFocusOut() { $(document).on("click", function () { $("#rightcmenu").hide(500); // To hide the context menu $('.cont').off("click"); }); } $(".clip").click(function(event) { window.open(homepath, '_blank'); }); $(".about").click(function(event) { window.open(homepath, '_blank'); }); $('.copy').click(function() { alert(document.URL); }); //Logo $('.cb-playerLogo').append('<div id="path" class="path hbtn" > </div>'); $('#path').prop("href",product_link); // $("#path").insertAfter("#hd"); $('#path').css({ 'backgroundImage': 'url(data:image/png;base64,' + webpath + ')', 'margin-right':'7px', 'margin-top':'0px', 'background-repeat':'no-repeat', 'background-position' : '100% 50%', }); $("#path").click(function(event) { window.open(product_link, '_blank'); }); $('#name_v,#thumb_v').mouseover(function() { $(this).css({'opacity':'1', 'border': '0px solid #000', 'box-shadow':'1px 0 5px #fff', '-moz-box-shadow':'1px 0 5px #fff', '-webkit-box-shadow':'1px 0 5px #fff',}); }); $('#name_v,#thumb_v').mouseout(function() { $(this).css({'opacity':'.9', 'border': '0px solid #000', 'box-shadow':'0px 0 0px #fff', '-moz-box-shadow':'0px 0 0px #fff', '-webkit-box-shadow':'0px 0 0px #fff',}); }); // Setting in-video logo for player if( iv_logo_enable == 'yes') { $('.cont').append('<img id="web" src=data:image/png;base64,'+ web +'> '); $('#web').css({ 'top' : $top, 'left' : $left, 'bottom' : $bottom, 'right' : $right , 'position': 'absolute', 'width': '100px', 'height': '30px', 'z-index' : '-1' }); } //For multiserver plugin videos :) if(files) { var toggle = false; var time_var = false; var start_time = 0; $('#res').on('click',function(event){ if(toggle == false) { $('.video_files').show(10); toggle = true; } else { $('.video_files').hide(10); toggle = false; } event.stopPropagation(); }); $('html').click(function() { $('.video_files').hide(100); toggle =false; }); // All multiserver Video Files (Json) var jsonData = JSON.parse(files); // cheking for if 360 resolution is not available if(!jsonData["360"]) { video.attr('src',jsonData["240"]); $('#li_240').addClass('selected_player'); } else $('#li_360').addClass('selected_player'); $.each(jsonData, function (key, data) { $('#li_' + key).on('mouseenter', function(){ $(this).css({'background-color':'#000'}); }); $('#li_' + key).on('mouseleave', function(){ $(this).css({'background-color':'#1D1D1D'}); }); $('#li_' + key).on('click', function(){ //getting current time variable for video to play .. on change resolution and passing to loadmetadata start_time = video[0].currentTime; //Changing source attribute for the required resolution .. console.log("current_video=>"+jsonData[key]); video.attr('src',jsonData[key]); load_meta_data(start_time,video); time_var = true; }); }); $('#ul_files .list_player').click(function() { $('#ul_files .list_player.selected_player').removeClass('selected_player'); $(this).closest('li').addClass('selected_player'); }); } /** * For multiserver plugin videos <<--END-->> :) */ //Time format converter - 00:00 var timeFormat = function(seconds){ var m = Math.floor(seconds/60)<10 ? "0"+Math.floor(seconds/60) : Math.floor(seconds/60); var s = Math.floor(seconds-(m*60))<10 ? 
"0"+Math.floor(seconds-(m*60)) : Math.floor(seconds-(m*60)); return m+":"+s; }; /** * Following function is used to attach fullscreen events to document */ function attachEvents (){ $( document ).on( 'fullscreenchange', toggleFullScreen ); $( document ).on( 'webkitfullscreenchange', toggleFullScreen ); $( document ).on( 'mozfullscreenchange', toggleFullScreen ); $( document ).on( 'MSFullscreenchange', toggleFullScreen ); } /** * Following events are used to check fullscreen events */ document.addEventListener("fullscreenchange", function (e) { toggleFullScreen(e); }, false); document.addEventListener("mozfullscreenchange", function (e) { toggleFullScreen(e); }, false); document.addEventListener("webkitfullscreenchange", function (e) { toggleFullScreen(e); }, false); /** * Following function is used to show controls on exit fullscreen */ function showControl(){ $('.caption').hide(); $(".largescr").hide(); $(".control").hover( function() { $('.control').stop().animate({'bottom':0}, 100); }, function() { $('.control').stop().animate({'bottom':-40}, 1000); }); } /** * Following function is used to show controls on enter fullscreen */ function hideControl() { $('.caption').show(); $(".largescr").show(); $('.cb-item-title-container').css({'margin-top':0}); $(".control").hover( function() { $(this).unbind('mouseenter').unbind('mouseleave'); }); } /** * Following function is used to call events on toggle screen */ function toggleFullScreen (e) { isFullScreen = ( document.fullscreenElement || document.webkitFullscreenElement || document.mozFullScreenElement || document.msFullscreenElement ); if ( isFullScreen ) { showControl(); } else { hideControl(); } } }); /** * caption hide */ function caption_hide() { $('.control').stop().animate({'bottom':-51}, 500); $('.caption').stop().animate({'top':-200}, 500); } /** * caption hide */ function caption_show() { $('.control').stop().animate({'bottom':0}, 100); $('.caption').stop().animate({'top':-7}, 100); } function load_meta_data(start_time,video) { //before everything get started video.on('loadedmetadata', function() { video[0].currentTime = start_time; video[0].play(); //set video properties $('.loading').fadeOut(500); $('.init').hide(); }); } function get_current_video_source(object_id) { var video = document.getElementById(object_id); var src = video.currentSrc; return src; } /******CAUTION*****\ DO NOT REMOVE THIS COMMENTED CODE /***website logo***\ /*$('.cont').append('<div><img id="web" src=data:image/png;base64,'+ web +'> </div>'); $('#web').css({ 'top' : $top, 'left' : $left, 'bottom' : $bottom, 'right' : $right , 'position': 'absolute', 'width': '100px', 'height': '30px', }); */ //before everything get started /* video.on('loadedmetadata', function() { if (time_var == true){ video[0].currentTime = start_time; video[0].play(); } //set video properties $('.fcurrent').text(timeFormat(0)); $('.fduration').text(timeFormat(video[0].duration)); $('.caption').fadeIn(500); updateVolume(0, 0.7); $('.loading').fadeOut(500); if( autoplay == 'true' && !_HD_flag){ setTimeout(startBuffer, 10); $('.btnPlay').addClass('paused'); } if(time_var == true){ $('.init').hide(); } else{ if( autoplay == '' && !_HD_flag){ $('.init').fadeIn(2500); $('.btnPlay').removeClass('paused'); } } loadmetadata = false; });*/ // On press space vidoe play/pasue /*var play = false; $(window).keypress(function(e) { e.preventDefault(); if (e.keyCode == 0) { console.log(e.keyCode); if (!play) { play = true; $('.init').show(); $('.btnPlay').removeClass('paused'); video[0].pause(); _pause = 
true; } else { play = false; $('.init').hide(); $('.btnPlay').addClass('paused'); video[0].play(); _pause = false; } } });*/
{ $('.init').show(); $('.btnPlay').removeClass('paused'); video[0].pause(); _pause = true; }
conditional_block
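The record above centers on the player's play/pause branch, but its surrounding suffix is dominated by the progress- and volume-bar math. As a minimal standalone sketch (not part of the player source), the clamped position-to-percentage computation that both updatebar() and updateVolume() rely on can be isolated; trackLeft and trackWidth are illustrative stand-ins for the jQuery offset().left and width() values.

    // Sketch only: the clamping math shared by updatebar() and updateVolume(),
    // pulled out as a pure function. pageX is the mouse position, trackLeft and
    // trackWidth describe the bar being dragged.
    function positionToPercentage(pageX, trackLeft, trackWidth) {
        var percentage = 100 * (pageX - trackLeft) / trackWidth;
        if (percentage > 100) { percentage = 100; }
        if (percentage < 0) { percentage = 0; }
        return percentage;
    }

    // Example: a click 30px into a 120px-wide bar maps to 25%, so the player
    // would set video[0].currentTime = maxduration * 25 / 100.
    // positionToPercentage(130, 100, 120) === 25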
html5_player.js
isFullScreen = false; isButtonFullscreen = false; $(document).ready(function() { //INITIALIZE var video = $('#myVideo'); var container = $('.cont'); var test = $(".cont,.myVideo"); var loadmetadata = true; var _pause = false; //showing pause icon by before video load $('.btnPlay').addClass('paused'); attachEvents(); $('.loading').fadeIn(500); // setting player controls when video gets ready :) var ready_state_interval = setInterval(function(){ var vid = document.getElementById("myVideo"); var rdy = vid.readyState; if (rdy >= '2') { clearTimeout(ready_state_interval); //var source = get_current_video_source("myVideo"); $('.fduration').text(timeFormat(video[0].duration)); $('.fcurrent').text(timeFormat(0)); $('.loading').fadeOut(500); $('.caption').fadeIn(500); if( autoplay == 'true'){ setTimeout(startBuffer, 10); $('.btnPlay').addClass('paused'); } else { $('.init').fadeIn(2500); $('.btnPlay').removeClass('paused'); } updateVolume(0, 0.7); } },1000); // initializing the player click function $('.cont').on("click",function(){ if( autoplay == '') { $('.init').hide(); $('.btnPlay').addClass('paused'); $(this).unbind('click'); video[0].play(); $('.buffer').show(); //start to get video buffering data setTimeout(startBuffer, 10); } }); //setting up flag variable true for controls hover var cb_controls = false; $('.control').on("mouseenter",function(){ cb_controls = true; }); //setting up flag variable false for controls $('.control').on("mouseleave",function(){ cb_controls = false; }); // on mouse stop on container, controls get hide after 5 secs var lastTimeMouseMoved = ""; $('.cont').mousemove(function(e){ caption_show(); lastTimeMouseMoved = new Date().getTime(); var t=setInterval(function(){ var currentTime_new = new Date().getTime(); if(currentTime_new - lastTimeMouseMoved > 5000 && !_pause && !cb_controls) { caption_hide(); } },1000); }); // on hover controils and captions get showed var on_player = false; $('.cont').on("mouseenter",function(){ on_player = true; caption_show(); }); // on hover controils and captions get showed $('.cont').on("mouseleave",function(){ on_player = false; if (!_pause) caption_hide(); }); //display video buffering bar var startBuffer = function() { var currentBuffer = video[0].buffered.end(0); var maxduration = video[0].duration; var perc = 100 * currentBuffer / maxduration; $('.bufferBar').css('width',perc+'%'); if(currentBuffer < maxduration) { setTimeout(startBuffer, 2000); } }; //display current video play time video.on('timeupdate', function() { var currentPos = video[0].currentTime; var maxduration = video[0].duration; var perc = 100 * currentPos / maxduration; $('.timeBar').css('width',perc+'%'); $('.fcurrent').text(timeFormat(currentPos)); }); /* * Changes made by *Saqib Razzaq* * checks if windows size is less than 400 * if it is, hides volumebar and shows it when * you hover over vol icon hiding video time once * you move away from sound icon it hides itself showing video time */ if ($( window ).width() < 400) { $("#volSec").hide(); $("#soundIcon").mouseenter(function(){ $("#volSec").show(); $(".fcurrent").hide(); }); $(".volume").mouseleave(function(){ $("#volSec").hide(); $(".fcurrent").show(); }); } /* * ================== * volume changes end * ================== */ //CONTROLS EVENTS //video screen and play button clicked video.on('click', function() { playpause(); } ); $('.btnPlay').on('click', function() { playpause(); } ); $('.caption').on('click', function() { playpause(); } ); $('.init').on('click', function() { playpause(); } ); var playpause 
= function() { if(video[0].paused || video[0].ended) { $('.init').hide(); $('.btnPlay').addClass('paused'); video[0].play(); _pause = false; } else { $('.init').show(); $('.btnPlay').removeClass('paused'); video[0].pause(); _pause = true; } }; $( "#replay_v" ).click(function() { video[0].play(); $('#opacity').hide(); $('#related_1').hide(); $('.control').show(); $('.caption').show(); $('#web').show(); //showing pause icon by before video load $('.btnPlay').addClass('paused'); }); $( "#cancel_v" ).click(function() { $('#opacity').hide(); $('#related_1').hide(); $('.control').show(); $('.caption').show(); $('.init').show(); $('#web').show(); }); //speed text clicked $('.btnx1').on('click', function() { fastfowrd(this, 1); }); $('.btnx3').on('click', function() { fastfowrd(this, 3); }); var fastfowrd = function(obj, spd) { $('.text').removeClass('selected'); $(obj).addClass('selected'); video[0].playbackRate = spd; video[0].play(); }; //stop button clicked $('.btnStop').on('click', function() { $('.btnPlay').removeClass('paused'); updatebar($('.progress').offset().left); video[0].pause(); }); $('.btnFS').on('click', function() { $(this).toggleClass('enterbtnFS'); isButtonFullscreen = true; if($.isFunction(container[0].webkitRequestFullScreen)) { if(isFullScreen) { isFullScreen = false; document.webkitCancelFullScreen(); } else { isFullScreen = true; container[0].webkitRequestFullScreen(); } } else if($.isFunction(container[0].mozRequestFullScreen)) { if(isFullScreen) { isFullScreen = false; document.mozCancelFullScreen(); } else { isFullScreen = true; container[0].mozRequestFullScreen(); } } }); var _HD_flag = false; //HD on/off button clicked $(".hdon").on('click', function() { $(this).toggleClass('hdoff'); $('.myVideo').removeClass('init'); $('source', '#myVideo').eq(1).prependTo('#myVideo'); $('#myVideo')[0].load(); $('#myVideo')[0].play(); $('.init').hide(); $('.btnPlay').addClass('paused'); _HD_flag = true; }); //sound button clicked $('.sound').click(function() { video[0].muted = !video[0].muted; $(this).toggleClass('muted'); if(video[0].muted) { $('.volumeBar').css('width',0); } else{ $('.volumeBar').css('width', video[0].volume*100+'%'); } }); //VIDEO EVENTS //video canplay event video.on('canplay', function() { $('.loading').fadeOut(100); }); //video canplaythrough event //solve Chrome cache issue var completeloaded = false; video.on('canplaythrough', function() { completeloaded = true; }); //video ended event video.on('ended', function() { $('.btnPlay').removeClass('paused'); video[0].pause(); $('#opacity').show(); $('#related_1').show(); $('.control').hide(); $('.caption').hide(); $('#web').hide(); }); //video seeking event video.on('seeking', function() { //if video fully loaded, ignore loading screen if(!completeloaded) { $('.loading').fadeIn(200); } }); //video seeked event video.on('seeked', function() { $('.loading').fadeOut(200); }); //video waiting for more data event video.on('waiting', function() { $('.loading').fadeIn(200); }); //VIDEO PROGRESS BAR //when video timebar clicked var timeDrag = false; /* check for drag event */ $('.progress').on('mousedown', function(e) { timeDrag = true; updatebar(e.pageX); }); $(document).on('mouseup', function(e) { if(timeDrag) { timeDrag = false; updatebar(e.pageX); } }); $(document).on('mousemove', function(e) { if(timeDrag) { updatebar(e.pageX); } }); var updatebar = function(x) { var progress = $('.progress'); //calculate drag position //and update video currenttime //as well as progress bar var maxduration = video[0].duration; var 
position = x - progress.offset().left; var percentage = 100 * position / progress.width(); if(percentage > 100) { percentage = 100; } if(percentage < 0) { percentage = 0; } $('.timeBar').css('width',percentage+'%'); video[0].currentTime = maxduration * percentage / 100; }; //VOLUME BAR //volume bar event var volumeDrag = false; $('.volume').on('mousedown', function(e) { volumeDrag = true; video[0].muted = false; $('.sound').removeClass('muted'); updateVolume(e.pageX); }); $(document).on('mouseup', function(e) { if(volumeDrag) { volumeDrag = false; updateVolume(e.pageX); } }); $(document).on('mousemove', function(e) { if(volumeDrag) { updateVolume(e.pageX); } }); var updateVolume = function(x, vol) { var volume = $('.volume'); var percentage; //if only volume have specificed //then direct update volume if(vol) { percentage = vol * 100; } else { var position = x - volume.offset().left; percentage = 100 * position / volume.width(); } if(percentage > 100) { percentage = 100; } if(percentage < 0) { percentage = 0; } //update volume bar and video volume $('.volumeBar').css('width',percentage+'%'); video[0].volume = percentage / 100; //change sound icon based on volume if(video[0].volume == 0){ $('.sound').removeClass('sound2').addClass('muted'); } else if(video[0].volume > 0.5){ $('.sound').removeClass('muted').addClass('sound2'); } else{ $('.sound').removeClass('muted').removeClass('sound2'); } }; //getting large screen button only for watch video page if (enlarge_small == 'true') { $('.btmControl').append('<div class="smallscr largescr hbtn" id="largescr" title="Enlarge/Small Size"></div>'); $('#largescr').insertBefore("#fs"); } //Large screen function $(".largescr").click(function() { $(this).toggleClass('smallscr'); if(!$(this).hasClass('smallscr')) { $(".cb_player").animate({height:'+=220px',width:'+=390px'},"fast"); $('.html5_player_enlarge').addClass('col-lg-12').removeClass('col-lg-8'); } else { $(".cb_player").animate({height:'-=220px',width:'-=390px'},"fast"); $('.html5_player_enlarge').removeClass('col-lg-12').addClass('col-lg-8'); } }); //Right Click Menu $('#cont').append('<div id="rightcmenu"></div>'); //$('#rightcmenu').append('<span id="op">CB Html5 menu</span>'); $('#rightcmenu').append('<ul id="ritems"></ul>'); $('#ritems').append('<li id="copy" class="rlist copy">Show Video link</li>'); $('#ritems').append('<li class="rlist about">About</li>'); $('#ritems').append('<li class="rlist clip">Powered by Clipbucket</li>'); $('.cont').bind("contextmenu", function (e) { e.preventDefault(); // To prevent the default context menu. $("#rightcmenu").css("left", e.pageX); // For updating the menu position. $("#rightcmenu").css("top", e.pageY); // $("#rightcmenu").fadeIn(500, startFocusOut()); // For bringing the context menu in picture. }); function
() { $(document).on("click", function () { $("#rightcmenu").hide(500); // To hide the context menu $('.cont').off("click"); }); } $(".clip").click(function(event) { window.open(homepath, '_blank'); }); $(".about").click(function(event) { window.open(homepath, '_blank'); }); $('.copy').click(function() { alert(document.URL); }); //Logo $('.cb-playerLogo').append('<div id="path" class="path hbtn" > </div>'); $('#path').prop("href",product_link); // $("#path").insertAfter("#hd"); $('#path').css({ 'backgroundImage': 'url(data:image/png;base64,' + webpath + ')', 'margin-right':'7px', 'margin-top':'0px', 'background-repeat':'no-repeat', 'background-position' : '100% 50%', }); $("#path").click(function(event) { window.open(product_link, '_blank'); }); $('#name_v,#thumb_v').mouseover(function() { $(this).css({'opacity':'1', 'border': '0px solid #000', 'box-shadow':'1px 0 5px #fff', '-moz-box-shadow':'1px 0 5px #fff', '-webkit-box-shadow':'1px 0 5px #fff',}); }); $('#name_v,#thumb_v').mouseout(function() { $(this).css({'opacity':'.9', 'border': '0px solid #000', 'box-shadow':'0px 0 0px #fff', '-moz-box-shadow':'0px 0 0px #fff', '-webkit-box-shadow':'0px 0 0px #fff',}); }); // Setting in-video logo for player if( iv_logo_enable == 'yes') { $('.cont').append('<img id="web" src=data:image/png;base64,'+ web +'> '); $('#web').css({ 'top' : $top, 'left' : $left, 'bottom' : $bottom, 'right' : $right , 'position': 'absolute', 'width': '100px', 'height': '30px', 'z-index' : '-1' }); } //For multiserver plugin videos :) if(files) { var toggle = false; var time_var = false; var start_time = 0; $('#res').on('click',function(event){ if(toggle == false) { $('.video_files').show(10); toggle = true; } else { $('.video_files').hide(10); toggle = false; } event.stopPropagation(); }); $('html').click(function() { $('.video_files').hide(100); toggle =false; }); // All multiserver Video Files (Json) var jsonData = JSON.parse(files); // cheking for if 360 resolution is not available if(!jsonData["360"]) { video.attr('src',jsonData["240"]); $('#li_240').addClass('selected_player'); } else $('#li_360').addClass('selected_player'); $.each(jsonData, function (key, data) { $('#li_' + key).on('mouseenter', function(){ $(this).css({'background-color':'#000'}); }); $('#li_' + key).on('mouseleave', function(){ $(this).css({'background-color':'#1D1D1D'}); }); $('#li_' + key).on('click', function(){ //getting current time variable for video to play .. on change resolution and passing to loadmetadata start_time = video[0].currentTime; //Changing source attribute for the required resolution .. console.log("current_video=>"+jsonData[key]); video.attr('src',jsonData[key]); load_meta_data(start_time,video); time_var = true; }); }); $('#ul_files .list_player').click(function() { $('#ul_files .list_player.selected_player').removeClass('selected_player'); $(this).closest('li').addClass('selected_player'); }); } /** * For multiserver plugin videos <<--END-->> :) */ //Time format converter - 00:00 var timeFormat = function(seconds){ var m = Math.floor(seconds/60)<10 ? "0"+Math.floor(seconds/60) : Math.floor(seconds/60); var s = Math.floor(seconds-(m*60))<10 ? 
"0"+Math.floor(seconds-(m*60)) : Math.floor(seconds-(m*60)); return m+":"+s; }; /** * Following function is used to attach fullscreen events to document */ function attachEvents (){ $( document ).on( 'fullscreenchange', toggleFullScreen ); $( document ).on( 'webkitfullscreenchange', toggleFullScreen ); $( document ).on( 'mozfullscreenchange', toggleFullScreen ); $( document ).on( 'MSFullscreenchange', toggleFullScreen ); } /** * Following events are used to check fullscreen events */ document.addEventListener("fullscreenchange", function (e) { toggleFullScreen(e); }, false); document.addEventListener("mozfullscreenchange", function (e) { toggleFullScreen(e); }, false); document.addEventListener("webkitfullscreenchange", function (e) { toggleFullScreen(e); }, false); /** * Following function is used to show controls on exit fullscreen */ function showControl(){ $('.caption').hide(); $(".largescr").hide(); $(".control").hover( function() { $('.control').stop().animate({'bottom':0}, 100); }, function() { $('.control').stop().animate({'bottom':-40}, 1000); }); } /** * Following function is used to show controls on enter fullscreen */ function hideControl() { $('.caption').show(); $(".largescr").show(); $('.cb-item-title-container').css({'margin-top':0}); $(".control").hover( function() { $(this).unbind('mouseenter').unbind('mouseleave'); }); } /** * Following function is used to call events on toggle screen */ function toggleFullScreen (e) { isFullScreen = ( document.fullscreenElement || document.webkitFullscreenElement || document.mozFullScreenElement || document.msFullscreenElement ); if ( isFullScreen ) { showControl(); } else { hideControl(); } } }); /** * caption hide */ function caption_hide() { $('.control').stop().animate({'bottom':-51}, 500); $('.caption').stop().animate({'top':-200}, 500); } /** * caption hide */ function caption_show() { $('.control').stop().animate({'bottom':0}, 100); $('.caption').stop().animate({'top':-7}, 100); } function load_meta_data(start_time,video) { //before everything get started video.on('loadedmetadata', function() { video[0].currentTime = start_time; video[0].play(); //set video properties $('.loading').fadeOut(500); $('.init').hide(); }); } function get_current_video_source(object_id) { var video = document.getElementById(object_id); var src = video.currentSrc; return src; } /******CAUTION*****\ DO NOT REMOVE THIS COMMENTED CODE /***website logo***\ /*$('.cont').append('<div><img id="web" src=data:image/png;base64,'+ web +'> </div>'); $('#web').css({ 'top' : $top, 'left' : $left, 'bottom' : $bottom, 'right' : $right , 'position': 'absolute', 'width': '100px', 'height': '30px', }); */ //before everything get started /* video.on('loadedmetadata', function() { if (time_var == true){ video[0].currentTime = start_time; video[0].play(); } //set video properties $('.fcurrent').text(timeFormat(0)); $('.fduration').text(timeFormat(video[0].duration)); $('.caption').fadeIn(500); updateVolume(0, 0.7); $('.loading').fadeOut(500); if( autoplay == 'true' && !_HD_flag){ setTimeout(startBuffer, 10); $('.btnPlay').addClass('paused'); } if(time_var == true){ $('.init').hide(); } else{ if( autoplay == '' && !_HD_flag){ $('.init').fadeIn(2500); $('.btnPlay').removeClass('paused'); } } loadmetadata = false; });*/ // On press space vidoe play/pasue /*var play = false; $(window).keypress(function(e) { e.preventDefault(); if (e.keyCode == 0) { console.log(e.keyCode); if (!play) { play = true; $('.init').show(); $('.btnPlay').removeClass('paused'); video[0].pause(); _pause = 
true; } else { play = false; $('.init').hide(); $('.btnPlay').addClass('paused'); video[0].play(); _pause = false; } } });*/
startFocusOut
identifier_name
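The missing identifier in the record above is startFocusOut, the helper that hides the custom right-click menu on the next document click. The same record's suffix also carries the player's timeFormat() helper; below is a hedged, equivalent sketch that uses String.prototype.padStart instead of manual "0" concatenation and produces the same MM:SS strings shown in .fcurrent and .fduration.

    // Sketch only: an equivalent of the player's timeFormat() helper.
    function formatTime(seconds) {
        var m = Math.floor(seconds / 60);
        var s = Math.floor(seconds - m * 60);
        return String(m).padStart(2, '0') + ':' + String(s).padStart(2, '0');
    }

    // formatTime(0)    -> "00:00"
    // formatTime(75)   -> "01:15"
    // formatTime(3599) -> "59:59"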
html5_player.js
isFullScreen = false; isButtonFullscreen = false; $(document).ready(function() { //INITIALIZE var video = $('#myVideo'); var container = $('.cont'); var test = $(".cont,.myVideo"); var loadmetadata = true; var _pause = false; //showing pause icon by before video load $('.btnPlay').addClass('paused'); attachEvents(); $('.loading').fadeIn(500); // setting player controls when video gets ready :) var ready_state_interval = setInterval(function(){ var vid = document.getElementById("myVideo"); var rdy = vid.readyState; if (rdy >= '2') { clearTimeout(ready_state_interval); //var source = get_current_video_source("myVideo"); $('.fduration').text(timeFormat(video[0].duration)); $('.fcurrent').text(timeFormat(0)); $('.loading').fadeOut(500); $('.caption').fadeIn(500); if( autoplay == 'true'){ setTimeout(startBuffer, 10); $('.btnPlay').addClass('paused'); } else { $('.init').fadeIn(2500); $('.btnPlay').removeClass('paused'); } updateVolume(0, 0.7); } },1000); // initializing the player click function $('.cont').on("click",function(){ if( autoplay == '') { $('.init').hide(); $('.btnPlay').addClass('paused'); $(this).unbind('click'); video[0].play(); $('.buffer').show(); //start to get video buffering data setTimeout(startBuffer, 10); } }); //setting up flag variable true for controls hover var cb_controls = false; $('.control').on("mouseenter",function(){ cb_controls = true; }); //setting up flag variable false for controls $('.control').on("mouseleave",function(){ cb_controls = false; }); // on mouse stop on container, controls get hide after 5 secs var lastTimeMouseMoved = ""; $('.cont').mousemove(function(e){ caption_show(); lastTimeMouseMoved = new Date().getTime(); var t=setInterval(function(){ var currentTime_new = new Date().getTime(); if(currentTime_new - lastTimeMouseMoved > 5000 && !_pause && !cb_controls) { caption_hide(); } },1000); }); // on hover controils and captions get showed var on_player = false; $('.cont').on("mouseenter",function(){ on_player = true; caption_show(); }); // on hover controils and captions get showed $('.cont').on("mouseleave",function(){ on_player = false; if (!_pause) caption_hide(); }); //display video buffering bar var startBuffer = function() { var currentBuffer = video[0].buffered.end(0); var maxduration = video[0].duration; var perc = 100 * currentBuffer / maxduration; $('.bufferBar').css('width',perc+'%'); if(currentBuffer < maxduration) { setTimeout(startBuffer, 2000); } }; //display current video play time video.on('timeupdate', function() { var currentPos = video[0].currentTime; var maxduration = video[0].duration; var perc = 100 * currentPos / maxduration; $('.timeBar').css('width',perc+'%'); $('.fcurrent').text(timeFormat(currentPos)); }); /* * Changes made by *Saqib Razzaq* * checks if windows size is less than 400 * if it is, hides volumebar and shows it when * you hover over vol icon hiding video time once * you move away from sound icon it hides itself showing video time */ if ($( window ).width() < 400) { $("#volSec").hide(); $("#soundIcon").mouseenter(function(){ $("#volSec").show(); $(".fcurrent").hide(); }); $(".volume").mouseleave(function(){ $("#volSec").hide(); $(".fcurrent").show(); }); } /* * ================== * volume changes end * ================== */ //CONTROLS EVENTS //video screen and play button clicked video.on('click', function() { playpause(); } ); $('.btnPlay').on('click', function() { playpause(); } ); $('.caption').on('click', function() { playpause(); } ); $('.init').on('click', function() { playpause(); } ); var playpause 
= function() { if(video[0].paused || video[0].ended) { $('.init').hide(); $('.btnPlay').addClass('paused'); video[0].play(); _pause = false; } else { $('.init').show(); $('.btnPlay').removeClass('paused'); video[0].pause(); _pause = true; } }; $( "#replay_v" ).click(function() { video[0].play(); $('#opacity').hide(); $('#related_1').hide(); $('.control').show(); $('.caption').show(); $('#web').show(); //showing pause icon by before video load $('.btnPlay').addClass('paused'); }); $( "#cancel_v" ).click(function() { $('#opacity').hide(); $('#related_1').hide(); $('.control').show(); $('.caption').show(); $('.init').show(); $('#web').show(); }); //speed text clicked $('.btnx1').on('click', function() { fastfowrd(this, 1); }); $('.btnx3').on('click', function() { fastfowrd(this, 3); }); var fastfowrd = function(obj, spd) { $('.text').removeClass('selected'); $(obj).addClass('selected'); video[0].playbackRate = spd; video[0].play(); }; //stop button clicked $('.btnStop').on('click', function() { $('.btnPlay').removeClass('paused'); updatebar($('.progress').offset().left); video[0].pause(); }); $('.btnFS').on('click', function() { $(this).toggleClass('enterbtnFS'); isButtonFullscreen = true; if($.isFunction(container[0].webkitRequestFullScreen)) { if(isFullScreen) { isFullScreen = false; document.webkitCancelFullScreen(); } else { isFullScreen = true; container[0].webkitRequestFullScreen(); } } else if($.isFunction(container[0].mozRequestFullScreen)) { if(isFullScreen) { isFullScreen = false; document.mozCancelFullScreen(); } else { isFullScreen = true; container[0].mozRequestFullScreen(); } } }); var _HD_flag = false; //HD on/off button clicked $(".hdon").on('click', function() { $(this).toggleClass('hdoff'); $('.myVideo').removeClass('init'); $('source', '#myVideo').eq(1).prependTo('#myVideo'); $('#myVideo')[0].load(); $('#myVideo')[0].play(); $('.init').hide(); $('.btnPlay').addClass('paused'); _HD_flag = true; }); //sound button clicked $('.sound').click(function() { video[0].muted = !video[0].muted; $(this).toggleClass('muted'); if(video[0].muted) { $('.volumeBar').css('width',0); } else{ $('.volumeBar').css('width', video[0].volume*100+'%'); } }); //VIDEO EVENTS //video canplay event video.on('canplay', function() { $('.loading').fadeOut(100); }); //video canplaythrough event //solve Chrome cache issue var completeloaded = false; video.on('canplaythrough', function() { completeloaded = true; }); //video ended event video.on('ended', function() { $('.btnPlay').removeClass('paused'); video[0].pause(); $('#opacity').show(); $('#related_1').show(); $('.control').hide(); $('.caption').hide(); $('#web').hide(); }); //video seeking event video.on('seeking', function() { //if video fully loaded, ignore loading screen if(!completeloaded) { $('.loading').fadeIn(200); } }); //video seeked event video.on('seeked', function() { $('.loading').fadeOut(200); }); //video waiting for more data event video.on('waiting', function() { $('.loading').fadeIn(200); }); //VIDEO PROGRESS BAR //when video timebar clicked var timeDrag = false; /* check for drag event */ $('.progress').on('mousedown', function(e) { timeDrag = true; updatebar(e.pageX); }); $(document).on('mouseup', function(e) { if(timeDrag) { timeDrag = false; updatebar(e.pageX); } }); $(document).on('mousemove', function(e) { if(timeDrag) { updatebar(e.pageX); } }); var updatebar = function(x) { var progress = $('.progress'); //calculate drag position //and update video currenttime //as well as progress bar var maxduration = video[0].duration; var 
position = x - progress.offset().left; var percentage = 100 * position / progress.width(); if(percentage > 100) { percentage = 100; } if(percentage < 0) { percentage = 0; } $('.timeBar').css('width',percentage+'%'); video[0].currentTime = maxduration * percentage / 100; }; //VOLUME BAR //volume bar event var volumeDrag = false; $('.volume').on('mousedown', function(e) { volumeDrag = true; video[0].muted = false; $('.sound').removeClass('muted'); updateVolume(e.pageX); }); $(document).on('mouseup', function(e) { if(volumeDrag) { volumeDrag = false; updateVolume(e.pageX); } }); $(document).on('mousemove', function(e) { if(volumeDrag) { updateVolume(e.pageX); } }); var updateVolume = function(x, vol) { var volume = $('.volume'); var percentage; //if only volume have specificed //then direct update volume if(vol) { percentage = vol * 100; } else { var position = x - volume.offset().left; percentage = 100 * position / volume.width(); } if(percentage > 100) { percentage = 100; } if(percentage < 0) { percentage = 0; } //update volume bar and video volume $('.volumeBar').css('width',percentage+'%'); video[0].volume = percentage / 100; //change sound icon based on volume if(video[0].volume == 0){ $('.sound').removeClass('sound2').addClass('muted'); } else if(video[0].volume > 0.5){ $('.sound').removeClass('muted').addClass('sound2'); } else{ $('.sound').removeClass('muted').removeClass('sound2'); } }; //getting large screen button only for watch video page if (enlarge_small == 'true') { $('.btmControl').append('<div class="smallscr largescr hbtn" id="largescr" title="Enlarge/Small Size"></div>'); $('#largescr').insertBefore("#fs"); } //Large screen function $(".largescr").click(function() { $(this).toggleClass('smallscr'); if(!$(this).hasClass('smallscr')) { $(".cb_player").animate({height:'+=220px',width:'+=390px'},"fast"); $('.html5_player_enlarge').addClass('col-lg-12').removeClass('col-lg-8'); } else { $(".cb_player").animate({height:'-=220px',width:'-=390px'},"fast"); $('.html5_player_enlarge').removeClass('col-lg-12').addClass('col-lg-8'); } }); //Right Click Menu $('#cont').append('<div id="rightcmenu"></div>'); //$('#rightcmenu').append('<span id="op">CB Html5 menu</span>'); $('#rightcmenu').append('<ul id="ritems"></ul>'); $('#ritems').append('<li id="copy" class="rlist copy">Show Video link</li>'); $('#ritems').append('<li class="rlist about">About</li>'); $('#ritems').append('<li class="rlist clip">Powered by Clipbucket</li>'); $('.cont').bind("contextmenu", function (e) { e.preventDefault(); // To prevent the default context menu. $("#rightcmenu").css("left", e.pageX); // For updating the menu position. $("#rightcmenu").css("top", e.pageY); // $("#rightcmenu").fadeIn(500, startFocusOut()); // For bringing the context menu in picture. 
}); function startFocusOut() { $(document).on("click", function () { $("#rightcmenu").hide(500); // To hide the context menu $('.cont').off("click"); }); } $(".clip").click(function(event) { window.open(homepath, '_blank'); }); $(".about").click(function(event) { window.open(homepath, '_blank'); }); $('.copy').click(function() { alert(document.URL); }); //Logo $('.cb-playerLogo').append('<div id="path" class="path hbtn" > </div>'); $('#path').prop("href",product_link); // $("#path").insertAfter("#hd"); $('#path').css({ 'backgroundImage': 'url(data:image/png;base64,' + webpath + ')', 'margin-right':'7px', 'margin-top':'0px', 'background-repeat':'no-repeat', 'background-position' : '100% 50%', }); $("#path").click(function(event) { window.open(product_link, '_blank'); }); $('#name_v,#thumb_v').mouseover(function() { $(this).css({'opacity':'1', 'border': '0px solid #000', 'box-shadow':'1px 0 5px #fff', '-moz-box-shadow':'1px 0 5px #fff', '-webkit-box-shadow':'1px 0 5px #fff',}); }); $('#name_v,#thumb_v').mouseout(function() { $(this).css({'opacity':'.9', 'border': '0px solid #000', 'box-shadow':'0px 0 0px #fff', '-moz-box-shadow':'0px 0 0px #fff', '-webkit-box-shadow':'0px 0 0px #fff',}); }); // Setting in-video logo for player if( iv_logo_enable == 'yes') { $('.cont').append('<img id="web" src=data:image/png;base64,'+ web +'> '); $('#web').css({ 'top' : $top, 'left' : $left, 'bottom' : $bottom, 'right' : $right , 'position': 'absolute', 'width': '100px', 'height': '30px', 'z-index' : '-1' }); } //For multiserver plugin videos :) if(files) { var toggle = false; var time_var = false; var start_time = 0; $('#res').on('click',function(event){ if(toggle == false) { $('.video_files').show(10); toggle = true; } else { $('.video_files').hide(10); toggle = false; } event.stopPropagation(); }); $('html').click(function() { $('.video_files').hide(100); toggle =false; }); // All multiserver Video Files (Json) var jsonData = JSON.parse(files); // cheking for if 360 resolution is not available
video.attr('src',jsonData["240"]); $('#li_240').addClass('selected_player'); } else $('#li_360').addClass('selected_player'); $.each(jsonData, function (key, data) { $('#li_' + key).on('mouseenter', function(){ $(this).css({'background-color':'#000'}); }); $('#li_' + key).on('mouseleave', function(){ $(this).css({'background-color':'#1D1D1D'}); }); $('#li_' + key).on('click', function(){ //getting current time variable for video to play .. on change resolution and passing to loadmetadata start_time = video[0].currentTime; //Changing source attribute for the required resolution .. console.log("current_video=>"+jsonData[key]); video.attr('src',jsonData[key]); load_meta_data(start_time,video); time_var = true; }); }); $('#ul_files .list_player').click(function() { $('#ul_files .list_player.selected_player').removeClass('selected_player'); $(this).closest('li').addClass('selected_player'); }); } /** * For multiserver plugin videos <<--END-->> :) */ //Time format converter - 00:00 var timeFormat = function(seconds){ var m = Math.floor(seconds/60)<10 ? "0"+Math.floor(seconds/60) : Math.floor(seconds/60); var s = Math.floor(seconds-(m*60))<10 ? "0"+Math.floor(seconds-(m*60)) : Math.floor(seconds-(m*60)); return m+":"+s; }; /** * Following function is used to attach fullscreen events to document */ function attachEvents (){ $( document ).on( 'fullscreenchange', toggleFullScreen ); $( document ).on( 'webkitfullscreenchange', toggleFullScreen ); $( document ).on( 'mozfullscreenchange', toggleFullScreen ); $( document ).on( 'MSFullscreenchange', toggleFullScreen ); } /** * Following events are used to check fullscreen events */ document.addEventListener("fullscreenchange", function (e) { toggleFullScreen(e); }, false); document.addEventListener("mozfullscreenchange", function (e) { toggleFullScreen(e); }, false); document.addEventListener("webkitfullscreenchange", function (e) { toggleFullScreen(e); }, false); /** * Following function is used to show controls on exit fullscreen */ function showControl(){ $('.caption').hide(); $(".largescr").hide(); $(".control").hover( function() { $('.control').stop().animate({'bottom':0}, 100); }, function() { $('.control').stop().animate({'bottom':-40}, 1000); }); } /** * Following function is used to show controls on enter fullscreen */ function hideControl() { $('.caption').show(); $(".largescr").show(); $('.cb-item-title-container').css({'margin-top':0}); $(".control").hover( function() { $(this).unbind('mouseenter').unbind('mouseleave'); }); } /** * Following function is used to call events on toggle screen */ function toggleFullScreen (e) { isFullScreen = ( document.fullscreenElement || document.webkitFullscreenElement || document.mozFullScreenElement || document.msFullscreenElement ); if ( isFullScreen ) { showControl(); } else { hideControl(); } } }); /** * caption hide */ function caption_hide() { $('.control').stop().animate({'bottom':-51}, 500); $('.caption').stop().animate({'top':-200}, 500); } /** * caption hide */ function caption_show() { $('.control').stop().animate({'bottom':0}, 100); $('.caption').stop().animate({'top':-7}, 100); } function load_meta_data(start_time,video) { //before everything get started video.on('loadedmetadata', function() { video[0].currentTime = start_time; video[0].play(); //set video properties $('.loading').fadeOut(500); $('.init').hide(); }); } function get_current_video_source(object_id) { var video = document.getElementById(object_id); var src = video.currentSrc; return src; } /******CAUTION*****\ DO NOT REMOVE THIS 
COMMENTED CODE /***website logo***\ /*$('.cont').append('<div><img id="web" src=data:image/png;base64,'+ web +'> </div>'); $('#web').css({ 'top' : $top, 'left' : $left, 'bottom' : $bottom, 'right' : $right , 'position': 'absolute', 'width': '100px', 'height': '30px', }); */ //before everything get started /* video.on('loadedmetadata', function() { if (time_var == true){ video[0].currentTime = start_time; video[0].play(); } //set video properties $('.fcurrent').text(timeFormat(0)); $('.fduration').text(timeFormat(video[0].duration)); $('.caption').fadeIn(500); updateVolume(0, 0.7); $('.loading').fadeOut(500); if( autoplay == 'true' && !_HD_flag){ setTimeout(startBuffer, 10); $('.btnPlay').addClass('paused'); } if(time_var == true){ $('.init').hide(); } else{ if( autoplay == '' && !_HD_flag){ $('.init').fadeIn(2500); $('.btnPlay').removeClass('paused'); } } loadmetadata = false; });*/ // On press space vidoe play/pasue /*var play = false; $(window).keypress(function(e) { e.preventDefault(); if (e.keyCode == 0) { console.log(e.keyCode); if (!play) { play = true; $('.init').show(); $('.btnPlay').removeClass('paused'); video[0].pause(); _pause = true; } else { play = false; $('.init').hide(); $('.btnPlay').addClass('paused'); video[0].play(); _pause = false; } } });*/
if(!jsonData["360"]) {
random_line_split
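The split point in the record above falls on the multiserver plugin's default-resolution check. A minimal sketch of that fallback is shown below; the quality keys ("240", "360") come from the code itself, while the returned object shape is an assumption made for illustration, not the plugin's actual API.

    // Sketch only: mirror of the default-quality check run after JSON.parse(files).
    // When 360p is missing, the source falls back to 240p; otherwise the page's
    // existing src is kept and only the 360p list entry is highlighted.
    function pickDefaultQuality(jsonData) {
        if (!jsonData["360"]) {
            return { fallbackSrc: jsonData["240"], selectedItem: '#li_240' };
        }
        return { fallbackSrc: null, selectedItem: '#li_360' };
    }

    // var choice = pickDefaultQuality(JSON.parse(files));
    // if (choice.fallbackSrc) { video.attr('src', choice.fallbackSrc); }
    // $(choice.selectedItem).addClass('selected_player');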
SignalDef.rs
// Copyright (c) 2021 Quark Container Authors / 2018 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use core::fmt; use alloc::collections::linked_list::LinkedList; use alloc::boxed::Box; use super::qlib::common::*; use super::qlib::linux_def::*; use super::kernel::posixtimer::*; use super::task::*; #[repr(C)] #[derive(Debug, Copy, Clone, Default)] //copy from https://elixir.bootlin.com/linux/latest/source/arch/x86/include/uapi/asm/ptrace.h#L18 pub struct PtRegs { /* * C ABI says these regs are callee-preserved. They aren't saved on kernel entry * unless syscall needs a complete, fully filled "struct pt_regs". */ pub r15: u64, pub r14: u64, pub r13: u64, pub r12: u64, pub rbp: u64, pub rbx: u64, /* These regs are callee-clobbered. Always saved on kernel entry. */ pub r11: u64, pub r10: u64, pub r9: u64, pub r8: u64, pub rax: u64, pub rcx: u64, pub rdx: u64, pub rsi: u64, pub rdi: u64, /* * On syscall entry, this is syscall#. On CPU exception, this is error code. * On hw interrupt, it's IRQ number: */ pub orig_rax: u64, /* Return frame for iretq */ pub rip: u64, pub cs: u64, pub eflags: u64, pub rsp: u64, pub ss: u64, /* top of stack page */ } impl PtRegs { pub fn Set(&mut self, ctx: &SigContext) { self.r15 = ctx.r15; self.r14 = ctx.r14; self.r13 = ctx.r13; self.r12 = ctx.r12; self.rbp = ctx.rbp; self.rbx = ctx.rbx; self.r11 = ctx.r11; self.r10 = ctx.r10; self.r9 = ctx.r9; self.r8 = ctx.r8; self.rax = ctx.rax; self.rcx = ctx.rcx; self.rdx = ctx.rdx; self.rsi = ctx.rsi; self.rdi = ctx.rdi; self.orig_rax = ctx.rax; self.rip = ctx.rip; self.cs = ctx.cs as u64; self.eflags = ctx.eflags; self.rsp = ctx.rsp; self.ss = ctx.ss as u64; } } #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct SigRetInfo { pub sigInfoAddr: u64, pub sigCtxAddr: u64, pub ret: u64, } #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct Kill { pub pid: i32, pub uid: i32, } #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct SigTimer { pub tid: i32, pub overrun: i32, pub sigval: u64, pub sysPrivate: i32, } #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct SigRt { pub pid: i32, pub uid: u32, pub sigval: u64, } #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct SigChld { pub pid: i32, //child pub uid: u32, //sender's uid pub status: i32, //Exit code pub uTime: i32, pub sTime: i32, } #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct SigFault { pub addr: u64, pub lsb: u16, } #[repr(C)] #[derive(Copy, Clone)] pub struct SignalInfo { pub Signo: i32, // Signal number pub Errno: i32, // Errno value pub Code: i32, // Signal code pub _r: u32, pub fields: [u8; 128 - 16], } impl<'a> Default for SignalInfo { fn default() -> Self { return Self { Signo: 0, Errno: 0, Code: 0, _r: 0, fields: [0; 128 - 16] } } } impl core::fmt::Debug for SignalInfo { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SignalInfo") .field("Signo", &self.Signo) .field("Errno", &self.Errno) .field("Code", &self.Code) .finish() } } impl SignalInfo { pub fn 
SignalInfoPriv(sig: Signal) -> Self { return Self { Signo: sig.0, Code: Self::SIGNAL_INFO_KERNEL, ..Default::default() } } // FixSignalCodeForUser fixes up si_code. // // The si_code we get from Linux may contain the kernel-specific code in the // top 16 bits if it's positive (e.g., from ptrace). Linux's // copy_siginfo_to_user does // err |= __put_user((short)from->si_code, &to->si_code); // to mask out those bits and we need to do the same. pub fn FixSignalCodeForUser(&mut self) { if self.Code > 0 { self.Code &= 0xffff; } } pub fn Kill(&self) -> &mut Kill { let addr = &self.fields[0] as *const _ as u64; return unsafe { &mut *(addr as *mut Kill) } } pub fn SigTimer(&mut self) -> &mut SigTimer { let addr = &self.fields[0] as *const _ as u64; return unsafe { &mut *(addr as *mut SigTimer) } } pub fn SigRt(&mut self) -> &mut SigRt { let addr = &self.fields[0] as *const _ as u64; return unsafe { &mut *(addr as *mut SigRt) } } pub fn SigChld(&mut self) -> &mut SigChld { let addr = &self.fields[0] as *const _ as u64; return unsafe { &mut *(addr as *mut SigChld) } } pub fn SigFault(&self) -> &mut SigFault { let addr = &self.fields[0] as *const _ as u64; return unsafe { &mut *(addr as *mut SigFault) } } // SignalInfoUser (properly SI_USER) indicates that a signal was sent from // a kill() or raise() syscall. pub const SIGNAL_INFO_USER: i32 = 0; // SignalInfoKernel (properly SI_KERNEL) indicates that the signal was sent // by the kernel. pub const SIGNAL_INFO_KERNEL: i32 = 0x80; // SignalInfoTimer (properly SI_TIMER) indicates that the signal was sent // by an expired timer. pub const SIGNAL_INFO_TIMER: i32 = -2; // SignalInfoTkill (properly SI_TKILL) indicates that the signal was sent // from a tkill() or tgkill() syscall. pub const SIGNAL_INFO_TKILL: i32 = -6; // CLD_* codes are only meaningful for SIGCHLD. // CLD_EXITED indicates that a task exited. pub const CLD_EXITED: i32 = 1; // CLD_KILLED indicates that a task was killed by a signal. pub const CLD_KILLED: i32 = 2; // CLD_DUMPED indicates that a task was killed by a signal and then dumped // core. pub const CLD_DUMPED: i32 = 3; // CLD_TRAPPED indicates that a task was stopped by ptrace. pub const CLD_TRAPPED: i32 = 4; // CLD_STOPPED indicates that a thread group completed a group stop. pub const CLD_STOPPED: i32 = 5; // CLD_CONTINUED indicates that a group-stopped thread group was continued. pub const CLD_CONTINUED: i32 = 6; // SYS_* codes are only meaningful for SIGSYS. // SYS_SECCOMP indicates that a signal originates from seccomp. pub const SYS_SECCOMP: i32 = 1; // TRAP_* codes are only meaningful for SIGTRAP. // TRAP_BRKPT indicates a breakpoint trap. 
pub const TRAP_BRKPT: i32 = 1; } pub const UC_FP_XSTATE: u64 = 1; pub const UC_SIGCONTEXT_SS: u64 = 2; pub const UC_STRICT_RESTORE_SS: u64 = 4; // https://elixir.bootlin.com/linux/latest/source/include/uapi/asm-generic/ucontext.h#L5 #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct UContext { pub Flags: u64, pub Link: u64, pub Stack: SignalStack, pub MContext: SigContext, pub Sigset: u64, } impl UContext { pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64, alt: &SignalStack) -> Self { return Self { Flags: 2, Link: 0, Stack: alt.clone(), MContext: SigContext::New(ptRegs, oldMask, cr2, fpstate), Sigset: 0, } } } // https://elixir.bootlin.com/linux/latest/source/arch/x86/include/uapi/asm/sigcontext.h#L284 #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct SigContext { pub r8: u64, pub r9: u64, pub r10: u64, pub r11: u64, pub r12: u64, pub r13: u64, pub r14: u64, pub r15: u64, pub rdi: u64, pub rsi: u64, pub rbp: u64, pub rbx: u64, pub rdx: u64, pub rax: u64, pub rcx: u64, pub rsp: u64, pub rip: u64, pub eflags: u64, pub cs: u16, pub gs: u16, // always 0 on amd64. pub fs: u16, // always 0 on amd64. pub ss: u16, // only restored if _UC_STRICT_RESTORE_SS (unsupported). pub err: u64, pub trapno: u64, pub oldmask: u64, pub cr2: u64, // Pointer to a struct _fpstate. pub fpstate: u64, pub reserved: [u64; 8], } impl SigContext { pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64) -> Self { return Self { r8: ptRegs.r8, r9: ptRegs.r9, r10: ptRegs.r10, r11: ptRegs.r11, r12: ptRegs.r12, r13: ptRegs.r13, r14: ptRegs.r14, r15: ptRegs.r15, rdi: ptRegs.rdi, rsi: ptRegs.rsi, rbp: ptRegs.rbp, rbx: ptRegs.rbx, rdx: ptRegs.rdx, rax: ptRegs.rax, rcx: ptRegs.rcx, rsp: ptRegs.rsp, rip: ptRegs.rip, eflags: ptRegs.eflags, cs: ptRegs.cs as u16, gs: 0, fs: 0, ss: ptRegs.ss as u16, err: 0, trapno: 0, oldmask: oldMask, cr2: cr2, fpstate: fpstate, ..Default::default() } } } #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct SigFlag(pub u64); impl SigFlag { pub const SIGNAL_FLAG_NO_CLD_STOP: u64 = 0x00000001; pub const SIGNAL_FLAG_NO_CLD_WAIT: u64 = 0x00000002; pub const SIGNAL_FLAG_SIG_INFO: u64 = 0x00000004; pub const SIGNAL_FLAG_RESTORER: u64 = 0x04000000; pub const SIGNAL_FLAG_ON_STACK: u64 = 0x08000000; pub const SIGNAL_FLAG_RESTART: u64 = 0x10000000; pub const SIGNAL_FLAG_INTERRUPT: u64 = 0x20000000; pub const SIGNAL_FLAG_NO_DEFER: u64 = 0x40000000; pub const SIGNAL_FLAG_RESET_HANDLER: u64 = 0x80000000; pub fn IsNoCldStop(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP != 0; } pub fn IsNoCldWait(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_NO_CLD_WAIT != 0; } pub fn IsSigInfo(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_SIG_INFO != 0; } pub fn IsNoDefer(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_NO_DEFER != 0; } pub fn IsRestart(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_RESTART != 0; } pub fn IsResetHandler(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_RESET_HANDLER != 0; } pub fn IsOnStack(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_ON_STACK != 0; } pub fn HasRestorer(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_RESTORER != 0; } pub fn IsNoChildStop(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP != 0 } } // https://github.com/lattera/glibc/blob/master/sysdeps/unix/sysv/linux/kernel_sigaction.h #[derive(Copy, Clone, Default)] #[repr(C)] pub struct SigAct { pub handler: u64, pub flags: SigFlag, pub restorer: u64, pub mask: u64, } impl fmt::Debug for SigAct { fn fmt(&self, f: &mut 
fmt::Formatter<'_>) -> fmt::Result { write!(f, "SigAction {{ \n\ handler: {:x}, \n\ flag : {:x}, \n \ flags::HasRestorer: {}, \n \ flags::IsOnStack: {}, \n \ flags::IsRestart: {}, \n \ flags::IsResetHandler: {}, \n \ flags::IsNoDefer: {}, \n \ flags::IsSigInfo: {}, \n \ restorer : {:x}, \n\ mask: {:x}, \n}}", self.handler, self.flags.0, self.flags.HasRestorer(), self.flags.IsOnStack(), self.flags.IsRestart(), self.flags.IsResetHandler(), self.flags.IsNoDefer(), self.flags.IsSigInfo(), self.restorer, self.mask ) } } impl SigAct { // SignalActDefault is SIG_DFL and specifies that the default behavior for // a signal should be taken. pub const SIGNAL_ACT_DEFAULT: u64 = 0; // SignalActIgnore is SIG_IGN and specifies that a signal should be // ignored. pub const SIGNAL_ACT_IGNORE: u64 = 1; } pub const UNMASKABLE_MASK : u64 = 1 << (Signal::SIGKILL - 1) | 1 << (Signal::SIGSTOP - 1); #[derive(Clone, Copy, Debug)] pub struct SignalSet(pub u64); impl Default for SignalSet { fn default() -> Self { return Self(0) } } impl SignalSet { pub fn New(sig: Signal) -> Self { return SignalSet(1 << sig.Index()) } pub fn Add(&mut self, sig: Signal) { self.0 |= 1 << sig.Index() } pub fn Remove(&mut self, sig: Signal) { self.0 &= !(1 << sig.0) } pub fn TailingZero(&self) -> usize { for i in 0..64 { let idx = 64 - i - 1; if self.0 & (1 << idx) != 0
} return 64 } pub fn MakeSignalSet(sigs: &[Signal]) -> Self { let mut res = Self::default(); for sig in sigs { res.Add(*sig) } return res; } pub fn ForEachSignal(&self, mut f: impl FnMut(Signal)) { for i in 0..64 { if self.0 & (1 << i) != 0 { f(Signal(i as i32 + 1)) } } } } #[derive(Debug, Clone, Default)] pub struct SignalQueue { signals: LinkedList<PendingSignal>, } impl SignalQueue { pub const RT_SIG_CAP: usize = 32; pub fn Len(&mut self) -> u64 { return self.signals.len() as u64; } pub fn Enque(&mut self, info: Box<SignalInfo>, timer: Option<IntervalTimer>) -> bool { if self.signals.len() == Self::RT_SIG_CAP { return false } self.signals.push_back(PendingSignal { sigInfo: info, timer: timer, }); return true } pub fn Deque(&mut self) -> Option<PendingSignal> { return self.signals.pop_front(); } pub fn Clear(&mut self) { self.signals.clear(); } } pub const SIGNAL_COUNT: usize = 64; pub const STD_SIGNAL_COUNT: usize = 31; // 1 ~ 31 pub const RT_SIGNAL_COUNT: usize = 33; // 32 ~ 64 pub const RT_SIGNAL_START: usize = 32; // 32 ~ 64 #[derive(Debug, Clone, Default)] pub struct PendingSignal { pub sigInfo: Box<SignalInfo>, pub timer: Option<IntervalTimer>, } pub struct PendingSignals { pub stdSignals: [Option<PendingSignal>; STD_SIGNAL_COUNT], pub rtSignals: [SignalQueue; RT_SIGNAL_COUNT], pub pendingSet: SignalSet, } impl fmt::Debug for PendingSignals { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PendingSignals") .field("stdSignals", &self.stdSignals) .field("rtSignals0", &self.rtSignals[0]) .field("rtSignals2", &self.rtSignals[32]) .field("pendingSet", &self.pendingSet) .finish() } } impl Default for PendingSignals { fn default() -> Self { return Self { stdSignals : Default::default(), rtSignals : [ SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), ], pendingSet: Default::default(), } } } impl PendingSignals { pub fn Enque(&mut self, info: Box<SignalInfo>, timer: Option<IntervalTimer>) -> Result<bool> { let sig = Signal(info.Signo); if sig.IsStandard() { match &self.stdSignals[sig.Index()] { None => (), _ => return Ok(false), } self.stdSignals[sig.Index()] = Some(PendingSignal { sigInfo: info, timer: timer, }); self.pendingSet.Add(sig); return Ok(true); } else if sig.IsRealtime() { let q = &mut self.rtSignals[sig.Index() - 31]; self.pendingSet.Add(sig); return Ok(q.Enque(info, timer)); } else { return Err(Error::InvalidInput) } } pub fn HasSignal(&self, mask: SignalSet) -> bool { let set = SignalSet(self.pendingSet.0 & !(mask.0)); if set.0 == 0 { return false } return true; } pub fn Deque(&mut self, mask: SignalSet) -> Option<Box<SignalInfo>> { let set = SignalSet(self.pendingSet.0 & !(mask.0)); if set.0 == 0 { return None } let lastOne = set.TailingZero(); if lastOne < STD_SIGNAL_COUNT { self.pendingSet.0 &= !(1 << lastOne); let ps 
= self.stdSignals[lastOne].take(); if let Some(ps) = ps { let mut sigInfo = ps.sigInfo; match ps.timer { None => (), Some(timer) => { timer.lock().updateDequeuedSignalLocked(&mut sigInfo) } } return Some(sigInfo); } else { return None; } } if self.rtSignals[lastOne + 1 - RT_SIGNAL_START].Len() == 1 { self.pendingSet.0 &= !(1 << lastOne); } let ps = self.rtSignals[lastOne + 1 - RT_SIGNAL_START].Deque(); if let Some(ps) = ps { let mut sigInfo = ps.sigInfo; match ps.timer { None => (), Some(timer) => { timer.lock().updateDequeuedSignalLocked(&mut sigInfo) } } return Some(sigInfo); } else { return None; } } pub fn Discard(&mut self, sig: Signal) { self.pendingSet.0 &= !(1 << sig.Index()); if sig.0 <= STD_SIGNAL_COUNT as i32 { self.stdSignals[sig.Index()] = None; return } self.rtSignals[sig.0 as usize - RT_SIGNAL_START].Clear() } } #[derive(Default, Debug)] pub struct SignalStruct { pendingSignals: PendingSignals, signalMask: SignalSet, realSignalMask: SignalSet, //sigtimedwait groupStopPending: bool, groupStopAck: bool, trapStopPending: bool, } // https://elixir.bootlin.com/linux/latest/source/arch/x86/include/uapi/asm/signal.h#L132 #[derive(Debug, Clone, Copy)] #[repr(C)] pub struct SignalStack { pub addr: u64, pub flags: u32, pub size: u64, } impl Default for SignalStack { fn default() -> Self { return Self { addr: 0, flags: Self::FLAG_DISABLE, size: 0, } } } impl SignalStack { pub const FLAG_ON_STACK: u32 = 1; pub const FLAG_DISABLE: u32 = 2; pub fn Contains(&self, sp: u64) -> bool { return self.addr < sp && sp <= self.addr + self.size } pub fn SetOnStack(&mut self) { self.flags |= Self::FLAG_ON_STACK; } pub fn IsEnable(&self) -> bool { return self.flags & Self::FLAG_DISABLE == 0 } pub fn Top(&self) -> u64 { return self.addr + self.size } } pub struct SigHow {} impl SigHow { pub const SIG_BLOCK: u64 = 0; pub const SIG_UNBLOCK: u64 = 1; pub const SIG_SETMASK: u64 = 2; } pub fn SignalInfoPriv(sig: i32) -> SignalInfo { return SignalInfo { Signo: sig, Code: SignalInfo::SIGNAL_INFO_KERNEL, ..Default::default() } } // Sigevent represents struct sigevent. #[repr(C)] #[derive(Default, Copy, Clone)] pub struct Sigevent { pub Value: u64, pub Signo: i32, pub Notify: i32, pub Tid: i32, // struct sigevent here contains 48-byte union _sigev_un. However, only // member _tid is significant to the kernel. pub UnRemainder1: [u8; 32], pub UnRemainder: [u8; 12], } pub const SIGEV_SIGNAL: i32 = 0; pub const SIGEV_NONE: i32 = 1; pub const SIGEV_THREAD: i32 = 2; pub const SIGEV_THREAD_ID: i32 = 4; // copyInSigSetWithSize copies in a structure as below // // struct { // const sigset_t *ss; /* Pointer to signal set */ // size_t ss_len; /* Size (in bytes) of object pointed to by 'ss' */ // }; // // and returns sigset_addr and size. #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct SigMask { pub addr: u64, pub len: usize, } pub fn CopyInSigSetWithSize(task: &Task, addr: u64) -> Result<(u64, usize)> { let mask : SigMask = task.CopyInObj(addr)?; return Ok((mask.addr, mask.len)) } pub const SIGNAL_SET_SIZE: usize = 8; pub fn UnblockableSignals() -> SignalSet { return SignalSet::MakeSignalSet(&[Signal(Signal::SIGKILL), Signal(Signal::SIGSTOP)]); } pub fn CopyInSigSet(task: &Task, sigSetAddr: u64, size: usize) -> Result<SignalSet> { if size != SIGNAL_SET_SIZE { return Err(Error::SysError(SysErr::EINVAL)) } let mask : u64 = task.CopyInObj(sigSetAddr)?; return Ok(SignalSet(mask & !UnblockableSignals().0)) }
{ return idx }
conditional_block
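The conditional completed in the record above belongs to SignalSet::TailingZero, which despite its name scans the 64-bit pending mask from bit 63 downward and returns the index of the highest set bit, or 64 when the mask is empty. The sketch below restates that scan in JavaScript, kept in the same language as the other examples in this section, with BigInt standing in for the Rust u64; it is an illustration of the same idea, not the kernel code itself.

    // Sketch only: highest-set-bit scan over a 64-bit mask, as in TailingZero().
    function highestSetBit(mask64) {
        for (let idx = 63n; idx >= 0n; idx--) {
            if ((mask64 & (1n << idx)) !== 0n) {
                return Number(idx);
            }
        }
        return 64; // empty mask
    }

    // A pending set with SIGKILL (signal 9, bit index 8) and SIGTERM (signal 15,
    // bit index 14) queued reports bit 14 first:
    // highestSetBit((1n << 8n) | (1n << 14n)) === 14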
SignalDef.rs
// Copyright (c) 2021 Quark Container Authors / 2018 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use core::fmt; use alloc::collections::linked_list::LinkedList; use alloc::boxed::Box; use super::qlib::common::*; use super::qlib::linux_def::*; use super::kernel::posixtimer::*; use super::task::*; #[repr(C)] #[derive(Debug, Copy, Clone, Default)] //copy from https://elixir.bootlin.com/linux/latest/source/arch/x86/include/uapi/asm/ptrace.h#L18 pub struct PtRegs { /* * C ABI says these regs are callee-preserved. They aren't saved on kernel entry * unless syscall needs a complete, fully filled "struct pt_regs". */ pub r15: u64, pub r14: u64, pub r13: u64, pub r12: u64, pub rbp: u64, pub rbx: u64, /* These regs are callee-clobbered. Always saved on kernel entry. */ pub r11: u64, pub r10: u64, pub r9: u64, pub r8: u64, pub rax: u64, pub rcx: u64, pub rdx: u64, pub rsi: u64, pub rdi: u64, /* * On syscall entry, this is syscall#. On CPU exception, this is error code. * On hw interrupt, it's IRQ number: */ pub orig_rax: u64, /* Return frame for iretq */ pub rip: u64, pub cs: u64, pub eflags: u64, pub rsp: u64, pub ss: u64, /* top of stack page */ } impl PtRegs { pub fn Set(&mut self, ctx: &SigContext) { self.r15 = ctx.r15; self.r14 = ctx.r14; self.r13 = ctx.r13; self.r12 = ctx.r12; self.rbp = ctx.rbp; self.rbx = ctx.rbx; self.r11 = ctx.r11; self.r10 = ctx.r10; self.r9 = ctx.r9; self.r8 = ctx.r8; self.rax = ctx.rax; self.rcx = ctx.rcx; self.rdx = ctx.rdx; self.rsi = ctx.rsi; self.rdi = ctx.rdi; self.orig_rax = ctx.rax; self.rip = ctx.rip; self.cs = ctx.cs as u64; self.eflags = ctx.eflags; self.rsp = ctx.rsp; self.ss = ctx.ss as u64; } } #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct SigRetInfo { pub sigInfoAddr: u64, pub sigCtxAddr: u64, pub ret: u64, } #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct Kill { pub pid: i32, pub uid: i32, } #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct SigTimer { pub tid: i32, pub overrun: i32, pub sigval: u64, pub sysPrivate: i32, } #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct SigRt { pub pid: i32, pub uid: u32, pub sigval: u64, } #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct SigChld { pub pid: i32, //child pub uid: u32, //sender's uid pub status: i32, //Exit code pub uTime: i32, pub sTime: i32, } #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct SigFault { pub addr: u64, pub lsb: u16, } #[repr(C)] #[derive(Copy, Clone)] pub struct SignalInfo { pub Signo: i32, // Signal number pub Errno: i32, // Errno value pub Code: i32, // Signal code pub _r: u32, pub fields: [u8; 128 - 16], } impl<'a> Default for SignalInfo { fn default() -> Self { return Self { Signo: 0, Errno: 0, Code: 0, _r: 0, fields: [0; 128 - 16] } } } impl core::fmt::Debug for SignalInfo { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SignalInfo") .field("Signo", &self.Signo) .field("Errno", &self.Errno) .field("Code", &self.Code) .finish() } } impl SignalInfo { pub fn 
SignalInfoPriv(sig: Signal) -> Self { return Self { Signo: sig.0, Code: Self::SIGNAL_INFO_KERNEL, ..Default::default() } } // FixSignalCodeForUser fixes up si_code. // // The si_code we get from Linux may contain the kernel-specific code in the // top 16 bits if it's positive (e.g., from ptrace). Linux's // copy_siginfo_to_user does // err |= __put_user((short)from->si_code, &to->si_code); // to mask out those bits and we need to do the same. pub fn FixSignalCodeForUser(&mut self) { if self.Code > 0 { self.Code &= 0xffff; } } pub fn Kill(&self) -> &mut Kill { let addr = &self.fields[0] as *const _ as u64; return unsafe { &mut *(addr as *mut Kill) } } pub fn SigTimer(&mut self) -> &mut SigTimer { let addr = &self.fields[0] as *const _ as u64; return unsafe { &mut *(addr as *mut SigTimer) } } pub fn SigRt(&mut self) -> &mut SigRt { let addr = &self.fields[0] as *const _ as u64; return unsafe { &mut *(addr as *mut SigRt) } } pub fn SigChld(&mut self) -> &mut SigChld { let addr = &self.fields[0] as *const _ as u64; return unsafe { &mut *(addr as *mut SigChld) } } pub fn SigFault(&self) -> &mut SigFault { let addr = &self.fields[0] as *const _ as u64; return unsafe { &mut *(addr as *mut SigFault) } } // SignalInfoUser (properly SI_USER) indicates that a signal was sent from // a kill() or raise() syscall. pub const SIGNAL_INFO_USER: i32 = 0; // SignalInfoKernel (properly SI_KERNEL) indicates that the signal was sent // by the kernel. pub const SIGNAL_INFO_KERNEL: i32 = 0x80; // SignalInfoTimer (properly SI_TIMER) indicates that the signal was sent // by an expired timer. pub const SIGNAL_INFO_TIMER: i32 = -2; // SignalInfoTkill (properly SI_TKILL) indicates that the signal was sent // from a tkill() or tgkill() syscall. pub const SIGNAL_INFO_TKILL: i32 = -6; // CLD_* codes are only meaningful for SIGCHLD. // CLD_EXITED indicates that a task exited. pub const CLD_EXITED: i32 = 1; // CLD_KILLED indicates that a task was killed by a signal. pub const CLD_KILLED: i32 = 2; // CLD_DUMPED indicates that a task was killed by a signal and then dumped // core. pub const CLD_DUMPED: i32 = 3; // CLD_TRAPPED indicates that a task was stopped by ptrace. pub const CLD_TRAPPED: i32 = 4; // CLD_STOPPED indicates that a thread group completed a group stop. pub const CLD_STOPPED: i32 = 5; // CLD_CONTINUED indicates that a group-stopped thread group was continued. pub const CLD_CONTINUED: i32 = 6; // SYS_* codes are only meaningful for SIGSYS. // SYS_SECCOMP indicates that a signal originates from seccomp. pub const SYS_SECCOMP: i32 = 1; // TRAP_* codes are only meaningful for SIGTRAP. // TRAP_BRKPT indicates a breakpoint trap. 
pub const TRAP_BRKPT: i32 = 1; } pub const UC_FP_XSTATE: u64 = 1; pub const UC_SIGCONTEXT_SS: u64 = 2; pub const UC_STRICT_RESTORE_SS: u64 = 4; // https://elixir.bootlin.com/linux/latest/source/include/uapi/asm-generic/ucontext.h#L5 #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct UContext { pub Flags: u64, pub Link: u64, pub Stack: SignalStack, pub MContext: SigContext, pub Sigset: u64, } impl UContext { pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64, alt: &SignalStack) -> Self { return Self { Flags: 2, Link: 0, Stack: alt.clone(), MContext: SigContext::New(ptRegs, oldMask, cr2, fpstate), Sigset: 0, } } } // https://elixir.bootlin.com/linux/latest/source/arch/x86/include/uapi/asm/sigcontext.h#L284 #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct SigContext { pub r8: u64, pub r9: u64, pub r10: u64, pub r11: u64, pub r12: u64, pub r13: u64, pub r14: u64, pub r15: u64, pub rdi: u64, pub rsi: u64, pub rbp: u64, pub rbx: u64, pub rdx: u64, pub rax: u64, pub rcx: u64, pub rsp: u64, pub rip: u64, pub eflags: u64, pub cs: u16, pub gs: u16, // always 0 on amd64. pub fs: u16, // always 0 on amd64. pub ss: u16, // only restored if _UC_STRICT_RESTORE_SS (unsupported). pub err: u64, pub trapno: u64, pub oldmask: u64, pub cr2: u64, // Pointer to a struct _fpstate. pub fpstate: u64, pub reserved: [u64; 8], } impl SigContext { pub fn New(ptRegs: &PtRegs, oldMask: u64, cr2: u64, fpstate: u64) -> Self { return Self { r8: ptRegs.r8, r9: ptRegs.r9, r10: ptRegs.r10, r11: ptRegs.r11, r12: ptRegs.r12, r13: ptRegs.r13, r14: ptRegs.r14, r15: ptRegs.r15, rdi: ptRegs.rdi, rsi: ptRegs.rsi, rbp: ptRegs.rbp, rbx: ptRegs.rbx, rdx: ptRegs.rdx, rax: ptRegs.rax, rcx: ptRegs.rcx, rsp: ptRegs.rsp, rip: ptRegs.rip, eflags: ptRegs.eflags, cs: ptRegs.cs as u16, gs: 0, fs: 0, ss: ptRegs.ss as u16, err: 0, trapno: 0, oldmask: oldMask, cr2: cr2, fpstate: fpstate, ..Default::default() } } } #[repr(C)] #[derive(Debug, Copy, Clone, Default)] pub struct SigFlag(pub u64); impl SigFlag { pub const SIGNAL_FLAG_NO_CLD_STOP: u64 = 0x00000001; pub const SIGNAL_FLAG_NO_CLD_WAIT: u64 = 0x00000002; pub const SIGNAL_FLAG_SIG_INFO: u64 = 0x00000004; pub const SIGNAL_FLAG_RESTORER: u64 = 0x04000000; pub const SIGNAL_FLAG_ON_STACK: u64 = 0x08000000; pub const SIGNAL_FLAG_RESTART: u64 = 0x10000000; pub const SIGNAL_FLAG_INTERRUPT: u64 = 0x20000000; pub const SIGNAL_FLAG_NO_DEFER: u64 = 0x40000000; pub const SIGNAL_FLAG_RESET_HANDLER: u64 = 0x80000000; pub fn IsNoCldStop(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP != 0; } pub fn IsNoCldWait(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_NO_CLD_WAIT != 0; } pub fn IsSigInfo(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_SIG_INFO != 0; } pub fn IsNoDefer(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_NO_DEFER != 0; } pub fn IsRestart(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_RESTART != 0; } pub fn IsResetHandler(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_RESET_HANDLER != 0; } pub fn IsOnStack(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_ON_STACK != 0; } pub fn HasRestorer(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_RESTORER != 0; } pub fn IsNoChildStop(&self) -> bool { return self.0 & Self::SIGNAL_FLAG_NO_CLD_STOP != 0 } } // https://github.com/lattera/glibc/blob/master/sysdeps/unix/sysv/linux/kernel_sigaction.h #[derive(Copy, Clone, Default)] #[repr(C)] pub struct SigAct { pub handler: u64, pub flags: SigFlag, pub restorer: u64, pub mask: u64, } impl fmt::Debug for SigAct { fn fmt(&self, f: &mut 
fmt::Formatter<'_>) -> fmt::Result { write!(f, "SigAction {{ \n\ handler: {:x}, \n\ flag : {:x}, \n \ flags::HasRestorer: {}, \n \ flags::IsOnStack: {}, \n \ flags::IsRestart: {}, \n \ flags::IsResetHandler: {}, \n \ flags::IsNoDefer: {}, \n \ flags::IsSigInfo: {}, \n \ restorer : {:x}, \n\ mask: {:x}, \n}}", self.handler, self.flags.0, self.flags.HasRestorer(), self.flags.IsOnStack(), self.flags.IsRestart(), self.flags.IsResetHandler(), self.flags.IsNoDefer(), self.flags.IsSigInfo(), self.restorer, self.mask ) } } impl SigAct { // SignalActDefault is SIG_DFL and specifies that the default behavior for // a signal should be taken. pub const SIGNAL_ACT_DEFAULT: u64 = 0; // SignalActIgnore is SIG_IGN and specifies that a signal should be // ignored. pub const SIGNAL_ACT_IGNORE: u64 = 1; } pub const UNMASKABLE_MASK : u64 = 1 << (Signal::SIGKILL - 1) | 1 << (Signal::SIGSTOP - 1); #[derive(Clone, Copy, Debug)] pub struct SignalSet(pub u64); impl Default for SignalSet { fn default() -> Self { return Self(0) } } impl SignalSet { pub fn New(sig: Signal) -> Self { return SignalSet(1 << sig.Index()) } pub fn Add(&mut self, sig: Signal) { self.0 |= 1 << sig.Index() } pub fn Remove(&mut self, sig: Signal) { self.0 &= !(1 << sig.0) } pub fn TailingZero(&self) -> usize { for i in 0..64 { let idx = 64 - i - 1; if self.0 & (1 << idx) != 0 { return idx } } return 64 } pub fn MakeSignalSet(sigs: &[Signal]) -> Self { let mut res = Self::default(); for sig in sigs { res.Add(*sig) } return res; } pub fn ForEachSignal(&self, mut f: impl FnMut(Signal)) { for i in 0..64 { if self.0 & (1 << i) != 0 { f(Signal(i as i32 + 1)) } } } } #[derive(Debug, Clone, Default)] pub struct SignalQueue { signals: LinkedList<PendingSignal>, } impl SignalQueue { pub const RT_SIG_CAP: usize = 32; pub fn Len(&mut self) -> u64 { return self.signals.len() as u64; } pub fn Enque(&mut self, info: Box<SignalInfo>, timer: Option<IntervalTimer>) -> bool { if self.signals.len() == Self::RT_SIG_CAP { return false } self.signals.push_back(PendingSignal { sigInfo: info, timer: timer, }); return true } pub fn Deque(&mut self) -> Option<PendingSignal> { return self.signals.pop_front(); } pub fn Clear(&mut self) { self.signals.clear(); } } pub const SIGNAL_COUNT: usize = 64; pub const STD_SIGNAL_COUNT: usize = 31; // 1 ~ 31 pub const RT_SIGNAL_COUNT: usize = 33; // 32 ~ 64 pub const RT_SIGNAL_START: usize = 32; // 32 ~ 64 #[derive(Debug, Clone, Default)] pub struct PendingSignal { pub sigInfo: Box<SignalInfo>, pub timer: Option<IntervalTimer>, } pub struct PendingSignals { pub stdSignals: [Option<PendingSignal>; STD_SIGNAL_COUNT], pub rtSignals: [SignalQueue; RT_SIGNAL_COUNT], pub pendingSet: SignalSet, } impl fmt::Debug for PendingSignals { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("PendingSignals") .field("stdSignals", &self.stdSignals) .field("rtSignals0", &self.rtSignals[0]) .field("rtSignals2", &self.rtSignals[32]) .field("pendingSet", &self.pendingSet) .finish() } } impl Default for PendingSignals { fn default() -> Self { return Self { stdSignals : Default::default(), rtSignals : [ SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), 
SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), SignalQueue::default(), ], pendingSet: Default::default(), } } } impl PendingSignals { pub fn Enque(&mut self, info: Box<SignalInfo>, timer: Option<IntervalTimer>) -> Result<bool> { let sig = Signal(info.Signo); if sig.IsStandard() { match &self.stdSignals[sig.Index()] { None => (), _ => return Ok(false), } self.stdSignals[sig.Index()] = Some(PendingSignal { sigInfo: info, timer: timer, }); self.pendingSet.Add(sig); return Ok(true); } else if sig.IsRealtime() { let q = &mut self.rtSignals[sig.Index() - 31]; self.pendingSet.Add(sig); return Ok(q.Enque(info, timer)); } else { return Err(Error::InvalidInput) } } pub fn HasSignal(&self, mask: SignalSet) -> bool { let set = SignalSet(self.pendingSet.0 & !(mask.0)); if set.0 == 0 { return false } return true; } pub fn Deque(&mut self, mask: SignalSet) -> Option<Box<SignalInfo>> { let set = SignalSet(self.pendingSet.0 & !(mask.0)); if set.0 == 0 { return None } let lastOne = set.TailingZero(); if lastOne < STD_SIGNAL_COUNT { self.pendingSet.0 &= !(1 << lastOne); let ps = self.stdSignals[lastOne].take(); if let Some(ps) = ps { let mut sigInfo = ps.sigInfo; match ps.timer { None => (), Some(timer) => { timer.lock().updateDequeuedSignalLocked(&mut sigInfo) } } return Some(sigInfo); } else { return None; } } if self.rtSignals[lastOne + 1 - RT_SIGNAL_START].Len() == 1 { self.pendingSet.0 &= !(1 << lastOne); } let ps = self.rtSignals[lastOne + 1 - RT_SIGNAL_START].Deque(); if let Some(ps) = ps { let mut sigInfo = ps.sigInfo; match ps.timer { None => (), Some(timer) => { timer.lock().updateDequeuedSignalLocked(&mut sigInfo) } } return Some(sigInfo); } else { return None; } } pub fn Discard(&mut self, sig: Signal) { self.pendingSet.0 &= !(1 << sig.Index()); if sig.0 <= STD_SIGNAL_COUNT as i32 { self.stdSignals[sig.Index()] = None; return } self.rtSignals[sig.0 as usize - RT_SIGNAL_START].Clear() } } #[derive(Default, Debug)] pub struct SignalStruct { pendingSignals: PendingSignals, signalMask: SignalSet, realSignalMask: SignalSet, //sigtimedwait groupStopPending: bool, groupStopAck: bool, trapStopPending: bool, } // https://elixir.bootlin.com/linux/latest/source/arch/x86/include/uapi/asm/signal.h#L132 #[derive(Debug, Clone, Copy)] #[repr(C)] pub struct SignalStack { pub addr: u64, pub flags: u32, pub size: u64, } impl Default for SignalStack { fn default() -> Self { return Self { addr: 0, flags: Self::FLAG_DISABLE, size: 0, } } } impl SignalStack { pub const FLAG_ON_STACK: u32 = 1; pub const FLAG_DISABLE: u32 = 2; pub fn Contains(&self, sp: u64) -> bool { return self.addr < sp && sp <= self.addr + self.size } pub fn SetOnStack(&mut self) { self.flags |= Self::FLAG_ON_STACK; } pub fn IsEnable(&self) -> bool { return self.flags & Self::FLAG_DISABLE == 0 } pub fn Top(&self) -> u64 { return self.addr + self.size } } pub struct SigHow {} impl SigHow { pub const SIG_BLOCK: u64 = 0; pub const SIG_UNBLOCK: u64 = 1; pub const SIG_SETMASK: u64 = 2; } pub fn SignalInfoPriv(sig: i32) -> SignalInfo { return SignalInfo { Signo: sig, Code: SignalInfo::SIGNAL_INFO_KERNEL, ..Default::default() } } // Sigevent represents struct sigevent. 
#[repr(C)] #[derive(Default, Copy, Clone)] pub struct Sigevent { pub Value: u64, pub Signo: i32, pub Notify: i32, pub Tid: i32, // struct sigevent here contains 48-byte union _sigev_un. However, only // member _tid is significant to the kernel. pub UnRemainder1: [u8; 32], pub UnRemainder: [u8; 12], } pub const SIGEV_SIGNAL: i32 = 0; pub const SIGEV_NONE: i32 = 1; pub const SIGEV_THREAD: i32 = 2; pub const SIGEV_THREAD_ID: i32 = 4; // copyInSigSetWithSize copies in a structure as below // // struct { // const sigset_t *ss; /* Pointer to signal set */ // size_t ss_len; /* Size (in bytes) of object pointed to by 'ss' */ // }; // // and returns sigset_addr and size. #[repr(C)] #[derive(Debug, Default, Copy, Clone)] pub struct SigMask { pub addr: u64, pub len: usize, } pub fn CopyInSigSetWithSize(task: &Task, addr: u64) -> Result<(u64, usize)>
{ let mask : SigMask = task.CopyInObj(addr)?; return Ok((mask.addr, mask.len)) }

pub const SIGNAL_SET_SIZE: usize = 8;

pub fn UnblockableSignals() -> SignalSet { return SignalSet::MakeSignalSet(&[Signal(Signal::SIGKILL), Signal(Signal::SIGSTOP)]); }

pub fn CopyInSigSet(task: &Task, sigSetAddr: u64, size: usize) -> Result<SignalSet> { if size != SIGNAL_SET_SIZE { return Err(Error::SysError(SysErr::EINVAL)) } let mask : u64 = task.CopyInObj(sigSetAddr)?; return Ok(SignalSet(mask & !UnblockableSignals().0)) }
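The SignalSet / PendingSignals machinery above reduces to bit-mask arithmetic: signal N occupies bit (N - 1) (Signal::Index), a blocked mask is subtracted with `pendingSet & !mask`, Deque picks the highest set bit via TailingZero, and CopyInSigSet strips SIGKILL/SIGSTOP with `mask & !UnblockableSignals().0` before a mask is ever installed. Below is a minimal, self-contained sketch of that selection logic. MiniSignalSet is a hypothetical standalone mirror written for illustration, not the crate's actual type; the signal numbers 9 (SIGKILL) and 19 (SIGSTOP) are assumed x86-64 Linux values.

// Standalone sketch mirroring the bit-mask logic of SignalSet / PendingSignals above.
const SIGKILL: u64 = 9;  // assumed Linux signal number
const SIGSTOP: u64 = 19; // assumed Linux signal number

#[derive(Clone, Copy, Debug, Default)]
struct MiniSignalSet(u64);

impl MiniSignalSet {
    fn add(&mut self, signo: u64) {
        // Signal `signo` occupies bit (signo - 1), matching Signal::Index().
        self.0 |= 1 << (signo - 1);
    }

    fn deliverable(&self, blocked: MiniSignalSet) -> Option<u64> {
        // pending & !blocked, then take the highest set bit,
        // as PendingSignals::Deque does via TailingZero().
        let set = self.0 & !blocked.0;
        if set == 0 {
            return None;
        }
        Some(63 - u64::from(set.leading_zeros()) + 1)
    }
}

fn main() {
    let mut pending = MiniSignalSet::default();
    pending.add(SIGKILL);
    pending.add(2); // SIGINT

    // A user-supplied mask is always stripped of SIGKILL/SIGSTOP,
    // mirroring CopyInSigSet's `mask & !UnblockableSignals().0`.
    let mut blocked = MiniSignalSet::default();
    blocked.add(2);
    blocked.0 &= !((1u64 << (SIGKILL - 1)) | (1u64 << (SIGSTOP - 1)));

    // SIGINT is blocked, so SIGKILL (9) is the next deliverable signal.
    assert_eq!(pending.deliverable(blocked), Some(SIGKILL));
    println!("next deliverable signal: {:?}", pending.deliverable(blocked));
}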
api-put-object-multipart.go
/* * MinIO Go Library for Amazon S3 Compatible Cloud Storage * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package minio import ( "bytes" "context" "encoding/base64" "encoding/hex" "encoding/xml" "fmt" "hash/crc32" "io" "net/http" "net/url" "sort" "strconv" "strings" "github.com/google/uuid" "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/minio-go/v7/pkg/s3utils" ) func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions, ) (info UploadInfo, err error) { info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) if err != nil { errResp := ToErrorResponse(err) // Verify if multipart functionality is not available, if not // fall back to single PutObject operation. if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { // Verify if size of reader is greater than '5GiB'. if size > maxSinglePutObjectSize { return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) } // Fall back to uploading as single PutObject operation. return c.putObject(ctx, bucketName, objectName, reader, size, opts) } } return info, err } func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err } if err = s3utils.CheckValidObjectName(objectName); err != nil { return UploadInfo{}, err } // Total data read and written to server. should be equal to // 'size' at the end of the call. var totalUploadedSize int64 // Complete multipart upload. var complMultipartUpload completeMultipartUpload // Calculate the optimal parts info for a given size. totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) if err != nil { return UploadInfo{}, err } // Choose hash algorithms to be calculated by hashCopyN, // avoid sha256 with non-v4 signature request or // HTTPS connection. hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256) if len(hashSums) == 0
// Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return UploadInfo{}, err } delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") defer func() { if err != nil { c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) } }() // Part number always starts with '1'. partNumber := 1 // Initialize parts uploaded map. partsInfo := make(map[int]ObjectPart) // Create a buffer. buf := make([]byte, partSize) // Create checksums // CRC32C is ~50% faster on AMD64 @ 30GB/s var crcBytes []byte customHeader := make(http.Header) crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) for partNumber <= totalPartsCount { length, rErr := readFull(reader, buf) if rErr == io.EOF && partNumber > 1 { break } if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { return UploadInfo{}, rErr } // Calculates hash sums while copying partSize bytes into cw. for k, v := range hashAlgos { v.Write(buf[:length]) hashSums[k] = v.Sum(nil) v.Close() } // Update progress reader appropriately to the latest offset // as we read from the source. rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) // Checksums.. var ( md5Base64 string sha256Hex string ) if hashSums["md5"] != nil { md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) } if hashSums["sha256"] != nil { sha256Hex = hex.EncodeToString(hashSums["sha256"]) } if len(hashSums) == 0 { crc.Reset() crc.Write(buf[:length]) cSum := crc.Sum(nil) customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) crcBytes = append(crcBytes, cSum...) } p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} // Proceed to upload the part. objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } // Save successfully uploaded part metadata. partsInfo[partNumber] = objPart // Save successfully uploaded size. totalUploadedSize += int64(length) // Increment part number. partNumber++ // For unknown size, Read EOF we break away. // We do not have to upload till totalPartsCount. if rErr == io.EOF { break } } // Loop over total uploaded parts to save them in // Parts array before completing the multipart request. for i := 1; i < partNumber; i++ { part, ok := partsInfo[i] if !ok { return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) } complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ ETag: part.ETag, PartNumber: part.PartNumber, ChecksumCRC32: part.ChecksumCRC32, ChecksumCRC32C: part.ChecksumCRC32C, ChecksumSHA1: part.ChecksumSHA1, ChecksumSHA256: part.ChecksumSHA256, }) } // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) opts = PutObjectOptions{ ServerSideEncryption: opts.ServerSideEncryption, } if len(crcBytes) > 0 { // Add hash of hashes. crc.Reset() crc.Write(crcBytes) opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} } uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { return UploadInfo{}, err } uploadInfo.Size = totalUploadedSize return uploadInfo, nil } // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. 
func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return initiateMultipartUploadResult{}, err } if err := s3utils.CheckValidObjectName(objectName); err != nil { return initiateMultipartUploadResult{}, err } // Initialize url queries. urlValues := make(url.Values) urlValues.Set("uploads", "") if opts.Internal.SourceVersionID != "" { if opts.Internal.SourceVersionID != nullVersionID { if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { return initiateMultipartUploadResult{}, errInvalidArgument(err.Error()) } } urlValues.Set("versionId", opts.Internal.SourceVersionID) } // Set ContentType header. customHeader := opts.Header() reqMetadata := requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: urlValues, customHeader: customHeader, } // Execute POST on an objectName to initiate multipart upload. resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) defer closeResponse(resp) if err != nil { return initiateMultipartUploadResult{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) } } // Decode xml for new multipart upload. initiateMultipartUploadResult := initiateMultipartUploadResult{} err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) if err != nil { return initiateMultipartUploadResult, err } return initiateMultipartUploadResult, nil } type uploadPartParams struct { bucketName string objectName string uploadID string reader io.Reader partNumber int md5Base64 string sha256Hex string size int64 sse encrypt.ServerSide streamSha256 bool customHeader http.Header trailer http.Header } // uploadPart - Uploads a part in a multipart upload. func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) { // Input validation. if err := s3utils.CheckValidBucketName(p.bucketName); err != nil { return ObjectPart{}, err } if err := s3utils.CheckValidObjectName(p.objectName); err != nil { return ObjectPart{}, err } if p.size > maxPartSize { return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName) } if p.size <= -1 { return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName) } if p.partNumber <= 0 { return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") } if p.uploadID == "" { return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") } // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set part number. urlValues.Set("partNumber", strconv.Itoa(p.partNumber)) // Set upload id. urlValues.Set("uploadId", p.uploadID) // Set encryption headers, if any. if p.customHeader == nil { p.customHeader = make(http.Header) } // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html // Server-side encryption is supported by the S3 Multipart Upload actions. // Unless you are using a customer-provided encryption key, you don't need // to specify the encryption parameters in each UploadPart request. 
if p.sse != nil && p.sse.Type() == encrypt.SSEC { p.sse.Marshal(p.customHeader) } reqMetadata := requestMetadata{ bucketName: p.bucketName, objectName: p.objectName, queryValues: urlValues, customHeader: p.customHeader, contentBody: p.reader, contentLength: p.size, contentMD5Base64: p.md5Base64, contentSHA256Hex: p.sha256Hex, streamSha256: p.streamSha256, trailer: p.trailer, } // Execute PUT on each part. resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) defer closeResponse(resp) if err != nil { return ObjectPart{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName) } } // Once successfully uploaded, return completed part. h := resp.Header objPart := ObjectPart{ ChecksumCRC32: h.Get("x-amz-checksum-crc32"), ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), ChecksumSHA1: h.Get("x-amz-checksum-sha1"), ChecksumSHA256: h.Get("x-amz-checksum-sha256"), } objPart.Size = p.size objPart.PartNumber = p.partNumber // Trim off the odd double quotes from ETag in the beginning and end. objPart.ETag = trimEtag(h.Get("ETag")) return objPart, nil } // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, complete completeMultipartUpload, opts PutObjectOptions, ) (UploadInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err } if err := s3utils.CheckValidObjectName(objectName); err != nil { return UploadInfo{}, err } // Initialize url queries. urlValues := make(url.Values) urlValues.Set("uploadId", uploadID) // Marshal complete multipart body. completeMultipartUploadBytes, err := xml.Marshal(complete) if err != nil { return UploadInfo{}, err } headers := opts.Header() if s3utils.IsAmazonEndpoint(*c.endpointURL) { headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload } // Instantiate all the complete multipart buffer. completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) reqMetadata := requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: urlValues, contentBody: completeMultipartUploadBuffer, contentLength: int64(len(completeMultipartUploadBytes)), contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), customHeader: headers, } // Execute POST to complete multipart upload for an objectName. resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) defer closeResponse(resp) if err != nil { return UploadInfo{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) } } // Read resp.Body into a []bytes to parse for Error response inside the body var b []byte b, err = io.ReadAll(resp.Body) if err != nil { return UploadInfo{}, err } // Decode completed multipart upload response on success. 
completeMultipartUploadResult := completeMultipartUploadResult{}
	err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
	if err != nil {
		// xml parsing failure due to the presence of an ill-formed xml fragment
		return UploadInfo{}, err
	} else if completeMultipartUploadResult.Bucket == "" {
		// xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied.
		// In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
		// of the members.

		// Decode completed multipart upload response on failure
		completeMultipartUploadErr := ErrorResponse{}
		err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
		if err != nil {
			// xml parsing failure due to the presence of an ill-formed xml fragment
			return UploadInfo{}, err
		}
		return UploadInfo{}, completeMultipartUploadErr
	}

	// extract lifecycle expiry date and rule ID
	expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))

	return UploadInfo{
		Bucket:           completeMultipartUploadResult.Bucket,
		Key:              completeMultipartUploadResult.Key,
		ETag:             trimEtag(completeMultipartUploadResult.ETag),
		VersionID:        resp.Header.Get(amzVersionID),
		Location:         completeMultipartUploadResult.Location,
		Expiration:       expTime,
		ExpirationRuleID: ruleID,

		ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256,
		ChecksumSHA1:   completeMultipartUploadResult.ChecksumSHA1,
		ChecksumCRC32:  completeMultipartUploadResult.ChecksumCRC32,
		ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C,
	}, nil
}
{ if opts.UserMetadata == nil { opts.UserMetadata = make(map[string]string, 1) } opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" }
conditional_block
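The elided conditional block in this example is what advertises CRC32C part checksums via the "X-Amz-Checksum-Algorithm" metadata when neither MD5 nor SHA-256 sums are requested. As a hedged illustration of how a caller reaches this code through the public minio-go v7 API, here is a minimal upload sketch; the endpoint, credentials, bucket, object, and file names are placeholders rather than anything from the source, and whether a given call actually lands in the multipart helper above depends on the object size, reader type, and options.

package main

import (
	"context"
	"log"
	"os"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials, for illustration only.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Open("large-file.bin") // hypothetical local file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Size -1 marks the length as unknown, so the client uploads in parts.
	// With SendContentMd5 left false, the multipart path can fall back to
	// CRC32C part checksums, which is what the elided block above enables.
	info, err := client.PutObject(context.Background(), "my-bucket", "my-object", f, -1,
		minio.PutObjectOptions{ContentType: "application/octet-stream"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded %s (etag=%s, %d bytes)", info.Key, info.ETag, info.Size)
}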
api-put-object-multipart.go
/* * MinIO Go Library for Amazon S3 Compatible Cloud Storage * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package minio import ( "bytes" "context" "encoding/base64" "encoding/hex" "encoding/xml" "fmt" "hash/crc32" "io" "net/http" "net/url" "sort" "strconv" "strings" "github.com/google/uuid" "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/minio-go/v7/pkg/s3utils" ) func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions, ) (info UploadInfo, err error) { info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) if err != nil { errResp := ToErrorResponse(err) // Verify if multipart functionality is not available, if not // fall back to single PutObject operation. if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { // Verify if size of reader is greater than '5GiB'. if size > maxSinglePutObjectSize { return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) } // Fall back to uploading as single PutObject operation. return c.putObject(ctx, bucketName, objectName, reader, size, opts) } } return info, err } func (c *Client)
(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err } if err = s3utils.CheckValidObjectName(objectName); err != nil { return UploadInfo{}, err } // Total data read and written to server. should be equal to // 'size' at the end of the call. var totalUploadedSize int64 // Complete multipart upload. var complMultipartUpload completeMultipartUpload // Calculate the optimal parts info for a given size. totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) if err != nil { return UploadInfo{}, err } // Choose hash algorithms to be calculated by hashCopyN, // avoid sha256 with non-v4 signature request or // HTTPS connection. hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256) if len(hashSums) == 0 { if opts.UserMetadata == nil { opts.UserMetadata = make(map[string]string, 1) } opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" } // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return UploadInfo{}, err } delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") defer func() { if err != nil { c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) } }() // Part number always starts with '1'. partNumber := 1 // Initialize parts uploaded map. partsInfo := make(map[int]ObjectPart) // Create a buffer. buf := make([]byte, partSize) // Create checksums // CRC32C is ~50% faster on AMD64 @ 30GB/s var crcBytes []byte customHeader := make(http.Header) crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) for partNumber <= totalPartsCount { length, rErr := readFull(reader, buf) if rErr == io.EOF && partNumber > 1 { break } if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { return UploadInfo{}, rErr } // Calculates hash sums while copying partSize bytes into cw. for k, v := range hashAlgos { v.Write(buf[:length]) hashSums[k] = v.Sum(nil) v.Close() } // Update progress reader appropriately to the latest offset // as we read from the source. rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) // Checksums.. var ( md5Base64 string sha256Hex string ) if hashSums["md5"] != nil { md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) } if hashSums["sha256"] != nil { sha256Hex = hex.EncodeToString(hashSums["sha256"]) } if len(hashSums) == 0 { crc.Reset() crc.Write(buf[:length]) cSum := crc.Sum(nil) customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) crcBytes = append(crcBytes, cSum...) } p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} // Proceed to upload the part. objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } // Save successfully uploaded part metadata. partsInfo[partNumber] = objPart // Save successfully uploaded size. totalUploadedSize += int64(length) // Increment part number. partNumber++ // For unknown size, Read EOF we break away. // We do not have to upload till totalPartsCount. if rErr == io.EOF { break } } // Loop over total uploaded parts to save them in // Parts array before completing the multipart request. 
for i := 1; i < partNumber; i++ { part, ok := partsInfo[i] if !ok { return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) } complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ ETag: part.ETag, PartNumber: part.PartNumber, ChecksumCRC32: part.ChecksumCRC32, ChecksumCRC32C: part.ChecksumCRC32C, ChecksumSHA1: part.ChecksumSHA1, ChecksumSHA256: part.ChecksumSHA256, }) } // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) opts = PutObjectOptions{ ServerSideEncryption: opts.ServerSideEncryption, } if len(crcBytes) > 0 { // Add hash of hashes. crc.Reset() crc.Write(crcBytes) opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} } uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { return UploadInfo{}, err } uploadInfo.Size = totalUploadedSize return uploadInfo, nil } // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return initiateMultipartUploadResult{}, err } if err := s3utils.CheckValidObjectName(objectName); err != nil { return initiateMultipartUploadResult{}, err } // Initialize url queries. urlValues := make(url.Values) urlValues.Set("uploads", "") if opts.Internal.SourceVersionID != "" { if opts.Internal.SourceVersionID != nullVersionID { if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { return initiateMultipartUploadResult{}, errInvalidArgument(err.Error()) } } urlValues.Set("versionId", opts.Internal.SourceVersionID) } // Set ContentType header. customHeader := opts.Header() reqMetadata := requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: urlValues, customHeader: customHeader, } // Execute POST on an objectName to initiate multipart upload. resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) defer closeResponse(resp) if err != nil { return initiateMultipartUploadResult{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) } } // Decode xml for new multipart upload. initiateMultipartUploadResult := initiateMultipartUploadResult{} err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) if err != nil { return initiateMultipartUploadResult, err } return initiateMultipartUploadResult, nil } type uploadPartParams struct { bucketName string objectName string uploadID string reader io.Reader partNumber int md5Base64 string sha256Hex string size int64 sse encrypt.ServerSide streamSha256 bool customHeader http.Header trailer http.Header } // uploadPart - Uploads a part in a multipart upload. func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) { // Input validation. 
if err := s3utils.CheckValidBucketName(p.bucketName); err != nil { return ObjectPart{}, err } if err := s3utils.CheckValidObjectName(p.objectName); err != nil { return ObjectPart{}, err } if p.size > maxPartSize { return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName) } if p.size <= -1 { return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName) } if p.partNumber <= 0 { return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") } if p.uploadID == "" { return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") } // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set part number. urlValues.Set("partNumber", strconv.Itoa(p.partNumber)) // Set upload id. urlValues.Set("uploadId", p.uploadID) // Set encryption headers, if any. if p.customHeader == nil { p.customHeader = make(http.Header) } // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html // Server-side encryption is supported by the S3 Multipart Upload actions. // Unless you are using a customer-provided encryption key, you don't need // to specify the encryption parameters in each UploadPart request. if p.sse != nil && p.sse.Type() == encrypt.SSEC { p.sse.Marshal(p.customHeader) } reqMetadata := requestMetadata{ bucketName: p.bucketName, objectName: p.objectName, queryValues: urlValues, customHeader: p.customHeader, contentBody: p.reader, contentLength: p.size, contentMD5Base64: p.md5Base64, contentSHA256Hex: p.sha256Hex, streamSha256: p.streamSha256, trailer: p.trailer, } // Execute PUT on each part. resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) defer closeResponse(resp) if err != nil { return ObjectPart{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName) } } // Once successfully uploaded, return completed part. h := resp.Header objPart := ObjectPart{ ChecksumCRC32: h.Get("x-amz-checksum-crc32"), ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), ChecksumSHA1: h.Get("x-amz-checksum-sha1"), ChecksumSHA256: h.Get("x-amz-checksum-sha256"), } objPart.Size = p.size objPart.PartNumber = p.partNumber // Trim off the odd double quotes from ETag in the beginning and end. objPart.ETag = trimEtag(h.Get("ETag")) return objPart, nil } // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, complete completeMultipartUpload, opts PutObjectOptions, ) (UploadInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err } if err := s3utils.CheckValidObjectName(objectName); err != nil { return UploadInfo{}, err } // Initialize url queries. urlValues := make(url.Values) urlValues.Set("uploadId", uploadID) // Marshal complete multipart body. 
completeMultipartUploadBytes, err := xml.Marshal(complete) if err != nil { return UploadInfo{}, err } headers := opts.Header() if s3utils.IsAmazonEndpoint(*c.endpointURL) { headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload } // Instantiate all the complete multipart buffer. completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) reqMetadata := requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: urlValues, contentBody: completeMultipartUploadBuffer, contentLength: int64(len(completeMultipartUploadBytes)), contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), customHeader: headers, } // Execute POST to complete multipart upload for an objectName. resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) defer closeResponse(resp) if err != nil { return UploadInfo{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) } } // Read resp.Body into a []bytes to parse for Error response inside the body var b []byte b, err = io.ReadAll(resp.Body) if err != nil { return UploadInfo{}, err } // Decode completed multipart upload response on success. completeMultipartUploadResult := completeMultipartUploadResult{} err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult) if err != nil { // xml parsing failure due to presence an ill-formed xml fragment return UploadInfo{}, err } else if completeMultipartUploadResult.Bucket == "" { // xml's Decode method ignores well-formed xml that don't apply to the type of value supplied. // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values // of the members. // Decode completed multipart upload response on failure completeMultipartUploadErr := ErrorResponse{} err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr) if err != nil { // xml parsing failure due to presence an ill-formed xml fragment return UploadInfo{}, err } return UploadInfo{}, completeMultipartUploadErr } // extract lifecycle expiry date and rule ID expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) return UploadInfo{ Bucket: completeMultipartUploadResult.Bucket, Key: completeMultipartUploadResult.Key, ETag: trimEtag(completeMultipartUploadResult.ETag), VersionID: resp.Header.Get(amzVersionID), Location: completeMultipartUploadResult.Location, Expiration: expTime, ExpirationRuleID: ruleID, ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256, ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1, ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32, ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C, }, nil }
putObjectMultipartNoStream
identifier_name
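The elided middle of this example is only the method name putObjectMultipartNoStream, which suggests these rows follow a fill-in-the-middle layout: prefix, suffix, middle, a fim_type tag, and a file name. A short sketch of reassembling such a row is below; the struct and field names are invented for illustration and assume that layout.

package main

import "fmt"

// fimRow mirrors the fields visible in these examples; the struct name and
// field layout are assumptions made for illustration only.
type fimRow struct {
	FileName string
	Prefix   string
	Middle   string
	Suffix   string
	FIMType  string // e.g. "identifier_name", "conditional_block"
}

// reassemble restores the original source text by splicing the elided middle
// back between the prefix and the suffix.
func reassemble(r fimRow) string {
	return r.Prefix + r.Middle + r.Suffix
}

func main() {
	row := fimRow{
		FileName: "api-put-object-multipart.go",
		Prefix:   "func (c *Client) ",
		Middle:   "putObjectMultipartNoStream",
		Suffix:   "(ctx context.Context, bucketName, objectName string, ...)",
		FIMType:  "identifier_name",
	}
	fmt.Println(reassemble(row))
}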
api-put-object-multipart.go
/* * MinIO Go Library for Amazon S3 Compatible Cloud Storage * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package minio import ( "bytes" "context" "encoding/base64" "encoding/hex" "encoding/xml" "fmt" "hash/crc32" "io" "net/http" "net/url" "sort" "strconv" "strings" "github.com/google/uuid" "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/minio-go/v7/pkg/s3utils" ) func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions, ) (info UploadInfo, err error) { info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) if err != nil { errResp := ToErrorResponse(err) // Verify if multipart functionality is not available, if not // fall back to single PutObject operation. if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { // Verify if size of reader is greater than '5GiB'. if size > maxSinglePutObjectSize { return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) } // Fall back to uploading as single PutObject operation. return c.putObject(ctx, bucketName, objectName, reader, size, opts) } } return info, err } func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err } if err = s3utils.CheckValidObjectName(objectName); err != nil { return UploadInfo{}, err } // Total data read and written to server. should be equal to // 'size' at the end of the call. var totalUploadedSize int64 // Complete multipart upload. var complMultipartUpload completeMultipartUpload // Calculate the optimal parts info for a given size. totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) if err != nil { return UploadInfo{}, err } // Choose hash algorithms to be calculated by hashCopyN, // avoid sha256 with non-v4 signature request or // HTTPS connection. hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256) if len(hashSums) == 0 { if opts.UserMetadata == nil { opts.UserMetadata = make(map[string]string, 1) } opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" } // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return UploadInfo{}, err } delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") defer func() { if err != nil { c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) } }() // Part number always starts with '1'. partNumber := 1 // Initialize parts uploaded map. partsInfo := make(map[int]ObjectPart) // Create a buffer. 
buf := make([]byte, partSize) // Create checksums // CRC32C is ~50% faster on AMD64 @ 30GB/s var crcBytes []byte customHeader := make(http.Header) crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) for partNumber <= totalPartsCount { length, rErr := readFull(reader, buf) if rErr == io.EOF && partNumber > 1 { break } if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { return UploadInfo{}, rErr } // Calculates hash sums while copying partSize bytes into cw. for k, v := range hashAlgos { v.Write(buf[:length]) hashSums[k] = v.Sum(nil) v.Close() } // Update progress reader appropriately to the latest offset // as we read from the source. rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) // Checksums.. var ( md5Base64 string sha256Hex string ) if hashSums["md5"] != nil { md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) } if hashSums["sha256"] != nil { sha256Hex = hex.EncodeToString(hashSums["sha256"]) } if len(hashSums) == 0 { crc.Reset() crc.Write(buf[:length]) cSum := crc.Sum(nil) customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) crcBytes = append(crcBytes, cSum...) } p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} // Proceed to upload the part. objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } // Save successfully uploaded part metadata. partsInfo[partNumber] = objPart // Save successfully uploaded size. totalUploadedSize += int64(length) // Increment part number. partNumber++ // For unknown size, Read EOF we break away. // We do not have to upload till totalPartsCount. if rErr == io.EOF { break } } // Loop over total uploaded parts to save them in // Parts array before completing the multipart request. for i := 1; i < partNumber; i++ { part, ok := partsInfo[i] if !ok { return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) } complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ ETag: part.ETag, PartNumber: part.PartNumber, ChecksumCRC32: part.ChecksumCRC32, ChecksumCRC32C: part.ChecksumCRC32C, ChecksumSHA1: part.ChecksumSHA1, ChecksumSHA256: part.ChecksumSHA256, }) } // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) opts = PutObjectOptions{ ServerSideEncryption: opts.ServerSideEncryption, } if len(crcBytes) > 0 { // Add hash of hashes. crc.Reset() crc.Write(crcBytes) opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} } uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { return UploadInfo{}, err } uploadInfo.Size = totalUploadedSize return uploadInfo, nil } // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return initiateMultipartUploadResult{}, err } if err := s3utils.CheckValidObjectName(objectName); err != nil { return initiateMultipartUploadResult{}, err } // Initialize url queries. 
urlValues := make(url.Values) urlValues.Set("uploads", "") if opts.Internal.SourceVersionID != "" { if opts.Internal.SourceVersionID != nullVersionID { if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { return initiateMultipartUploadResult{}, errInvalidArgument(err.Error()) } } urlValues.Set("versionId", opts.Internal.SourceVersionID) } // Set ContentType header. customHeader := opts.Header() reqMetadata := requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: urlValues, customHeader: customHeader, } // Execute POST on an objectName to initiate multipart upload. resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) defer closeResponse(resp) if err != nil { return initiateMultipartUploadResult{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) } } // Decode xml for new multipart upload. initiateMultipartUploadResult := initiateMultipartUploadResult{} err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) if err != nil { return initiateMultipartUploadResult, err } return initiateMultipartUploadResult, nil } type uploadPartParams struct { bucketName string objectName string uploadID string reader io.Reader partNumber int md5Base64 string sha256Hex string size int64 sse encrypt.ServerSide streamSha256 bool customHeader http.Header trailer http.Header } // uploadPart - Uploads a part in a multipart upload. func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) { // Input validation. if err := s3utils.CheckValidBucketName(p.bucketName); err != nil { return ObjectPart{}, err } if err := s3utils.CheckValidObjectName(p.objectName); err != nil { return ObjectPart{}, err } if p.size > maxPartSize { return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName) } if p.size <= -1 { return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName) } if p.partNumber <= 0 { return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") } if p.uploadID == "" { return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") } // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set part number. urlValues.Set("partNumber", strconv.Itoa(p.partNumber)) // Set upload id. urlValues.Set("uploadId", p.uploadID) // Set encryption headers, if any. if p.customHeader == nil { p.customHeader = make(http.Header) } // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html // Server-side encryption is supported by the S3 Multipart Upload actions. // Unless you are using a customer-provided encryption key, you don't need // to specify the encryption parameters in each UploadPart request. if p.sse != nil && p.sse.Type() == encrypt.SSEC { p.sse.Marshal(p.customHeader) } reqMetadata := requestMetadata{ bucketName: p.bucketName, objectName: p.objectName, queryValues: urlValues, customHeader: p.customHeader, contentBody: p.reader, contentLength: p.size, contentMD5Base64: p.md5Base64, contentSHA256Hex: p.sha256Hex, streamSha256: p.streamSha256, trailer: p.trailer, } // Execute PUT on each part. 
resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) defer closeResponse(resp) if err != nil { return ObjectPart{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName) } } // Once successfully uploaded, return completed part. h := resp.Header objPart := ObjectPart{ ChecksumCRC32: h.Get("x-amz-checksum-crc32"), ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), ChecksumSHA1: h.Get("x-amz-checksum-sha1"), ChecksumSHA256: h.Get("x-amz-checksum-sha256"), } objPart.Size = p.size objPart.PartNumber = p.partNumber // Trim off the odd double quotes from ETag in the beginning and end. objPart.ETag = trimEtag(h.Get("ETag")) return objPart, nil } // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, complete completeMultipartUpload, opts PutObjectOptions, ) (UploadInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err } if err := s3utils.CheckValidObjectName(objectName); err != nil { return UploadInfo{}, err } // Initialize url queries. urlValues := make(url.Values) urlValues.Set("uploadId", uploadID) // Marshal complete multipart body. completeMultipartUploadBytes, err := xml.Marshal(complete) if err != nil { return UploadInfo{}, err } headers := opts.Header() if s3utils.IsAmazonEndpoint(*c.endpointURL) { headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload } // Instantiate all the complete multipart buffer. completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) reqMetadata := requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: urlValues, contentBody: completeMultipartUploadBuffer, contentLength: int64(len(completeMultipartUploadBytes)), contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), customHeader: headers, } // Execute POST to complete multipart upload for an objectName. resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata)
if resp != nil {
		if resp.StatusCode != http.StatusOK {
			return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName)
		}
	}

	// Read resp.Body into a []byte to parse for an Error response inside the body
	var b []byte
	b, err = io.ReadAll(resp.Body)
	if err != nil {
		return UploadInfo{}, err
	}

	// Decode completed multipart upload response on success.
	completeMultipartUploadResult := completeMultipartUploadResult{}
	err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult)
	if err != nil {
		// xml parsing failure due to the presence of an ill-formed xml fragment
		return UploadInfo{}, err
	} else if completeMultipartUploadResult.Bucket == "" {
		// xml's Decode method ignores well-formed xml that doesn't apply to the type of value supplied.
		// In this case, it would leave completeMultipartUploadResult with the corresponding zero-values
		// of the members.

		// Decode completed multipart upload response on failure
		completeMultipartUploadErr := ErrorResponse{}
		err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr)
		if err != nil {
			// xml parsing failure due to the presence of an ill-formed xml fragment
			return UploadInfo{}, err
		}
		return UploadInfo{}, completeMultipartUploadErr
	}

	// extract lifecycle expiry date and rule ID
	expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration))

	return UploadInfo{
		Bucket:           completeMultipartUploadResult.Bucket,
		Key:              completeMultipartUploadResult.Key,
		ETag:             trimEtag(completeMultipartUploadResult.ETag),
		VersionID:        resp.Header.Get(amzVersionID),
		Location:         completeMultipartUploadResult.Location,
		Expiration:       expTime,
		ExpirationRuleID: ruleID,

		ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256,
		ChecksumSHA1:   completeMultipartUploadResult.ChecksumSHA1,
		ChecksumCRC32:  completeMultipartUploadResult.ChecksumCRC32,
		ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C,
	}, nil
}
defer closeResponse(resp) if err != nil { return UploadInfo{}, err }
random_line_split
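Here the elided middle is the defer closeResponse(resp) call plus the error check that follow executeMethod: the response closer is registered before the error is inspected, which only works because it tolerates a nil response. A stand-alone sketch of that pattern using only the standard library is below; drainAndClose is a hypothetical stand-in for the library's closeResponse helper, whose exact behaviour is an assumption here, and the URL is a placeholder.

package main

import (
	"fmt"
	"io"
	"net/http"
)

// drainAndClose is a nil-safe response closer. Draining the body before
// closing it lets the underlying connection be reused by the transport.
func drainAndClose(resp *http.Response) {
	if resp != nil && resp.Body != nil {
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
	}
}

func main() {
	resp, err := http.Get("https://example.com/") // placeholder URL
	// Register the close first; it is safe even when resp is nil, so the
	// error can be checked afterwards, mirroring the elided lines above.
	defer drainAndClose(resp)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println("status:", resp.Status)
}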
api-put-object-multipart.go
/* * MinIO Go Library for Amazon S3 Compatible Cloud Storage * Copyright 2015-2017 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package minio import ( "bytes" "context" "encoding/base64" "encoding/hex" "encoding/xml" "fmt" "hash/crc32" "io" "net/http" "net/url" "sort" "strconv" "strings" "github.com/google/uuid" "github.com/minio/minio-go/v7/pkg/encrypt" "github.com/minio/minio-go/v7/pkg/s3utils" ) func (c *Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions, ) (info UploadInfo, err error)
func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (info UploadInfo, err error) { // Input validation. if err = s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err } if err = s3utils.CheckValidObjectName(objectName); err != nil { return UploadInfo{}, err } // Total data read and written to server. should be equal to // 'size' at the end of the call. var totalUploadedSize int64 // Complete multipart upload. var complMultipartUpload completeMultipartUpload // Calculate the optimal parts info for a given size. totalPartsCount, partSize, _, err := OptimalPartInfo(-1, opts.PartSize) if err != nil { return UploadInfo{}, err } // Choose hash algorithms to be calculated by hashCopyN, // avoid sha256 with non-v4 signature request or // HTTPS connection. hashAlgos, hashSums := c.hashMaterials(opts.SendContentMd5, !opts.DisableContentSha256) if len(hashSums) == 0 { if opts.UserMetadata == nil { opts.UserMetadata = make(map[string]string, 1) } opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" } // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) if err != nil { return UploadInfo{}, err } delete(opts.UserMetadata, "X-Amz-Checksum-Algorithm") defer func() { if err != nil { c.abortMultipartUpload(ctx, bucketName, objectName, uploadID) } }() // Part number always starts with '1'. partNumber := 1 // Initialize parts uploaded map. partsInfo := make(map[int]ObjectPart) // Create a buffer. buf := make([]byte, partSize) // Create checksums // CRC32C is ~50% faster on AMD64 @ 30GB/s var crcBytes []byte customHeader := make(http.Header) crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) for partNumber <= totalPartsCount { length, rErr := readFull(reader, buf) if rErr == io.EOF && partNumber > 1 { break } if rErr != nil && rErr != io.ErrUnexpectedEOF && rErr != io.EOF { return UploadInfo{}, rErr } // Calculates hash sums while copying partSize bytes into cw. for k, v := range hashAlgos { v.Write(buf[:length]) hashSums[k] = v.Sum(nil) v.Close() } // Update progress reader appropriately to the latest offset // as we read from the source. rd := newHook(bytes.NewReader(buf[:length]), opts.Progress) // Checksums.. var ( md5Base64 string sha256Hex string ) if hashSums["md5"] != nil { md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"]) } if hashSums["sha256"] != nil { sha256Hex = hex.EncodeToString(hashSums["sha256"]) } if len(hashSums) == 0 { crc.Reset() crc.Write(buf[:length]) cSum := crc.Sum(nil) customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) crcBytes = append(crcBytes, cSum...) } p := uploadPartParams{bucketName: bucketName, objectName: objectName, uploadID: uploadID, reader: rd, partNumber: partNumber, md5Base64: md5Base64, sha256Hex: sha256Hex, size: int64(length), sse: opts.ServerSideEncryption, streamSha256: !opts.DisableContentSha256, customHeader: customHeader} // Proceed to upload the part. objPart, uerr := c.uploadPart(ctx, p) if uerr != nil { return UploadInfo{}, uerr } // Save successfully uploaded part metadata. partsInfo[partNumber] = objPart // Save successfully uploaded size. totalUploadedSize += int64(length) // Increment part number. partNumber++ // For unknown size, Read EOF we break away. // We do not have to upload till totalPartsCount. if rErr == io.EOF { break } } // Loop over total uploaded parts to save them in // Parts array before completing the multipart request. 
for i := 1; i < partNumber; i++ { part, ok := partsInfo[i] if !ok { return UploadInfo{}, errInvalidArgument(fmt.Sprintf("Missing part number %d", i)) } complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ ETag: part.ETag, PartNumber: part.PartNumber, ChecksumCRC32: part.ChecksumCRC32, ChecksumCRC32C: part.ChecksumCRC32C, ChecksumSHA1: part.ChecksumSHA1, ChecksumSHA256: part.ChecksumSHA256, }) } // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) opts = PutObjectOptions{ ServerSideEncryption: opts.ServerSideEncryption, } if len(crcBytes) > 0 { // Add hash of hashes. crc.Reset() crc.Write(crcBytes) opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} } uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { return UploadInfo{}, err } uploadInfo.Size = totalUploadedSize return uploadInfo, nil } // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. func (c *Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return initiateMultipartUploadResult{}, err } if err := s3utils.CheckValidObjectName(objectName); err != nil { return initiateMultipartUploadResult{}, err } // Initialize url queries. urlValues := make(url.Values) urlValues.Set("uploads", "") if opts.Internal.SourceVersionID != "" { if opts.Internal.SourceVersionID != nullVersionID { if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { return initiateMultipartUploadResult{}, errInvalidArgument(err.Error()) } } urlValues.Set("versionId", opts.Internal.SourceVersionID) } // Set ContentType header. customHeader := opts.Header() reqMetadata := requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: urlValues, customHeader: customHeader, } // Execute POST on an objectName to initiate multipart upload. resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) defer closeResponse(resp) if err != nil { return initiateMultipartUploadResult{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { return initiateMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) } } // Decode xml for new multipart upload. initiateMultipartUploadResult := initiateMultipartUploadResult{} err = xmlDecoder(resp.Body, &initiateMultipartUploadResult) if err != nil { return initiateMultipartUploadResult, err } return initiateMultipartUploadResult, nil } type uploadPartParams struct { bucketName string objectName string uploadID string reader io.Reader partNumber int md5Base64 string sha256Hex string size int64 sse encrypt.ServerSide streamSha256 bool customHeader http.Header trailer http.Header } // uploadPart - Uploads a part in a multipart upload. func (c *Client) uploadPart(ctx context.Context, p uploadPartParams) (ObjectPart, error) { // Input validation. 
if err := s3utils.CheckValidBucketName(p.bucketName); err != nil { return ObjectPart{}, err } if err := s3utils.CheckValidObjectName(p.objectName); err != nil { return ObjectPart{}, err } if p.size > maxPartSize { return ObjectPart{}, errEntityTooLarge(p.size, maxPartSize, p.bucketName, p.objectName) } if p.size <= -1 { return ObjectPart{}, errEntityTooSmall(p.size, p.bucketName, p.objectName) } if p.partNumber <= 0 { return ObjectPart{}, errInvalidArgument("Part number cannot be negative or equal to zero.") } if p.uploadID == "" { return ObjectPart{}, errInvalidArgument("UploadID cannot be empty.") } // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set part number. urlValues.Set("partNumber", strconv.Itoa(p.partNumber)) // Set upload id. urlValues.Set("uploadId", p.uploadID) // Set encryption headers, if any. if p.customHeader == nil { p.customHeader = make(http.Header) } // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html // Server-side encryption is supported by the S3 Multipart Upload actions. // Unless you are using a customer-provided encryption key, you don't need // to specify the encryption parameters in each UploadPart request. if p.sse != nil && p.sse.Type() == encrypt.SSEC { p.sse.Marshal(p.customHeader) } reqMetadata := requestMetadata{ bucketName: p.bucketName, objectName: p.objectName, queryValues: urlValues, customHeader: p.customHeader, contentBody: p.reader, contentLength: p.size, contentMD5Base64: p.md5Base64, contentSHA256Hex: p.sha256Hex, streamSha256: p.streamSha256, trailer: p.trailer, } // Execute PUT on each part. resp, err := c.executeMethod(ctx, http.MethodPut, reqMetadata) defer closeResponse(resp) if err != nil { return ObjectPart{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { return ObjectPart{}, httpRespToErrorResponse(resp, p.bucketName, p.objectName) } } // Once successfully uploaded, return completed part. h := resp.Header objPart := ObjectPart{ ChecksumCRC32: h.Get("x-amz-checksum-crc32"), ChecksumCRC32C: h.Get("x-amz-checksum-crc32c"), ChecksumSHA1: h.Get("x-amz-checksum-sha1"), ChecksumSHA256: h.Get("x-amz-checksum-sha256"), } objPart.Size = p.size objPart.PartNumber = p.partNumber // Trim off the odd double quotes from ETag in the beginning and end. objPart.ETag = trimEtag(h.Get("ETag")) return objPart, nil } // completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts. func (c *Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string, complete completeMultipartUpload, opts PutObjectOptions, ) (UploadInfo, error) { // Input validation. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return UploadInfo{}, err } if err := s3utils.CheckValidObjectName(objectName); err != nil { return UploadInfo{}, err } // Initialize url queries. urlValues := make(url.Values) urlValues.Set("uploadId", uploadID) // Marshal complete multipart body. 
completeMultipartUploadBytes, err := xml.Marshal(complete) if err != nil { return UploadInfo{}, err } headers := opts.Header() if s3utils.IsAmazonEndpoint(*c.endpointURL) { headers.Del(encrypt.SseKmsKeyID) // Remove X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id not supported in CompleteMultipartUpload headers.Del(encrypt.SseGenericHeader) // Remove X-Amz-Server-Side-Encryption not supported in CompleteMultipartUpload headers.Del(encrypt.SseEncryptionContext) // Remove X-Amz-Server-Side-Encryption-Context not supported in CompleteMultipartUpload } // Instantiate all the complete multipart buffer. completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) reqMetadata := requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: urlValues, contentBody: completeMultipartUploadBuffer, contentLength: int64(len(completeMultipartUploadBytes)), contentSHA256Hex: sum256Hex(completeMultipartUploadBytes), customHeader: headers, } // Execute POST to complete multipart upload for an objectName. resp, err := c.executeMethod(ctx, http.MethodPost, reqMetadata) defer closeResponse(resp) if err != nil { return UploadInfo{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { return UploadInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) } } // Read resp.Body into a []bytes to parse for Error response inside the body var b []byte b, err = io.ReadAll(resp.Body) if err != nil { return UploadInfo{}, err } // Decode completed multipart upload response on success. completeMultipartUploadResult := completeMultipartUploadResult{} err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult) if err != nil { // xml parsing failure due to presence an ill-formed xml fragment return UploadInfo{}, err } else if completeMultipartUploadResult.Bucket == "" { // xml's Decode method ignores well-formed xml that don't apply to the type of value supplied. // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values // of the members. // Decode completed multipart upload response on failure completeMultipartUploadErr := ErrorResponse{} err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr) if err != nil { // xml parsing failure due to presence an ill-formed xml fragment return UploadInfo{}, err } return UploadInfo{}, completeMultipartUploadErr } // extract lifecycle expiry date and rule ID expTime, ruleID := amzExpirationToExpiryDateRuleID(resp.Header.Get(amzExpiration)) return UploadInfo{ Bucket: completeMultipartUploadResult.Bucket, Key: completeMultipartUploadResult.Key, ETag: trimEtag(completeMultipartUploadResult.ETag), VersionID: resp.Header.Get(amzVersionID), Location: completeMultipartUploadResult.Location, Expiration: expTime, ExpirationRuleID: ruleID, ChecksumSHA256: completeMultipartUploadResult.ChecksumSHA256, ChecksumSHA1: completeMultipartUploadResult.ChecksumSHA1, ChecksumCRC32: completeMultipartUploadResult.ChecksumCRC32, ChecksumCRC32C: completeMultipartUploadResult.ChecksumCRC32C, }, nil }
{ info, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts) if err != nil { errResp := ToErrorResponse(err) // Verify if multipart functionality is not available, if not // fall back to single PutObject operation. if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { // Verify if size of reader is greater than '5GiB'. if size > maxSinglePutObjectSize { return UploadInfo{}, errEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) } // Fall back to uploading as single PutObject operation. return c.putObject(ctx, bucketName, objectName, reader, size, opts) } } return info, err }
identifier_body
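The elided body in this example is putObjectMultipart's fallback: when the multipart calls fail with an "AccessDenied" code and an "Access Denied" message, and the object still fits in a single PUT, the client retries with a plain PutObject. Callers can inspect errors with the same public helper used there; the sketch below is only an illustration of that inspection, and its messages and retry advice are not a restatement of the library's behaviour.

package main

import (
	"log"
	"strings"

	"github.com/minio/minio-go/v7"
)

// classifyPutError inspects a minio-go error using the same Code and Message
// fields that the elided fallback logic keys on.
func classifyPutError(err error) string {
	if err == nil {
		return "ok"
	}
	errResp := minio.ToErrorResponse(err)
	if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
		return "multipart denied; a small object may still succeed as a single PutObject"
	}
	return "upload failed: " + errResp.Code
}

func main() {
	log.Println(classifyPutError(nil))
}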
manager.go
// Copyright 2018 The Operator-SDK Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package release import ( "bytes" "context" "encoding/json" "errors" "fmt" "strings" jsonpatch "gomodules.xyz/jsonpatch/v3" "helm.sh/helm/v3/pkg/action" cpb "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/kube" rpb "helm.sh/helm/v3/pkg/release" "helm.sh/helm/v3/pkg/releaseutil" "helm.sh/helm/v3/pkg/storage" "helm.sh/helm/v3/pkg/storage/driver" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" apitypes "k8s.io/apimachinery/pkg/types" apiutilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/discovery" "github.com/operator-framework/operator-sdk/internal/helm/internal/types" "github.com/operator-framework/operator-sdk/internal/helm/manifestutil" ) // Manager manages a Helm release. It can install, upgrade, reconcile, // and uninstall a release. type Manager interface { ReleaseName() string IsInstalled() bool IsUpgradeRequired() bool Sync(context.Context) error InstallRelease(context.Context, ...InstallOption) (*rpb.Release, error) UpgradeRelease(context.Context, ...UpgradeOption) (*rpb.Release, *rpb.Release, error) ReconcileRelease(context.Context) (*rpb.Release, error) UninstallRelease(context.Context, ...UninstallOption) (*rpb.Release, error) CleanupRelease(context.Context, string) (bool, error) } type manager struct { actionConfig *action.Configuration storageBackend *storage.Storage kubeClient kube.Interface releaseName string namespace string values map[string]interface{} status *types.HelmAppStatus isInstalled bool isUpgradeRequired bool deployedRelease *rpb.Release chart *cpb.Chart } type InstallOption func(*action.Install) error type UpgradeOption func(*action.Upgrade) error type UninstallOption func(*action.Uninstall) error // ReleaseName returns the name of the release. func (m manager) ReleaseName() string { return m.releaseName } func (m manager) IsInstalled() bool { return m.isInstalled } func (m manager) IsUpgradeRequired() bool { return m.isUpgradeRequired } // Sync ensures the Helm storage backend is in sync with the status of the // custom resource. func (m *manager) Sync(ctx context.Context) error { // Get release history for this release name releases, err := m.storageBackend.History(m.releaseName) if err != nil && !notFoundErr(err) { return fmt.Errorf("failed to retrieve release history: %w", err) } // Cleanup non-deployed release versions. If all release versions are // non-deployed, this will ensure that failed installations are correctly // retried. 
for _, rel := range releases { if rel.Info != nil && rel.Info.Status != rpb.StatusDeployed { _, err := m.storageBackend.Delete(rel.Name, rel.Version) if err != nil && !notFoundErr(err) { return fmt.Errorf("failed to delete stale release version: %w", err) } } } // Load the most recently deployed release from the storage backend. deployedRelease, err := m.getDeployedRelease() if errors.Is(err, driver.ErrReleaseNotFound) { return nil } if err != nil { return fmt.Errorf("failed to get deployed release: %w", err) } m.deployedRelease = deployedRelease m.isInstalled = true // Get the next candidate release to determine if an upgrade is necessary. candidateRelease, err := m.getCandidateRelease(m.namespace, m.releaseName, m.chart, m.values) if err != nil { return fmt.Errorf("failed to get candidate release: %w", err) } if deployedRelease.Manifest != candidateRelease.Manifest { m.isUpgradeRequired = true } return nil } func notFoundErr(err error) bool { return err != nil && strings.Contains(err.Error(), "not found") } func (m manager) getDeployedRelease() (*rpb.Release, error) { deployedRelease, err := m.storageBackend.Deployed(m.releaseName) if err != nil { if strings.Contains(err.Error(), "has no deployed releases") { return nil, driver.ErrReleaseNotFound } return nil, err } return deployedRelease, nil } func (m manager) getCandidateRelease(namespace, name string, chart *cpb.Chart, values map[string]interface{}) (*rpb.Release, error) { upgrade := action.NewUpgrade(m.actionConfig) upgrade.Namespace = namespace upgrade.DryRun = true return upgrade.Run(name, chart, values) } // InstallRelease performs a Helm release install. func (m manager) InstallRelease(ctx context.Context, opts ...InstallOption) (*rpb.Release, error) { install := action.NewInstall(m.actionConfig) install.ReleaseName = m.releaseName install.Namespace = m.namespace for _, o := range opts { if err := o(install); err != nil { return nil, fmt.Errorf("failed to apply install option: %w", err) } } installedRelease, err := install.Run(m.chart, m.values) if err != nil { // Workaround for helm/helm#3338 if installedRelease != nil { uninstall := action.NewUninstall(m.actionConfig) _, uninstallErr := uninstall.Run(m.releaseName) // In certain cases, InstallRelease will return a partial release in // the response even when it doesn't record the release in its release // store (e.g. when there is an error rendering the release manifest). // In that case the rollback will fail with a not found error because // there was nothing to rollback. // // Only log a message about a rollback failure if the failure was caused // by something other than the release not being found. if uninstallErr != nil && !notFoundErr(uninstallErr) { return nil, fmt.Errorf("failed installation (%s) and failed rollback: %w", err, uninstallErr) } } return nil, fmt.Errorf("failed to install release: %w", err) } return installedRelease, nil } func ForceUpgrade(force bool) UpgradeOption { return func(u *action.Upgrade) error { u.Force = force return nil } } // UpgradeRelease performs a Helm release upgrade. 
func (m manager) UpgradeRelease(ctx context.Context, opts ...UpgradeOption) (*rpb.Release, *rpb.Release, error) { upgrade := action.NewUpgrade(m.actionConfig) upgrade.Namespace = m.namespace for _, o := range opts { if err := o(upgrade); err != nil { return nil, nil, fmt.Errorf("failed to apply upgrade option: %w", err) } } upgradedRelease, err := upgrade.Run(m.releaseName, m.chart, m.values) if err != nil { // Workaround for helm/helm#3338 if upgradedRelease != nil { rollback := action.NewRollback(m.actionConfig) rollback.Force = true // As of Helm 2.13, if UpgradeRelease returns a non-nil release, that // means the release was also recorded in the release store. // Therefore, we should perform the rollback when we have a non-nil // release. Any rollback error here would be unexpected, so always // log both the upgrade and rollback errors. rollbackErr := rollback.Run(m.releaseName) if rollbackErr != nil { return nil, nil, fmt.Errorf("failed upgrade (%s) and failed rollback: %w", err, rollbackErr) } } return nil, nil, fmt.Errorf("failed to upgrade release: %w", err) } return m.deployedRelease, upgradedRelease, err } // ReconcileRelease creates or patches resources as necessary to match the // deployed release's manifest. func (m manager) ReconcileRelease(ctx context.Context) (*rpb.Release, error) { err := reconcileRelease(ctx, m.kubeClient, m.deployedRelease.Manifest) return m.deployedRelease, err } func reconcileRelease(_ context.Context, kubeClient kube.Interface, expectedManifest string) error { expectedInfos, err := kubeClient.Build(bytes.NewBufferString(expectedManifest), false) if err != nil { return err } return expectedInfos.Visit(func(expected *resource.Info, err error) error { if err != nil { return fmt.Errorf("visit error: %w", err) } helper := resource.NewHelper(expected.Client, expected.Mapping) existing, err := helper.Get(expected.Namespace, expected.Name) if apierrors.IsNotFound(err) { if _, err := helper.Create(expected.Namespace, true, expected.Object); err != nil { return fmt.Errorf("create error: %s", err) } return nil } else if err != nil { return fmt.Errorf("could not get object: %w", err) } // Replicate helm's patch creation, which will create a Three-Way-Merge patch for // native kubernetes Objects and fall back to a JSON merge patch for unstructured Objects such as CRDs // We also extend the JSON merge patch by ignoring "remove" operations for fields added by kubernetes // Reference in the helm source code: // https://github.com/helm/helm/blob/1c9b54ad7f62a5ce12f87c3ae55136ca20f09c98/pkg/kube/client.go#L392 patch, patchType, err := createPatch(existing, expected) if err != nil { return fmt.Errorf("error creating patch: %w", err) } if patch == nil { // nothing to do return nil } _, err = helper.Patch(expected.Namespace, expected.Name, patchType, patch, &metav1.PatchOptions{}) if err != nil
return nil }) } func createPatch(existing runtime.Object, expected *resource.Info) ([]byte, apitypes.PatchType, error) { existingJSON, err := json.Marshal(existing) if err != nil { return nil, apitypes.StrategicMergePatchType, err } expectedJSON, err := json.Marshal(expected.Object) if err != nil { return nil, apitypes.StrategicMergePatchType, err } // Get a versioned object versionedObject := kube.AsVersioned(expected) // Unstructured objects, such as CRDs, may not have an not registered error // returned from ConvertToVersion. Anything that's unstructured should // use the jsonpatch.CreateMergePatch. Strategic Merge Patch is not supported // on objects like CRDs. _, isUnstructured := versionedObject.(runtime.Unstructured) // On newer K8s versions, CRDs aren't unstructured but have a dedicated type _, isV1CRD := versionedObject.(*apiextv1.CustomResourceDefinition) _, isV1beta1CRD := versionedObject.(*apiextv1beta1.CustomResourceDefinition) isCRD := isV1CRD || isV1beta1CRD if isUnstructured || isCRD { // fall back to generic JSON merge patch patch, err := createJSONMergePatch(existingJSON, expectedJSON) return patch, apitypes.JSONPatchType, err } patchMeta, err := strategicpatch.NewPatchMetaFromStruct(versionedObject) if err != nil { return nil, apitypes.StrategicMergePatchType, err } patch, err := strategicpatch.CreateThreeWayMergePatch(expectedJSON, expectedJSON, existingJSON, patchMeta, true) if err != nil { return nil, apitypes.StrategicMergePatchType, err } // An empty patch could be in the form of "{}" which represents an empty map out of the 3-way merge; // filter them out here too to avoid sending the apiserver empty patch requests. if len(patch) == 0 || bytes.Equal(patch, []byte("{}")) { return nil, apitypes.StrategicMergePatchType, nil } return patch, apitypes.StrategicMergePatchType, nil } func createJSONMergePatch(existingJSON, expectedJSON []byte) ([]byte, error) { ops, err := jsonpatch.CreatePatch(existingJSON, expectedJSON) if err != nil { return nil, err } // We ignore the "remove" operations from the full patch because they are // fields added by Kubernetes or by the user after the existing release // resource has been applied. The goal for this patch is to make sure that // the fields managed by the Helm chart are applied. // All "add" operations without a value (null) can be ignored patchOps := make([]jsonpatch.JsonPatchOperation, 0) for _, op := range ops { if op.Operation != "remove" && !(op.Operation == "add" && op.Value == nil) { patchOps = append(patchOps, op) } } // If there are no patch operations, return nil. Callers are expected // to check for a nil response and skip the patch operation to avoid // unnecessary chatter with the API server. if len(patchOps) == 0 { return nil, nil } return json.Marshal(patchOps) } // UninstallRelease performs a Helm release uninstall. func (m manager) UninstallRelease(ctx context.Context, opts ...UninstallOption) (*rpb.Release, error) { uninstall := action.NewUninstall(m.actionConfig) for _, o := range opts { if err := o(uninstall); err != nil { return nil, fmt.Errorf("failed to apply uninstall option: %w", err) } } uninstallResponse, err := uninstall.Run(m.releaseName) if uninstallResponse == nil { return nil, err } return uninstallResponse.Release, err } // CleanupRelease deletes resources if they are not deleted already. // Return true if all the resources are deleted, false otherwise. 
func (m manager) CleanupRelease(ctx context.Context, manifest string) (bool, error) { dc, err := m.actionConfig.RESTClientGetter.ToDiscoveryClient() if err != nil { return false, fmt.Errorf("failed to get Kubernetes discovery client: %w", err) } apiVersions, err := action.GetVersionSet(dc) if err != nil && !discovery.IsGroupDiscoveryFailedError(err) { return false, fmt.Errorf("failed to get apiVersions from Kubernetes: %w", err) } manifests := releaseutil.SplitManifests(manifest) _, files, err := releaseutil.SortManifests(manifests, apiVersions, releaseutil.UninstallOrder) if err != nil { return false, fmt.Errorf("failed to sort manifests: %w", err) } // do not delete resources that are annotated with the Helm resource policy 'keep' _, filesToDelete := manifestutil.FilterManifestsToKeep(files) var builder strings.Builder for _, file := range filesToDelete { builder.WriteString("\n---\n" + file.Content) } resources, err := m.kubeClient.Build(strings.NewReader(builder.String()), false) if err != nil { return false, fmt.Errorf("failed to build resources from manifests: %w", err) } if resources == nil || len(resources) <= 0 { return true, nil } for _, resource := range resources { err = resource.Get() if err != nil { if apierrors.IsNotFound(err) { continue // resource is already delete, check the next one. } return false, fmt.Errorf("failed to get resource: %w", err) } // found at least one resource that is not deleted so just delete everything again. _, errs := m.kubeClient.Delete(resources) if len(errs) > 0 { return false, fmt.Errorf("failed to delete resources: %v", apiutilerrors.NewAggregate(errs)) } return false, nil } return true, nil }
{ return fmt.Errorf("patch error: %w", err) }
conditional_block
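
The manager code above leans heavily on Go error wrapping: failures are annotated with fmt.Errorf("...: %w", err), and sentinel errors such as driver.ErrReleaseNotFound are still detectable afterwards via errors.Is. Below is a minimal, standalone sketch of that pattern using only the standard library; errReleaseNotFound is a hypothetical sentinel standing in for the Helm storage driver's error, not the real value.

package main

import (
	"errors"
	"fmt"
)

// errReleaseNotFound is a hypothetical sentinel standing in for
// driver.ErrReleaseNotFound from the Helm storage driver package.
var errReleaseNotFound = errors.New("release: not found")

// getDeployedRelease pretends the storage backend has no deployed release
// and wraps the sentinel with extra context, as the manager does.
func getDeployedRelease(name string) (string, error) {
	return "", fmt.Errorf("failed to get deployed release %q: %w", name, errReleaseNotFound)
}

func main() {
	_, err := getDeployedRelease("example")
	// errors.Is walks the %w chain, so the sentinel is still detectable
	// after the error has been annotated with context.
	if errors.Is(err, errReleaseNotFound) {
		fmt.Println("not installed yet:", err)
		return
	}
	if err != nil {
		fmt.Println("unexpected error:", err)
	}
}
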
manager.go
// Copyright 2018 The Operator-SDK Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package release import ( "bytes" "context" "encoding/json" "errors" "fmt" "strings" jsonpatch "gomodules.xyz/jsonpatch/v3" "helm.sh/helm/v3/pkg/action" cpb "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/kube" rpb "helm.sh/helm/v3/pkg/release" "helm.sh/helm/v3/pkg/releaseutil" "helm.sh/helm/v3/pkg/storage" "helm.sh/helm/v3/pkg/storage/driver" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" apitypes "k8s.io/apimachinery/pkg/types" apiutilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/discovery" "github.com/operator-framework/operator-sdk/internal/helm/internal/types" "github.com/operator-framework/operator-sdk/internal/helm/manifestutil" ) // Manager manages a Helm release. It can install, upgrade, reconcile, // and uninstall a release. type Manager interface { ReleaseName() string IsInstalled() bool IsUpgradeRequired() bool Sync(context.Context) error InstallRelease(context.Context, ...InstallOption) (*rpb.Release, error) UpgradeRelease(context.Context, ...UpgradeOption) (*rpb.Release, *rpb.Release, error) ReconcileRelease(context.Context) (*rpb.Release, error) UninstallRelease(context.Context, ...UninstallOption) (*rpb.Release, error) CleanupRelease(context.Context, string) (bool, error) } type manager struct { actionConfig *action.Configuration storageBackend *storage.Storage kubeClient kube.Interface releaseName string namespace string values map[string]interface{} status *types.HelmAppStatus isInstalled bool isUpgradeRequired bool deployedRelease *rpb.Release chart *cpb.Chart } type InstallOption func(*action.Install) error type UpgradeOption func(*action.Upgrade) error type UninstallOption func(*action.Uninstall) error // ReleaseName returns the name of the release. func (m manager) ReleaseName() string { return m.releaseName } func (m manager) IsInstalled() bool { return m.isInstalled } func (m manager) IsUpgradeRequired() bool { return m.isUpgradeRequired } // Sync ensures the Helm storage backend is in sync with the status of the // custom resource. func (m *manager) Sync(ctx context.Context) error { // Get release history for this release name releases, err := m.storageBackend.History(m.releaseName) if err != nil && !notFoundErr(err) { return fmt.Errorf("failed to retrieve release history: %w", err) } // Cleanup non-deployed release versions. If all release versions are // non-deployed, this will ensure that failed installations are correctly // retried. 
for _, rel := range releases { if rel.Info != nil && rel.Info.Status != rpb.StatusDeployed { _, err := m.storageBackend.Delete(rel.Name, rel.Version) if err != nil && !notFoundErr(err) { return fmt.Errorf("failed to delete stale release version: %w", err) } } } // Load the most recently deployed release from the storage backend. deployedRelease, err := m.getDeployedRelease() if errors.Is(err, driver.ErrReleaseNotFound) { return nil } if err != nil { return fmt.Errorf("failed to get deployed release: %w", err) } m.deployedRelease = deployedRelease m.isInstalled = true // Get the next candidate release to determine if an upgrade is necessary. candidateRelease, err := m.getCandidateRelease(m.namespace, m.releaseName, m.chart, m.values) if err != nil { return fmt.Errorf("failed to get candidate release: %w", err) } if deployedRelease.Manifest != candidateRelease.Manifest { m.isUpgradeRequired = true } return nil } func notFoundErr(err error) bool
func (m manager) getDeployedRelease() (*rpb.Release, error) { deployedRelease, err := m.storageBackend.Deployed(m.releaseName) if err != nil { if strings.Contains(err.Error(), "has no deployed releases") { return nil, driver.ErrReleaseNotFound } return nil, err } return deployedRelease, nil } func (m manager) getCandidateRelease(namespace, name string, chart *cpb.Chart, values map[string]interface{}) (*rpb.Release, error) { upgrade := action.NewUpgrade(m.actionConfig) upgrade.Namespace = namespace upgrade.DryRun = true return upgrade.Run(name, chart, values) } // InstallRelease performs a Helm release install. func (m manager) InstallRelease(ctx context.Context, opts ...InstallOption) (*rpb.Release, error) { install := action.NewInstall(m.actionConfig) install.ReleaseName = m.releaseName install.Namespace = m.namespace for _, o := range opts { if err := o(install); err != nil { return nil, fmt.Errorf("failed to apply install option: %w", err) } } installedRelease, err := install.Run(m.chart, m.values) if err != nil { // Workaround for helm/helm#3338 if installedRelease != nil { uninstall := action.NewUninstall(m.actionConfig) _, uninstallErr := uninstall.Run(m.releaseName) // In certain cases, InstallRelease will return a partial release in // the response even when it doesn't record the release in its release // store (e.g. when there is an error rendering the release manifest). // In that case the rollback will fail with a not found error because // there was nothing to rollback. // // Only log a message about a rollback failure if the failure was caused // by something other than the release not being found. if uninstallErr != nil && !notFoundErr(uninstallErr) { return nil, fmt.Errorf("failed installation (%s) and failed rollback: %w", err, uninstallErr) } } return nil, fmt.Errorf("failed to install release: %w", err) } return installedRelease, nil } func ForceUpgrade(force bool) UpgradeOption { return func(u *action.Upgrade) error { u.Force = force return nil } } // UpgradeRelease performs a Helm release upgrade. func (m manager) UpgradeRelease(ctx context.Context, opts ...UpgradeOption) (*rpb.Release, *rpb.Release, error) { upgrade := action.NewUpgrade(m.actionConfig) upgrade.Namespace = m.namespace for _, o := range opts { if err := o(upgrade); err != nil { return nil, nil, fmt.Errorf("failed to apply upgrade option: %w", err) } } upgradedRelease, err := upgrade.Run(m.releaseName, m.chart, m.values) if err != nil { // Workaround for helm/helm#3338 if upgradedRelease != nil { rollback := action.NewRollback(m.actionConfig) rollback.Force = true // As of Helm 2.13, if UpgradeRelease returns a non-nil release, that // means the release was also recorded in the release store. // Therefore, we should perform the rollback when we have a non-nil // release. Any rollback error here would be unexpected, so always // log both the upgrade and rollback errors. rollbackErr := rollback.Run(m.releaseName) if rollbackErr != nil { return nil, nil, fmt.Errorf("failed upgrade (%s) and failed rollback: %w", err, rollbackErr) } } return nil, nil, fmt.Errorf("failed to upgrade release: %w", err) } return m.deployedRelease, upgradedRelease, err } // ReconcileRelease creates or patches resources as necessary to match the // deployed release's manifest. 
func (m manager) ReconcileRelease(ctx context.Context) (*rpb.Release, error) { err := reconcileRelease(ctx, m.kubeClient, m.deployedRelease.Manifest) return m.deployedRelease, err } func reconcileRelease(_ context.Context, kubeClient kube.Interface, expectedManifest string) error { expectedInfos, err := kubeClient.Build(bytes.NewBufferString(expectedManifest), false) if err != nil { return err } return expectedInfos.Visit(func(expected *resource.Info, err error) error { if err != nil { return fmt.Errorf("visit error: %w", err) } helper := resource.NewHelper(expected.Client, expected.Mapping) existing, err := helper.Get(expected.Namespace, expected.Name) if apierrors.IsNotFound(err) { if _, err := helper.Create(expected.Namespace, true, expected.Object); err != nil { return fmt.Errorf("create error: %s", err) } return nil } else if err != nil { return fmt.Errorf("could not get object: %w", err) } // Replicate helm's patch creation, which will create a Three-Way-Merge patch for // native kubernetes Objects and fall back to a JSON merge patch for unstructured Objects such as CRDs // We also extend the JSON merge patch by ignoring "remove" operations for fields added by kubernetes // Reference in the helm source code: // https://github.com/helm/helm/blob/1c9b54ad7f62a5ce12f87c3ae55136ca20f09c98/pkg/kube/client.go#L392 patch, patchType, err := createPatch(existing, expected) if err != nil { return fmt.Errorf("error creating patch: %w", err) } if patch == nil { // nothing to do return nil } _, err = helper.Patch(expected.Namespace, expected.Name, patchType, patch, &metav1.PatchOptions{}) if err != nil { return fmt.Errorf("patch error: %w", err) } return nil }) } func createPatch(existing runtime.Object, expected *resource.Info) ([]byte, apitypes.PatchType, error) { existingJSON, err := json.Marshal(existing) if err != nil { return nil, apitypes.StrategicMergePatchType, err } expectedJSON, err := json.Marshal(expected.Object) if err != nil { return nil, apitypes.StrategicMergePatchType, err } // Get a versioned object versionedObject := kube.AsVersioned(expected) // Unstructured objects, such as CRDs, may not have an not registered error // returned from ConvertToVersion. Anything that's unstructured should // use the jsonpatch.CreateMergePatch. Strategic Merge Patch is not supported // on objects like CRDs. _, isUnstructured := versionedObject.(runtime.Unstructured) // On newer K8s versions, CRDs aren't unstructured but have a dedicated type _, isV1CRD := versionedObject.(*apiextv1.CustomResourceDefinition) _, isV1beta1CRD := versionedObject.(*apiextv1beta1.CustomResourceDefinition) isCRD := isV1CRD || isV1beta1CRD if isUnstructured || isCRD { // fall back to generic JSON merge patch patch, err := createJSONMergePatch(existingJSON, expectedJSON) return patch, apitypes.JSONPatchType, err } patchMeta, err := strategicpatch.NewPatchMetaFromStruct(versionedObject) if err != nil { return nil, apitypes.StrategicMergePatchType, err } patch, err := strategicpatch.CreateThreeWayMergePatch(expectedJSON, expectedJSON, existingJSON, patchMeta, true) if err != nil { return nil, apitypes.StrategicMergePatchType, err } // An empty patch could be in the form of "{}" which represents an empty map out of the 3-way merge; // filter them out here too to avoid sending the apiserver empty patch requests. 
if len(patch) == 0 || bytes.Equal(patch, []byte("{}")) { return nil, apitypes.StrategicMergePatchType, nil } return patch, apitypes.StrategicMergePatchType, nil } func createJSONMergePatch(existingJSON, expectedJSON []byte) ([]byte, error) { ops, err := jsonpatch.CreatePatch(existingJSON, expectedJSON) if err != nil { return nil, err } // We ignore the "remove" operations from the full patch because they are // fields added by Kubernetes or by the user after the existing release // resource has been applied. The goal for this patch is to make sure that // the fields managed by the Helm chart are applied. // All "add" operations without a value (null) can be ignored patchOps := make([]jsonpatch.JsonPatchOperation, 0) for _, op := range ops { if op.Operation != "remove" && !(op.Operation == "add" && op.Value == nil) { patchOps = append(patchOps, op) } } // If there are no patch operations, return nil. Callers are expected // to check for a nil response and skip the patch operation to avoid // unnecessary chatter with the API server. if len(patchOps) == 0 { return nil, nil } return json.Marshal(patchOps) } // UninstallRelease performs a Helm release uninstall. func (m manager) UninstallRelease(ctx context.Context, opts ...UninstallOption) (*rpb.Release, error) { uninstall := action.NewUninstall(m.actionConfig) for _, o := range opts { if err := o(uninstall); err != nil { return nil, fmt.Errorf("failed to apply uninstall option: %w", err) } } uninstallResponse, err := uninstall.Run(m.releaseName) if uninstallResponse == nil { return nil, err } return uninstallResponse.Release, err } // CleanupRelease deletes resources if they are not deleted already. // Return true if all the resources are deleted, false otherwise. func (m manager) CleanupRelease(ctx context.Context, manifest string) (bool, error) { dc, err := m.actionConfig.RESTClientGetter.ToDiscoveryClient() if err != nil { return false, fmt.Errorf("failed to get Kubernetes discovery client: %w", err) } apiVersions, err := action.GetVersionSet(dc) if err != nil && !discovery.IsGroupDiscoveryFailedError(err) { return false, fmt.Errorf("failed to get apiVersions from Kubernetes: %w", err) } manifests := releaseutil.SplitManifests(manifest) _, files, err := releaseutil.SortManifests(manifests, apiVersions, releaseutil.UninstallOrder) if err != nil { return false, fmt.Errorf("failed to sort manifests: %w", err) } // do not delete resources that are annotated with the Helm resource policy 'keep' _, filesToDelete := manifestutil.FilterManifestsToKeep(files) var builder strings.Builder for _, file := range filesToDelete { builder.WriteString("\n---\n" + file.Content) } resources, err := m.kubeClient.Build(strings.NewReader(builder.String()), false) if err != nil { return false, fmt.Errorf("failed to build resources from manifests: %w", err) } if resources == nil || len(resources) <= 0 { return true, nil } for _, resource := range resources { err = resource.Get() if err != nil { if apierrors.IsNotFound(err) { continue // resource is already delete, check the next one. } return false, fmt.Errorf("failed to get resource: %w", err) } // found at least one resource that is not deleted so just delete everything again. _, errs := m.kubeClient.Delete(resources) if len(errs) > 0 { return false, fmt.Errorf("failed to delete resources: %v", apiutilerrors.NewAggregate(errs)) } return false, nil } return true, nil }
{ return err != nil && strings.Contains(err.Error(), "not found") }
identifier_body
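
createJSONMergePatch drops "remove" operations and null "add" operations before marshalling the patch, so only the fields managed by the chart are re-applied. The sketch below mirrors that filtering step with a simplified op struct standing in for the jsonpatch operation type; the struct definition is illustrative, not the library's exact API.

package main

import (
	"encoding/json"
	"fmt"
)

// op is a simplified stand-in for the patch operation type returned by
// jsonpatch.CreatePatch; the real type carries the same op/path/value idea.
type op struct {
	Operation string      `json:"op"`
	Path      string      `json:"path"`
	Value     interface{} `json:"value,omitempty"`
}

// filterOps mirrors the filtering in createJSONMergePatch: "remove" operations
// are dropped (they target fields added by Kubernetes or the user), and "add"
// operations with a nil value are ignored.
func filterOps(ops []op) []op {
	kept := make([]op, 0, len(ops))
	for _, o := range ops {
		if o.Operation == "remove" {
			continue // leave the cluster-added field (e.g. status) untouched
		}
		if o.Operation == "add" && o.Value == nil {
			continue // a null add carries no value worth sending
		}
		kept = append(kept, o)
	}
	return kept
}

func main() {
	ops := []op{
		{Operation: "remove", Path: "/status"},
		{Operation: "add", Path: "/metadata/labels/extra", Value: nil},
		{Operation: "replace", Path: "/spec/replicas", Value: 3},
	}
	kept := filterOps(ops)
	if len(kept) == 0 {
		fmt.Println("nothing to patch") // callers skip the API call in this case
		return
	}
	b, _ := json.Marshal(kept)
	fmt.Println(string(b)) // the JSON patch body that would be sent to the API server
}
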
manager.go
// Copyright 2018 The Operator-SDK Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package release import ( "bytes" "context" "encoding/json" "errors" "fmt" "strings" jsonpatch "gomodules.xyz/jsonpatch/v3" "helm.sh/helm/v3/pkg/action" cpb "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/kube" rpb "helm.sh/helm/v3/pkg/release" "helm.sh/helm/v3/pkg/releaseutil" "helm.sh/helm/v3/pkg/storage" "helm.sh/helm/v3/pkg/storage/driver" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" apitypes "k8s.io/apimachinery/pkg/types" apiutilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/discovery" "github.com/operator-framework/operator-sdk/internal/helm/internal/types" "github.com/operator-framework/operator-sdk/internal/helm/manifestutil" ) // Manager manages a Helm release. It can install, upgrade, reconcile, // and uninstall a release. type Manager interface { ReleaseName() string IsInstalled() bool IsUpgradeRequired() bool Sync(context.Context) error InstallRelease(context.Context, ...InstallOption) (*rpb.Release, error) UpgradeRelease(context.Context, ...UpgradeOption) (*rpb.Release, *rpb.Release, error) ReconcileRelease(context.Context) (*rpb.Release, error) UninstallRelease(context.Context, ...UninstallOption) (*rpb.Release, error) CleanupRelease(context.Context, string) (bool, error) } type manager struct { actionConfig *action.Configuration storageBackend *storage.Storage kubeClient kube.Interface releaseName string namespace string values map[string]interface{} status *types.HelmAppStatus isInstalled bool isUpgradeRequired bool deployedRelease *rpb.Release chart *cpb.Chart } type InstallOption func(*action.Install) error type UpgradeOption func(*action.Upgrade) error type UninstallOption func(*action.Uninstall) error // ReleaseName returns the name of the release. func (m manager)
() string { return m.releaseName } func (m manager) IsInstalled() bool { return m.isInstalled } func (m manager) IsUpgradeRequired() bool { return m.isUpgradeRequired } // Sync ensures the Helm storage backend is in sync with the status of the // custom resource. func (m *manager) Sync(ctx context.Context) error { // Get release history for this release name releases, err := m.storageBackend.History(m.releaseName) if err != nil && !notFoundErr(err) { return fmt.Errorf("failed to retrieve release history: %w", err) } // Cleanup non-deployed release versions. If all release versions are // non-deployed, this will ensure that failed installations are correctly // retried. for _, rel := range releases { if rel.Info != nil && rel.Info.Status != rpb.StatusDeployed { _, err := m.storageBackend.Delete(rel.Name, rel.Version) if err != nil && !notFoundErr(err) { return fmt.Errorf("failed to delete stale release version: %w", err) } } } // Load the most recently deployed release from the storage backend. deployedRelease, err := m.getDeployedRelease() if errors.Is(err, driver.ErrReleaseNotFound) { return nil } if err != nil { return fmt.Errorf("failed to get deployed release: %w", err) } m.deployedRelease = deployedRelease m.isInstalled = true // Get the next candidate release to determine if an upgrade is necessary. candidateRelease, err := m.getCandidateRelease(m.namespace, m.releaseName, m.chart, m.values) if err != nil { return fmt.Errorf("failed to get candidate release: %w", err) } if deployedRelease.Manifest != candidateRelease.Manifest { m.isUpgradeRequired = true } return nil } func notFoundErr(err error) bool { return err != nil && strings.Contains(err.Error(), "not found") } func (m manager) getDeployedRelease() (*rpb.Release, error) { deployedRelease, err := m.storageBackend.Deployed(m.releaseName) if err != nil { if strings.Contains(err.Error(), "has no deployed releases") { return nil, driver.ErrReleaseNotFound } return nil, err } return deployedRelease, nil } func (m manager) getCandidateRelease(namespace, name string, chart *cpb.Chart, values map[string]interface{}) (*rpb.Release, error) { upgrade := action.NewUpgrade(m.actionConfig) upgrade.Namespace = namespace upgrade.DryRun = true return upgrade.Run(name, chart, values) } // InstallRelease performs a Helm release install. func (m manager) InstallRelease(ctx context.Context, opts ...InstallOption) (*rpb.Release, error) { install := action.NewInstall(m.actionConfig) install.ReleaseName = m.releaseName install.Namespace = m.namespace for _, o := range opts { if err := o(install); err != nil { return nil, fmt.Errorf("failed to apply install option: %w", err) } } installedRelease, err := install.Run(m.chart, m.values) if err != nil { // Workaround for helm/helm#3338 if installedRelease != nil { uninstall := action.NewUninstall(m.actionConfig) _, uninstallErr := uninstall.Run(m.releaseName) // In certain cases, InstallRelease will return a partial release in // the response even when it doesn't record the release in its release // store (e.g. when there is an error rendering the release manifest). // In that case the rollback will fail with a not found error because // there was nothing to rollback. // // Only log a message about a rollback failure if the failure was caused // by something other than the release not being found. 
if uninstallErr != nil && !notFoundErr(uninstallErr) { return nil, fmt.Errorf("failed installation (%s) and failed rollback: %w", err, uninstallErr) } } return nil, fmt.Errorf("failed to install release: %w", err) } return installedRelease, nil } func ForceUpgrade(force bool) UpgradeOption { return func(u *action.Upgrade) error { u.Force = force return nil } } // UpgradeRelease performs a Helm release upgrade. func (m manager) UpgradeRelease(ctx context.Context, opts ...UpgradeOption) (*rpb.Release, *rpb.Release, error) { upgrade := action.NewUpgrade(m.actionConfig) upgrade.Namespace = m.namespace for _, o := range opts { if err := o(upgrade); err != nil { return nil, nil, fmt.Errorf("failed to apply upgrade option: %w", err) } } upgradedRelease, err := upgrade.Run(m.releaseName, m.chart, m.values) if err != nil { // Workaround for helm/helm#3338 if upgradedRelease != nil { rollback := action.NewRollback(m.actionConfig) rollback.Force = true // As of Helm 2.13, if UpgradeRelease returns a non-nil release, that // means the release was also recorded in the release store. // Therefore, we should perform the rollback when we have a non-nil // release. Any rollback error here would be unexpected, so always // log both the upgrade and rollback errors. rollbackErr := rollback.Run(m.releaseName) if rollbackErr != nil { return nil, nil, fmt.Errorf("failed upgrade (%s) and failed rollback: %w", err, rollbackErr) } } return nil, nil, fmt.Errorf("failed to upgrade release: %w", err) } return m.deployedRelease, upgradedRelease, err } // ReconcileRelease creates or patches resources as necessary to match the // deployed release's manifest. func (m manager) ReconcileRelease(ctx context.Context) (*rpb.Release, error) { err := reconcileRelease(ctx, m.kubeClient, m.deployedRelease.Manifest) return m.deployedRelease, err } func reconcileRelease(_ context.Context, kubeClient kube.Interface, expectedManifest string) error { expectedInfos, err := kubeClient.Build(bytes.NewBufferString(expectedManifest), false) if err != nil { return err } return expectedInfos.Visit(func(expected *resource.Info, err error) error { if err != nil { return fmt.Errorf("visit error: %w", err) } helper := resource.NewHelper(expected.Client, expected.Mapping) existing, err := helper.Get(expected.Namespace, expected.Name) if apierrors.IsNotFound(err) { if _, err := helper.Create(expected.Namespace, true, expected.Object); err != nil { return fmt.Errorf("create error: %s", err) } return nil } else if err != nil { return fmt.Errorf("could not get object: %w", err) } // Replicate helm's patch creation, which will create a Three-Way-Merge patch for // native kubernetes Objects and fall back to a JSON merge patch for unstructured Objects such as CRDs // We also extend the JSON merge patch by ignoring "remove" operations for fields added by kubernetes // Reference in the helm source code: // https://github.com/helm/helm/blob/1c9b54ad7f62a5ce12f87c3ae55136ca20f09c98/pkg/kube/client.go#L392 patch, patchType, err := createPatch(existing, expected) if err != nil { return fmt.Errorf("error creating patch: %w", err) } if patch == nil { // nothing to do return nil } _, err = helper.Patch(expected.Namespace, expected.Name, patchType, patch, &metav1.PatchOptions{}) if err != nil { return fmt.Errorf("patch error: %w", err) } return nil }) } func createPatch(existing runtime.Object, expected *resource.Info) ([]byte, apitypes.PatchType, error) { existingJSON, err := json.Marshal(existing) if err != nil { return nil, apitypes.StrategicMergePatchType, err 
} expectedJSON, err := json.Marshal(expected.Object) if err != nil { return nil, apitypes.StrategicMergePatchType, err } // Get a versioned object versionedObject := kube.AsVersioned(expected) // Unstructured objects, such as CRDs, may not have an not registered error // returned from ConvertToVersion. Anything that's unstructured should // use the jsonpatch.CreateMergePatch. Strategic Merge Patch is not supported // on objects like CRDs. _, isUnstructured := versionedObject.(runtime.Unstructured) // On newer K8s versions, CRDs aren't unstructured but have a dedicated type _, isV1CRD := versionedObject.(*apiextv1.CustomResourceDefinition) _, isV1beta1CRD := versionedObject.(*apiextv1beta1.CustomResourceDefinition) isCRD := isV1CRD || isV1beta1CRD if isUnstructured || isCRD { // fall back to generic JSON merge patch patch, err := createJSONMergePatch(existingJSON, expectedJSON) return patch, apitypes.JSONPatchType, err } patchMeta, err := strategicpatch.NewPatchMetaFromStruct(versionedObject) if err != nil { return nil, apitypes.StrategicMergePatchType, err } patch, err := strategicpatch.CreateThreeWayMergePatch(expectedJSON, expectedJSON, existingJSON, patchMeta, true) if err != nil { return nil, apitypes.StrategicMergePatchType, err } // An empty patch could be in the form of "{}" which represents an empty map out of the 3-way merge; // filter them out here too to avoid sending the apiserver empty patch requests. if len(patch) == 0 || bytes.Equal(patch, []byte("{}")) { return nil, apitypes.StrategicMergePatchType, nil } return patch, apitypes.StrategicMergePatchType, nil } func createJSONMergePatch(existingJSON, expectedJSON []byte) ([]byte, error) { ops, err := jsonpatch.CreatePatch(existingJSON, expectedJSON) if err != nil { return nil, err } // We ignore the "remove" operations from the full patch because they are // fields added by Kubernetes or by the user after the existing release // resource has been applied. The goal for this patch is to make sure that // the fields managed by the Helm chart are applied. // All "add" operations without a value (null) can be ignored patchOps := make([]jsonpatch.JsonPatchOperation, 0) for _, op := range ops { if op.Operation != "remove" && !(op.Operation == "add" && op.Value == nil) { patchOps = append(patchOps, op) } } // If there are no patch operations, return nil. Callers are expected // to check for a nil response and skip the patch operation to avoid // unnecessary chatter with the API server. if len(patchOps) == 0 { return nil, nil } return json.Marshal(patchOps) } // UninstallRelease performs a Helm release uninstall. func (m manager) UninstallRelease(ctx context.Context, opts ...UninstallOption) (*rpb.Release, error) { uninstall := action.NewUninstall(m.actionConfig) for _, o := range opts { if err := o(uninstall); err != nil { return nil, fmt.Errorf("failed to apply uninstall option: %w", err) } } uninstallResponse, err := uninstall.Run(m.releaseName) if uninstallResponse == nil { return nil, err } return uninstallResponse.Release, err } // CleanupRelease deletes resources if they are not deleted already. // Return true if all the resources are deleted, false otherwise. 
func (m manager) CleanupRelease(ctx context.Context, manifest string) (bool, error) { dc, err := m.actionConfig.RESTClientGetter.ToDiscoveryClient() if err != nil { return false, fmt.Errorf("failed to get Kubernetes discovery client: %w", err) } apiVersions, err := action.GetVersionSet(dc) if err != nil && !discovery.IsGroupDiscoveryFailedError(err) { return false, fmt.Errorf("failed to get apiVersions from Kubernetes: %w", err) } manifests := releaseutil.SplitManifests(manifest) _, files, err := releaseutil.SortManifests(manifests, apiVersions, releaseutil.UninstallOrder) if err != nil { return false, fmt.Errorf("failed to sort manifests: %w", err) } // do not delete resources that are annotated with the Helm resource policy 'keep' _, filesToDelete := manifestutil.FilterManifestsToKeep(files) var builder strings.Builder for _, file := range filesToDelete { builder.WriteString("\n---\n" + file.Content) } resources, err := m.kubeClient.Build(strings.NewReader(builder.String()), false) if err != nil { return false, fmt.Errorf("failed to build resources from manifests: %w", err) } if resources == nil || len(resources) <= 0 { return true, nil } for _, resource := range resources { err = resource.Get() if err != nil { if apierrors.IsNotFound(err) { continue // resource is already delete, check the next one. } return false, fmt.Errorf("failed to get resource: %w", err) } // found at least one resource that is not deleted so just delete everything again. _, errs := m.kubeClient.Delete(resources) if len(errs) > 0 { return false, fmt.Errorf("failed to delete resources: %v", apiutilerrors.NewAggregate(errs)) } return false, nil } return true, nil }
ReleaseName
identifier_name
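
InstallOption, UpgradeOption and UninstallOption are plain functional options: each one mutates the underlying Helm action and may return an error, and the first failure aborts the call. Here is a self-contained sketch of the same pattern with a stand-in install struct; its fields and the helper options are illustrative, not helm's actual action.Install definition.

package main

import "fmt"

// install is a stand-in for helm's action.Install; only illustrative fields
// are included here, not the real struct definition.
type install struct {
	ReleaseName string
	Namespace   string
	DryRun      bool
}

// InstallOption mirrors the functional-option type used by the manager:
// each option mutates the action and may fail.
type InstallOption func(*install) error

func withNamespace(ns string) InstallOption {
	return func(i *install) error {
		if ns == "" {
			return fmt.Errorf("namespace must not be empty")
		}
		i.Namespace = ns
		return nil
	}
}

func withDryRun() InstallOption {
	return func(i *install) error {
		i.DryRun = true
		return nil
	}
}

// installRelease applies the options the way InstallRelease does:
// the first option that fails aborts the whole call.
func installRelease(name string, opts ...InstallOption) (*install, error) {
	inst := &install{ReleaseName: name}
	for _, o := range opts {
		if err := o(inst); err != nil {
			return nil, fmt.Errorf("failed to apply install option: %w", err)
		}
	}
	return inst, nil
}

func main() {
	inst, err := installRelease("my-release", withNamespace("demo"), withDryRun())
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Printf("%+v\n", *inst)
}
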
manager.go
// Copyright 2018 The Operator-SDK Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package release import ( "bytes" "context" "encoding/json" "errors" "fmt" "strings" jsonpatch "gomodules.xyz/jsonpatch/v3" "helm.sh/helm/v3/pkg/action" cpb "helm.sh/helm/v3/pkg/chart" "helm.sh/helm/v3/pkg/kube" rpb "helm.sh/helm/v3/pkg/release" "helm.sh/helm/v3/pkg/releaseutil" "helm.sh/helm/v3/pkg/storage" "helm.sh/helm/v3/pkg/storage/driver" apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" apitypes "k8s.io/apimachinery/pkg/types" apiutilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/discovery" "github.com/operator-framework/operator-sdk/internal/helm/internal/types" "github.com/operator-framework/operator-sdk/internal/helm/manifestutil" ) // Manager manages a Helm release. It can install, upgrade, reconcile, // and uninstall a release. type Manager interface { ReleaseName() string IsInstalled() bool IsUpgradeRequired() bool Sync(context.Context) error InstallRelease(context.Context, ...InstallOption) (*rpb.Release, error) UpgradeRelease(context.Context, ...UpgradeOption) (*rpb.Release, *rpb.Release, error) ReconcileRelease(context.Context) (*rpb.Release, error) UninstallRelease(context.Context, ...UninstallOption) (*rpb.Release, error) CleanupRelease(context.Context, string) (bool, error) } type manager struct { actionConfig *action.Configuration storageBackend *storage.Storage kubeClient kube.Interface releaseName string namespace string values map[string]interface{} status *types.HelmAppStatus isInstalled bool isUpgradeRequired bool deployedRelease *rpb.Release chart *cpb.Chart } type InstallOption func(*action.Install) error type UpgradeOption func(*action.Upgrade) error type UninstallOption func(*action.Uninstall) error // ReleaseName returns the name of the release. func (m manager) ReleaseName() string { return m.releaseName } func (m manager) IsInstalled() bool { return m.isInstalled } func (m manager) IsUpgradeRequired() bool { return m.isUpgradeRequired } // Sync ensures the Helm storage backend is in sync with the status of the // custom resource. func (m *manager) Sync(ctx context.Context) error { // Get release history for this release name releases, err := m.storageBackend.History(m.releaseName) if err != nil && !notFoundErr(err) { return fmt.Errorf("failed to retrieve release history: %w", err) } // Cleanup non-deployed release versions. If all release versions are // non-deployed, this will ensure that failed installations are correctly // retried. 
for _, rel := range releases { if rel.Info != nil && rel.Info.Status != rpb.StatusDeployed { _, err := m.storageBackend.Delete(rel.Name, rel.Version) if err != nil && !notFoundErr(err) { return fmt.Errorf("failed to delete stale release version: %w", err) } } } // Load the most recently deployed release from the storage backend. deployedRelease, err := m.getDeployedRelease() if errors.Is(err, driver.ErrReleaseNotFound) { return nil } if err != nil { return fmt.Errorf("failed to get deployed release: %w", err) } m.deployedRelease = deployedRelease m.isInstalled = true // Get the next candidate release to determine if an upgrade is necessary. candidateRelease, err := m.getCandidateRelease(m.namespace, m.releaseName, m.chart, m.values) if err != nil { return fmt.Errorf("failed to get candidate release: %w", err) } if deployedRelease.Manifest != candidateRelease.Manifest { m.isUpgradeRequired = true } return nil } func notFoundErr(err error) bool { return err != nil && strings.Contains(err.Error(), "not found") } func (m manager) getDeployedRelease() (*rpb.Release, error) { deployedRelease, err := m.storageBackend.Deployed(m.releaseName) if err != nil { if strings.Contains(err.Error(), "has no deployed releases") { return nil, driver.ErrReleaseNotFound } return nil, err } return deployedRelease, nil } func (m manager) getCandidateRelease(namespace, name string, chart *cpb.Chart, values map[string]interface{}) (*rpb.Release, error) { upgrade := action.NewUpgrade(m.actionConfig) upgrade.Namespace = namespace upgrade.DryRun = true return upgrade.Run(name, chart, values) } // InstallRelease performs a Helm release install. func (m manager) InstallRelease(ctx context.Context, opts ...InstallOption) (*rpb.Release, error) { install := action.NewInstall(m.actionConfig) install.ReleaseName = m.releaseName install.Namespace = m.namespace for _, o := range opts { if err := o(install); err != nil { return nil, fmt.Errorf("failed to apply install option: %w", err) } } installedRelease, err := install.Run(m.chart, m.values) if err != nil { // Workaround for helm/helm#3338 if installedRelease != nil {
// the response even when it doesn't record the release in its release // store (e.g. when there is an error rendering the release manifest). // In that case the rollback will fail with a not found error because // there was nothing to rollback. // // Only log a message about a rollback failure if the failure was caused // by something other than the release not being found. if uninstallErr != nil && !notFoundErr(uninstallErr) { return nil, fmt.Errorf("failed installation (%s) and failed rollback: %w", err, uninstallErr) } } return nil, fmt.Errorf("failed to install release: %w", err) } return installedRelease, nil } func ForceUpgrade(force bool) UpgradeOption { return func(u *action.Upgrade) error { u.Force = force return nil } } // UpgradeRelease performs a Helm release upgrade. func (m manager) UpgradeRelease(ctx context.Context, opts ...UpgradeOption) (*rpb.Release, *rpb.Release, error) { upgrade := action.NewUpgrade(m.actionConfig) upgrade.Namespace = m.namespace for _, o := range opts { if err := o(upgrade); err != nil { return nil, nil, fmt.Errorf("failed to apply upgrade option: %w", err) } } upgradedRelease, err := upgrade.Run(m.releaseName, m.chart, m.values) if err != nil { // Workaround for helm/helm#3338 if upgradedRelease != nil { rollback := action.NewRollback(m.actionConfig) rollback.Force = true // As of Helm 2.13, if UpgradeRelease returns a non-nil release, that // means the release was also recorded in the release store. // Therefore, we should perform the rollback when we have a non-nil // release. Any rollback error here would be unexpected, so always // log both the upgrade and rollback errors. rollbackErr := rollback.Run(m.releaseName) if rollbackErr != nil { return nil, nil, fmt.Errorf("failed upgrade (%s) and failed rollback: %w", err, rollbackErr) } } return nil, nil, fmt.Errorf("failed to upgrade release: %w", err) } return m.deployedRelease, upgradedRelease, err } // ReconcileRelease creates or patches resources as necessary to match the // deployed release's manifest. 
func (m manager) ReconcileRelease(ctx context.Context) (*rpb.Release, error) { err := reconcileRelease(ctx, m.kubeClient, m.deployedRelease.Manifest) return m.deployedRelease, err } func reconcileRelease(_ context.Context, kubeClient kube.Interface, expectedManifest string) error { expectedInfos, err := kubeClient.Build(bytes.NewBufferString(expectedManifest), false) if err != nil { return err } return expectedInfos.Visit(func(expected *resource.Info, err error) error { if err != nil { return fmt.Errorf("visit error: %w", err) } helper := resource.NewHelper(expected.Client, expected.Mapping) existing, err := helper.Get(expected.Namespace, expected.Name) if apierrors.IsNotFound(err) { if _, err := helper.Create(expected.Namespace, true, expected.Object); err != nil { return fmt.Errorf("create error: %s", err) } return nil } else if err != nil { return fmt.Errorf("could not get object: %w", err) } // Replicate helm's patch creation, which will create a Three-Way-Merge patch for // native kubernetes Objects and fall back to a JSON merge patch for unstructured Objects such as CRDs // We also extend the JSON merge patch by ignoring "remove" operations for fields added by kubernetes // Reference in the helm source code: // https://github.com/helm/helm/blob/1c9b54ad7f62a5ce12f87c3ae55136ca20f09c98/pkg/kube/client.go#L392 patch, patchType, err := createPatch(existing, expected) if err != nil { return fmt.Errorf("error creating patch: %w", err) } if patch == nil { // nothing to do return nil } _, err = helper.Patch(expected.Namespace, expected.Name, patchType, patch, &metav1.PatchOptions{}) if err != nil { return fmt.Errorf("patch error: %w", err) } return nil }) } func createPatch(existing runtime.Object, expected *resource.Info) ([]byte, apitypes.PatchType, error) { existingJSON, err := json.Marshal(existing) if err != nil { return nil, apitypes.StrategicMergePatchType, err } expectedJSON, err := json.Marshal(expected.Object) if err != nil { return nil, apitypes.StrategicMergePatchType, err } // Get a versioned object versionedObject := kube.AsVersioned(expected) // Unstructured objects, such as CRDs, may not have an not registered error // returned from ConvertToVersion. Anything that's unstructured should // use the jsonpatch.CreateMergePatch. Strategic Merge Patch is not supported // on objects like CRDs. _, isUnstructured := versionedObject.(runtime.Unstructured) // On newer K8s versions, CRDs aren't unstructured but have a dedicated type _, isV1CRD := versionedObject.(*apiextv1.CustomResourceDefinition) _, isV1beta1CRD := versionedObject.(*apiextv1beta1.CustomResourceDefinition) isCRD := isV1CRD || isV1beta1CRD if isUnstructured || isCRD { // fall back to generic JSON merge patch patch, err := createJSONMergePatch(existingJSON, expectedJSON) return patch, apitypes.JSONPatchType, err } patchMeta, err := strategicpatch.NewPatchMetaFromStruct(versionedObject) if err != nil { return nil, apitypes.StrategicMergePatchType, err } patch, err := strategicpatch.CreateThreeWayMergePatch(expectedJSON, expectedJSON, existingJSON, patchMeta, true) if err != nil { return nil, apitypes.StrategicMergePatchType, err } // An empty patch could be in the form of "{}" which represents an empty map out of the 3-way merge; // filter them out here too to avoid sending the apiserver empty patch requests. 
if len(patch) == 0 || bytes.Equal(patch, []byte("{}")) { return nil, apitypes.StrategicMergePatchType, nil } return patch, apitypes.StrategicMergePatchType, nil } func createJSONMergePatch(existingJSON, expectedJSON []byte) ([]byte, error) { ops, err := jsonpatch.CreatePatch(existingJSON, expectedJSON) if err != nil { return nil, err } // We ignore the "remove" operations from the full patch because they are // fields added by Kubernetes or by the user after the existing release // resource has been applied. The goal for this patch is to make sure that // the fields managed by the Helm chart are applied. // All "add" operations without a value (null) can be ignored patchOps := make([]jsonpatch.JsonPatchOperation, 0) for _, op := range ops { if op.Operation != "remove" && !(op.Operation == "add" && op.Value == nil) { patchOps = append(patchOps, op) } } // If there are no patch operations, return nil. Callers are expected // to check for a nil response and skip the patch operation to avoid // unnecessary chatter with the API server. if len(patchOps) == 0 { return nil, nil } return json.Marshal(patchOps) } // UninstallRelease performs a Helm release uninstall. func (m manager) UninstallRelease(ctx context.Context, opts ...UninstallOption) (*rpb.Release, error) { uninstall := action.NewUninstall(m.actionConfig) for _, o := range opts { if err := o(uninstall); err != nil { return nil, fmt.Errorf("failed to apply uninstall option: %w", err) } } uninstallResponse, err := uninstall.Run(m.releaseName) if uninstallResponse == nil { return nil, err } return uninstallResponse.Release, err } // CleanupRelease deletes resources if they are not deleted already. // Return true if all the resources are deleted, false otherwise. func (m manager) CleanupRelease(ctx context.Context, manifest string) (bool, error) { dc, err := m.actionConfig.RESTClientGetter.ToDiscoveryClient() if err != nil { return false, fmt.Errorf("failed to get Kubernetes discovery client: %w", err) } apiVersions, err := action.GetVersionSet(dc) if err != nil && !discovery.IsGroupDiscoveryFailedError(err) { return false, fmt.Errorf("failed to get apiVersions from Kubernetes: %w", err) } manifests := releaseutil.SplitManifests(manifest) _, files, err := releaseutil.SortManifests(manifests, apiVersions, releaseutil.UninstallOrder) if err != nil { return false, fmt.Errorf("failed to sort manifests: %w", err) } // do not delete resources that are annotated with the Helm resource policy 'keep' _, filesToDelete := manifestutil.FilterManifestsToKeep(files) var builder strings.Builder for _, file := range filesToDelete { builder.WriteString("\n---\n" + file.Content) } resources, err := m.kubeClient.Build(strings.NewReader(builder.String()), false) if err != nil { return false, fmt.Errorf("failed to build resources from manifests: %w", err) } if resources == nil || len(resources) <= 0 { return true, nil } for _, resource := range resources { err = resource.Get() if err != nil { if apierrors.IsNotFound(err) { continue // resource is already delete, check the next one. } return false, fmt.Errorf("failed to get resource: %w", err) } // found at least one resource that is not deleted so just delete everything again. _, errs := m.kubeClient.Delete(resources) if len(errs) > 0 { return false, fmt.Errorf("failed to delete resources: %v", apiutilerrors.NewAggregate(errs)) } return false, nil } return true, nil }
uninstall := action.NewUninstall(m.actionConfig) _, uninstallErr := uninstall.Run(m.releaseName) // In certain cases, InstallRelease will return a partial release in
random_line_split
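
CleanupRelease rebuilds a single multi-document manifest from the files that are not annotated with the 'keep' resource policy, separating each document with '---' before handing the stream to the kube client. A small sketch of that joining step follows; manifestFile is a simplified stand-in for the releaseutil manifest entries, not the real type.

package main

import (
	"fmt"
	"strings"
)

// manifestFile is a simplified stand-in for the manifest entries that
// CleanupRelease iterates over after FilterManifestsToKeep has removed
// resources annotated with the 'keep' resource policy.
type manifestFile struct {
	Name    string
	Content string
}

// joinManifests rebuilds one multi-document YAML stream, separating each
// document with '---', which is how CleanupRelease prepares the input for
// the kube client's Build call.
func joinManifests(files []manifestFile) string {
	var b strings.Builder
	for _, f := range files {
		b.WriteString("\n---\n" + f.Content)
	}
	return b.String()
}

func main() {
	files := []manifestFile{
		{Name: "cm.yaml", Content: "apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: demo"},
		{Name: "sa.yaml", Content: "apiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: demo"},
	}
	fmt.Println(joinManifests(files))
}
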
player.go
package proxy import ( "context" "crypto/rand" "encoding/binary" "encoding/hex" "encoding/json" "errors" "go.minekube.com/common/minecraft/component" "go.minekube.com/common/minecraft/component/codec/legacy" "go.minekube.com/gate/pkg/command"
"go.minekube.com/gate/pkg/edition/java/proto/packet/title" "go.minekube.com/gate/pkg/edition/java/proto/util" "go.minekube.com/gate/pkg/edition/java/proto/version" "go.minekube.com/gate/pkg/edition/java/proxy/message" "go.minekube.com/gate/pkg/edition/java/proxy/player" "go.minekube.com/gate/pkg/gate/proto" "go.minekube.com/gate/pkg/runtime/logr" "go.minekube.com/gate/pkg/util/permission" "go.minekube.com/gate/pkg/util/sets" "go.minekube.com/gate/pkg/util/uuid" "go.uber.org/atomic" "net" "strings" "sync" "time" ) // Player is a connected Minecraft player. type Player interface { Inbound command.Source message.ChannelMessageSource message.ChannelMessageSink ID() uuid.UUID // The Minecraft ID of the player. Username() string // The username of the player. // CurrentServer returns the current server connection of the player. CurrentServer() ServerConnection // May be nil, if there is no backend server connection! Ping() time.Duration // The player's ping or -1 if currently unknown. OnlineMode() bool // Whether the player was authenticated with Mojang's session servers. // CreateConnectionRequest creates a connection request to begin switching the backend server. CreateConnectionRequest(target RegisteredServer) ConnectionRequest GameProfile() profile.GameProfile // Returns the player's game profile. Settings() player.Settings // The players client settings. Returns player.DefaultSettings if not yet unknown. // Disconnect disconnects the player with a reason. // Once called, further interface calls to this player become undefined. Disconnect(reason component.Component) // SpoofChatInput sends chats input onto the player's current server as if // they typed it into the client chat box. SpoofChatInput(input string) error // SendResourcePack sends the specified resource pack from url to the user. If at all possible, // send the resource pack with a sha1 hash using SendResourcePackWithHash. To monitor the status // of the sent resource pack, subscribe to PlayerResourcePackStatusEvent. SendResourcePack(url string) error // SendResourcePackWithHash sends the specified resource pack from url to the user, // using the specified 20-byte SHA-1 hash of the resource pack file. To monitor the // status of the sent resource pack, subscribe to PlayerResourcePackStatusEvent. SendResourcePackWithHash(url string, sha1Hash []byte) error // SendActionBar sends an action bar to the player. SendActionBar(msg component.Component) error // SendMessageWith sends a chat message with optional modifications. SendMessageWith(msg component.Component, opts ...MessageOption) error player.TabList // TODO add title and more } type connectedPlayer struct { *minecraftConn log logr.Logger virtualHost net.Addr onlineMode bool profile *profile.GameProfile ping atomic.Duration permFunc permission.Func // This field is true if this connection is being disconnected // due to another connection logging in with the same GameProfile. 
disconnectDueToDuplicateConnection atomic.Bool pluginChannelsMu sync.RWMutex // Protects following field pluginChannels sets.String // Known plugin channels *tabList // Player's tab list mu sync.RWMutex // Protects following fields connectedServer_ *serverConnection connInFlight *serverConnection settings player.Settings modInfo *modinfo.ModInfo connPhase clientConnectionPhase serversToTry []string // names of servers to try if we got disconnected from previous tryIndex int } var _ Player = (*connectedPlayer)(nil) func newConnectedPlayer( conn *minecraftConn, profile *profile.GameProfile, virtualHost net.Addr, onlineMode bool, ) *connectedPlayer { ping := atomic.Duration{} ping.Store(-1) return &connectedPlayer{ minecraftConn: conn, log: conn.log.WithName("player").WithValues( "name", profile.Name, "id", profile.ID), profile: profile, virtualHost: virtualHost, onlineMode: onlineMode, pluginChannels: sets.NewString(), // Should we limit the size to 1024 channels? connPhase: conn.Type().initialClientPhase(), ping: ping, tabList: newTabList(conn), permFunc: func(string) permission.TriState { return permission.Undefined }, } } func (p *connectedPlayer) connectionInFlight() *serverConnection { p.mu.RLock() defer p.mu.RUnlock() return p.connInFlight } func (p *connectedPlayer) phase() clientConnectionPhase { p.mu.RLock() defer p.mu.RUnlock() return p.connPhase } func (p *connectedPlayer) HasPermission(permission string) bool { return p.PermissionValue(permission).Bool() } func (p *connectedPlayer) PermissionValue(permission string) permission.TriState { return p.permFunc(permission) } func (p *connectedPlayer) Ping() time.Duration { return p.ping.Load() } func (p *connectedPlayer) OnlineMode() bool { return p.onlineMode } func (p *connectedPlayer) GameProfile() profile.GameProfile { return *p.profile } var ( ErrNoBackendConnection = errors.New("player has no backend server connection yet") ErrTooLongChatMessage = errors.New("server bound chat message can not exceed 256 characters") ) func (p *connectedPlayer) SpoofChatInput(input string) error { if len(input) > packet.MaxServerBoundMessageLength { return ErrTooLongChatMessage } serverMc, ok := p.ensureBackendConnection() if !ok { return ErrNoBackendConnection } return serverMc.WritePacket(&packet.Chat{ Message: input, Type: packet.ChatMessageType, }) } func (p *connectedPlayer) ensureBackendConnection() (*minecraftConn, bool) { p.mu.RLock() defer p.mu.RUnlock() if p.connectedServer_ == nil { // Player has no backend connection. return nil, false } serverMc := p.connectedServer_.conn() if serverMc == nil { // Player's backend connection is not yet connected to a server. return nil, false } return serverMc, true } func (p *connectedPlayer) SendResourcePack(url string) error { return p.WritePacket(&packet.ResourcePackRequest{ Url: url, Hash: "", }) } func (p *connectedPlayer) SendResourcePackWithHash(url string, sha1Hash []byte) error { if len(sha1Hash) != 20 { return errors.New("hash length must be 20") } return p.WritePacket(&packet.ResourcePackRequest{ Url: url, Hash: hex.EncodeToString(sha1Hash), }) } func (p *connectedPlayer) VirtualHost() net.Addr { return p.virtualHost } func (p *connectedPlayer) Active() bool { return !p.minecraftConn.Closed() } // MessageOption is an option for Player.SendMessageWith. type MessageOption func(c *packet.Chat) // MessageWithSender modifies the sender identity of the chat message. 
func MessageWithSender(id uuid.UUID) MessageOption { return func(c *packet.Chat) { c.Sender = id } } // MessageType is a chat message type. type MessageType uint8 // Chat message types. const ( // ChatMessageType is a standard chat message. ChatMessageType MessageType = iota // SystemMessageType is a system chat message. // e.g. client is willing to accept messages from commands, // but does not want general chat from other players. SystemMessageType ) // MessageWithType modifies chat message type. func MessageWithType(t MessageType) MessageOption { return func(c *packet.Chat) { if t == SystemMessageType { c.Type = packet.SystemMessageType } else { c.Type = packet.ChatMessageType } } } func (p *connectedPlayer) SendMessage(msg component.Component) error { return p.SendMessageWith(msg) } func (p *connectedPlayer) SendMessageWith(msg component.Component, opts ...MessageOption) error { if msg == nil { return nil // skip nil message } m := new(strings.Builder) if err := util.JsonCodec(p.Protocol()).Marshal(m, msg); err != nil { return err } chat := &packet.Chat{ Message: m.String(), Type: packet.ChatMessageType, Sender: uuid.Nil, } for _, o := range opts { o(chat) } return p.WritePacket(chat) } var legacyJsonCodec = &legacy.Legacy{} func (p *connectedPlayer) SendActionBar(msg component.Component) error { if msg == nil { return nil // skip nil message } protocol := p.Protocol() if protocol.GreaterEqual(version.Minecraft_1_11) { // Use the title packet instead. pkt, err := title.New(protocol, &title.Builder{ Action: title.SetActionBar, Component: msg, }) if err != nil { return err } return p.WritePacket(pkt) } // Due to issues with action bar packets, we'll need to convert the text message into a // legacy message and then put the legacy text into a component... (╯°□°)╯︵ ┻━┻! b := new(strings.Builder) if err := legacyJsonCodec.Marshal(b, msg); err != nil { return err } m, err := json.Marshal(map[string]string{"text": b.String()}) if err != nil { return err } return p.WritePacket(&packet.Chat{ Message: string(m), Type: packet.GameInfoMessageType, Sender: uuid.Nil, }) } func (p *connectedPlayer) SendPluginMessage(identifier message.ChannelIdentifier, data []byte) error { return p.WritePacket(&plugin.Message{ Channel: identifier.ID(), Data: data, }) } // TODO add header/footer, title & boss bar methods // Finds another server to attempt to log into, if we were unexpectedly disconnected from the server. // current is the current server of the player is on, so we skip this server and not connect to it. // current can be nil if there is no current server. // MAY RETURN NIL if no next server available! func (p *connectedPlayer) nextServerToTry(current RegisteredServer) RegisteredServer { p.mu.Lock() defer p.mu.Unlock() if len(p.serversToTry) == 0 { p.serversToTry = p.proxy.Config().ForcedHosts[p.virtualHost.String()] } if len(p.serversToTry) == 0 { p.serversToTry = p.proxy.Config().Try } sameName := func(rs RegisteredServer, name string) bool { return rs.ServerInfo().Name() == name } for i := p.tryIndex; i < len(p.serversToTry); i++ { toTry := p.serversToTry[i] if (p.connectedServer_ != nil && sameName(p.connectedServer_.Server(), toTry)) || (p.connInFlight != nil && sameName(p.connInFlight.Server(), toTry)) || (current != nil && sameName(current, toTry)) { continue } p.tryIndex = i if s := p.proxy.Server(toTry); s != nil { return s } } return nil } // player's connection is closed at this point, // now need to disconnect backend server connection, if any. 
func (p *connectedPlayer) teardown() { p.mu.RLock() connInFlight := p.connInFlight connectedServer := p.connectedServer_ p.mu.RUnlock() if connInFlight != nil { connInFlight.disconnect() } if connectedServer != nil { connectedServer.disconnect() } var status LoginStatus if p.proxy.unregisterConnection(p) { if p.disconnectDueToDuplicateConnection.Load() { status = ConflictingLoginStatus } else { status = SuccessfulLoginStatus } } else { if p.knownDisconnect.Load() { status = CanceledByProxyLoginStatus } else { status = CanceledByUserLoginStatus } } p.proxy.event.Fire(&DisconnectEvent{ player: p, loginStatus: status, }) } // may be nil! func (p *connectedPlayer) CurrentServer() ServerConnection { if cs := p.connectedServer(); cs != nil { return cs } // We must return an explicit nil, not a (*serverConnection)(nil). return nil } func (p *connectedPlayer) connectedServer() *serverConnection { p.mu.RLock() defer p.mu.RUnlock() return p.connectedServer_ } func (p *connectedPlayer) Username() string { return p.profile.Name } func (p *connectedPlayer) ID() uuid.UUID { return p.profile.ID } func (p *connectedPlayer) Disconnect(reason component.Component) { if !p.Active() { return } var r string b := new(strings.Builder) if (&legacy.Legacy{}).Marshal(b, reason) == nil { r = b.String() } if p.closeWith(packet.DisconnectWithProtocol(reason, p.Protocol())) == nil { p.log.Info("Player has been disconnected", "reason", r) } } func (p *connectedPlayer) String() string { return p.profile.Name } func (p *connectedPlayer) sendLegacyForgeHandshakeResetPacket() { p.phase().resetConnectionPhase(p) } func (p *connectedPlayer) setPhase(phase *legacyForgeHandshakeClientPhase) { p.mu.Lock() defer p.mu.Unlock() p.connPhase = phase } // may return nil func (p *connectedPlayer) ModInfo() *modinfo.ModInfo { p.mu.RLock() defer p.mu.RUnlock() return p.modInfo } func (p *connectedPlayer) setModInfo(info *modinfo.ModInfo) { p.mu.Lock() p.modInfo = info p.mu.Unlock() if info != nil { p.proxy.Event().Fire(&PlayerModInfoEvent{ player: p, modInfo: *info, }) } } // NOTE: the returned set is not goroutine-safe and must not be modified, // it is only for reading!!! func (p *connectedPlayer) knownChannels() sets.String { p.pluginChannelsMu.RLock() defer p.pluginChannelsMu.RUnlock() return p.pluginChannels } // runs fn while pluginChannels is locked. Used for modifying channel set. func (p *connectedPlayer) lockedKnownChannels(fn func(knownChannels sets.String)) { p.pluginChannelsMu.RUnlock() defer p.pluginChannelsMu.RLock() fn(p.pluginChannels) } // Determines whether or not we can forward a plugin message onto the client. // message - plugin message to forward to the client func (p *connectedPlayer) canForwardPluginMessage(protocol proto.Protocol, message *plugin.Message) bool { var minecraftOrFmlMessage bool // By default, all internal Minecraft and Forge channels are forwarded from the server. if int(protocol) <= int(version.Minecraft_1_12_2.Protocol) { channel := message.Channel minecraftOrFmlMessage = strings.HasPrefix(channel, "MC|") || strings.HasPrefix(channel, forge.LegacyHandshakeChannel) || plugin.LegacyRegister(message) || plugin.LegacyUnregister(message) } else { minecraftOrFmlMessage = strings.HasPrefix(message.Channel, "minecraft:") } // Otherwise, we need to see if the player already knows this channel or it's known by the proxy. 
return minecraftOrFmlMessage || p.knownChannels().Has(message.Channel) } func (p *connectedPlayer) setConnectedServer(conn *serverConnection) { p.mu.Lock() p.connectedServer_ = conn p.tryIndex = 0 // reset since we got connected to a server if conn == p.connInFlight { p.connInFlight = nil } p.mu.Unlock() } func (p *connectedPlayer) setSettings(settings *packet.ClientSettings) { wrapped := player.NewSettings(settings) p.mu.Lock() p.settings = wrapped p.mu.Unlock() p.proxy.Event().Fire(&PlayerSettingsChangedEvent{ player: p, settings: wrapped, }) } func (p *connectedPlayer) Closed() <-chan struct{} { return p.minecraftConn.closed } // Settings returns the players client settings. // If not known already, returns player.DefaultSettings. func (p *connectedPlayer) Settings() player.Settings { p.mu.RLock() defer p.mu.RUnlock() if p.settings != nil { return p.settings } return player.DefaultSettings } // returns a new player context that is canceled when: // - connection disconnects // - parent was canceled func (c *minecraftConn) newContext(parent context.Context) (ctx context.Context, cancel func()) { ctx, cancel = context.WithCancel(parent) go func() { select { case <-ctx.Done(): case <-c.closed: cancel() } }() return ctx, cancel } func randomUint64() uint64 { buf := make([]byte, 8) _, _ = rand.Read(buf) // Always succeeds, no need to check error return binary.LittleEndian.Uint64(buf) }
"go.minekube.com/gate/pkg/edition/java/forge" "go.minekube.com/gate/pkg/edition/java/modinfo" "go.minekube.com/gate/pkg/edition/java/profile" "go.minekube.com/gate/pkg/edition/java/proto/packet" "go.minekube.com/gate/pkg/edition/java/proto/packet/plugin"
random_line_split
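The MessageOption helpers in the player.go row above (MessageWithSender, MessageWithType) are an instance of Go's functional-options pattern: SendMessageWith builds a chat packet with defaults and then applies each option in order. Below is a minimal standalone sketch of that pattern; the chat/Option/WithSender names are illustrative only and are not part of the Gate API.

package main

import "fmt"

// chat mirrors the shape of a chat packet: a message plus optional metadata.
type chat struct {
	Message string
	Sender  string
	System  bool
}

// Option mutates a chat packet before it is sent, mirroring proxy.MessageOption.
type Option func(*chat)

// WithSender sets the sender identity, like MessageWithSender.
func WithSender(id string) Option {
	return func(c *chat) { c.Sender = id }
}

// AsSystem marks the message as a system message, like MessageWithType(SystemMessageType).
func AsSystem() Option {
	return func(c *chat) { c.System = true }
}

// send builds the packet with defaults and then applies each option in order.
func send(msg string, opts ...Option) chat {
	c := chat{Message: msg}
	for _, o := range opts {
		o(&c)
	}
	return c
}

func main() {
	fmt.Printf("%+v\n", send("hello", WithSender("1234"), AsSystem()))
}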
player.go
package proxy import ( "context" "crypto/rand" "encoding/binary" "encoding/hex" "encoding/json" "errors" "go.minekube.com/common/minecraft/component" "go.minekube.com/common/minecraft/component/codec/legacy" "go.minekube.com/gate/pkg/command" "go.minekube.com/gate/pkg/edition/java/forge" "go.minekube.com/gate/pkg/edition/java/modinfo" "go.minekube.com/gate/pkg/edition/java/profile" "go.minekube.com/gate/pkg/edition/java/proto/packet" "go.minekube.com/gate/pkg/edition/java/proto/packet/plugin" "go.minekube.com/gate/pkg/edition/java/proto/packet/title" "go.minekube.com/gate/pkg/edition/java/proto/util" "go.minekube.com/gate/pkg/edition/java/proto/version" "go.minekube.com/gate/pkg/edition/java/proxy/message" "go.minekube.com/gate/pkg/edition/java/proxy/player" "go.minekube.com/gate/pkg/gate/proto" "go.minekube.com/gate/pkg/runtime/logr" "go.minekube.com/gate/pkg/util/permission" "go.minekube.com/gate/pkg/util/sets" "go.minekube.com/gate/pkg/util/uuid" "go.uber.org/atomic" "net" "strings" "sync" "time" ) // Player is a connected Minecraft player. type Player interface { Inbound command.Source message.ChannelMessageSource message.ChannelMessageSink ID() uuid.UUID // The Minecraft ID of the player. Username() string // The username of the player. // CurrentServer returns the current server connection of the player. CurrentServer() ServerConnection // May be nil, if there is no backend server connection! Ping() time.Duration // The player's ping or -1 if currently unknown. OnlineMode() bool // Whether the player was authenticated with Mojang's session servers. // CreateConnectionRequest creates a connection request to begin switching the backend server. CreateConnectionRequest(target RegisteredServer) ConnectionRequest GameProfile() profile.GameProfile // Returns the player's game profile. Settings() player.Settings // The players client settings. Returns player.DefaultSettings if not yet unknown. // Disconnect disconnects the player with a reason. // Once called, further interface calls to this player become undefined. Disconnect(reason component.Component) // SpoofChatInput sends chats input onto the player's current server as if // they typed it into the client chat box. SpoofChatInput(input string) error // SendResourcePack sends the specified resource pack from url to the user. If at all possible, // send the resource pack with a sha1 hash using SendResourcePackWithHash. To monitor the status // of the sent resource pack, subscribe to PlayerResourcePackStatusEvent. SendResourcePack(url string) error // SendResourcePackWithHash sends the specified resource pack from url to the user, // using the specified 20-byte SHA-1 hash of the resource pack file. To monitor the // status of the sent resource pack, subscribe to PlayerResourcePackStatusEvent. SendResourcePackWithHash(url string, sha1Hash []byte) error // SendActionBar sends an action bar to the player. SendActionBar(msg component.Component) error // SendMessageWith sends a chat message with optional modifications. SendMessageWith(msg component.Component, opts ...MessageOption) error player.TabList // TODO add title and more } type connectedPlayer struct { *minecraftConn log logr.Logger virtualHost net.Addr onlineMode bool profile *profile.GameProfile ping atomic.Duration permFunc permission.Func // This field is true if this connection is being disconnected // due to another connection logging in with the same GameProfile. 
disconnectDueToDuplicateConnection atomic.Bool pluginChannelsMu sync.RWMutex // Protects following field pluginChannels sets.String // Known plugin channels *tabList // Player's tab list mu sync.RWMutex // Protects following fields connectedServer_ *serverConnection connInFlight *serverConnection settings player.Settings modInfo *modinfo.ModInfo connPhase clientConnectionPhase serversToTry []string // names of servers to try if we got disconnected from previous tryIndex int } var _ Player = (*connectedPlayer)(nil) func newConnectedPlayer( conn *minecraftConn, profile *profile.GameProfile, virtualHost net.Addr, onlineMode bool, ) *connectedPlayer { ping := atomic.Duration{} ping.Store(-1) return &connectedPlayer{ minecraftConn: conn, log: conn.log.WithName("player").WithValues( "name", profile.Name, "id", profile.ID), profile: profile, virtualHost: virtualHost, onlineMode: onlineMode, pluginChannels: sets.NewString(), // Should we limit the size to 1024 channels? connPhase: conn.Type().initialClientPhase(), ping: ping, tabList: newTabList(conn), permFunc: func(string) permission.TriState { return permission.Undefined }, } } func (p *connectedPlayer) connectionInFlight() *serverConnection { p.mu.RLock() defer p.mu.RUnlock() return p.connInFlight } func (p *connectedPlayer) phase() clientConnectionPhase { p.mu.RLock() defer p.mu.RUnlock() return p.connPhase } func (p *connectedPlayer) HasPermission(permission string) bool { return p.PermissionValue(permission).Bool() } func (p *connectedPlayer) PermissionValue(permission string) permission.TriState { return p.permFunc(permission) } func (p *connectedPlayer) Ping() time.Duration { return p.ping.Load() } func (p *connectedPlayer)
() bool { return p.onlineMode } func (p *connectedPlayer) GameProfile() profile.GameProfile { return *p.profile } var ( ErrNoBackendConnection = errors.New("player has no backend server connection yet") ErrTooLongChatMessage = errors.New("server bound chat message can not exceed 256 characters") ) func (p *connectedPlayer) SpoofChatInput(input string) error { if len(input) > packet.MaxServerBoundMessageLength { return ErrTooLongChatMessage } serverMc, ok := p.ensureBackendConnection() if !ok { return ErrNoBackendConnection } return serverMc.WritePacket(&packet.Chat{ Message: input, Type: packet.ChatMessageType, }) } func (p *connectedPlayer) ensureBackendConnection() (*minecraftConn, bool) { p.mu.RLock() defer p.mu.RUnlock() if p.connectedServer_ == nil { // Player has no backend connection. return nil, false } serverMc := p.connectedServer_.conn() if serverMc == nil { // Player's backend connection is not yet connected to a server. return nil, false } return serverMc, true } func (p *connectedPlayer) SendResourcePack(url string) error { return p.WritePacket(&packet.ResourcePackRequest{ Url: url, Hash: "", }) } func (p *connectedPlayer) SendResourcePackWithHash(url string, sha1Hash []byte) error { if len(sha1Hash) != 20 { return errors.New("hash length must be 20") } return p.WritePacket(&packet.ResourcePackRequest{ Url: url, Hash: hex.EncodeToString(sha1Hash), }) } func (p *connectedPlayer) VirtualHost() net.Addr { return p.virtualHost } func (p *connectedPlayer) Active() bool { return !p.minecraftConn.Closed() } // MessageOption is an option for Player.SendMessageWith. type MessageOption func(c *packet.Chat) // MessageWithSender modifies the sender identity of the chat message. func MessageWithSender(id uuid.UUID) MessageOption { return func(c *packet.Chat) { c.Sender = id } } // MessageType is a chat message type. type MessageType uint8 // Chat message types. const ( // ChatMessageType is a standard chat message. ChatMessageType MessageType = iota // SystemMessageType is a system chat message. // e.g. client is willing to accept messages from commands, // but does not want general chat from other players. SystemMessageType ) // MessageWithType modifies chat message type. func MessageWithType(t MessageType) MessageOption { return func(c *packet.Chat) { if t == SystemMessageType { c.Type = packet.SystemMessageType } else { c.Type = packet.ChatMessageType } } } func (p *connectedPlayer) SendMessage(msg component.Component) error { return p.SendMessageWith(msg) } func (p *connectedPlayer) SendMessageWith(msg component.Component, opts ...MessageOption) error { if msg == nil { return nil // skip nil message } m := new(strings.Builder) if err := util.JsonCodec(p.Protocol()).Marshal(m, msg); err != nil { return err } chat := &packet.Chat{ Message: m.String(), Type: packet.ChatMessageType, Sender: uuid.Nil, } for _, o := range opts { o(chat) } return p.WritePacket(chat) } var legacyJsonCodec = &legacy.Legacy{} func (p *connectedPlayer) SendActionBar(msg component.Component) error { if msg == nil { return nil // skip nil message } protocol := p.Protocol() if protocol.GreaterEqual(version.Minecraft_1_11) { // Use the title packet instead. pkt, err := title.New(protocol, &title.Builder{ Action: title.SetActionBar, Component: msg, }) if err != nil { return err } return p.WritePacket(pkt) } // Due to issues with action bar packets, we'll need to convert the text message into a // legacy message and then put the legacy text into a component... (╯°□°)╯︵ ┻━┻! 
b := new(strings.Builder) if err := legacyJsonCodec.Marshal(b, msg); err != nil { return err } m, err := json.Marshal(map[string]string{"text": b.String()}) if err != nil { return err } return p.WritePacket(&packet.Chat{ Message: string(m), Type: packet.GameInfoMessageType, Sender: uuid.Nil, }) } func (p *connectedPlayer) SendPluginMessage(identifier message.ChannelIdentifier, data []byte) error { return p.WritePacket(&plugin.Message{ Channel: identifier.ID(), Data: data, }) } // TODO add header/footer, title & boss bar methods // Finds another server to attempt to log into, if we were unexpectedly disconnected from the server. // current is the current server of the player is on, so we skip this server and not connect to it. // current can be nil if there is no current server. // MAY RETURN NIL if no next server available! func (p *connectedPlayer) nextServerToTry(current RegisteredServer) RegisteredServer { p.mu.Lock() defer p.mu.Unlock() if len(p.serversToTry) == 0 { p.serversToTry = p.proxy.Config().ForcedHosts[p.virtualHost.String()] } if len(p.serversToTry) == 0 { p.serversToTry = p.proxy.Config().Try } sameName := func(rs RegisteredServer, name string) bool { return rs.ServerInfo().Name() == name } for i := p.tryIndex; i < len(p.serversToTry); i++ { toTry := p.serversToTry[i] if (p.connectedServer_ != nil && sameName(p.connectedServer_.Server(), toTry)) || (p.connInFlight != nil && sameName(p.connInFlight.Server(), toTry)) || (current != nil && sameName(current, toTry)) { continue } p.tryIndex = i if s := p.proxy.Server(toTry); s != nil { return s } } return nil } // player's connection is closed at this point, // now need to disconnect backend server connection, if any. func (p *connectedPlayer) teardown() { p.mu.RLock() connInFlight := p.connInFlight connectedServer := p.connectedServer_ p.mu.RUnlock() if connInFlight != nil { connInFlight.disconnect() } if connectedServer != nil { connectedServer.disconnect() } var status LoginStatus if p.proxy.unregisterConnection(p) { if p.disconnectDueToDuplicateConnection.Load() { status = ConflictingLoginStatus } else { status = SuccessfulLoginStatus } } else { if p.knownDisconnect.Load() { status = CanceledByProxyLoginStatus } else { status = CanceledByUserLoginStatus } } p.proxy.event.Fire(&DisconnectEvent{ player: p, loginStatus: status, }) } // may be nil! func (p *connectedPlayer) CurrentServer() ServerConnection { if cs := p.connectedServer(); cs != nil { return cs } // We must return an explicit nil, not a (*serverConnection)(nil). 
return nil } func (p *connectedPlayer) connectedServer() *serverConnection { p.mu.RLock() defer p.mu.RUnlock() return p.connectedServer_ } func (p *connectedPlayer) Username() string { return p.profile.Name } func (p *connectedPlayer) ID() uuid.UUID { return p.profile.ID } func (p *connectedPlayer) Disconnect(reason component.Component) { if !p.Active() { return } var r string b := new(strings.Builder) if (&legacy.Legacy{}).Marshal(b, reason) == nil { r = b.String() } if p.closeWith(packet.DisconnectWithProtocol(reason, p.Protocol())) == nil { p.log.Info("Player has been disconnected", "reason", r) } } func (p *connectedPlayer) String() string { return p.profile.Name } func (p *connectedPlayer) sendLegacyForgeHandshakeResetPacket() { p.phase().resetConnectionPhase(p) } func (p *connectedPlayer) setPhase(phase *legacyForgeHandshakeClientPhase) { p.mu.Lock() defer p.mu.Unlock() p.connPhase = phase } // may return nil func (p *connectedPlayer) ModInfo() *modinfo.ModInfo { p.mu.RLock() defer p.mu.RUnlock() return p.modInfo } func (p *connectedPlayer) setModInfo(info *modinfo.ModInfo) { p.mu.Lock() p.modInfo = info p.mu.Unlock() if info != nil { p.proxy.Event().Fire(&PlayerModInfoEvent{ player: p, modInfo: *info, }) } } // NOTE: the returned set is not goroutine-safe and must not be modified, // it is only for reading!!! func (p *connectedPlayer) knownChannels() sets.String { p.pluginChannelsMu.RLock() defer p.pluginChannelsMu.RUnlock() return p.pluginChannels } // runs fn while pluginChannels is locked. Used for modifying channel set. func (p *connectedPlayer) lockedKnownChannels(fn func(knownChannels sets.String)) { p.pluginChannelsMu.RUnlock() defer p.pluginChannelsMu.RLock() fn(p.pluginChannels) } // Determines whether or not we can forward a plugin message onto the client. // message - plugin message to forward to the client func (p *connectedPlayer) canForwardPluginMessage(protocol proto.Protocol, message *plugin.Message) bool { var minecraftOrFmlMessage bool // By default, all internal Minecraft and Forge channels are forwarded from the server. if int(protocol) <= int(version.Minecraft_1_12_2.Protocol) { channel := message.Channel minecraftOrFmlMessage = strings.HasPrefix(channel, "MC|") || strings.HasPrefix(channel, forge.LegacyHandshakeChannel) || plugin.LegacyRegister(message) || plugin.LegacyUnregister(message) } else { minecraftOrFmlMessage = strings.HasPrefix(message.Channel, "minecraft:") } // Otherwise, we need to see if the player already knows this channel or it's known by the proxy. return minecraftOrFmlMessage || p.knownChannels().Has(message.Channel) } func (p *connectedPlayer) setConnectedServer(conn *serverConnection) { p.mu.Lock() p.connectedServer_ = conn p.tryIndex = 0 // reset since we got connected to a server if conn == p.connInFlight { p.connInFlight = nil } p.mu.Unlock() } func (p *connectedPlayer) setSettings(settings *packet.ClientSettings) { wrapped := player.NewSettings(settings) p.mu.Lock() p.settings = wrapped p.mu.Unlock() p.proxy.Event().Fire(&PlayerSettingsChangedEvent{ player: p, settings: wrapped, }) } func (p *connectedPlayer) Closed() <-chan struct{} { return p.minecraftConn.closed } // Settings returns the players client settings. // If not known already, returns player.DefaultSettings. 
func (p *connectedPlayer) Settings() player.Settings { p.mu.RLock() defer p.mu.RUnlock() if p.settings != nil { return p.settings } return player.DefaultSettings } // returns a new player context that is canceled when: // - connection disconnects // - parent was canceled func (c *minecraftConn) newContext(parent context.Context) (ctx context.Context, cancel func()) { ctx, cancel = context.WithCancel(parent) go func() { select { case <-ctx.Done(): case <-c.closed: cancel() } }() return ctx, cancel } func randomUint64() uint64 { buf := make([]byte, 8) _, _ = rand.Read(buf) // Always succeeds, no need to check error return binary.LittleEndian.Uint64(buf) }
OnlineMode
identifier_name
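connectedPlayer resolves permissions through a pluggable permission.Func that returns a tri-state value, so "explicitly denied" can be told apart from "never configured" (the default permFunc returns permission.Undefined). A self-contained sketch of that idea follows; TriState, Func and player here are simplified stand-ins, not the real pkg/util/permission types.

package main

import "fmt"

// TriState distinguishes "explicitly denied" from "not set at all".
type TriState int

const (
	Undefined TriState = iota
	False
	True
)

// Bool collapses the tri-state to a plain bool, treating Undefined as false.
func (t TriState) Bool() bool { return t == True }

// Func resolves one permission string for a subject, like permission.Func.
type Func func(permission string) TriState

// player carries a permission function, mirroring connectedPlayer.permFunc.
type player struct{ permFunc Func }

func (p *player) HasPermission(perm string) bool { return p.permFunc(perm).Bool() }

func main() {
	granted := map[string]bool{"chat.color": true, "server.switch": false}
	p := &player{permFunc: func(perm string) TriState {
		v, ok := granted[perm]
		if !ok {
			return Undefined // permission was never configured
		}
		if v {
			return True
		}
		return False
	}}
	fmt.Println(p.HasPermission("chat.color"), p.HasPermission("server.switch"), p.HasPermission("fly"))
}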
player.go
package proxy import ( "context" "crypto/rand" "encoding/binary" "encoding/hex" "encoding/json" "errors" "go.minekube.com/common/minecraft/component" "go.minekube.com/common/minecraft/component/codec/legacy" "go.minekube.com/gate/pkg/command" "go.minekube.com/gate/pkg/edition/java/forge" "go.minekube.com/gate/pkg/edition/java/modinfo" "go.minekube.com/gate/pkg/edition/java/profile" "go.minekube.com/gate/pkg/edition/java/proto/packet" "go.minekube.com/gate/pkg/edition/java/proto/packet/plugin" "go.minekube.com/gate/pkg/edition/java/proto/packet/title" "go.minekube.com/gate/pkg/edition/java/proto/util" "go.minekube.com/gate/pkg/edition/java/proto/version" "go.minekube.com/gate/pkg/edition/java/proxy/message" "go.minekube.com/gate/pkg/edition/java/proxy/player" "go.minekube.com/gate/pkg/gate/proto" "go.minekube.com/gate/pkg/runtime/logr" "go.minekube.com/gate/pkg/util/permission" "go.minekube.com/gate/pkg/util/sets" "go.minekube.com/gate/pkg/util/uuid" "go.uber.org/atomic" "net" "strings" "sync" "time" ) // Player is a connected Minecraft player. type Player interface { Inbound command.Source message.ChannelMessageSource message.ChannelMessageSink ID() uuid.UUID // The Minecraft ID of the player. Username() string // The username of the player. // CurrentServer returns the current server connection of the player. CurrentServer() ServerConnection // May be nil, if there is no backend server connection! Ping() time.Duration // The player's ping or -1 if currently unknown. OnlineMode() bool // Whether the player was authenticated with Mojang's session servers. // CreateConnectionRequest creates a connection request to begin switching the backend server. CreateConnectionRequest(target RegisteredServer) ConnectionRequest GameProfile() profile.GameProfile // Returns the player's game profile. Settings() player.Settings // The players client settings. Returns player.DefaultSettings if not yet unknown. // Disconnect disconnects the player with a reason. // Once called, further interface calls to this player become undefined. Disconnect(reason component.Component) // SpoofChatInput sends chats input onto the player's current server as if // they typed it into the client chat box. SpoofChatInput(input string) error // SendResourcePack sends the specified resource pack from url to the user. If at all possible, // send the resource pack with a sha1 hash using SendResourcePackWithHash. To monitor the status // of the sent resource pack, subscribe to PlayerResourcePackStatusEvent. SendResourcePack(url string) error // SendResourcePackWithHash sends the specified resource pack from url to the user, // using the specified 20-byte SHA-1 hash of the resource pack file. To monitor the // status of the sent resource pack, subscribe to PlayerResourcePackStatusEvent. SendResourcePackWithHash(url string, sha1Hash []byte) error // SendActionBar sends an action bar to the player. SendActionBar(msg component.Component) error // SendMessageWith sends a chat message with optional modifications. SendMessageWith(msg component.Component, opts ...MessageOption) error player.TabList // TODO add title and more } type connectedPlayer struct { *minecraftConn log logr.Logger virtualHost net.Addr onlineMode bool profile *profile.GameProfile ping atomic.Duration permFunc permission.Func // This field is true if this connection is being disconnected // due to another connection logging in with the same GameProfile. 
disconnectDueToDuplicateConnection atomic.Bool pluginChannelsMu sync.RWMutex // Protects following field pluginChannels sets.String // Known plugin channels *tabList // Player's tab list mu sync.RWMutex // Protects following fields connectedServer_ *serverConnection connInFlight *serverConnection settings player.Settings modInfo *modinfo.ModInfo connPhase clientConnectionPhase serversToTry []string // names of servers to try if we got disconnected from previous tryIndex int } var _ Player = (*connectedPlayer)(nil) func newConnectedPlayer( conn *minecraftConn, profile *profile.GameProfile, virtualHost net.Addr, onlineMode bool, ) *connectedPlayer { ping := atomic.Duration{} ping.Store(-1) return &connectedPlayer{ minecraftConn: conn, log: conn.log.WithName("player").WithValues( "name", profile.Name, "id", profile.ID), profile: profile, virtualHost: virtualHost, onlineMode: onlineMode, pluginChannels: sets.NewString(), // Should we limit the size to 1024 channels? connPhase: conn.Type().initialClientPhase(), ping: ping, tabList: newTabList(conn), permFunc: func(string) permission.TriState { return permission.Undefined }, } } func (p *connectedPlayer) connectionInFlight() *serverConnection { p.mu.RLock() defer p.mu.RUnlock() return p.connInFlight } func (p *connectedPlayer) phase() clientConnectionPhase { p.mu.RLock() defer p.mu.RUnlock() return p.connPhase } func (p *connectedPlayer) HasPermission(permission string) bool { return p.PermissionValue(permission).Bool() } func (p *connectedPlayer) PermissionValue(permission string) permission.TriState { return p.permFunc(permission) } func (p *connectedPlayer) Ping() time.Duration { return p.ping.Load() } func (p *connectedPlayer) OnlineMode() bool { return p.onlineMode } func (p *connectedPlayer) GameProfile() profile.GameProfile { return *p.profile } var ( ErrNoBackendConnection = errors.New("player has no backend server connection yet") ErrTooLongChatMessage = errors.New("server bound chat message can not exceed 256 characters") ) func (p *connectedPlayer) SpoofChatInput(input string) error { if len(input) > packet.MaxServerBoundMessageLength { return ErrTooLongChatMessage } serverMc, ok := p.ensureBackendConnection() if !ok { return ErrNoBackendConnection } return serverMc.WritePacket(&packet.Chat{ Message: input, Type: packet.ChatMessageType, }) } func (p *connectedPlayer) ensureBackendConnection() (*minecraftConn, bool) { p.mu.RLock() defer p.mu.RUnlock() if p.connectedServer_ == nil { // Player has no backend connection. return nil, false } serverMc := p.connectedServer_.conn() if serverMc == nil { // Player's backend connection is not yet connected to a server. return nil, false } return serverMc, true } func (p *connectedPlayer) SendResourcePack(url string) error { return p.WritePacket(&packet.ResourcePackRequest{ Url: url, Hash: "", }) } func (p *connectedPlayer) SendResourcePackWithHash(url string, sha1Hash []byte) error { if len(sha1Hash) != 20 { return errors.New("hash length must be 20") } return p.WritePacket(&packet.ResourcePackRequest{ Url: url, Hash: hex.EncodeToString(sha1Hash), }) } func (p *connectedPlayer) VirtualHost() net.Addr { return p.virtualHost } func (p *connectedPlayer) Active() bool { return !p.minecraftConn.Closed() } // MessageOption is an option for Player.SendMessageWith. type MessageOption func(c *packet.Chat) // MessageWithSender modifies the sender identity of the chat message. 
func MessageWithSender(id uuid.UUID) MessageOption { return func(c *packet.Chat) { c.Sender = id } } // MessageType is a chat message type. type MessageType uint8 // Chat message types. const ( // ChatMessageType is a standard chat message. ChatMessageType MessageType = iota // SystemMessageType is a system chat message. // e.g. client is willing to accept messages from commands, // but does not want general chat from other players. SystemMessageType ) // MessageWithType modifies chat message type. func MessageWithType(t MessageType) MessageOption { return func(c *packet.Chat) { if t == SystemMessageType { c.Type = packet.SystemMessageType } else { c.Type = packet.ChatMessageType } } } func (p *connectedPlayer) SendMessage(msg component.Component) error { return p.SendMessageWith(msg) } func (p *connectedPlayer) SendMessageWith(msg component.Component, opts ...MessageOption) error { if msg == nil { return nil // skip nil message } m := new(strings.Builder) if err := util.JsonCodec(p.Protocol()).Marshal(m, msg); err != nil { return err } chat := &packet.Chat{ Message: m.String(), Type: packet.ChatMessageType, Sender: uuid.Nil, } for _, o := range opts { o(chat) } return p.WritePacket(chat) } var legacyJsonCodec = &legacy.Legacy{} func (p *connectedPlayer) SendActionBar(msg component.Component) error { if msg == nil { return nil // skip nil message } protocol := p.Protocol() if protocol.GreaterEqual(version.Minecraft_1_11) { // Use the title packet instead. pkt, err := title.New(protocol, &title.Builder{ Action: title.SetActionBar, Component: msg, }) if err != nil { return err } return p.WritePacket(pkt) } // Due to issues with action bar packets, we'll need to convert the text message into a // legacy message and then put the legacy text into a component... (╯°□°)╯︵ ┻━┻! b := new(strings.Builder) if err := legacyJsonCodec.Marshal(b, msg); err != nil { return err } m, err := json.Marshal(map[string]string{"text": b.String()}) if err != nil { return err } return p.WritePacket(&packet.Chat{ Message: string(m), Type: packet.GameInfoMessageType, Sender: uuid.Nil, }) } func (p *connectedPlayer) SendPluginMessage(identifier message.ChannelIdentifier, data []byte) error { return p.WritePacket(&plugin.Message{ Channel: identifier.ID(), Data: data, }) } // TODO add header/footer, title & boss bar methods // Finds another server to attempt to log into, if we were unexpectedly disconnected from the server. // current is the current server of the player is on, so we skip this server and not connect to it. // current can be nil if there is no current server. // MAY RETURN NIL if no next server available! func (p *connectedPlayer) nextServerToTry(current RegisteredServer) RegisteredServer { p.mu.Lock() defer p.mu.Unlock() if len(p.serversToTry) == 0 { p.serversToTry = p.proxy.Config().ForcedHosts[p.virtualHost.String()] } if len(p.serversToTry) == 0 { p.serversToTry = p.proxy.Config().Try } sameName := func(rs RegisteredServer, name string) bool { return rs.ServerInfo().Name() == name } for i := p.tryIndex; i < len(p.serversToTry); i++ { toTry := p.serversToTry[i] if (p.connectedServer_ != nil && sameName(p.connectedServer_.Server(), toTry)) || (p.connInFlight != nil && sameName(p.connInFlight.Server(), toTry)) || (current != nil && sameName(current, toTry)) { continue } p.tryIndex = i if s := p.proxy.Server(toTry); s != nil { return s } } return nil } // player's connection is closed at this point, // now need to disconnect backend server connection, if any. 
func (p *connectedPlayer) teardown() { p.mu.RLock() connInFlight := p.connInFlight connectedServer := p.connectedServer_ p.mu.RUnlock() if connInFlight != nil { connInFlight.disconnect() } if connectedServer != nil { connectedServer.disconnect() } var status LoginStatus if p.proxy.unregisterConnection(p) { if p.disconnectDueToDuplicateConnection.Load() { status = ConflictingLoginStatus } else { status = SuccessfulLoginStatus } } else { if p.knownDisconnect.Load() { status = CanceledByProxyLoginStatus } else { status = CanceledByUserLoginStatus } } p.proxy.event.Fire(&DisconnectEvent{ player: p, loginStatus: status, }) } // may be nil! func (p *connectedPlayer) CurrentServer() ServerConnection { if cs := p.co
ctedPlayer) connectedServer() *serverConnection { p.mu.RLock() defer p.mu.RUnlock() return p.connectedServer_ } func (p *connectedPlayer) Username() string { return p.profile.Name } func (p *connectedPlayer) ID() uuid.UUID { return p.profile.ID } func (p *connectedPlayer) Disconnect(reason component.Component) { if !p.Active() { return } var r string b := new(strings.Builder) if (&legacy.Legacy{}).Marshal(b, reason) == nil { r = b.String() } if p.closeWith(packet.DisconnectWithProtocol(reason, p.Protocol())) == nil { p.log.Info("Player has been disconnected", "reason", r) } } func (p *connectedPlayer) String() string { return p.profile.Name } func (p *connectedPlayer) sendLegacyForgeHandshakeResetPacket() { p.phase().resetConnectionPhase(p) } func (p *connectedPlayer) setPhase(phase *legacyForgeHandshakeClientPhase) { p.mu.Lock() defer p.mu.Unlock() p.connPhase = phase } // may return nil func (p *connectedPlayer) ModInfo() *modinfo.ModInfo { p.mu.RLock() defer p.mu.RUnlock() return p.modInfo } func (p *connectedPlayer) setModInfo(info *modinfo.ModInfo) { p.mu.Lock() p.modInfo = info p.mu.Unlock() if info != nil { p.proxy.Event().Fire(&PlayerModInfoEvent{ player: p, modInfo: *info, }) } } // NOTE: the returned set is not goroutine-safe and must not be modified, // it is only for reading!!! func (p *connectedPlayer) knownChannels() sets.String { p.pluginChannelsMu.RLock() defer p.pluginChannelsMu.RUnlock() return p.pluginChannels } // runs fn while pluginChannels is locked. Used for modifying channel set. func (p *connectedPlayer) lockedKnownChannels(fn func(knownChannels sets.String)) { p.pluginChannelsMu.RUnlock() defer p.pluginChannelsMu.RLock() fn(p.pluginChannels) } // Determines whether or not we can forward a plugin message onto the client. // message - plugin message to forward to the client func (p *connectedPlayer) canForwardPluginMessage(protocol proto.Protocol, message *plugin.Message) bool { var minecraftOrFmlMessage bool // By default, all internal Minecraft and Forge channels are forwarded from the server. if int(protocol) <= int(version.Minecraft_1_12_2.Protocol) { channel := message.Channel minecraftOrFmlMessage = strings.HasPrefix(channel, "MC|") || strings.HasPrefix(channel, forge.LegacyHandshakeChannel) || plugin.LegacyRegister(message) || plugin.LegacyUnregister(message) } else { minecraftOrFmlMessage = strings.HasPrefix(message.Channel, "minecraft:") } // Otherwise, we need to see if the player already knows this channel or it's known by the proxy. return minecraftOrFmlMessage || p.knownChannels().Has(message.Channel) } func (p *connectedPlayer) setConnectedServer(conn *serverConnection) { p.mu.Lock() p.connectedServer_ = conn p.tryIndex = 0 // reset since we got connected to a server if conn == p.connInFlight { p.connInFlight = nil } p.mu.Unlock() } func (p *connectedPlayer) setSettings(settings *packet.ClientSettings) { wrapped := player.NewSettings(settings) p.mu.Lock() p.settings = wrapped p.mu.Unlock() p.proxy.Event().Fire(&PlayerSettingsChangedEvent{ player: p, settings: wrapped, }) } func (p *connectedPlayer) Closed() <-chan struct{} { return p.minecraftConn.closed } // Settings returns the players client settings. // If not known already, returns player.DefaultSettings. 
func (p *connectedPlayer) Settings() player.Settings { p.mu.RLock() defer p.mu.RUnlock() if p.settings != nil { return p.settings } return player.DefaultSettings } // returns a new player context that is canceled when: // - connection disconnects // - parent was canceled func (c *minecraftConn) newContext(parent context.Context) (ctx context.Context, cancel func()) { ctx, cancel = context.WithCancel(parent) go func() { select { case <-ctx.Done(): case <-c.closed: cancel() } }() return ctx, cancel } func randomUint64() uint64 { buf := make([]byte, 8) _, _ = rand.Read(buf) // Always succeeds, no need to check error return binary.LittleEndian.Uint64(buf) }
nnectedServer(); cs != nil { return cs } // We must return an explicit nil, not a (*serverConnection)(nil). return nil } func (p *conne
identifier_body
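Fields such as connectedServer_ and connInFlight are only read and written under the player's RWMutex, and teardown snapshots them under a read lock before doing any slow disconnect work. A small standalone sketch of that snapshot-then-act pattern; the session/snapshot/setCurrent names are illustrative assumptions, not Gate code.

package main

import (
	"fmt"
	"sync"
)

// session holds connection state guarded by an RWMutex, like connectedPlayer.mu.
type session struct {
	mu      sync.RWMutex
	current string
	pending string
}

// snapshot copies the guarded fields under a read lock and releases the lock
// before any follow-up work, the same shape as connectedPlayer.teardown.
func (s *session) snapshot() (current, pending string) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.current, s.pending
}

// setCurrent promotes a pending connection under the write lock,
// mirroring setConnectedServer clearing connInFlight.
func (s *session) setCurrent(name string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.current = name
	if s.pending == name {
		s.pending = ""
	}
}

func main() {
	s := &session{pending: "lobby"}
	s.setCurrent("lobby")
	cur, pend := s.snapshot()
	fmt.Println(cur, pend) // current = "lobby", pending = ""
}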
player.go
package proxy import ( "context" "crypto/rand" "encoding/binary" "encoding/hex" "encoding/json" "errors" "go.minekube.com/common/minecraft/component" "go.minekube.com/common/minecraft/component/codec/legacy" "go.minekube.com/gate/pkg/command" "go.minekube.com/gate/pkg/edition/java/forge" "go.minekube.com/gate/pkg/edition/java/modinfo" "go.minekube.com/gate/pkg/edition/java/profile" "go.minekube.com/gate/pkg/edition/java/proto/packet" "go.minekube.com/gate/pkg/edition/java/proto/packet/plugin" "go.minekube.com/gate/pkg/edition/java/proto/packet/title" "go.minekube.com/gate/pkg/edition/java/proto/util" "go.minekube.com/gate/pkg/edition/java/proto/version" "go.minekube.com/gate/pkg/edition/java/proxy/message" "go.minekube.com/gate/pkg/edition/java/proxy/player" "go.minekube.com/gate/pkg/gate/proto" "go.minekube.com/gate/pkg/runtime/logr" "go.minekube.com/gate/pkg/util/permission" "go.minekube.com/gate/pkg/util/sets" "go.minekube.com/gate/pkg/util/uuid" "go.uber.org/atomic" "net" "strings" "sync" "time" ) // Player is a connected Minecraft player. type Player interface { Inbound command.Source message.ChannelMessageSource message.ChannelMessageSink ID() uuid.UUID // The Minecraft ID of the player. Username() string // The username of the player. // CurrentServer returns the current server connection of the player. CurrentServer() ServerConnection // May be nil, if there is no backend server connection! Ping() time.Duration // The player's ping or -1 if currently unknown. OnlineMode() bool // Whether the player was authenticated with Mojang's session servers. // CreateConnectionRequest creates a connection request to begin switching the backend server. CreateConnectionRequest(target RegisteredServer) ConnectionRequest GameProfile() profile.GameProfile // Returns the player's game profile. Settings() player.Settings // The players client settings. Returns player.DefaultSettings if not yet unknown. // Disconnect disconnects the player with a reason. // Once called, further interface calls to this player become undefined. Disconnect(reason component.Component) // SpoofChatInput sends chats input onto the player's current server as if // they typed it into the client chat box. SpoofChatInput(input string) error // SendResourcePack sends the specified resource pack from url to the user. If at all possible, // send the resource pack with a sha1 hash using SendResourcePackWithHash. To monitor the status // of the sent resource pack, subscribe to PlayerResourcePackStatusEvent. SendResourcePack(url string) error // SendResourcePackWithHash sends the specified resource pack from url to the user, // using the specified 20-byte SHA-1 hash of the resource pack file. To monitor the // status of the sent resource pack, subscribe to PlayerResourcePackStatusEvent. SendResourcePackWithHash(url string, sha1Hash []byte) error // SendActionBar sends an action bar to the player. SendActionBar(msg component.Component) error // SendMessageWith sends a chat message with optional modifications. SendMessageWith(msg component.Component, opts ...MessageOption) error player.TabList // TODO add title and more } type connectedPlayer struct { *minecraftConn log logr.Logger virtualHost net.Addr onlineMode bool profile *profile.GameProfile ping atomic.Duration permFunc permission.Func // This field is true if this connection is being disconnected // due to another connection logging in with the same GameProfile. 
disconnectDueToDuplicateConnection atomic.Bool pluginChannelsMu sync.RWMutex // Protects following field pluginChannels sets.String // Known plugin channels *tabList // Player's tab list mu sync.RWMutex // Protects following fields connectedServer_ *serverConnection connInFlight *serverConnection settings player.Settings modInfo *modinfo.ModInfo connPhase clientConnectionPhase serversToTry []string // names of servers to try if we got disconnected from previous tryIndex int } var _ Player = (*connectedPlayer)(nil) func newConnectedPlayer( conn *minecraftConn, profile *profile.GameProfile, virtualHost net.Addr, onlineMode bool, ) *connectedPlayer { ping := atomic.Duration{} ping.Store(-1) return &connectedPlayer{ minecraftConn: conn, log: conn.log.WithName("player").WithValues( "name", profile.Name, "id", profile.ID), profile: profile, virtualHost: virtualHost, onlineMode: onlineMode, pluginChannels: sets.NewString(), // Should we limit the size to 1024 channels? connPhase: conn.Type().initialClientPhase(), ping: ping, tabList: newTabList(conn), permFunc: func(string) permission.TriState { return permission.Undefined }, } } func (p *connectedPlayer) connectionInFlight() *serverConnection { p.mu.RLock() defer p.mu.RUnlock() return p.connInFlight } func (p *connectedPlayer) phase() clientConnectionPhase { p.mu.RLock() defer p.mu.RUnlock() return p.connPhase } func (p *connectedPlayer) HasPermission(permission string) bool { return p.PermissionValue(permission).Bool() } func (p *connectedPlayer) PermissionValue(permission string) permission.TriState { return p.permFunc(permission) } func (p *connectedPlayer) Ping() time.Duration { return p.ping.Load() } func (p *connectedPlayer) OnlineMode() bool { return p.onlineMode } func (p *connectedPlayer) GameProfile() profile.GameProfile { return *p.profile } var ( ErrNoBackendConnection = errors.New("player has no backend server connection yet") ErrTooLongChatMessage = errors.New("server bound chat message can not exceed 256 characters") ) func (p *connectedPlayer) SpoofChatInput(input string) error { if len(input) > packet.MaxServerBoundMessageLength { return ErrTooLongChatMessage } serverMc, ok := p.ensureBackendConnection() if !ok { return ErrNoBackendConnection } return serverMc.WritePacket(&packet.Chat{ Message: input, Type: packet.ChatMessageType, }) } func (p *connectedPlayer) ensureBackendConnection() (*minecraftConn, bool) { p.mu.RLock() defer p.mu.RUnlock() if p.connectedServer_ == nil { // Player has no backend connection. return nil, false } serverMc := p.connectedServer_.conn() if serverMc == nil { // Player's backend connection is not yet connected to a server. return nil, false } return serverMc, true } func (p *connectedPlayer) SendResourcePack(url string) error { return p.WritePacket(&packet.ResourcePackRequest{ Url: url, Hash: "", }) } func (p *connectedPlayer) SendResourcePackWithHash(url string, sha1Hash []byte) error { if len(sha1Hash) != 20 { return errors.New("hash length must be 20") } return p.WritePacket(&packet.ResourcePackRequest{ Url: url, Hash: hex.EncodeToString(sha1Hash), }) } func (p *connectedPlayer) VirtualHost() net.Addr { return p.virtualHost } func (p *connectedPlayer) Active() bool { return !p.minecraftConn.Closed() } // MessageOption is an option for Player.SendMessageWith. type MessageOption func(c *packet.Chat) // MessageWithSender modifies the sender identity of the chat message. 
func MessageWithSender(id uuid.UUID) MessageOption { return func(c *packet.Chat) { c.Sender = id } } // MessageType is a chat message type. type MessageType uint8 // Chat message types. const ( // ChatMessageType is a standard chat message. ChatMessageType MessageType = iota // SystemMessageType is a system chat message. // e.g. client is willing to accept messages from commands, // but does not want general chat from other players. SystemMessageType ) // MessageWithType modifies chat message type. func MessageWithType(t MessageType) MessageOption { return func(c *packet.Chat) { if t == SystemMessageType { c.Type = packet.SystemMessageType } else { c.Type = packet.ChatMessageType } } } func (p *connectedPlayer) SendMessage(msg component.Component) error { return p.SendMessageWith(msg) } func (p *connectedPlayer) SendMessageWith(msg component.Component, opts ...MessageOption) error { if msg == nil { return nil // skip nil message } m := new(strings.Builder) if err := util.JsonCodec(p.Protocol()).Marshal(m, msg); err != nil { return err } chat := &packet.Chat{ Message: m.String(), Type: packet.ChatMessageType, Sender: uuid.Nil, } for _, o := range opts { o(chat) } return p.WritePacket(chat) } var legacyJsonCodec = &legacy.Legacy{} func (p *connectedPlayer) SendActionBar(msg component.Component) error { if msg == nil { return nil // skip nil message } protocol := p.Protocol() if protocol.GreaterEqual(version.Minecraft_1_11) { // Use the title packet instead. pkt, err := title.New(protocol, &title.Builder{ Action: title.SetActionBar, Component: msg, }) if err != nil { return err } return p.WritePacket(pkt) } // Due to issues with action bar packets, we'll need to convert the text message into a // legacy message and then put the legacy text into a component... (╯°□°)╯︵ ┻━┻! b := new(strings.Builder) if err := legacyJsonCodec.Marshal(b, msg); err != nil { return err } m, err := json.Marshal(map[string]string{"text": b.String()}) if err != nil { return err } return p.WritePacket(&packet.Chat{ Message: string(m), Type: packet.GameInfoMessageType, Sender: uuid.Nil, }) } func (p *connectedPlayer) SendPluginMessage(identifier message.ChannelIdentifier, data []byte) error { return p.WritePacket(&plugin.Message{ Channel: identifier.ID(), Data: data, }) } // TODO add header/footer, title & boss bar methods // Finds another server to attempt to log into, if we were unexpectedly disconnected from the server. // current is the current server of the player is on, so we skip this server and not connect to it. // current can be nil if there is no current server. // MAY RETURN NIL if no next server available! func (p *connectedPlayer) nextServerToTry(current RegisteredServer) RegisteredServer { p.mu.Lock() defer p.mu.Unlock() if len(p.serversToTry) == 0 { p.serversToTry = p.proxy.Config().ForcedHosts[p.virtualHost.String()] } if len(p.serversToTry) == 0 { p.serversToTry = p.proxy.Config().Try } sameName := func(rs RegisteredServer, name string) bool { return rs.ServerInfo().Name() == name } for i := p.tryIndex; i < len(p.serversToTry); i++ { toTry := p.serversToTry[i] if (p.connectedServer_ != nil && sameName(p.connectedServer_.Server(), toTry)) || (p.connInFlight != nil && sameName(p.connInFlight.Server(), toTry)) || (current != nil && sameName(current, toTry)) { continue
i if s := p.proxy.Server(toTry); s != nil { return s } } return nil } // player's connection is closed at this point, // now need to disconnect backend server connection, if any. func (p *connectedPlayer) teardown() { p.mu.RLock() connInFlight := p.connInFlight connectedServer := p.connectedServer_ p.mu.RUnlock() if connInFlight != nil { connInFlight.disconnect() } if connectedServer != nil { connectedServer.disconnect() } var status LoginStatus if p.proxy.unregisterConnection(p) { if p.disconnectDueToDuplicateConnection.Load() { status = ConflictingLoginStatus } else { status = SuccessfulLoginStatus } } else { if p.knownDisconnect.Load() { status = CanceledByProxyLoginStatus } else { status = CanceledByUserLoginStatus } } p.proxy.event.Fire(&DisconnectEvent{ player: p, loginStatus: status, }) } // may be nil! func (p *connectedPlayer) CurrentServer() ServerConnection { if cs := p.connectedServer(); cs != nil { return cs } // We must return an explicit nil, not a (*serverConnection)(nil). return nil } func (p *connectedPlayer) connectedServer() *serverConnection { p.mu.RLock() defer p.mu.RUnlock() return p.connectedServer_ } func (p *connectedPlayer) Username() string { return p.profile.Name } func (p *connectedPlayer) ID() uuid.UUID { return p.profile.ID } func (p *connectedPlayer) Disconnect(reason component.Component) { if !p.Active() { return } var r string b := new(strings.Builder) if (&legacy.Legacy{}).Marshal(b, reason) == nil { r = b.String() } if p.closeWith(packet.DisconnectWithProtocol(reason, p.Protocol())) == nil { p.log.Info("Player has been disconnected", "reason", r) } } func (p *connectedPlayer) String() string { return p.profile.Name } func (p *connectedPlayer) sendLegacyForgeHandshakeResetPacket() { p.phase().resetConnectionPhase(p) } func (p *connectedPlayer) setPhase(phase *legacyForgeHandshakeClientPhase) { p.mu.Lock() defer p.mu.Unlock() p.connPhase = phase } // may return nil func (p *connectedPlayer) ModInfo() *modinfo.ModInfo { p.mu.RLock() defer p.mu.RUnlock() return p.modInfo } func (p *connectedPlayer) setModInfo(info *modinfo.ModInfo) { p.mu.Lock() p.modInfo = info p.mu.Unlock() if info != nil { p.proxy.Event().Fire(&PlayerModInfoEvent{ player: p, modInfo: *info, }) } } // NOTE: the returned set is not goroutine-safe and must not be modified, // it is only for reading!!! func (p *connectedPlayer) knownChannels() sets.String { p.pluginChannelsMu.RLock() defer p.pluginChannelsMu.RUnlock() return p.pluginChannels } // runs fn while pluginChannels is locked. Used for modifying channel set. func (p *connectedPlayer) lockedKnownChannels(fn func(knownChannels sets.String)) { p.pluginChannelsMu.RUnlock() defer p.pluginChannelsMu.RLock() fn(p.pluginChannels) } // Determines whether or not we can forward a plugin message onto the client. // message - plugin message to forward to the client func (p *connectedPlayer) canForwardPluginMessage(protocol proto.Protocol, message *plugin.Message) bool { var minecraftOrFmlMessage bool // By default, all internal Minecraft and Forge channels are forwarded from the server. if int(protocol) <= int(version.Minecraft_1_12_2.Protocol) { channel := message.Channel minecraftOrFmlMessage = strings.HasPrefix(channel, "MC|") || strings.HasPrefix(channel, forge.LegacyHandshakeChannel) || plugin.LegacyRegister(message) || plugin.LegacyUnregister(message) } else { minecraftOrFmlMessage = strings.HasPrefix(message.Channel, "minecraft:") } // Otherwise, we need to see if the player already knows this channel or it's known by the proxy. 
return minecraftOrFmlMessage || p.knownChannels().Has(message.Channel) } func (p *connectedPlayer) setConnectedServer(conn *serverConnection) { p.mu.Lock() p.connectedServer_ = conn p.tryIndex = 0 // reset since we got connected to a server if conn == p.connInFlight { p.connInFlight = nil } p.mu.Unlock() } func (p *connectedPlayer) setSettings(settings *packet.ClientSettings) { wrapped := player.NewSettings(settings) p.mu.Lock() p.settings = wrapped p.mu.Unlock() p.proxy.Event().Fire(&PlayerSettingsChangedEvent{ player: p, settings: wrapped, }) } func (p *connectedPlayer) Closed() <-chan struct{} { return p.minecraftConn.closed } // Settings returns the players client settings. // If not known already, returns player.DefaultSettings. func (p *connectedPlayer) Settings() player.Settings { p.mu.RLock() defer p.mu.RUnlock() if p.settings != nil { return p.settings } return player.DefaultSettings } // returns a new player context that is canceled when: // - connection disconnects // - parent was canceled func (c *minecraftConn) newContext(parent context.Context) (ctx context.Context, cancel func()) { ctx, cancel = context.WithCancel(parent) go func() { select { case <-ctx.Done(): case <-c.closed: cancel() } }() return ctx, cancel } func randomUint64() uint64 { buf := make([]byte, 8) _, _ = rand.Read(buf) // Always succeeds, no need to check error return binary.LittleEndian.Uint64(buf) }
} p.tryIndex =
conditional_block
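The conditional block in this row comes from nextServerToTry, which walks the configured try-list from tryIndex and skips the server the player is already on or is currently connecting to. A simplified, self-contained version of that selection loop; nextToTry and its parameters are hypothetical names used only for illustration.

package main

import "fmt"

// nextToTry walks a try-list starting at tryIndex and returns the first
// candidate that is registered and is neither the current server nor the
// in-flight connection, mirroring connectedPlayer.nextServerToTry.
func nextToTry(try []string, tryIndex int, current, inFlight string, registered map[string]bool) (string, int) {
	for i := tryIndex; i < len(try); i++ {
		name := try[i]
		if name == current || name == inFlight {
			continue
		}
		if registered[name] {
			return name, i
		}
	}
	return "", tryIndex
}

func main() {
	registered := map[string]bool{"lobby": true, "survival": true}
	next, idx := nextToTry([]string{"lobby", "survival"}, 0, "lobby", "", registered)
	fmt.Println(next, idx) // survival 1
}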
dream.go
package main import ( "bytes" "fmt" "os" "os/exec" "strings" "time" "github.com/TableMountain/goydl" "github.com/gin-gonic/gin" "github.com/sirupsen/logrus" "github.com/skratchdot/open-golang/open" ) var isJob bool var basePath string // Truncate todo func
(t time.Time) time.Time { return t.Truncate(24 * time.Hour) } func now() string { return time.Now().Format(time.Kitchen) } // Dream is exported so it can be an api, haha what fun. Games perhaps? Stock trading? Some real time video effect? func Dream(c *gin.Context) { start := time.Now() defer func() { elapsed := fmt.Sprintf("%s %s", now(), time.Since(start)) elapsed = strings.Split(elapsed, ".")[0] + "s" Log.Info("job took ", elapsed) mel.Broadcast([]byte(elapsed)) }() yt := c.PostForm("yt") fps := c.PostForm("fps") ov := c.PostForm("ov") //data the user uploaded we want ovf := c.PostForm("ovf") of := c.PostForm("of") oo := c.PostForm("oo") it := c.PostForm("iterations") oc := c.PostForm("octaves") la := c.PostForm("layer") rl := c.PostForm("rl") Log.Info("rl: ", rl) ow := c.PostForm("ow") li := c.PostForm("li") iw := c.PostForm("iw") rle := c.PostForm("rle") ocs := c.PostForm("ocscale") // stretch:=c.Postform("stretchvideo") isJob = true defer func() { isJob = false }() Log.WithFields(logrus.Fields{ "event": "new job started", }) jobLog.WithFields(logrus.Fields{ "time": time.Now().UTC().UnixNano(), "title": name, "fps": fps, "it": it, "oc": oc, "la": la, "rl": rl, "ow": ow, "li": li, "iw": iw, "rle": rle, }) Log.Info("base path is ", basePath) newJobLog(name) //let's save interesting job metadata for the user in a tidy format (err logs, srv logs kept with the binary or maybe put in bind dir? wip) jobLog.WithFields(logrus.Fields{ "fps": fps, "iterations": it, "octaves": oc, "layer": la, "linear increase": li, "iteration waver": iw, "octave waver": ow, "randomization type": rl, "random layer every n frames": rle, }).Info("job name: ", name) // var uploadedFile, framesDirPath string var name, fullName, ext string if yt != "" { //if "yt" checkbox checked youtubeDl := goydl.NewYoutubeDl() for { //we loop until we got an acceptable ytURL fmt.Println("waiting...") youtubeDl.VideoURL = ytURL fmt.Println("videoURL:", ytURL) if ytURL == "" { //we didn't get a url, so just cancel the job Log.Info("the url was blank (therefore no good ytURL yet), so just cancel the job") return } info, err := youtubeDl.GetInfo() if err != nil { Log.WithFields(logrus.Fields{ "event": "ytdl", "error": err, }).Error("we should never fail here") continue } fmt.Println(youtubeDl.VideoURL, "blah") ext = info.Ext name = strings.Split(info.Title, " ")[0] fullName = name + ".mp4" if alreadyHave(basePath + "/frames/" + name) { name = renamer(name) fullName = name + ".mp4" Log.Info("\nwe renamed as: ", fullName) } uploadedFile := fmt.Sprintf("%s/frames/%s/%s.mp4", basePath, name, name) fmt.Println("uploaded file: ", uploadedFile) youtubeDl.Options.Output.Value = uploadedFile youtubeDl.Options.Format.Value = "mp4" cmd, err := youtubeDl.Download(youtubeDl.VideoURL) if err != nil { Log.WithFields(logrus.Fields{ "event": "error", "err": err, "uploadedFile": uploadedFile, }).Error("dl'ing from yt failed w err") } else { Log.WithFields(logrus.Fields{ "event": "download", "path": uploadedFile, }).Info("downloaded a yt video") println("starting download") cmd.Wait() println("finished download") // make new folder for job framesDirPath = fmt.Sprintf("%s/frames/%s", basePath, name) if _, err := os.Stat(framesDirPath); os.IsNotExist(err) { if err = os.Mkdir(framesDirPath, 0777); err != nil { Log.Error("failed to make a new job dir w/ error: ", err) } Log.Info("frames folder for new job was created at ", framesDirPath) } break //we got our file, now we move on, we don't need to keep listening for URL } } } else { // if no youtube, then get 
file from form upload file, err := c.FormFile("file") if err != nil { Log.Error("failed to get file", err) //although this might not be an error as we support ytdl now return } name = strings.Split(file.Filename, ".")[0] fullName = file.Filename ext = strings.Split(fullName, ".")[1] if alreadyHave(basePath + "/frames/" + name) { name = renamer(name) fullName = name + "." + strings.Split(file.Filename, ".")[1] Log.Info("\nwe renamed as: ", fullName) } // make new folder for job framesDirPath = fmt.Sprintf("%s/frames/%s", basePath, name) if _, err := os.Stat(framesDirPath); os.IsNotExist(err) { if err = os.Mkdir(framesDirPath, 0777); err != nil { Log.Error("failed to make a new job dir w/ error: ", err) } Log.Info("frames folder for new job was created at ", framesDirPath) } uploadedFile = fmt.Sprintf("%s/%s", framesDirPath, fullName) if err := c.SaveUploadedFile(file, uploadedFile); err != nil { Log.Error("failed to save file at path ", uploadedFile, " err is: ", err) } else { Log.Info("saved file at path ", uploadedFile) } } // make a new output folder outputPath := fmt.Sprintf("%s/output", framesDirPath) if _, err := os.Stat(outputPath); os.IsNotExist(err) { os.Mkdir(outputPath, 0777) Log.Info("output folder for new job was created at ", outputPath) } Log.Info("saved output dir at path ", outputPath) uploadedFile = fmt.Sprintf("%s/%s", framesDirPath, fullName) mel.Broadcast([]byte(name)) itsAVideo := false // decide what to do with the file we've gotten, if it's an image: if ext == "png" { //it's perfect, leave it alone... } else if ext == "jpg" || ext == "jpeg" { cmd, err := exec.Command("ffmpeg", "-i", uploadedFile, framesDirPath+"/"+name+".png").CombinedOutput() if err != nil { Log.Error("oops, failed trying to make some image of ext ", ext, " to png") } else { Log.Info("that's great, we got an image, those are easy, ffmpeg said:", string(cmd)) } } else if ext == "gif" { itsAVideo = true Log.Info("trying to convert a gif") // ffmpeg -f gif -i giphy-downsized.gif -pix_fmt yuv420p -c:v libx264 -movflags +faststart -filter:v crop='floor(in_w/2)*2:floor(in_h/2)*2' BAR.mp4 savedMp4 := fmt.Sprintf("%s/frames/%s/%s.mp4", basePath, name, name) cmd := exec.Command("ffmpeg", "-f", "gif", "-i", uploadedFile, "-pix_fmt", "yuv420p", "-c:v", "libx264", "-movflags", "+faststart", "-filter:v", "crop='floor(in_w/2)*2:floor(in_h/2)*2'", savedMp4) cmd.Stdin = strings.NewReader("") var out bytes.Buffer cmd.Stdout = &out err := cmd.Run() if err != nil { Log.Error("failed to make mp4 from gif ", err) } else { uploadedFile = strings.Split(uploadedFile, ".")[0] + ".mp4" Log.Info("made mp4 from GIF") } } else { // if file not gif or img try to make it mp4 itsAVideo = true Log.Info("ext: ", ext) Log.Info("file.filename ", fullName) if ext != "mp4" { cmd, err := exec.Command("ffmpeg", "-i", uploadedFile, strings.Split(uploadedFile, ".")[0]+".mp4").CombinedOutput() if err != nil { Log.Error("failed to make a .any to .mp4 , ", err) } else { Log.Info("made a ", ext, " into .mp4 with cmd ", string(cmd)) err := os.Remove(uploadedFile) if err != nil { Log.Info("err removing original .mp4 as err: ", err) } else { uploadedFile = strings.Split(uploadedFile, ".")[0] + ".mp4" Log.Info("deleted original at ext: ", ext) } } } } // open finder if of == "of" { open.Run(framesDirPath) } if oo == "oo" { open.Run(outputPath) } if itsAVideo { // create frames from mp4 framesOut := fmt.Sprintf("%s/frames/%s/%s.png", basePath, name, "%d") Log.Info("framesOut: ", framesOut) cmd, err := exec.Command("ffmpeg", "-i", uploadedFile, 
"-vf", "fps="+fps, "-c:v", "png", framesOut).CombinedOutput() if err != nil { Log.Error("failed to make frames", err) } else { Log.Info("made frames from MP4 with cmd: ", string(cmd)) } } Log.Info("entering dreamer goroutine") // deep dream the frames cmd, err := exec.Command("python3", "folder.py", "--input", framesDirPath, "-os", ocs, "-it", it, "-oc", oc, "-la", la, "-rl", rl, "-rle", rle, "-li", li, "-iw", iw, "-ow", ow).CombinedOutput() if err != nil { Log.WithFields(logrus.Fields{ "event": "folder.py", }).Error("failed to dream", err) z := fmt.Sprintf("FAIL: python borked: %s", err.Error()) mel.Broadcast([]byte(z)) } Log.Info("done w/ dream loop, python said: ", string(cmd)) // add metadata file la = strings.Replace(la, "/", "", 1) //we want a file not to make a /conv directory or w/e // f, err := os.Create(outputPath + "/it" + it + "oc" + oc + "ch" + ch + "os" + ocs + "la" + la) // if err != nil { // Log.Error("failed to make metadata file: ", err) // } // f.Close() // make a jpg, cause png's are hella MB, and who wants that on /r/ or fb? nobody cause it fk's dl speed if !itsAVideo { cmd, err := exec.Command("ffmpeg", "-i", outputPath+"/1.png", basePath+"/images/"+"it"+it+"oc"+oc+"os"+ocs+"la"+la+".jpg").CombinedOutput() if err != nil { Log.Error("failed to jpg the png", err) } Log.Info("jpg'd to ", string(cmd)) return //if it's not a video, don't make an output.mp4 } // put frames together into an mp4 in videos dir newVideo := fmt.Sprintf("%s/videos/%s", basePath, name+".mp4") frames := fmt.Sprintf("%s/output/%s.png", framesDirPath, "%d") Log.Info("frames to be turned into mp4 at: ", frames) // framesDir := fmt.Sprintf("%s/output/%s.png", framesDirPath, "%d") // ffmpeg -r 5 -f image2 -i '%d.png' -vcodec libx264 -crf 25 -pix_fmt yuv420p out.mp4 cmd, err = exec.Command("ffmpeg", "-r", fps, "-f", "image2", "-i", frames, "-vcodec", "libx264", "-crf", "25", "-pix_fmt", "yuv420p", newVideo).CombinedOutput() if err != nil { Log.Error("still failing to output a video meh, ", err) } else { Log.Info("\nmade mp4 from frames") } if ov == "ov" { open.Run(basePath + "/videos") } // is there sound? audio, err := exec.Command("ffprobe", uploadedFile, "-show_streams", "-select_streams", "a", "-loglevel", "error").CombinedOutput() if err != nil { Log.Error("Failed to test audio, ", err) } // add sound back in if there is any // ffmpeg -i 2171447000212516064.mp4 -i gold.mp4 -map 0:v -map 1:a output.mp4 if len(audio) > 1 { Log.Info("there's sound in this clip") out, err := exec.Command("ffmpeg", "-y", "-i", newVideo, "-i", uploadedFile, "-map", "0:v", "-map", "1:a", basePath+"/videos/audio_"+name+".mp4").CombinedOutput() if err != nil { Log.Error("failed to add sound back", err) } else { Log.Info("fffmpeg added sound:", string(out)) if ovf == "ovf" { open.Run(basePath + "/videos/audio_" + name + ".mp4") } // todo remove newVideo, so we only save one video, the one w/ audio } } else { Log.Info("there's no sound") if ovf == "ovf" { open.Run(newVideo) } } //stretch video enabled? 
// ffmpeg -i input.mp4 -vf scale=ih*16/9:ih,scale=iw:-2,setsar=1 -crf 20 -c:a copy YT.mp4 // ffmpeg -i out.mp4 -vf scale=720x406,setdar=16:9 z.mp4 // http://www.bugcodemaster.com/article/changing-resolution-video-using-ffmpeg // out, err := exec.Command("ffmpeg", "-y", "-i", newVideo, "-i", uploadedFile, "-map", "0:v", "-map", "1:a", basePath+"/videos/"+name+"_audio.mp4").CombinedOutput() // if err != nil { // Log.Error("failed to add sound back", err) // } else { // Log.Info("fffmpeg added sound:", string(out)) // os.Remove(newVideo) //remove video w/o sound, we don't need it // if ovf == "ovf" { // open.Run(basePath + "/videos/" + name + "_audio.mp4") // } // // todo remove newVideo, so we only save one w/ audio // } }
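The commented-out block above sketches a "stretch video" step (an ffmpeg scale filter forcing a 16:9 frame) that is read from the form as "stretchvideo" but never acted on. Below is a minimal, hedged sketch of how it could be wired into the same exec.Command pattern Dream already uses; the stretchVideo helper name and the stretched_ output naming are assumptions, only the filter string comes from the comments above, and it relies on the "fmt" and "os/exec" imports dream.go already has.

// stretchVideo is a hypothetical helper mirroring the commented-out idea above:
// run the ffmpeg scale filter from the comment over the finished video so it
// fits a 16:9 frame, copying the audio stream through untouched.
func stretchVideo(newVideo, basePath, name string) (string, error) {
	stretched := fmt.Sprintf("%s/videos/stretched_%s.mp4", basePath, name) // assumed output naming
	out, err := exec.Command("ffmpeg", "-y",
		"-i", newVideo,
		"-vf", "scale=ih*16/9:ih,scale=iw:-2,setsar=1",
		"-crf", "20",
		"-c:a", "copy",
		stretched).CombinedOutput()
	if err != nil {
		return "", fmt.Errorf("ffmpeg stretch failed: %v: %s", err, out)
	}
	return stretched, nil
}

If the stretchvideo checkbox were honored, Dream could call this right after the audio step and open the result the same way it opens the other outputs.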
buffer.go
package gui import ( "fmt" "io/ioutil" "log" "os" "path" "regexp" "runtime" "strings" "time" "unicode" "github.com/felixangell/go-rope" "github.com/felixangell/phi-editor/cfg" "github.com/felixangell/strife" "github.com/veandco/go-sdl2/sdl" ) var ( timer int64 = 0 reset_timer int64 = 0 should_draw bool = true should_flash bool ) // TODO: allow font setting or whatever type camera struct { x int y int } type Buffer struct { BaseComponent HasFocus bool index int parent *View font *strife.Font contents []*rope.Rope curs *Cursor cfg *cfg.TomlConfig cam *camera filePath string languageInfo string } func NewBuffer(conf *cfg.TomlConfig, parent *View, index int) *Buffer { config := conf if config == nil { config = cfg.NewDefaultConfig() } buffContents := []*rope.Rope{} buff := &Buffer{ index: index, parent: parent, contents: buffContents, curs: &Cursor{}, cfg: config, filePath: "/tmp/phi_file_" + time.Now().String(), // TODO make this a randomly chosen temp file cam: &camera{0, 0}, } return buff } func (b *Buffer) OpenFile(filePath string) { b.filePath = filePath log.Println("Opening file ", filePath) ext := path.Ext(filePath) lang, err := b.cfg.GetLanguageFromExt(ext) if err != nil { log.Println(err.Error()) } else { log.Println("- this file is a ", lang, " language program") b.languageInfo = lang } // if the file doesn't exist, try to create it before reading it if _, err := os.Stat(filePath); os.IsNotExist(err) { f, err := os.Create(filePath) if err != nil { panic(err) } else { f.Close() } } contents, err := ioutil.ReadFile(filePath) if err != nil { panic(err) } lines := strings.Split(string(contents), "\n") for _, line := range lines { b.appendLine(line) } } func (b *Buffer) OnDispose() { // hm! // os.Remove(b.fileHandle) } func (b *Buffer) OnInit() {} func (b *Buffer) appendLine(val string) { b.contents = append(b.contents, rope.New(val)) // because we've added a new line // we have to set the x to the start b.curs.x = 0 } func (b *Buffer) insertRune(r rune) { log.Println("Inserting rune ", r, " into current line at ", b.curs.x, ":", b.curs.y) log.Println("Line before insert> ", b.contents[b.curs.y]) b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(r)) b.curs.move(1, 0) } // TODO handle EVERYTHING but for now im handling // my UK macbook key layout. 
var shiftAlternative = map[rune]rune{ '1': '!', '2': '@', '3': '£', '4': '$', '5': '%', '6': '^', '7': '&', '8': '*', '9': '(', '0': ')', '-': '_', '=': '+', '`': '~', '/': '?', '.': '>', ',': '<', '[': '{', ']': '}', ';': ':', '\'': '"', '\\': '|', '§': '±', } var altAlternative = map[rune]rune{ '1': '¡', '2': '€', '3': '#', '4': '¢', '5': '∞', '6': '§', '7': '¶', '8': '•', '9': 'ª', '0': 'º', '-': '–', '=': '≠', '`': '`', '/': '÷', '.': '≥', ',': '≤', '[': '“', ']': '‘', ';': '…', '\'': 'æ', '\\': '«', } func (b *Buffer) processTextInput(r rune) bool { if ALT_DOWN && r == '\t' { // nop, we dont want to // insert tabs when we // alt tab out of view of this app return true } // only do the alt alternatives on mac osx // todo change this so it's not checking on every // input if runtime.GOOS == "darwin" && ALT_DOWN { if val, ok := altAlternative[r]; ok { r = val } } if CAPS_LOCK { if unicode.IsLetter(r) { r = unicode.ToUpper(r) } } if CONTROL_DOWN { actionName, actionExists := cfg.Shortcuts.Controls[string(unicode.ToLower(r))] if actionExists { if proc, ok := actions[actionName]; ok { return proc(b) } } else { log.Println("warning, unimplemented shortcut ctrl+", unicode.ToLower(r), actionName) } } if SUPER_DOWN { actionName, actionExists := cfg.Shortcuts.Supers[string(unicode.ToLower(r))] if actionExists { if proc, ok := actions[actionName]; ok { return proc(b) } } else { log.Println("warning, unimplemented shortcut ctrl+", unicode.ToLower(r), actionName) } } if SHIFT_DOWN { // if it's a letter convert to uppercase if unicode.IsLetter(r) { r = unicode.ToUpper(r) } else { // otherwise we have to look in our trusy // shift mapping thing. if val, ok := shiftAlternative[r]; ok { r = val } } } // NOTE: we have to do this AFTER we map the // shift combo for the value! // this will not insert a ), }, or ] if there // is one to the right of us... basically // this escapes out of a closing bracket // rather than inserting a new one IF we are inside // brackets. if b.cfg.Editor.Match_Braces { if r == ')' || r == '}' || r == ']' { currLine := b.contents[b.curs.y] if b.curs.x < currLine.Len() { curr := currLine.Index(b.curs.x + 1) if curr == r { b.curs.move(1, 0) return true } else { log.Print("no it's ", curr) } } } } b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(r)) b.curs.move(1, 0) // we don't need to match braces // let's not continue any further if !b.cfg.Editor.Match_Braces { return true } // TODO: shall we match single quotes and double quotes too? matchingPair := int(r) // the offset in the ASCII Table is +2 for { and for [ // but its +1 for parenthesis ( offset := 2 switch r { case '(': offset = 1 fallthrough case '{': fallthrough case '[': matchingPair += offset b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(rune(matchingPair))) } return true } func remove(slice []*rope.Rope, s int) []*rope.Rope { return append(slice[:s], slice[s+1:]...) 
} func (b *Buffer) deleteNext() { b.moveRight() b.deletePrev() } func (b *Buffer) deletePrev() { if b.curs.x > 0 { offs := -1 if !b.cfg.Editor.Tabs_Are_Spaces { if b.contents[b.curs.y].Index(b.curs.x) == '\t' { offs = int(-b.cfg.Editor.Tab_Size) } } else if b.cfg.Editor.Hungry_Backspace && b.curs.x >= int(b.cfg.Editor.Tab_Size) { // cut out the last {TAB_SIZE} amount of characters // and check em tabSize := int(b.cfg.Editor.Tab_Size) lastTabSizeChars := b.contents[b.curs.y].Substr(b.curs.x+1-tabSize, tabSize).String() if strings.Compare(lastTabSizeChars, b.makeTab()) == 0 { // delete {TAB_SIZE} amount of characters // from the cursors x pos for i := 0; i < int(b.cfg.Editor.Tab_Size); i++ { b.contents[b.curs.y] = b.contents[b.curs.y].Delete(b.curs.x, 1) b.curs.move(-1, 0) } return } } b.contents[b.curs.y] = b.contents[b.curs.y].Delete(b.curs.x, 1) b.curs.moveRender(-1, 0, offs, 0) } else if b.curs.x == 0 && b.curs.y > 0 { // start of line, wrap to previous prevLineLen := b.contents[b.curs.y-1].Len() b.contents[b.curs.y-1] = b.contents[b.curs.y-1].Concat(b.contents[b.curs.y]) b.contents = append(b.contents[:b.curs.y], b.contents[b.curs.y+1:]...) b.curs.move(prevLineLen, -1) } } func (b *Buffer) deleteBeforeCursor() { // delete so we're at the end // of the previous line if b.curs.x == 0 { b.deletePrev() return } for b.curs.x > 0 { b.deletePrev() } } func (b *Buffer) moveLeft() { if b.curs.x == 0 && b.curs.y > 0 { b.curs.move(b.contents[b.curs.y-1].Len(), -1) } else if b.curs.x > 0 { b.curs.move(-1, 0) } } func (b *Buffer) moveRight() { currLineLength := b.contents[b.curs.y].Len() if b.curs.x >= currLineLength && b.curs.y < len(b.contents)-1 { // we're at the end of the line and we have // some lines after, let's wrap around b.curs.move(0, 1) b.curs.move(-currLineLength, 0) } else if b.curs.x < b.contents[b.curs.y].Len() { // we have characters to the right, let's move along b.curs.move(1, 0) } } func (b *Buffer) moveToEndOfLine() { lineLen := b.contents[b.curs.y].Len() if b.curs.x > lineLen { distToMove := b.curs.x - lineLen for i := 0; i < distToMove; i++ { b.moveLeft() } } } func (b *Buffer) moveUp() { if b.curs.y > 0 { b.curs.move(0, -1) } } func (b *Buffer) moveDown() { if b.curs.y < len(b.contents) { b.curs.move(0, 1) } } func (b *Buffer) swapLineUp() bool { if b.curs.y
> 0 { c
urrLine := b.contents[b.curs.y] prevLine := b.contents[b.curs.y-1] b.contents[b.curs.y-1] = currLine b.contents[b.curs.y] = prevLine b.moveUp() } return true } func (b *Buffer) swapLineDown() bool { if b.curs.y < len(b.contents) { currLine := b.contents[b.curs.y] nextLine := b.contents[b.curs.y+1] b.contents[b.curs.y+1] = currLine b.contents[b.curs.y] = nextLine b.moveDown() } return true } func (b *Buffer) scrollUp() { if b.cam.y > 0 { // TODO move the cursor down 45 lines // IF the buffer exceeds the window size. lineScrollAmount := 10 b.cam.y -= lineScrollAmount for i := 0; i < lineScrollAmount; i++ { b.moveUp() } } } func (b *Buffer) scrollDown() { if b.cam.y < len(b.contents) { // TODO move the cursor down 45 lines // IF the buffer exceeds the window size. lineScrollAmount := 10 b.cam.y += lineScrollAmount for i := 0; i < lineScrollAmount; i++ { b.moveDown() } } } // processes a key press. returns if there // was a key that MODIFIED the buffer. func (b *Buffer) processActionKey(key int) bool { switch key { case sdl.K_CAPSLOCK: CAPS_LOCK = !CAPS_LOCK return true case sdl.K_RETURN: if SUPER_DOWN { // in sublime this goes // into the next block // nicely indented! } initial_x := b.curs.x prevLineLen := b.contents[b.curs.y].Len() var newRope *rope.Rope if initial_x < prevLineLen && initial_x > 0 { // we're not at the end of the line, but we're not at // the start, i.e. we're SPLITTING the line left, right := b.contents[b.curs.y].Split(initial_x) newRope = right b.contents[b.curs.y] = left } else if initial_x == 0 { // we're at the start of a line, so we want to // shift the line down and insert an empty line // above it! b.contents = append(b.contents, new(rope.Rope)) // grow copy(b.contents[b.curs.y+1:], b.contents[b.curs.y:]) // shift b.contents[b.curs.y] = new(rope.Rope) // set b.curs.move(0, 1) return true } else { // we're at the end of a line newRope = new(rope.Rope) } b.curs.move(0, 1) for x := 0; x < initial_x; x++ { // TODO(Felix): there's a bug here where // this doesn't account for the rendered x // position when we use tabs as tabs and not spaces b.curs.move(-1, 0) } b.contents = append(b.contents, nil) copy(b.contents[b.curs.y+1:], b.contents[b.curs.y:]) b.contents[b.curs.y] = newRope return true case sdl.K_BACKSPACE: if SUPER_DOWN { b.deleteBeforeCursor() } else { b.deletePrev() } return true case sdl.K_RIGHT: currLineLength := b.contents[b.curs.y].Len() if CONTROL_DOWN && b.parent != nil { b.parent.ChangeFocus(1) return true } if SUPER_DOWN { for b.curs.x < currLineLength { b.curs.move(1, 0) } return true } // FIXME this is weird! if ALT_DOWN { currLine := b.contents[b.curs.y] var i int for i = b.curs.x + 1; i < currLine.Len(); i++ { curr := currLine.Index(i) if curr <= ' ' || curr == '_' { break } } for j := 0; j < i; j++ { b.moveRight() } return true } b.moveRight() return true case sdl.K_LEFT: if CONTROL_DOWN && b.parent != nil { b.parent.ChangeFocus(-1) return true } if SUPER_DOWN { // TODO go to the nearest \t // if no \t (i.e. start of line) go to // the start of the line! b.curs.gotoStart() } if ALT_DOWN { currLine := b.contents[b.curs.y] i := b.curs.x for i > 0 { currChar := currLine.Index(i) // TODO is a seperator thing? if currChar <= ' ' || currChar == '_' { // move over one more? 
i = i - 1 break } i = i - 1 } start := b.curs.x for j := 0; j < start-i; j++ { b.moveLeft() } return true } b.moveLeft() return true case sdl.K_UP: if ALT_DOWN { return b.swapLineUp() } if SUPER_DOWN { // go to the start of the file } if b.curs.y > 0 { offs := 0 prevLineLen := b.contents[b.curs.y-1].Len() if b.curs.x > prevLineLen { offs = prevLineLen - b.curs.x } // TODO: offset should account for tabs b.curs.move(offs, -1) } return true case sdl.K_DOWN: if ALT_DOWN { return b.swapLineDown() } if SUPER_DOWN { // go to the end of the file } if b.curs.y < len(b.contents)-1 { offs := 0 nextLineLen := b.contents[b.curs.y+1].Len() if b.curs.x > nextLineLen { offs = nextLineLen - b.curs.x } // TODO: offset should account for tabs b.curs.move(offs, 1) } return true case sdl.K_TAB: if b.cfg.Editor.Tabs_Are_Spaces { // make an empty rune array of TAB_SIZE, cast to string // and insert it. b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, b.makeTab()) b.curs.move(int(b.cfg.Editor.Tab_Size), 0) } else { b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string('\t')) // the actual position is + 1, but we make it // move by TAB_SIZE characters on the view. b.curs.moveRender(1, 0, int(b.cfg.Editor.Tab_Size), 0) } return true case sdl.K_END: currLine := b.contents[b.curs.y] if b.curs.x < currLine.Len() { distToMove := currLine.Len() - b.curs.x b.curs.move(distToMove, 0) } return true case sdl.K_HOME: if b.curs.x > 0 { b.curs.move(-b.curs.x, 0) } return true case sdl.K_PAGEUP: b.scrollUp() return true case sdl.K_PAGEDOWN: b.scrollDown() return true case sdl.K_DELETE: b.deleteNext() return true case sdl.K_LGUI: fallthrough case sdl.K_RGUI: fallthrough case sdl.K_LALT: fallthrough case sdl.K_RALT: fallthrough case sdl.K_LCTRL: fallthrough case sdl.K_RCTRL: fallthrough case sdl.K_LSHIFT: fallthrough case sdl.K_RSHIFT: return true } return false } var ( SHIFT_DOWN bool = false SUPER_DOWN = false // cmd on mac, ctrl on windows CONTROL_DOWN = false // what is this on windows? 
ALT_DOWN = false // option on mac CAPS_LOCK = false ) // TODO(Felix) this is really stupid func (b *Buffer) makeTab() string { blah := []rune{} for i := 0; i < int(b.cfg.Editor.Tab_Size); i++ { blah = append(blah, ' ') } return string(blah) } func (b *Buffer) HandleEvent(evt strife.StrifeEvent) { switch event := evt.(type) { case *strife.MouseWheelEvent: if event.Y > 0 { b.scrollDown() } if event.Y < 0 { b.scrollUp() } } } func (b *Buffer) OnUpdate() bool { if !b.HasFocus { return false } prev_x := b.curs.x prev_y := b.curs.y SHIFT_DOWN = strife.KeyPressed(sdl.K_LSHIFT) || strife.KeyPressed(sdl.K_RSHIFT) SUPER_DOWN = strife.KeyPressed(sdl.K_LGUI) || strife.KeyPressed(sdl.K_RGUI) ALT_DOWN = strife.KeyPressed(sdl.K_LALT) || strife.KeyPressed(sdl.K_RALT) CONTROL_DOWN = strife.KeyPressed(sdl.K_LCTRL) || strife.KeyPressed(sdl.K_RCTRL) if strife.PollKeys() { keyCode := strife.PopKey() // try process this key input as an // action first actionPerformed := b.processActionKey(keyCode) if actionPerformed { return true } textEntered := b.processTextInput(rune(keyCode)) if textEntered { return true } } // FIXME handle focus properly if b.inputHandler == nil { return false } if b.curs.x != prev_x || b.curs.y != prev_y { should_draw = true should_flash = false reset_timer = strife.CurrentTimeMillis() } // fixme to not use CurrentTimeMillis if !should_flash && strife.CurrentTimeMillis()-reset_timer > b.cfg.Cursor.Reset_Delay { should_flash = true } if strife.CurrentTimeMillis()-timer > b.cfg.Cursor.Flash_Rate && (should_flash && b.cfg.Cursor.Flash) { timer = strife.CurrentTimeMillis() should_draw = !should_draw } return false } type syntaxRuneInfo struct { background int foreground int length int } // dimensions of the last character we rendered var last_w, last_h int // editor x and y offsets var ex, ey = 0, 0 var compiledRegex = map[string]*regexp.Regexp{} func (b *Buffer) renderAt(ctx *strife.Renderer, rx int, ry int) { // BACKGROUND ctx.SetColor(strife.HexRGB(b.cfg.Theme.Background)) ctx.Rect(b.x, b.y, b.w, b.h, strife.Fill) if b.cfg.Editor.Highlight_Line && b.HasFocus { ctx.SetColor(strife.Black) // highlight_line_col? ctx.Rect(ex+rx, ey+(ry+b.curs.ry*last_h)-(b.cam.y*last_h), b.w, last_h, strife.Fill) } // render the ol' cursor if should_draw && b.cfg.Cursor.Draw && b.HasFocus { cursorWidth := b.cfg.Cursor.GetCaretWidth() if cursorWidth == -1 { cursorWidth = last_w } ctx.SetColor(strife.HexRGB(b.cfg.Theme.Cursor)) // caret colour ctx.Rect(ex+(rx+b.curs.rx*last_w)-(b.cam.x*last_w), (ry+b.curs.ry*last_h)-(b.cam.y*last_h), cursorWidth, last_h, strife.Fill) } var visibleLines int = 50 // last_h > 0 means we have done // a render. 
if int(last_h) > 0 && int(b.h) != 0 { // render an extra three lines just // so we dont cut anything off if its // not evenly divisible visibleLines = (int(b.h) / int(last_h)) + 3 } start := b.cam.y upper := b.cam.y + visibleLines if upper > len(b.contents) { upper = len(b.contents) } numLines := len(b.contents) var y_col int for lineNum, rope := range b.contents[start:upper] { currLine := []rune(rope.String()) // char index => colour matches := map[int]syntaxRuneInfo{} stuff := b.cfg.Syntax[b.languageInfo] subjects := make([]cfg.SyntaxCriteria, len(stuff)) colours := make([]int, len(stuff)) idx := 0 for _, criteria := range stuff { colours[idx] = criteria.Colour subjects[idx] = criteria idx++ } // HOLY SLOW BATMAN for charIndex := 0; charIndex < len(currLine); charIndex++ { for syntaxIndex, syntax := range subjects { if syntax.Pattern != "" { // we have a regex pattern // FIXME this is also very slow! // we could easily compile all of these // regular expressions when we load the // syntax highlighter. a := string(currLine[charIndex:]) // no need to compile the same regex // pattern multiple times. regex, ok := compiledRegex[syntax.Pattern] if !ok { var err error regex, err = regexp.Compile(syntax.Pattern) if err != nil { log.Println(err.Error()) } } matched := regex.FindString(a) if matched != "" && len(matched) > 0 { // for some reason this affects the whole line if _, ok := matches[charIndex]; !ok { matches[charIndex] = syntaxRuneInfo{colours[syntaxIndex], -1, len(matched)} charIndex = charIndex + len(matched) } } } else { for _, subject := range syntax.Match { if charIndex+len(subject)+1 > len(currLine) { continue } a := currLine[charIndex : charIndex+len(subject)+1] // we only want to match words. so we check that it has a space // before or after the subject word. if strings.Compare(string(a), subject+" ") == 0 || strings.Compare(string(a), " "+subject) == 0 { if _, ok := matches[charIndex]; !ok { matches[charIndex] = syntaxRuneInfo{colours[syntaxIndex], -1, len(string(a))} break } charIndex += len(subject) } } } } } colorStack := []int{} var x_col int for idx, char := range currLine { switch char { case '\n': x_col = 0 y_col += 1 continue case '\t': x_col += b.cfg.Editor.Tab_Size continue } x_col += 1 ctx.SetColor(strife.HexRGB(b.cfg.Theme.Foreground)) // if we're currently over a character then set // the font colour to something else // ONLY SET THE COLOUR IF WE HAVE FOCUS ALSO! if b.HasFocus && b.curs.x+1 == x_col && b.curs.y == y_col && should_draw { ctx.SetColor(strife.HexRGB(b.cfg.Theme.Cursor_Invert)) } if info, ok := matches[idx]; ok { for i := 0; i < info.length; i++ { colorStack = append(colorStack, info.background) } } if len(colorStack) > 0 { var a int32 a, colorStack = int32(colorStack[len(colorStack)-1]), colorStack[:len(colorStack)-1] ctx.SetColor(strife.HexRGB(a)) } last_w, last_h = ctx.String(string(char), ex+(rx+((x_col-1)*last_w)), (ry + (y_col * last_h))) } if b.cfg.Editor.Show_Line_Numbers { gutterPadPx := 10 numLinesWidth := len(string(numLines)) + 1 gutterWidth := last_w*numLinesWidth + (gutterPadPx * 2) // render the line numbers ctx.SetColor(strife.HexRGB(b.cfg.Theme.Background)) ctx.Rect(rx, (ry + (y_col * last_h)), gutterWidth, b.h, strife.Fill) ctx.SetColor(strife.HexRGB(b.cfg.Theme.Foreground)) ctx.String(fmt.Sprintf("%*d", numLinesWidth, start+lineNum), rx+gutterPadPx, (ry + (y_col * last_h))) ex = gutterWidth } y_col += 1 } } func (b *Buffer) OnRender(ctx *strife.Renderer) { b.renderAt(ctx, b.x, b.y) }
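The renderAt loop above flags its own hot spot: the FIXME notes that the regex patterns could be compiled when the syntax highlighter is loaded instead of inside the per-character loop, and the compiledRegex cache is consulted but never filled. A minimal sketch of that idea follows, assuming a precompileSyntax helper (the name is made up) called once after languageInfo is set in OpenFile; compiledRegex, cfg.SyntaxCriteria and the Pattern field are taken from the code above, and the "regexp" and "log" imports are already present in buffer.go.

// precompileSyntax is a hypothetical helper: walk the syntax criteria for this
// buffer's language once and warm the compiledRegex cache, so renderAt never
// has to call regexp.Compile from inside its per-character loop.
func (b *Buffer) precompileSyntax() {
	for _, criteria := range b.cfg.Syntax[b.languageInfo] {
		if criteria.Pattern == "" {
			continue // plain keyword matches need no regex
		}
		if _, ok := compiledRegex[criteria.Pattern]; ok {
			continue // already cached
		}
		regex, err := regexp.Compile(criteria.Pattern)
		if err != nil {
			log.Println("bad syntax pattern: ", err.Error())
			continue
		}
		compiledRegex[criteria.Pattern] = regex
	}
}

Calling it at the end of OpenFile, or whenever languageInfo changes, would keep every subsequent renderAt call on the cached path.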
> 0 { c
identifier_name
buffer.go
package gui import ( "fmt" "io/ioutil" "log" "os" "path" "regexp" "runtime" "strings" "time" "unicode" "github.com/felixangell/go-rope" "github.com/felixangell/phi-editor/cfg" "github.com/felixangell/strife" "github.com/veandco/go-sdl2/sdl" ) var ( timer int64 = 0 reset_timer int64 = 0 should_draw bool = true should_flash bool ) // TODO: allow font setting or whatever type camera struct { x int y int } type Buffer struct { BaseComponent HasFocus bool index int parent *View font *strife.Font contents []*rope.Rope curs *Cursor cfg *cfg.TomlConfig cam *camera filePath string languageInfo string } func NewBuffer(conf *cfg.TomlConfig, parent *View, index int) *Buffer { config := conf if config == nil { config = cfg.NewDefaultConfig() } buffContents := []*rope.Rope{} buff := &Buffer{ index: index, parent: parent, contents: buffContents, curs: &Cursor{}, cfg: config, filePath: "/tmp/phi_file_" + time.Now().String(), // TODO make this a randomly chosen temp file cam: &camera{0, 0}, } return buff } func (b *Buffer) OpenFile(filePath string) { b.filePath = filePath log.Println("Opening file ", filePath) ext := path.Ext(filePath) lang, err := b.cfg.GetLanguageFromExt(ext) if err != nil { log.Println(err.Error()) } else { log.Println("- this file is a ", lang, " language program") b.languageInfo = lang } // if the file doesn't exist, try to create it before reading it if _, err := os.Stat(filePath); os.IsNotExist(err) { f, err := os.Create(filePath) if err != nil { panic(err) } else { f.Close() } } contents, err := ioutil.ReadFile(filePath) if err != nil { panic(err) } lines := strings.Split(string(contents), "\n") for _, line := range lines { b.appendLine(line) } } func (b *Buffer) OnDispose() { // hm! // os.Remove(b.fileHandle) } func (b *Buffer) OnInit() {} func (b *Buffer) appendLine(val string) { b.contents = append(b.contents, rope.New(val)) // because we've added a new line // we have to set the x to the start b.curs.x = 0 } func (b *Buffer) insertRune(r rune) { log.Println("Inserting rune ", r, " into current line at ", b.curs.x, ":", b.curs.y) log.Println("Line before insert> ", b.contents[b.curs.y]) b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(r)) b.curs.move(1, 0) } // TODO handle EVERYTHING but for now im handling // my UK macbook key layout. 
var shiftAlternative = map[rune]rune{ '1': '!', '2': '@', '3': '£', '4': '$', '5': '%', '6': '^', '7': '&', '8': '*', '9': '(', '0': ')', '-': '_', '=': '+', '`': '~', '/': '?', '.': '>', ',': '<', '[': '{', ']': '}', ';': ':', '\'': '"', '\\': '|', '§': '±', } var altAlternative = map[rune]rune{ '1': '¡', '2': '€', '3': '#', '4': '¢', '5': '∞', '6': '§', '7': '¶', '8': '•', '9': 'ª', '0': 'º', '-': '–', '=': '≠', '`': '`', '/': '÷', '.': '≥', ',': '≤', '[': '“', ']': '‘', ';': '…', '\'': 'æ', '\\': '«', } func (b *Buffer) processTextInput(r rune) bool { if ALT_DOWN && r == '\t' { // nop, we dont want to // insert tabs when we // alt tab out of view of this app return true } // only do the alt alternatives on mac osx // todo change this so it's not checking on every // input if runtime.GOOS == "darwin" && ALT_DOWN { if val, ok := altAlternative[r]; ok { r = val } } if CAPS_LOCK { if unicode.IsLetter(r) { r = unicode.ToUpper(r) } } if CONTROL_DOWN { actionName, actionExists := cfg.Shortcuts.Controls[string(unicode.ToLower(r))] if actionExists { if proc, ok := actions[actionName]; ok { return proc(b) } } else { log.Println("warning, unimplemented shortcut ctrl+", unicode.ToLower(r), actionName) } } if SUPER_DOWN { actionName, actionExists := cfg.Shortcuts.Supers[string(unicode.ToLower(r))] if actionExists { if proc, ok := actions[actionName]; ok { return proc(b) } } else { log.Println("warning, unimplemented shortcut ctrl+", unicode.ToLower(r), actionName) } } if SHIFT_DOWN { // if it's a letter convert to uppercase if unicode.IsLetter(r) { r = unicode.ToUpper(r) } else { // otherwise we have to look in our trusy // shift mapping thing. if val, ok := shiftAlternative[r]; ok { r = val } } } // NOTE: we have to do this AFTER we map the // shift combo for the value! // this will not insert a ), }, or ] if there // is one to the right of us... basically // this escapes out of a closing bracket // rather than inserting a new one IF we are inside // brackets. if b.cfg.Editor.Match_Braces { if r == ')' || r == '}' || r == ']' { currLine := b.contents[b.curs.y] if b.curs.x < currLine.Len() { curr := currLine.Index(b.curs.x + 1) if curr == r { b.curs.move(1, 0) return true } else { log.Print("no it's ", curr) } } } } b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(r)) b.curs.move(1, 0) // we don't need to match braces // let's not continue any further if !b.cfg.Editor.Match_Braces { return true } // TODO: shall we match single quotes and double quotes too? matchingPair := int(r) // the offset in the ASCII Table is +2 for { and for [ // but its +1 for parenthesis ( offset := 2 switch r { case '(': offset = 1 fallthrough case '{': fallthrough case '[': matchingPair += offset b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(rune(matchingPair))) } return true } func remove(slice []*rope.Rope, s int) []*rope.Rope { return append(slice[:s], slice[s+1:]...) 
} func (b *Buffer) deleteNext() { b.moveRight() b.deletePrev() } func (b *Buffer) deletePrev() { if b.curs.x > 0 { offs := -1 if !b.cfg.Editor.Tabs_Are_Spaces { if b.contents[b.curs.y].Index(b.curs.x) == '\t' { offs = int(-b.cfg.Editor.Tab_Size) } } else if b.cfg.Editor.Hungry_Backspace && b.curs.x >= int(b.cfg.Editor.Tab_Size) { // cut out the last {TAB_SIZE} amount of characters // and check em tabSize := int(b.cfg.Editor.Tab_Size) lastTabSizeChars := b.contents[b.curs.y].Substr(b.curs.x+1-tabSize, tabSize).String() if strings.Compare(lastTabSizeChars, b.makeTab()) == 0 { // delete {TAB_SIZE} amount of characters // from the cursors x pos for i := 0; i < int(b.cfg.Editor.Tab_Size); i++ { b.contents[b.curs.y] = b.contents[b.curs.y].Delete(b.curs.x, 1) b.curs.move(-1, 0) } return } } b.contents[b.curs.y] = b.contents[b.curs.y].Delete(b.curs.x, 1) b.curs.moveRender(-1, 0, offs, 0) } else if b.curs.x == 0 && b.curs.y > 0 { // start of line, wrap to previous prevLineLen := b.contents[b.curs.y-1].Len() b.contents[b.curs.y-1] = b.contents[b.curs.y-1].Concat(b.contents[b.curs.y]) b.contents = append(b.contents[:b.curs.y], b.contents[b.curs.y+1:]...) b.curs.move(prevLineLen, -1) } } func (b *Buffer) deleteBeforeCursor() { // delete so we're at the end // of the previous line if b.curs.x == 0 { b.deletePrev() return } for b.curs.x > 0 { b.deletePrev() } } func (b *Buffer) moveLeft() { if b.curs.x == 0 && b.curs.y > 0 { b.curs.move(b.contents[b.curs.y-1].Len(), -1) } else if b.curs.x > 0 { b.curs.move(-1, 0) } } func (b *Buffer) moveRight() { currLineLength := b.contents[b.curs.y].Len() if b.curs.x >= currLineLength && b.curs.y < len(b.contents)-1 { // we're at the end of the line and we have // some lines after, let's wrap around b.curs.move(0, 1) b.curs.move(-currLineLength, 0) } else if b.curs.x < b.contents[b.curs.y].Len() { // we have characters to the right, let's move along b.curs.move(1, 0) } } func (b *Buffer) moveToEndOfLine() { lineLen := b.contents[b.curs.y].Len() if b.curs.x > lineLen { distToMove := b.curs.x - lineLen for i := 0; i < distToMove; i++ { b.moveLeft() } } } func (b *Buffer) moveUp() { if b.curs.y > 0 { b.curs.move(0, -1) } } func (b *Buffer) moveDown() { if b.curs.y < len(b.contents) { b.curs.move(0, 1) } } func (b *Buffer) swapLineUp() bool { if b.curs.y > 0 { currLine := b.contents[b.cur
) swapLineDown() bool { if b.curs.y < len(b.contents) { currLine := b.contents[b.curs.y] nextLine := b.contents[b.curs.y+1] b.contents[b.curs.y+1] = currLine b.contents[b.curs.y] = nextLine b.moveDown() } return true } func (b *Buffer) scrollUp() { if b.cam.y > 0 { // TODO move the cursor down 45 lines // IF the buffer exceeds the window size. lineScrollAmount := 10 b.cam.y -= lineScrollAmount for i := 0; i < lineScrollAmount; i++ { b.moveUp() } } } func (b *Buffer) scrollDown() { if b.cam.y < len(b.contents) { // TODO move the cursor down 45 lines // IF the buffer exceeds the window size. lineScrollAmount := 10 b.cam.y += lineScrollAmount for i := 0; i < lineScrollAmount; i++ { b.moveDown() } } } // processes a key press. returns if there // was a key that MODIFIED the buffer. func (b *Buffer) processActionKey(key int) bool { switch key { case sdl.K_CAPSLOCK: CAPS_LOCK = !CAPS_LOCK return true case sdl.K_RETURN: if SUPER_DOWN { // in sublime this goes // into the next block // nicely indented! } initial_x := b.curs.x prevLineLen := b.contents[b.curs.y].Len() var newRope *rope.Rope if initial_x < prevLineLen && initial_x > 0 { // we're not at the end of the line, but we're not at // the start, i.e. we're SPLITTING the line left, right := b.contents[b.curs.y].Split(initial_x) newRope = right b.contents[b.curs.y] = left } else if initial_x == 0 { // we're at the start of a line, so we want to // shift the line down and insert an empty line // above it! b.contents = append(b.contents, new(rope.Rope)) // grow copy(b.contents[b.curs.y+1:], b.contents[b.curs.y:]) // shift b.contents[b.curs.y] = new(rope.Rope) // set b.curs.move(0, 1) return true } else { // we're at the end of a line newRope = new(rope.Rope) } b.curs.move(0, 1) for x := 0; x < initial_x; x++ { // TODO(Felix): there's a bug here where // this doesn't account for the rendered x // position when we use tabs as tabs and not spaces b.curs.move(-1, 0) } b.contents = append(b.contents, nil) copy(b.contents[b.curs.y+1:], b.contents[b.curs.y:]) b.contents[b.curs.y] = newRope return true case sdl.K_BACKSPACE: if SUPER_DOWN { b.deleteBeforeCursor() } else { b.deletePrev() } return true case sdl.K_RIGHT: currLineLength := b.contents[b.curs.y].Len() if CONTROL_DOWN && b.parent != nil { b.parent.ChangeFocus(1) return true } if SUPER_DOWN { for b.curs.x < currLineLength { b.curs.move(1, 0) } return true } // FIXME this is weird! if ALT_DOWN { currLine := b.contents[b.curs.y] var i int for i = b.curs.x + 1; i < currLine.Len(); i++ { curr := currLine.Index(i) if curr <= ' ' || curr == '_' { break } } for j := 0; j < i; j++ { b.moveRight() } return true } b.moveRight() return true case sdl.K_LEFT: if CONTROL_DOWN && b.parent != nil { b.parent.ChangeFocus(-1) return true } if SUPER_DOWN { // TODO go to the nearest \t // if no \t (i.e. start of line) go to // the start of the line! b.curs.gotoStart() } if ALT_DOWN { currLine := b.contents[b.curs.y] i := b.curs.x for i > 0 { currChar := currLine.Index(i) // TODO is a seperator thing? if currChar <= ' ' || currChar == '_' { // move over one more? 
i = i - 1 break } i = i - 1 } start := b.curs.x for j := 0; j < start-i; j++ { b.moveLeft() } return true } b.moveLeft() return true case sdl.K_UP: if ALT_DOWN { return b.swapLineUp() } if SUPER_DOWN { // go to the start of the file } if b.curs.y > 0 { offs := 0 prevLineLen := b.contents[b.curs.y-1].Len() if b.curs.x > prevLineLen { offs = prevLineLen - b.curs.x } // TODO: offset should account for tabs b.curs.move(offs, -1) } return true case sdl.K_DOWN: if ALT_DOWN { return b.swapLineDown() } if SUPER_DOWN { // go to the end of the file } if b.curs.y < len(b.contents)-1 { offs := 0 nextLineLen := b.contents[b.curs.y+1].Len() if b.curs.x > nextLineLen { offs = nextLineLen - b.curs.x } // TODO: offset should account for tabs b.curs.move(offs, 1) } return true case sdl.K_TAB: if b.cfg.Editor.Tabs_Are_Spaces { // make an empty rune array of TAB_SIZE, cast to string // and insert it. b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, b.makeTab()) b.curs.move(int(b.cfg.Editor.Tab_Size), 0) } else { b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string('\t')) // the actual position is + 1, but we make it // move by TAB_SIZE characters on the view. b.curs.moveRender(1, 0, int(b.cfg.Editor.Tab_Size), 0) } return true case sdl.K_END: currLine := b.contents[b.curs.y] if b.curs.x < currLine.Len() { distToMove := currLine.Len() - b.curs.x b.curs.move(distToMove, 0) } return true case sdl.K_HOME: if b.curs.x > 0 { b.curs.move(-b.curs.x, 0) } return true case sdl.K_PAGEUP: b.scrollUp() return true case sdl.K_PAGEDOWN: b.scrollDown() return true case sdl.K_DELETE: b.deleteNext() return true case sdl.K_LGUI: fallthrough case sdl.K_RGUI: fallthrough case sdl.K_LALT: fallthrough case sdl.K_RALT: fallthrough case sdl.K_LCTRL: fallthrough case sdl.K_RCTRL: fallthrough case sdl.K_LSHIFT: fallthrough case sdl.K_RSHIFT: return true } return false } var ( SHIFT_DOWN bool = false SUPER_DOWN = false // cmd on mac, ctrl on windows CONTROL_DOWN = false // what is this on windows? 
ALT_DOWN = false // option on mac CAPS_LOCK = false ) // TODO(Felix) this is really stupid func (b *Buffer) makeTab() string { blah := []rune{} for i := 0; i < int(b.cfg.Editor.Tab_Size); i++ { blah = append(blah, ' ') } return string(blah) } func (b *Buffer) HandleEvent(evt strife.StrifeEvent) { switch event := evt.(type) { case *strife.MouseWheelEvent: if event.Y > 0 { b.scrollDown() } if event.Y < 0 { b.scrollUp() } } } func (b *Buffer) OnUpdate() bool { if !b.HasFocus { return false } prev_x := b.curs.x prev_y := b.curs.y SHIFT_DOWN = strife.KeyPressed(sdl.K_LSHIFT) || strife.KeyPressed(sdl.K_RSHIFT) SUPER_DOWN = strife.KeyPressed(sdl.K_LGUI) || strife.KeyPressed(sdl.K_RGUI) ALT_DOWN = strife.KeyPressed(sdl.K_LALT) || strife.KeyPressed(sdl.K_RALT) CONTROL_DOWN = strife.KeyPressed(sdl.K_LCTRL) || strife.KeyPressed(sdl.K_RCTRL) if strife.PollKeys() { keyCode := strife.PopKey() // try process this key input as an // action first actionPerformed := b.processActionKey(keyCode) if actionPerformed { return true } textEntered := b.processTextInput(rune(keyCode)) if textEntered { return true } } // FIXME handle focus properly if b.inputHandler == nil { return false } if b.curs.x != prev_x || b.curs.y != prev_y { should_draw = true should_flash = false reset_timer = strife.CurrentTimeMillis() } // fixme to not use CurrentTimeMillis if !should_flash && strife.CurrentTimeMillis()-reset_timer > b.cfg.Cursor.Reset_Delay { should_flash = true } if strife.CurrentTimeMillis()-timer > b.cfg.Cursor.Flash_Rate && (should_flash && b.cfg.Cursor.Flash) { timer = strife.CurrentTimeMillis() should_draw = !should_draw } return false } type syntaxRuneInfo struct { background int foreground int length int } // dimensions of the last character we rendered var last_w, last_h int // editor x and y offsets var ex, ey = 0, 0 var compiledRegex = map[string]*regexp.Regexp{} func (b *Buffer) renderAt(ctx *strife.Renderer, rx int, ry int) { // BACKGROUND ctx.SetColor(strife.HexRGB(b.cfg.Theme.Background)) ctx.Rect(b.x, b.y, b.w, b.h, strife.Fill) if b.cfg.Editor.Highlight_Line && b.HasFocus { ctx.SetColor(strife.Black) // highlight_line_col? ctx.Rect(ex+rx, ey+(ry+b.curs.ry*last_h)-(b.cam.y*last_h), b.w, last_h, strife.Fill) } // render the ol' cursor if should_draw && b.cfg.Cursor.Draw && b.HasFocus { cursorWidth := b.cfg.Cursor.GetCaretWidth() if cursorWidth == -1 { cursorWidth = last_w } ctx.SetColor(strife.HexRGB(b.cfg.Theme.Cursor)) // caret colour ctx.Rect(ex+(rx+b.curs.rx*last_w)-(b.cam.x*last_w), (ry+b.curs.ry*last_h)-(b.cam.y*last_h), cursorWidth, last_h, strife.Fill) } var visibleLines int = 50 // last_h > 0 means we have done // a render. 
if int(last_h) > 0 && int(b.h) != 0 { // render an extra three lines just // so we dont cut anything off if its // not evenly divisible visibleLines = (int(b.h) / int(last_h)) + 3 } start := b.cam.y upper := b.cam.y + visibleLines if upper > len(b.contents) { upper = len(b.contents) } numLines := len(b.contents) var y_col int for lineNum, rope := range b.contents[start:upper] { currLine := []rune(rope.String()) // char index => colour matches := map[int]syntaxRuneInfo{} stuff := b.cfg.Syntax[b.languageInfo] subjects := make([]cfg.SyntaxCriteria, len(stuff)) colours := make([]int, len(stuff)) idx := 0 for _, criteria := range stuff { colours[idx] = criteria.Colour subjects[idx] = criteria idx++ } // HOLY SLOW BATMAN for charIndex := 0; charIndex < len(currLine); charIndex++ { for syntaxIndex, syntax := range subjects { if syntax.Pattern != "" { // we have a regex pattern // FIXME this is also very slow! // we could easily compile all of these // regular expressions when we load the // syntax highlighter. a := string(currLine[charIndex:]) // no need to compile the same regex // pattern multiple times. regex, ok := compiledRegex[syntax.Pattern] if !ok { var err error regex, err = regexp.Compile(syntax.Pattern) if err != nil { log.Println(err.Error()) } } matched := regex.FindString(a) if matched != "" && len(matched) > 0 { // for some reason this affects the whole line if _, ok := matches[charIndex]; !ok { matches[charIndex] = syntaxRuneInfo{colours[syntaxIndex], -1, len(matched)} charIndex = charIndex + len(matched) } } } else { for _, subject := range syntax.Match { if charIndex+len(subject)+1 > len(currLine) { continue } a := currLine[charIndex : charIndex+len(subject)+1] // we only want to match words. so we check that it has a space // before or after the subject word. if strings.Compare(string(a), subject+" ") == 0 || strings.Compare(string(a), " "+subject) == 0 { if _, ok := matches[charIndex]; !ok { matches[charIndex] = syntaxRuneInfo{colours[syntaxIndex], -1, len(string(a))} break } charIndex += len(subject) } } } } } colorStack := []int{} var x_col int for idx, char := range currLine { switch char { case '\n': x_col = 0 y_col += 1 continue case '\t': x_col += b.cfg.Editor.Tab_Size continue } x_col += 1 ctx.SetColor(strife.HexRGB(b.cfg.Theme.Foreground)) // if we're currently over a character then set // the font colour to something else // ONLY SET THE COLOUR IF WE HAVE FOCUS ALSO! if b.HasFocus && b.curs.x+1 == x_col && b.curs.y == y_col && should_draw { ctx.SetColor(strife.HexRGB(b.cfg.Theme.Cursor_Invert)) } if info, ok := matches[idx]; ok { for i := 0; i < info.length; i++ { colorStack = append(colorStack, info.background) } } if len(colorStack) > 0 { var a int32 a, colorStack = int32(colorStack[len(colorStack)-1]), colorStack[:len(colorStack)-1] ctx.SetColor(strife.HexRGB(a)) } last_w, last_h = ctx.String(string(char), ex+(rx+((x_col-1)*last_w)), (ry + (y_col * last_h))) } if b.cfg.Editor.Show_Line_Numbers { gutterPadPx := 10 numLinesWidth := len(string(numLines)) + 1 gutterWidth := last_w*numLinesWidth + (gutterPadPx * 2) // render the line numbers ctx.SetColor(strife.HexRGB(b.cfg.Theme.Background)) ctx.Rect(rx, (ry + (y_col * last_h)), gutterWidth, b.h, strife.Fill) ctx.SetColor(strife.HexRGB(b.cfg.Theme.Foreground)) ctx.String(fmt.Sprintf("%*d", numLinesWidth, start+lineNum), rx+gutterPadPx, (ry + (y_col * last_h))) ex = gutterWidth } y_col += 1 } } func (b *Buffer) OnRender(ctx *strife.Renderer) { b.renderAt(ctx, b.x, b.y) }
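In the gutter rendering above, numLinesWidth is computed as len(string(numLines)) + 1, but in Go string(int) produces the rune with that code point rather than the decimal digits, so the width is not the digit count the code appears to intend. A small sketch of the presumably intended calculation, using strconv; gutterDigits is a hypothetical helper name.

package gui

import "strconv"

// gutterDigits returns the number of columns needed for the largest line
// number, plus one column of padding.
func gutterDigits(numLines int) int {
	return len(strconv.Itoa(numLines)) + 1
}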
s.y] prevLine := b.contents[b.curs.y-1] b.contents[b.curs.y-1] = currLine b.contents[b.curs.y] = prevLine b.moveUp() } return true } func (b *Buffer
conditional_block
buffer.go
package gui import ( "fmt" "io/ioutil" "log" "os" "path" "regexp" "runtime" "strings" "time" "unicode" "github.com/felixangell/go-rope" "github.com/felixangell/phi-editor/cfg" "github.com/felixangell/strife" "github.com/veandco/go-sdl2/sdl" ) var ( timer int64 = 0 reset_timer int64 = 0 should_draw bool = true should_flash bool ) // TODO: allow font setting or whatever type camera struct { x int y int } type Buffer struct { BaseComponent HasFocus bool index int parent *View font *strife.Font contents []*rope.Rope curs *Cursor cfg *cfg.TomlConfig cam *camera filePath string languageInfo string } func NewBuffer(conf *cfg.TomlConfig, parent *View, index int) *Buffer { config := conf if config == nil { config = cfg.NewDefaultConfig() } buffContents := []*rope.Rope{} buff := &Buffer{ index: index, parent: parent, contents: buffContents, curs: &Cursor{}, cfg: config, filePath: "/tmp/phi_file_" + time.Now().String(), // TODO make this a randomly chosen temp file cam: &camera{0, 0}, } return buff } func (b *Buffer) OpenFile(filePath string) { b.filePath = filePath log.Println("Opening file ", filePath) ext := path.Ext(filePath) lang, err := b.cfg.GetLanguageFromExt(ext) if err != nil { log.Println(err.Error()) } else { log.Println("- this file is a ", lang, " language program") b.languageInfo = lang } // if the file doesn't exist, try to create it before reading it if _, err := os.Stat(filePath); os.IsNotExist(err) { f, err := os.Create(filePath) if err != nil { panic(err) } else { f.Close() } } contents, err := ioutil.ReadFile(filePath) if err != nil { panic(err) } lines := strings.Split(string(contents), "\n") for _, line := range lines { b.appendLine(line) } } func (b *Buffer) OnDispose() { // hm! // os.Remove(b.fileHandle) } func (b *Buffer) OnInit() {} func (b *Buffer) appendLine(val string) { b.contents = append(b.contents, rope.New(val)) // because we've added a new line // we have to set the x to the start b.curs.x = 0 } func (b *Buffer) insertRune(r rune) { log.Println("Inserting rune ", r, " into current line at ", b.curs.x, ":", b.curs.y) log.Println("Line before insert> ", b.contents[b.curs.y]) b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(r)) b.curs.move(1, 0)
// my UK macbook key layout. var shiftAlternative = map[rune]rune{ '1': '!', '2': '@', '3': '£', '4': '$', '5': '%', '6': '^', '7': '&', '8': '*', '9': '(', '0': ')', '-': '_', '=': '+', '`': '~', '/': '?', '.': '>', ',': '<', '[': '{', ']': '}', ';': ':', '\'': '"', '\\': '|', '§': '±', } var altAlternative = map[rune]rune{ '1': '¡', '2': '€', '3': '#', '4': '¢', '5': '∞', '6': '§', '7': '¶', '8': '•', '9': 'ª', '0': 'º', '-': '–', '=': '≠', '`': '`', '/': '÷', '.': '≥', ',': '≤', '[': '“', ']': '‘', ';': '…', '\'': 'æ', '\\': '«', } func (b *Buffer) processTextInput(r rune) bool { if ALT_DOWN && r == '\t' { // nop, we dont want to // insert tabs when we // alt tab out of view of this app return true } // only do the alt alternatives on mac osx // todo change this so it's not checking on every // input if runtime.GOOS == "darwin" && ALT_DOWN { if val, ok := altAlternative[r]; ok { r = val } } if CAPS_LOCK { if unicode.IsLetter(r) { r = unicode.ToUpper(r) } } if CONTROL_DOWN { actionName, actionExists := cfg.Shortcuts.Controls[string(unicode.ToLower(r))] if actionExists { if proc, ok := actions[actionName]; ok { return proc(b) } } else { log.Println("warning, unimplemented shortcut ctrl+", unicode.ToLower(r), actionName) } } if SUPER_DOWN { actionName, actionExists := cfg.Shortcuts.Supers[string(unicode.ToLower(r))] if actionExists { if proc, ok := actions[actionName]; ok { return proc(b) } } else { log.Println("warning, unimplemented shortcut ctrl+", unicode.ToLower(r), actionName) } } if SHIFT_DOWN { // if it's a letter convert to uppercase if unicode.IsLetter(r) { r = unicode.ToUpper(r) } else { // otherwise we have to look in our trusy // shift mapping thing. if val, ok := shiftAlternative[r]; ok { r = val } } } // NOTE: we have to do this AFTER we map the // shift combo for the value! // this will not insert a ), }, or ] if there // is one to the right of us... basically // this escapes out of a closing bracket // rather than inserting a new one IF we are inside // brackets. if b.cfg.Editor.Match_Braces { if r == ')' || r == '}' || r == ']' { currLine := b.contents[b.curs.y] if b.curs.x < currLine.Len() { curr := currLine.Index(b.curs.x + 1) if curr == r { b.curs.move(1, 0) return true } else { log.Print("no it's ", curr) } } } } b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(r)) b.curs.move(1, 0) // we don't need to match braces // let's not continue any further if !b.cfg.Editor.Match_Braces { return true } // TODO: shall we match single quotes and double quotes too? matchingPair := int(r) // the offset in the ASCII Table is +2 for { and for [ // but its +1 for parenthesis ( offset := 2 switch r { case '(': offset = 1 fallthrough case '{': fallthrough case '[': matchingPair += offset b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(rune(matchingPair))) } return true } func remove(slice []*rope.Rope, s int) []*rope.Rope { return append(slice[:s], slice[s+1:]...) 
} func (b *Buffer) deleteNext() { b.moveRight() b.deletePrev() } func (b *Buffer) deletePrev() { if b.curs.x > 0 { offs := -1 if !b.cfg.Editor.Tabs_Are_Spaces { if b.contents[b.curs.y].Index(b.curs.x) == '\t' { offs = int(-b.cfg.Editor.Tab_Size) } } else if b.cfg.Editor.Hungry_Backspace && b.curs.x >= int(b.cfg.Editor.Tab_Size) { // cut out the last {TAB_SIZE} amount of characters // and check em tabSize := int(b.cfg.Editor.Tab_Size) lastTabSizeChars := b.contents[b.curs.y].Substr(b.curs.x+1-tabSize, tabSize).String() if strings.Compare(lastTabSizeChars, b.makeTab()) == 0 { // delete {TAB_SIZE} amount of characters // from the cursors x pos for i := 0; i < int(b.cfg.Editor.Tab_Size); i++ { b.contents[b.curs.y] = b.contents[b.curs.y].Delete(b.curs.x, 1) b.curs.move(-1, 0) } return } } b.contents[b.curs.y] = b.contents[b.curs.y].Delete(b.curs.x, 1) b.curs.moveRender(-1, 0, offs, 0) } else if b.curs.x == 0 && b.curs.y > 0 { // start of line, wrap to previous prevLineLen := b.contents[b.curs.y-1].Len() b.contents[b.curs.y-1] = b.contents[b.curs.y-1].Concat(b.contents[b.curs.y]) b.contents = append(b.contents[:b.curs.y], b.contents[b.curs.y+1:]...) b.curs.move(prevLineLen, -1) } } func (b *Buffer) deleteBeforeCursor() { // delete so we're at the end // of the previous line if b.curs.x == 0 { b.deletePrev() return } for b.curs.x > 0 { b.deletePrev() } } func (b *Buffer) moveLeft() { if b.curs.x == 0 && b.curs.y > 0 { b.curs.move(b.contents[b.curs.y-1].Len(), -1) } else if b.curs.x > 0 { b.curs.move(-1, 0) } } func (b *Buffer) moveRight() { currLineLength := b.contents[b.curs.y].Len() if b.curs.x >= currLineLength && b.curs.y < len(b.contents)-1 { // we're at the end of the line and we have // some lines after, let's wrap around b.curs.move(0, 1) b.curs.move(-currLineLength, 0) } else if b.curs.x < b.contents[b.curs.y].Len() { // we have characters to the right, let's move along b.curs.move(1, 0) } } func (b *Buffer) moveToEndOfLine() { lineLen := b.contents[b.curs.y].Len() if b.curs.x > lineLen { distToMove := b.curs.x - lineLen for i := 0; i < distToMove; i++ { b.moveLeft() } } } func (b *Buffer) moveUp() { if b.curs.y > 0 { b.curs.move(0, -1) } } func (b *Buffer) moveDown() { if b.curs.y < len(b.contents) { b.curs.move(0, 1) } } func (b *Buffer) swapLineUp() bool { if b.curs.y > 0 { currLine := b.contents[b.curs.y] prevLine := b.contents[b.curs.y-1] b.contents[b.curs.y-1] = currLine b.contents[b.curs.y] = prevLine b.moveUp() } return true } func (b *Buffer) swapLineDown() bool { if b.curs.y < len(b.contents) { currLine := b.contents[b.curs.y] nextLine := b.contents[b.curs.y+1] b.contents[b.curs.y+1] = currLine b.contents[b.curs.y] = nextLine b.moveDown() } return true } func (b *Buffer) scrollUp() { if b.cam.y > 0 { // TODO move the cursor down 45 lines // IF the buffer exceeds the window size. lineScrollAmount := 10 b.cam.y -= lineScrollAmount for i := 0; i < lineScrollAmount; i++ { b.moveUp() } } } func (b *Buffer) scrollDown() { if b.cam.y < len(b.contents) { // TODO move the cursor down 45 lines // IF the buffer exceeds the window size. lineScrollAmount := 10 b.cam.y += lineScrollAmount for i := 0; i < lineScrollAmount; i++ { b.moveDown() } } } // processes a key press. returns if there // was a key that MODIFIED the buffer. func (b *Buffer) processActionKey(key int) bool { switch key { case sdl.K_CAPSLOCK: CAPS_LOCK = !CAPS_LOCK return true case sdl.K_RETURN: if SUPER_DOWN { // in sublime this goes // into the next block // nicely indented! 
} initial_x := b.curs.x prevLineLen := b.contents[b.curs.y].Len() var newRope *rope.Rope if initial_x < prevLineLen && initial_x > 0 { // we're not at the end of the line, but we're not at // the start, i.e. we're SPLITTING the line left, right := b.contents[b.curs.y].Split(initial_x) newRope = right b.contents[b.curs.y] = left } else if initial_x == 0 { // we're at the start of a line, so we want to // shift the line down and insert an empty line // above it! b.contents = append(b.contents, new(rope.Rope)) // grow copy(b.contents[b.curs.y+1:], b.contents[b.curs.y:]) // shift b.contents[b.curs.y] = new(rope.Rope) // set b.curs.move(0, 1) return true } else { // we're at the end of a line newRope = new(rope.Rope) } b.curs.move(0, 1) for x := 0; x < initial_x; x++ { // TODO(Felix): there's a bug here where // this doesn't account for the rendered x // position when we use tabs as tabs and not spaces b.curs.move(-1, 0) } b.contents = append(b.contents, nil) copy(b.contents[b.curs.y+1:], b.contents[b.curs.y:]) b.contents[b.curs.y] = newRope return true case sdl.K_BACKSPACE: if SUPER_DOWN { b.deleteBeforeCursor() } else { b.deletePrev() } return true case sdl.K_RIGHT: currLineLength := b.contents[b.curs.y].Len() if CONTROL_DOWN && b.parent != nil { b.parent.ChangeFocus(1) return true } if SUPER_DOWN { for b.curs.x < currLineLength { b.curs.move(1, 0) } return true } // FIXME this is weird! if ALT_DOWN { currLine := b.contents[b.curs.y] var i int for i = b.curs.x + 1; i < currLine.Len(); i++ { curr := currLine.Index(i) if curr <= ' ' || curr == '_' { break } } for j := 0; j < i; j++ { b.moveRight() } return true } b.moveRight() return true case sdl.K_LEFT: if CONTROL_DOWN && b.parent != nil { b.parent.ChangeFocus(-1) return true } if SUPER_DOWN { // TODO go to the nearest \t // if no \t (i.e. start of line) go to // the start of the line! b.curs.gotoStart() } if ALT_DOWN { currLine := b.contents[b.curs.y] i := b.curs.x for i > 0 { currChar := currLine.Index(i) // TODO is a seperator thing? if currChar <= ' ' || currChar == '_' { // move over one more? i = i - 1 break } i = i - 1 } start := b.curs.x for j := 0; j < start-i; j++ { b.moveLeft() } return true } b.moveLeft() return true case sdl.K_UP: if ALT_DOWN { return b.swapLineUp() } if SUPER_DOWN { // go to the start of the file } if b.curs.y > 0 { offs := 0 prevLineLen := b.contents[b.curs.y-1].Len() if b.curs.x > prevLineLen { offs = prevLineLen - b.curs.x } // TODO: offset should account for tabs b.curs.move(offs, -1) } return true case sdl.K_DOWN: if ALT_DOWN { return b.swapLineDown() } if SUPER_DOWN { // go to the end of the file } if b.curs.y < len(b.contents)-1 { offs := 0 nextLineLen := b.contents[b.curs.y+1].Len() if b.curs.x > nextLineLen { offs = nextLineLen - b.curs.x } // TODO: offset should account for tabs b.curs.move(offs, 1) } return true case sdl.K_TAB: if b.cfg.Editor.Tabs_Are_Spaces { // make an empty rune array of TAB_SIZE, cast to string // and insert it. b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, b.makeTab()) b.curs.move(int(b.cfg.Editor.Tab_Size), 0) } else { b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string('\t')) // the actual position is + 1, but we make it // move by TAB_SIZE characters on the view. 
b.curs.moveRender(1, 0, int(b.cfg.Editor.Tab_Size), 0) } return true case sdl.K_END: currLine := b.contents[b.curs.y] if b.curs.x < currLine.Len() { distToMove := currLine.Len() - b.curs.x b.curs.move(distToMove, 0) } return true case sdl.K_HOME: if b.curs.x > 0 { b.curs.move(-b.curs.x, 0) } return true case sdl.K_PAGEUP: b.scrollUp() return true case sdl.K_PAGEDOWN: b.scrollDown() return true case sdl.K_DELETE: b.deleteNext() return true case sdl.K_LGUI: fallthrough case sdl.K_RGUI: fallthrough case sdl.K_LALT: fallthrough case sdl.K_RALT: fallthrough case sdl.K_LCTRL: fallthrough case sdl.K_RCTRL: fallthrough case sdl.K_LSHIFT: fallthrough case sdl.K_RSHIFT: return true } return false } var ( SHIFT_DOWN bool = false SUPER_DOWN = false // cmd on mac, ctrl on windows CONTROL_DOWN = false // what is this on windows? ALT_DOWN = false // option on mac CAPS_LOCK = false ) // TODO(Felix) this is really stupid func (b *Buffer) makeTab() string { blah := []rune{} for i := 0; i < int(b.cfg.Editor.Tab_Size); i++ { blah = append(blah, ' ') } return string(blah) } func (b *Buffer) HandleEvent(evt strife.StrifeEvent) { switch event := evt.(type) { case *strife.MouseWheelEvent: if event.Y > 0 { b.scrollDown() } if event.Y < 0 { b.scrollUp() } } } func (b *Buffer) OnUpdate() bool { if !b.HasFocus { return false } prev_x := b.curs.x prev_y := b.curs.y SHIFT_DOWN = strife.KeyPressed(sdl.K_LSHIFT) || strife.KeyPressed(sdl.K_RSHIFT) SUPER_DOWN = strife.KeyPressed(sdl.K_LGUI) || strife.KeyPressed(sdl.K_RGUI) ALT_DOWN = strife.KeyPressed(sdl.K_LALT) || strife.KeyPressed(sdl.K_RALT) CONTROL_DOWN = strife.KeyPressed(sdl.K_LCTRL) || strife.KeyPressed(sdl.K_RCTRL) if strife.PollKeys() { keyCode := strife.PopKey() // try process this key input as an // action first actionPerformed := b.processActionKey(keyCode) if actionPerformed { return true } textEntered := b.processTextInput(rune(keyCode)) if textEntered { return true } } // FIXME handle focus properly if b.inputHandler == nil { return false } if b.curs.x != prev_x || b.curs.y != prev_y { should_draw = true should_flash = false reset_timer = strife.CurrentTimeMillis() } // fixme to not use CurrentTimeMillis if !should_flash && strife.CurrentTimeMillis()-reset_timer > b.cfg.Cursor.Reset_Delay { should_flash = true } if strife.CurrentTimeMillis()-timer > b.cfg.Cursor.Flash_Rate && (should_flash && b.cfg.Cursor.Flash) { timer = strife.CurrentTimeMillis() should_draw = !should_draw } return false } type syntaxRuneInfo struct { background int foreground int length int } // dimensions of the last character we rendered var last_w, last_h int // editor x and y offsets var ex, ey = 0, 0 var compiledRegex = map[string]*regexp.Regexp{} func (b *Buffer) renderAt(ctx *strife.Renderer, rx int, ry int) { // BACKGROUND ctx.SetColor(strife.HexRGB(b.cfg.Theme.Background)) ctx.Rect(b.x, b.y, b.w, b.h, strife.Fill) if b.cfg.Editor.Highlight_Line && b.HasFocus { ctx.SetColor(strife.Black) // highlight_line_col? ctx.Rect(ex+rx, ey+(ry+b.curs.ry*last_h)-(b.cam.y*last_h), b.w, last_h, strife.Fill) } // render the ol' cursor if should_draw && b.cfg.Cursor.Draw && b.HasFocus { cursorWidth := b.cfg.Cursor.GetCaretWidth() if cursorWidth == -1 { cursorWidth = last_w } ctx.SetColor(strife.HexRGB(b.cfg.Theme.Cursor)) // caret colour ctx.Rect(ex+(rx+b.curs.rx*last_w)-(b.cam.x*last_w), (ry+b.curs.ry*last_h)-(b.cam.y*last_h), cursorWidth, last_h, strife.Fill) } var visibleLines int = 50 // last_h > 0 means we have done // a render. 
if int(last_h) > 0 && int(b.h) != 0 { // render an extra three lines just // so we dont cut anything off if its // not evenly divisible visibleLines = (int(b.h) / int(last_h)) + 3 } start := b.cam.y upper := b.cam.y + visibleLines if upper > len(b.contents) { upper = len(b.contents) } numLines := len(b.contents) var y_col int for lineNum, rope := range b.contents[start:upper] { currLine := []rune(rope.String()) // char index => colour matches := map[int]syntaxRuneInfo{} stuff := b.cfg.Syntax[b.languageInfo] subjects := make([]cfg.SyntaxCriteria, len(stuff)) colours := make([]int, len(stuff)) idx := 0 for _, criteria := range stuff { colours[idx] = criteria.Colour subjects[idx] = criteria idx++ } // HOLY SLOW BATMAN for charIndex := 0; charIndex < len(currLine); charIndex++ { for syntaxIndex, syntax := range subjects { if syntax.Pattern != "" { // we have a regex pattern // FIXME this is also very slow! // we could easily compile all of these // regular expressions when we load the // syntax highlighter. a := string(currLine[charIndex:]) // no need to compile the same regex // pattern multiple times. regex, ok := compiledRegex[syntax.Pattern] if !ok { var err error regex, err = regexp.Compile(syntax.Pattern) if err != nil { log.Println(err.Error()) } } matched := regex.FindString(a) if matched != "" && len(matched) > 0 { // for some reason this affects the whole line if _, ok := matches[charIndex]; !ok { matches[charIndex] = syntaxRuneInfo{colours[syntaxIndex], -1, len(matched)} charIndex = charIndex + len(matched) } } } else { for _, subject := range syntax.Match { if charIndex+len(subject)+1 > len(currLine) { continue } a := currLine[charIndex : charIndex+len(subject)+1] // we only want to match words. so we check that it has a space // before or after the subject word. if strings.Compare(string(a), subject+" ") == 0 || strings.Compare(string(a), " "+subject) == 0 { if _, ok := matches[charIndex]; !ok { matches[charIndex] = syntaxRuneInfo{colours[syntaxIndex], -1, len(string(a))} break } charIndex += len(subject) } } } } } colorStack := []int{} var x_col int for idx, char := range currLine { switch char { case '\n': x_col = 0 y_col += 1 continue case '\t': x_col += b.cfg.Editor.Tab_Size continue } x_col += 1 ctx.SetColor(strife.HexRGB(b.cfg.Theme.Foreground)) // if we're currently over a character then set // the font colour to something else // ONLY SET THE COLOUR IF WE HAVE FOCUS ALSO! if b.HasFocus && b.curs.x+1 == x_col && b.curs.y == y_col && should_draw { ctx.SetColor(strife.HexRGB(b.cfg.Theme.Cursor_Invert)) } if info, ok := matches[idx]; ok { for i := 0; i < info.length; i++ { colorStack = append(colorStack, info.background) } } if len(colorStack) > 0 { var a int32 a, colorStack = int32(colorStack[len(colorStack)-1]), colorStack[:len(colorStack)-1] ctx.SetColor(strife.HexRGB(a)) } last_w, last_h = ctx.String(string(char), ex+(rx+((x_col-1)*last_w)), (ry + (y_col * last_h))) } if b.cfg.Editor.Show_Line_Numbers { gutterPadPx := 10 numLinesWidth := len(string(numLines)) + 1 gutterWidth := last_w*numLinesWidth + (gutterPadPx * 2) // render the line numbers ctx.SetColor(strife.HexRGB(b.cfg.Theme.Background)) ctx.Rect(rx, (ry + (y_col * last_h)), gutterWidth, b.h, strife.Fill) ctx.SetColor(strife.HexRGB(b.cfg.Theme.Foreground)) ctx.String(fmt.Sprintf("%*d", numLinesWidth, start+lineNum), rx+gutterPadPx, (ry + (y_col * last_h))) ex = gutterWidth } y_col += 1 } } func (b *Buffer) OnRender(ctx *strife.Renderer) { b.renderAt(ctx, b.x, b.y) }
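The keyword branch of the highlighter above matches a slice one character longer than the keyword against keyword+" " or " "+keyword, which misses words at the start or end of a line and words followed by punctuation. A sketch of a plain word-boundary check is below; isWordChar and matchKeywordAt are hypothetical helpers, not functions from the editor.

package gui

import "unicode"

func isWordChar(r rune) bool {
	return unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_'
}

// matchKeywordAt reports whether keyword occurs at index i in line as a whole word.
func matchKeywordAt(line []rune, i int, keyword string) bool {
	kw := []rune(keyword)
	if i+len(kw) > len(line) {
		return false
	}
	for j, r := range kw {
		if line[i+j] != r {
			return false
		}
	}
	beforeOK := i == 0 || !isWordChar(line[i-1])
	afterOK := i+len(kw) == len(line) || !isWordChar(line[i+len(kw)])
	return beforeOK && afterOK
}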
} // TODO handle EVERYTHING but for now im handling
random_line_split
buffer.go
package gui import ( "fmt" "io/ioutil" "log" "os" "path" "regexp" "runtime" "strings" "time" "unicode" "github.com/felixangell/go-rope" "github.com/felixangell/phi-editor/cfg" "github.com/felixangell/strife" "github.com/veandco/go-sdl2/sdl" ) var ( timer int64 = 0 reset_timer int64 = 0 should_draw bool = true should_flash bool ) // TODO: allow font setting or whatever type camera struct { x int y int } type Buffer struct { BaseComponent HasFocus bool index int parent *View font *strife.Font contents []*rope.Rope curs *Cursor cfg *cfg.TomlConfig cam *camera filePath string languageInfo string } func NewBuffer(conf *cfg.TomlConfig, parent *View, index int) *Buffer { config := conf if config == nil { config = cfg.NewDefaultConfig() } buffContents := []*rope.Rope{} buff := &Buffer{ index: index, parent: parent, contents: buffContents, curs: &Cursor{}, cfg: config, filePath: "/tmp/phi_file_" + time.Now().String(), // TODO make this a randomly chosen temp file cam: &camera{0, 0}, } return buff } func (b *Buffer) OpenFile(filePath string) { b.filePath = filePath log.Println("Opening file ", filePath) ext := path.Ext(filePath) lang, err := b.cfg.GetLanguageFromExt(ext) if err != nil { log.Println(err.Error()) } else { log.Println("- this file is a ", lang, " language program") b.languageInfo = lang } // if the file doesn't exist, try to create it before reading it if _, err := os.Stat(filePath); os.IsNotExist(err) { f, err := os.Create(filePath) if err != nil { panic(err) } else { f.Close() } } contents, err := ioutil.ReadFile(filePath) if err != nil { panic(err) } lines := strings.Split(string(contents), "\n") for _, line := range lines { b.appendLine(line) } } func (b *Buffer) OnDispose() { // hm! // os.Remove(b.fileHandle) } func (b *Buffer) OnInit() {} func (b *Buffer) appendLine(val string) { b.contents = append(b.contents, rope.New(val)) // because we've added a new line // we have to set the x to the start b.curs.x = 0 } func (b *Buffer) insertRune(r rune) { log.Println("Inserting rune ", r, " into current line at ", b.curs.x, ":", b.curs.y) log.Println("Line before insert> ", b.contents[b.curs.y]) b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(r)) b.curs.move(1, 0) } // TODO handle EVERYTHING but for now im handling // my UK macbook key layout. 
var shiftAlternative = map[rune]rune{ '1': '!', '2': '@', '3': '£', '4': '$', '5': '%', '6': '^', '7': '&', '8': '*', '9': '(', '0': ')', '-': '_', '=': '+', '`': '~', '/': '?', '.': '>', ',': '<', '[': '{', ']': '}', ';': ':', '\'': '"', '\\': '|', '§': '±', } var altAlternative = map[rune]rune{ '1': '¡', '2': '€', '3': '#', '4': '¢', '5': '∞', '6': '§', '7': '¶', '8': '•', '9': 'ª', '0': 'º', '-': '–', '=': '≠', '`': '`', '/': '÷', '.': '≥', ',': '≤', '[': '“', ']': '‘', ';': '…', '\'': 'æ', '\\': '«', } func (b *Buffer) processTextInput(r rune) bool { if ALT_DOWN && r == '\t' { // nop, we dont want to // insert tabs when we // alt tab out of view of this app return true } // only do the alt alternatives on mac osx // todo change this so it's not checking on every // input if runtime.GOOS == "darwin" && ALT_DOWN { if val, ok := altAlternative[r]; ok { r = val } } if CAPS_LOCK { if unicode.IsLetter(r) { r = unicode.ToUpper(r) } } if CONTROL_DOWN { actionName, actionExists := cfg.Shortcuts.Controls[string(unicode.ToLower(r))] if actionExists { if proc, ok := actions[actionName]; ok { return proc(b) } } else { log.Println("warning, unimplemented shortcut ctrl+", unicode.ToLower(r), actionName) } } if SUPER_DOWN { actionName, actionExists := cfg.Shortcuts.Supers[string(unicode.ToLower(r))] if actionExists { if proc, ok := actions[actionName]; ok { return proc(b) } } else { log.Println("warning, unimplemented shortcut ctrl+", unicode.ToLower(r), actionName) } } if SHIFT_DOWN { // if it's a letter convert to uppercase if unicode.IsLetter(r) { r = unicode.ToUpper(r) } else { // otherwise we have to look in our trusy // shift mapping thing. if val, ok := shiftAlternative[r]; ok { r = val } } } // NOTE: we have to do this AFTER we map the // shift combo for the value! // this will not insert a ), }, or ] if there // is one to the right of us... basically // this escapes out of a closing bracket // rather than inserting a new one IF we are inside // brackets. if b.cfg.Editor.Match_Braces { if r == ')' || r == '}' || r == ']' { currLine := b.contents[b.curs.y] if b.curs.x < currLine.Len() { curr := currLine.Index(b.curs.x + 1) if curr == r { b.curs.move(1, 0) return true } else { log.Print("no it's ", curr) } } } } b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(r)) b.curs.move(1, 0) // we don't need to match braces // let's not continue any further if !b.cfg.Editor.Match_Braces { return true } // TODO: shall we match single quotes and double quotes too? matchingPair := int(r) // the offset in the ASCII Table is +2 for { and for [ // but its +1 for parenthesis ( offset := 2 switch r { case '(': offset = 1 fallthrough case '{': fallthrough case '[': matchingPair += offset b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string(rune(matchingPair))) } return true } func remove(slice []*rope.Rope, s int) []*rope.Rope { return append(slice[:s], slice[s+1:]...) 
} func (b *Buffer) deleteNext() { b.moveRight() b.deletePrev() } func (b *Buffer) deletePrev() { if b.curs.x > 0 { offs := -1 if !b.cfg.Editor.Tabs_Are_Spaces { if b.contents[b.curs.y].Index(b.curs.x) == '\t' { offs = int(-b.cfg.Editor.Tab_Size) } } else if b.cfg.Editor.Hungry_Backspace && b.curs.x >= int(b.cfg.Editor.Tab_Size) { // cut out the last {TAB_SIZE} amount of characters // and check em tabSize := int(b.cfg.Editor.Tab_Size) lastTabSizeChars := b.contents[b.curs.y].Substr(b.curs.x+1-tabSize, tabSize).String() if strings.Compare(lastTabSizeChars, b.makeTab()) == 0 { // delete {TAB_SIZE} amount of characters // from the cursors x pos for i := 0; i < int(b.cfg.Editor.Tab_Size); i++ { b.contents[b.curs.y] = b.contents[b.curs.y].Delete(b.curs.x, 1) b.curs.move(-1, 0) } return } } b.contents[b.curs.y] = b.contents[b.curs.y].Delete(b.curs.x, 1) b.curs.moveRender(-1, 0, offs, 0) } else if b.curs.x == 0 && b.curs.y > 0 { // start of line, wrap to previous prevLineLen := b.contents[b.curs.y-1].Len() b.contents[b.curs.y-1] = b.contents[b.curs.y-1].Concat(b.contents[b.curs.y]) b.contents = append(b.contents[:b.curs.y], b.contents[b.curs.y+1:]...) b.curs.move(prevLineLen, -1) } } func (b *Buffer) deleteBeforeCursor() { // delete so we're at the end // of the previous line if b.curs.x == 0 { b.deletePrev() return } for b.curs.x > 0 { b.deletePrev() } } func (b *Buffer) moveLeft() { if b.curs.x == 0 && b.curs.y > 0 { b.curs.move(b.contents[b.curs.y-1].Len(), -1) } else if b.curs.x > 0 { b.curs.move(-1, 0) } } func (b *Buffer) moveRight() { currLineLength := b.contents[b.curs.y].Len() if b.curs.x >= currLineLength && b.curs.y < len(b.contents)-1 { // we're at the end of the line and we have // some lines after, let's wrap around b.curs.move(0, 1) b.curs.move(-currLineLength, 0) } else if b.curs.x < b.contents[b.curs.y].Len() { // we have characters to the right, let's move along b.curs.move(1, 0) } } func (b *Buffer) moveToEndOfLine() { lineLen := b.contents[b.curs.y].Len() if b.curs.x > lineLen { distToMove := b.curs.x - lineLen for i := 0; i < distToMove; i++ { b.moveLeft() } } } func (b *Buffer) moveUp() { if b.curs.y > 0 { b.curs.move(0, -1) } } func (b *Buffer) moveDown() { if b.curs.y < len(b.contents) { b.curs.move(0, 1) } } func (b *Buffer) swapLineUp() bool { if b.curs.y > 0 { currLine := b.contents[b.curs.y] prevLine := b.contents[b.curs.y-1] b.contents[b.curs.y-1] = currLine b.contents[b.curs.y] = prevLine b.moveUp() } return true } func (b *Buffer) swapLineDown() bool { if b.curs.y < len(b.contents) { currLine := b.contents[b.curs.y] nextLine := b.contents[b.curs.y+1] b.contents[b.curs.y+1] = currLine b.contents[b.curs.y] = nextLine b.moveDown() } return true } func (b *Buffer) scrollUp() { if b.cam.y > 0 { // TODO move the cursor down 45 lines // IF the buffer exceeds the window size. lineScrollAmount := 10 b.cam.y -= lineScrollAmount for i := 0; i < lineScrollAmount; i++ { b.moveUp() } } } func (b *Buffer) scrollDown() { if b.cam.y < len(b.contents) { // TODO move the cursor down 45 lines // IF the buffer exceeds the window size. lineScrollAmount := 10 b.cam.y += lineScrollAmount for i := 0; i < lineScrollAmount; i++ { b.moveDown() } } } // processes a key press. returns if there // was a key that MODIFIED the buffer. func (b *Buffer) processActionKey(key int) bool { switch key { case sdl.K_CAPSLOCK: CAPS_LOCK = !CAPS_LOCK return true case sdl.K_RETURN: if SUPER_DOWN { // in sublime this goes // into the next block // nicely indented! 
} initial_x := b.curs.x prevLineLen := b.contents[b.curs.y].Len() var newRope *rope.Rope if initial_x < prevLineLen && initial_x > 0 { // we're not at the end of the line, but we're not at // the start, i.e. we're SPLITTING the line left, right := b.contents[b.curs.y].Split(initial_x) newRope = right b.contents[b.curs.y] = left } else if initial_x == 0 { // we're at the start of a line, so we want to // shift the line down and insert an empty line // above it! b.contents = append(b.contents, new(rope.Rope)) // grow copy(b.contents[b.curs.y+1:], b.contents[b.curs.y:]) // shift b.contents[b.curs.y] = new(rope.Rope) // set b.curs.move(0, 1) return true } else { // we're at the end of a line newRope = new(rope.Rope) } b.curs.move(0, 1) for x := 0; x < initial_x; x++ { // TODO(Felix): there's a bug here where // this doesn't account for the rendered x // position when we use tabs as tabs and not spaces b.curs.move(-1, 0) } b.contents = append(b.contents, nil) copy(b.contents[b.curs.y+1:], b.contents[b.curs.y:]) b.contents[b.curs.y] = newRope return true case sdl.K_BACKSPACE: if SUPER_DOWN { b.deleteBeforeCursor() } else { b.deletePrev() } return true case sdl.K_RIGHT: currLineLength := b.contents[b.curs.y].Len() if CONTROL_DOWN && b.parent != nil { b.parent.ChangeFocus(1) return true } if SUPER_DOWN { for b.curs.x < currLineLength { b.curs.move(1, 0) } return true } // FIXME this is weird! if ALT_DOWN { currLine := b.contents[b.curs.y] var i int for i = b.curs.x + 1; i < currLine.Len(); i++ { curr := currLine.Index(i) if curr <= ' ' || curr == '_' { break } } for j := 0; j < i; j++ { b.moveRight() } return true } b.moveRight() return true case sdl.K_LEFT: if CONTROL_DOWN && b.parent != nil { b.parent.ChangeFocus(-1) return true } if SUPER_DOWN { // TODO go to the nearest \t // if no \t (i.e. start of line) go to // the start of the line! b.curs.gotoStart() } if ALT_DOWN { currLine := b.contents[b.curs.y] i := b.curs.x for i > 0 { currChar := currLine.Index(i) // TODO is a seperator thing? if currChar <= ' ' || currChar == '_' { // move over one more? i = i - 1 break } i = i - 1 } start := b.curs.x for j := 0; j < start-i; j++ { b.moveLeft() } return true } b.moveLeft() return true case sdl.K_UP: if ALT_DOWN { return b.swapLineUp() } if SUPER_DOWN { // go to the start of the file } if b.curs.y > 0 { offs := 0 prevLineLen := b.contents[b.curs.y-1].Len() if b.curs.x > prevLineLen { offs = prevLineLen - b.curs.x } // TODO: offset should account for tabs b.curs.move(offs, -1) } return true case sdl.K_DOWN: if ALT_DOWN { return b.swapLineDown() } if SUPER_DOWN { // go to the end of the file } if b.curs.y < len(b.contents)-1 { offs := 0 nextLineLen := b.contents[b.curs.y+1].Len() if b.curs.x > nextLineLen { offs = nextLineLen - b.curs.x } // TODO: offset should account for tabs b.curs.move(offs, 1) } return true case sdl.K_TAB: if b.cfg.Editor.Tabs_Are_Spaces { // make an empty rune array of TAB_SIZE, cast to string // and insert it. b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, b.makeTab()) b.curs.move(int(b.cfg.Editor.Tab_Size), 0) } else { b.contents[b.curs.y] = b.contents[b.curs.y].Insert(b.curs.x, string('\t')) // the actual position is + 1, but we make it // move by TAB_SIZE characters on the view. 
b.curs.moveRender(1, 0, int(b.cfg.Editor.Tab_Size), 0) } return true case sdl.K_END: currLine := b.contents[b.curs.y] if b.curs.x < currLine.Len() { distToMove := currLine.Len() - b.curs.x b.curs.move(distToMove, 0) } return true case sdl.K_HOME: if b.curs.x > 0 { b.curs.move(-b.curs.x, 0) } return true case sdl.K_PAGEUP: b.scrollUp() return true case sdl.K_PAGEDOWN: b.scrollDown() return true case sdl.K_DELETE: b.deleteNext() return true case sdl.K_LGUI: fallthrough case sdl.K_RGUI: fallthrough case sdl.K_LALT: fallthrough case sdl.K_RALT: fallthrough case sdl.K_LCTRL: fallthrough case sdl.K_RCTRL: fallthrough case sdl.K_LSHIFT: fallthrough case sdl.K_RSHIFT: return true } return false } var ( SHIFT_DOWN bool = false SUPER_DOWN = false // cmd on mac, ctrl on windows CONTROL_DOWN = false // what is this on windows? ALT_DOWN = false // option on mac CAPS_LOCK = false ) // TODO(Felix) this is really stupid func (b *Buffer) makeTab() string { blah := []rune{} for i := 0; i < int(b.cfg.Editor.Tab_Size); i++ { blah = append(blah, ' ') } return string(blah) } func (b *Buffer) HandleEvent(evt strife.StrifeEvent) { switch event := evt.(type) { case *strife.MouseWheelEvent: if event.Y > 0 { b.scrollDown() } if event.Y < 0 { b.scrollUp() } } } func (b *Buffer) OnUpdate() bool { if !b.HasFocus { return false } prev_x := b.curs.x prev_y := b.curs.y SHIFT_DOWN = strife.KeyPressed(sdl.K_LSHIFT) || strife.KeyPressed(sdl.K_RSHIFT) SUPER_DOWN = strife.KeyPressed(sdl.K_LGUI) || strife.KeyPressed(sdl.K_RGUI) ALT_DOWN = strife.KeyPressed(sdl.K_LALT) || strife.KeyPressed(sdl.K_RALT) CONTROL_DOWN = strife.KeyPressed(sdl.K_LCTRL) || strife.KeyPressed(sdl.K_RCTRL) if strife.PollKeys() { keyCode := strife.PopKey() // try process this key input as an // action first actionPerformed := b.processActionKey(keyCode) if actionPerformed { return true } textEntered := b.processTextInput(rune(keyCode)) if textEntered { return true } } // FIXME handle focus properly if b.inputHandler == nil { return false } if b.curs.x != prev_x || b.curs.y != prev_y { should_draw = true should_flash = false reset_timer = strife.CurrentTimeMillis() } // fixme to not use CurrentTimeMillis if !should_flash && strife.CurrentTimeMillis()-reset_timer > b.cfg.Cursor.Reset_Delay { should_flash = true } if strife.CurrentTimeMillis()-timer > b.cfg.Cursor.Flash_Rate && (should_flash && b.cfg.Cursor.Flash) { timer = strife.CurrentTimeMillis() should_draw = !should_draw } return false } type syntaxRuneInfo struct { background int foreground int length int } // dimensions of the last character we rendered var last_w, last_h int // editor x and y offsets var ex, ey = 0, 0 var compiledRegex = map[string]*regexp.Regexp{} func (b *Buffer) renderAt(ctx *strife.Renderer, rx int, ry int) { // BACKGROUND ctx.SetColor(strife.HexRGB(b.cfg.Theme.Background)) ctx.Rect(b.x, b.y, b.w, b.h, strife.Fill) if b.cfg.Editor.Highlight_Line && b.HasFocus { ctx.SetColor(strife.Black) // highlight_line_col? ctx.Rect(ex+rx, ey+(ry+b.curs.ry*last_h)-(b.cam.y*last_h), b.w, last_h, strife.Fill) } // render the ol' cursor if should_draw && b.cfg.Cursor.Draw && b.HasFocus { cursorWidth := b.cfg.Cursor.GetCaretWidth() if cursorWidth == -1 { cursorWidth = last_w } ctx.SetColor(strife.HexRGB(b.cfg.Theme.Cursor)) // caret colour ctx.Rect(ex+(rx+b.curs.rx*last_w)-(b.cam.x*last_w), (ry+b.curs.ry*last_h)-(b.cam.y*last_h), cursorWidth, last_h, strife.Fill) } var visibleLines int = 50 // last_h > 0 means we have done // a render. 
if int(last_h) > 0 && int(b.h) != 0 { // render an extra three lines just // so we dont cut anything off if its // not evenly divisible visibleLines = (int(b.h) / int(last_h)) + 3 } start := b.cam.y upper := b.cam.y + visibleLines if upper > len(b.contents) { upper = len(b.contents) } numLines := len(b.contents) var y_col int for lineNum, rope := range b.contents[start:upper] { currLine := []rune(rope.String()) // char index => colour matches := map[int]syntaxRuneInfo{} stuff := b.cfg.Syntax[b.languageInfo] subjects := make([]cfg.SyntaxCriteria, len(stuff)) colours := make([]int, len(stuff)) idx := 0 for _, criteria := range stuff { colours[idx] = criteria.Colour subjects[idx] = criteria idx++ } // HOLY SLOW BATMAN for charIndex := 0; charIndex < len(currLine); charIndex++ { for syntaxIndex, syntax := range subjects { if syntax.Pattern != "" { // we have a regex pattern // FIXME this is also very slow! // we could easily compile all of these // regular expressions when we load the // syntax highlighter. a := string(currLine[charIndex:]) // no need to compile the same regex // pattern multiple times. regex, ok := compiledRegex[syntax.Pattern] if !ok { var err error regex, err = regexp.Compile(syntax.Pattern) if err != nil { log.Println(err.Error()) } } matched := regex.FindString(a) if matched != "" && len(matched) > 0 { // for some reason this affects the whole line if _, ok := matches[charIndex]; !ok { matches[charIndex] = syntaxRuneInfo{colours[syntaxIndex], -1, len(matched)} charIndex = charIndex + len(matched) } } } else { for _, subject := range syntax.Match { if charIndex+len(subject)+1 > len(currLine) { continue } a := currLine[charIndex : charIndex+len(subject)+1] // we only want to match words. so we check that it has a space // before or after the subject word. if strings.Compare(string(a), subject+" ") == 0 || strings.Compare(string(a), " "+subject) == 0 { if _, ok := matches[charIndex]; !ok { matches[charIndex] = syntaxRuneInfo{colours[syntaxIndex], -1, len(string(a))} break } charIndex += len(subject) } } } } } colorStack := []int{} var x_col int for idx, char := range currLine { switch char { case '\n': x_col = 0 y_col += 1 continue case '\t': x_col += b.cfg.Editor.Tab_Size continue } x_col += 1 ctx.SetColor(strife.HexRGB(b.cfg.Theme.Foreground)) // if we're currently over a character then set // the font colour to something else // ONLY SET THE COLOUR IF WE HAVE FOCUS ALSO! if b.HasFocus && b.curs.x+1 == x_col && b.curs.y == y_col && should_draw { ctx.SetColor(strife.HexRGB(b.cfg.Theme.Cursor_Invert)) } if info, ok := matches[idx]; ok { for i := 0; i < info.length; i++ { colorStack = append(colorStack, info.background) } } if len(colorStack) > 0 { var a int32 a, colorStack = int32(colorStack[len(colorStack)-1]), colorStack[:len(colorStack)-1] ctx.SetColor(strife.HexRGB(a)) } last_w, last_h = ctx.String(string(char), ex+(rx+((x_col-1)*last_w)), (ry + (y_col * last_h))) } if b.cfg.Editor.Show_Line_Numbers { gutterPadPx := 10 numLinesWidth := len(string(numLines)) + 1 gutterWidth := last_w*numLinesWidth + (gutterPadPx * 2) // render the line numbers ctx.SetColor(strife.HexRGB(b.cfg.Theme.Background)) ctx.Rect(rx, (ry + (y_col * last_h)), gutterWidth, b.h, strife.Fill) ctx.SetColor(strife.HexRGB(b.cfg.Theme.Foreground)) ctx.String(fmt.Sprintf("%*d", numLinesWidth, start+lineNum), rx+gutterPadPx, (ry + (y_col * last_h))) ex = gutterWidth } y_col += 1 } } func (b *Buffer) OnRender(ctx *strife.Renderer) { b.renderAt(ctx, b.x, b.y) }
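The K_RETURN case in processActionKey above inserts the new rope line with a grow/shift/set sequence (append, copy, assign), the counterpart of the remove helper already defined. A sketch wrapping that idiom as a helper; insertLine is a hypothetical name, the editor itself inlines this logic.

package gui

import "github.com/felixangell/go-rope"

// insertLine places r at index i in lines, shifting the rest down by one.
func insertLine(lines []*rope.Rope, i int, r *rope.Rope) []*rope.Rope {
	lines = append(lines, nil)   // grow by one
	copy(lines[i+1:], lines[i:]) // shift the tail down
	lines[i] = r                 // set the new line
	return lines
}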
identifier_body
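The highlighting loop in the buffer code above caches compiled patterns in compiledRegex and only calls regexp.Compile on a cache miss, which is exactly what its own comments suggest ("no need to compile the same regex pattern multiple times"). A minimal sketch of that compile-once cache pattern, written in Rust with the regex crate for consistency with the rest of this section; the cached_regex helper and the hex-literal pattern are illustrative and not taken from the editor's syntax config:

```rust
use regex::Regex;
use std::collections::HashMap;

/// Look up a compiled regex for `pattern`, compiling it at most once.
/// Patterns that fail to compile are simply skipped, mirroring the editor's
/// log-and-continue behaviour.
fn cached_regex<'a>(cache: &'a mut HashMap<String, Regex>, pattern: &str) -> Option<&'a Regex> {
    if !cache.contains_key(pattern) {
        let re = Regex::new(pattern).ok()?;
        cache.insert(pattern.to_string(), re);
    }
    cache.get(pattern)
}

fn main() {
    let mut cache: HashMap<String, Regex> = HashMap::new();
    // Hypothetical pattern; the real ones come from the editor's syntax config.
    let pattern = r"0x[0-9A-Fa-f]+";
    let line = "let mask = 0xFF;";

    if let Some(m) = cached_regex(&mut cache, pattern).and_then(|re| re.find(line)) {
        println!("highlight '{}' starting at byte {}", m.as_str(), m.start());
    }
    // The second lookup hits the cache instead of recompiling.
    assert_eq!(cache.len(), 1);
    assert!(cached_regex(&mut cache, pattern).is_some());
}
```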
lib.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! This crate implements IEEE Std 802.11-2016 MLME as a library for hardware that supports //! SoftMAC. This is distinct from FullMAC, which is implemented by drivers and firmware. The //! implementation is broadly divided between client and AP stations, with some shared components //! and state machine infrastructure. See the [`client`] and [`ap`] modules. //! //! [`ap`]: crate::ap //! [`client`]: crate::client mod akm_algorithm; pub mod ap; pub mod auth; mod block_ack; pub mod buffer; pub mod client; mod ddk_converter; pub mod device; pub mod disconnect; pub mod error; pub mod key; mod logger; mod minstrel; #[allow(unused)] // TODO(fxbug.dev/79543): Remove annotation once used. mod probe_sequence; pub use {ddk_converter::*, wlan_common as common}; use { anyhow::{anyhow, bail, Error}, banjo_fuchsia_hardware_wlan_softmac as banjo_wlan_softmac, banjo_fuchsia_wlan_common as banjo_common, device::{Device, DeviceInterface}, fidl_fuchsia_wlan_mlme as fidl_mlme, fuchsia_async as fasync, fuchsia_zircon as zx, futures::{ channel::{mpsc, oneshot}, select, StreamExt, }, log::{error, info, warn}, parking_lot::Mutex, std::sync::Arc, std::time::Duration, }; pub trait MlmeImpl { type Config: Send; type TimerEvent; fn new( config: Self::Config, device: Device, buf_provider: buffer::BufferProvider, scheduler: common::timer::Timer<Self::TimerEvent>, ) -> Self; fn handle_mlme_message(&mut self, msg: fidl_mlme::MlmeRequest) -> Result<(), Error>; fn handle_mac_frame_rx(&mut self, bytes: &[u8], rx_info: banjo_wlan_softmac::WlanRxInfo); fn handle_eth_frame_tx(&mut self, bytes: &[u8]) -> Result<(), Error>; fn handle_scan_complete(&mut self, status: zx::Status, scan_id: u64); fn handle_timeout(&mut self, event_id: common::timer::EventId, event: Self::TimerEvent); fn access_device(&mut self) -> &mut Device; } pub struct MinstrelTimer { timer: wlan_common::timer::Timer<()>, current_timer: Option<common::timer::EventId>, } impl minstrel::TimerManager for MinstrelTimer { fn schedule(&mut self, from_now: Duration) { self.current_timer.replace(self.timer.schedule_after(from_now.into(), ())); } fn cancel(&mut self) { self.current_timer.take(); } } type MinstrelWrapper = Arc<Mutex<minstrel::MinstrelRateSelector<MinstrelTimer>>>; // We support a fake MLME internal representation that allows tests written in C++ to manually // tweak the system time. // TODO(fxbug.dev/45464): Remove when tests are all in Rust. enum MlmeHandleInternal { Real { join_handle: std::thread::JoinHandle<()>, }, Fake { executor: fasync::TestExecutor, future: std::pin::Pin<Box<dyn futures::Future<Output = ()>>>, }, } /// MlmeHandle is the only access we have to our MLME after spinning it off into its own /// event loop thread. pub struct MlmeHandle { driver_event_sink: mpsc::UnboundedSender<DriverEvent>, internal: Option<MlmeHandleInternal>, } impl MlmeHandle { pub fn stop(&mut self) { if let Err(e) = self.driver_event_sink.unbounded_send(DriverEvent::Stop) { error!("Cannot signal MLME event loop thread: {}", e); } match self.internal.take() { Some(MlmeHandleInternal::Real { join_handle }) => { // This unwrap will only fail if the thread panics. if let Err(e) = join_handle.join() { error!("MLME event loop thread panicked: {:?}", e); } } Some(MlmeHandleInternal::Fake { mut executor, mut future }) => { // Verify that our main thread would exit now. 
assert!(executor.run_until_stalled(&mut future.as_mut()).is_ready()); } None => warn!("Called stop on already stopped MLME"), } } pub fn delete(mut self) { if self.internal.is_some() { warn!("Called delete on MlmeHandle before calling stop."); self.stop() } } pub fn queue_eth_frame_tx(&mut self, bytes: Vec<u8>) -> Result<(), Error> { self.driver_event_sink .unbounded_send(DriverEvent::EthFrameTx { bytes: bytes.into() }) .map_err(|e| e.into()) } // Fns used to interact with an MLME running in test mode. // TODO(fxbug.dev/45464): Remove when tests are all in Rust. pub fn advance_fake_time(&mut self, nanos: i64) { match &mut self.internal { Some(MlmeHandleInternal::Real { .. }) => { panic!("Called advance_fake_time on a real MLME") } Some(MlmeHandleInternal::Fake { executor, future }) => { let time = executor.now(); executor.set_fake_time(time + fasync::Duration::from_nanos(nanos)); executor.wake_expired_timers(); let _ = executor.run_until_stalled(&mut future.as_mut()); } None => panic!("Called advance_fake_time on stopped MLME"), } } pub fn run_until_stalled(&mut self) { match &mut self.internal { Some(MlmeHandleInternal::Real { .. }) => { panic!("Called run_until_stalled on a real MLME") } Some(MlmeHandleInternal::Fake { executor, future }) => { let _ = executor.run_until_stalled(&mut future.as_mut()); } None => panic!("Called run_until_stalled on stopped MLME"), } } } // DriverEventSink is used by other devices to interact with our main loop thread. All // events from our ethernet device or vendor device are converted to DriverEvents // and sent through this sink, where they can then be handled serially. Multiple copies of // DriverEventSink may be safely passed between threads, including one that is used by our // vendor driver as the context for wlan_softmac_ifc_protocol_ops. struct DriverEventSink(pub mpsc::UnboundedSender<DriverEvent>); // TODO(fxbug.dev/29063): Remove copies from MacFrame and EthFrame. pub enum DriverEvent { // Indicates that the device is being removed and our main loop should exit. Stop, // TODO(fxbug.dev/43456): We need to keep stats for these events and respond to StatsQueryRequest. // Indicates receipt of a MAC frame from a peer. MacFrameRx { bytes: Vec<u8>, rx_info: banjo_wlan_softmac::WlanRxInfo }, // Requests transmission of an ethernet frame over the air. EthFrameTx { bytes: Vec<u8> }, // Reports a scan is complete. ScanComplete { status: zx::Status, scan_id: u64 }, // Reports the result of an attempted frame transmission. TxStatusReport { tx_status: banjo_common::WlanTxStatus }, // Reports the current status of the vendor driver. Status { status: u32 }, } pub struct Mlme<T: MlmeImpl> { mlme_impl: T, minstrel: Option<MinstrelWrapper>, // A stream of requests coming from the parent SME of this MLME. mlme_request_stream: fidl_mlme::MlmeRequestStream, // A stream of events initiated by C++ device drivers and then buffered here // by our MlmeHandle. driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>, time_stream: common::timer::TimeStream<T::TimerEvent>, minstrel_time_stream: common::timer::TimeStream<()>, } fn should_enable_minstrel(mac_sublayer: &banjo_common::MacSublayerSupport) -> bool
const MINSTREL_UPDATE_INTERVAL: std::time::Duration = std::time::Duration::from_millis(100); // Remedy for fxbug.dev/8165 (fxbug.dev/33151) // See |DATA_FRAME_INTERVAL_NANOS| // in //src/connectivity/wlan/testing/hw-sim/test/rate_selection/src/lib.rs // Ensure at least one probe frame (generated every 16 data frames) // in every cycle: // 16 <= (MINSTREL_UPDATE_INTERVAL_HW_SIM / MINSTREL_DATA_FRAME_INTERVAL_NANOS * 1e6) < 32. const MINSTREL_UPDATE_INTERVAL_HW_SIM: std::time::Duration = std::time::Duration::from_millis(83); // Require a static lifetime so we can move this MLME into an event loop task. impl<T: 'static + MlmeImpl> Mlme<T> { pub fn start( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, ) -> Result<MlmeHandle, Error> { let (driver_event_sink, driver_event_stream) = mpsc::unbounded(); // This sink is used both by the inderlying iface to forward up driver events, as well // as via the MlmeHandle to send ethernet frames and terminate MLME. let driver_event_sink_clone = driver_event_sink.clone(); let (startup_sender, startup_receiver) = oneshot::channel(); // Everything else happens in a new thread so that we can switch into an async context // without requiring all parts of MLME to impl Send. let join_handle = std::thread::spawn(move || { info!("Starting WLAN MLME main loop"); let mut executor = fasync::LocalExecutor::new().unwrap(); let future = Self::main_loop_thread( config, device, buf_provider, driver_event_sink_clone, driver_event_stream, startup_sender, ); executor.run_singlethreaded(future); }); let mut executor = fasync::LocalExecutor::new().unwrap(); let startup_result = executor.run_singlethreaded(startup_receiver); match startup_result.map_err(|e| Error::from(e)) { Ok(Ok(())) => Ok(MlmeHandle { driver_event_sink, internal: Some(MlmeHandleInternal::Real { join_handle }), }), Err(err) | Ok(Err(err)) => match join_handle.join() { Ok(()) => bail!("Failed to start the MLME event loop: {:?}", err), Err(panic_err) => { bail!("MLME event loop failed and then panicked: {}, {:?}", err, panic_err) } }, } } // Create an MLME in a test configuration. This MLME will never do anything unless it's progressed // using MlmeHandle::advance_fake_time and MlmeHandle::run_until_stalled. 
pub fn start_test( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, ) -> MlmeHandle { let executor = fasync::TestExecutor::new_with_fake_time().unwrap(); Self::start_test_with_executor(config, device, buf_provider, executor) } pub fn start_test_with_executor( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, mut executor: fasync::TestExecutor, ) -> MlmeHandle { let (driver_event_sink, driver_event_stream) = mpsc::unbounded(); let driver_event_sink_clone = driver_event_sink.clone(); let (startup_sender, mut startup_receiver) = oneshot::channel(); let mut future = Box::pin(Self::main_loop_thread( config, device, buf_provider, driver_event_sink_clone, driver_event_stream, startup_sender, )); let _ = executor.run_until_stalled(&mut future.as_mut()); startup_receiver .try_recv() .unwrap() .expect("Test MLME setup stalled.") .expect("Test MLME setup failed."); MlmeHandle { driver_event_sink, internal: Some(MlmeHandleInternal::Fake { executor, future }), } } async fn main_loop_thread( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, driver_event_sink: mpsc::UnboundedSender<DriverEvent>, driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>, startup_sender: oneshot::Sender<Result<(), Error>>, ) { let mut driver_event_sink = Box::new(DriverEventSink(driver_event_sink)); let ifc = device::WlanSoftmacIfcProtocol::new(driver_event_sink.as_mut()); // Indicate to the vendor driver that we can start sending and receiving info. Any messages received from the // driver before we start our MLME will be safely buffered in our driver_event_sink. // Note that device.start will copy relevant fields out of ifc, so dropping it after this is fine. // The returned value is the MLME server end of the channel wlanmevicemonitor created to connect MLME and SME. let mlme_protocol_handle_via_iface_creation = match device.start(&ifc) { Ok(handle) => handle, Err(e) => { // Failure to unwrap indicates a critical failure in the driver init thread. startup_sender.send(Err(anyhow!("device.start failed: {}", e))).unwrap(); return; } }; let channel = zx::Channel::from(mlme_protocol_handle_via_iface_creation); let server = fidl::endpoints::ServerEnd::<fidl_mlme::MlmeMarker>::new(channel); let (mlme_request_stream, control_handle) = match server.into_stream_and_control_handle() { Ok(res) => res, Err(e) => { // Failure to unwrap indicates a critical failure in the driver init thread. 
startup_sender .send(Err(anyhow!("Failed to get MLME request stream: {}", e))) .unwrap(); return; } }; let device_mac_sublayer_support = device.mac_sublayer_support(); let (minstrel_timer, minstrel_time_stream) = common::timer::create_timer(); let update_interval = if device_mac_sublayer_support.device.is_synthetic { MINSTREL_UPDATE_INTERVAL_HW_SIM } else { MINSTREL_UPDATE_INTERVAL }; let minstrel = if should_enable_minstrel(&device_mac_sublayer_support) { let timer_manager = MinstrelTimer { timer: minstrel_timer, current_timer: None }; let probe_sequence = probe_sequence::ProbeSequence::random_new(); Some(Arc::new(Mutex::new(minstrel::MinstrelRateSelector::new( timer_manager, update_interval, probe_sequence, )))) } else { None }; let new_device = Device::new(device, minstrel.clone(), control_handle); let (timer, time_stream) = common::timer::create_timer(); let mlme_impl = T::new(config, new_device, buf_provider, timer); let mlme = Self { mlme_impl, minstrel, mlme_request_stream, driver_event_stream, time_stream, minstrel_time_stream, }; // Startup is complete. Signal the main thread to proceed. // Failure to unwrap indicates a critical failure in the driver init thread. startup_sender.send(Ok(())).unwrap(); let result = Self::run_main_loop(mlme).await; match result { Ok(()) => info!("MLME event loop exited gracefully."), Err(e) => error!("MLME event loop exited with error: {:?}", e), } } /// Begin processing MLME events. /// Does not return until iface destruction is requested via DriverEvent::Stop, unless /// a critical error occurs. Note that MlmeHandle::stop will work in either case. pub async fn run_main_loop(mut self) -> Result<(), Error> { let mut timer_stream = common::timer::make_async_timed_event_stream(self.time_stream).fuse(); let mut minstrel_timer_stream = common::timer::make_async_timed_event_stream(self.minstrel_time_stream).fuse(); loop { select! { // Process requests from SME. mlme_request = self.mlme_request_stream.next() => match mlme_request { Some(req) => { match req { Ok(req) => { let method_name = req.method_name(); if let Err(e) = self.mlme_impl.handle_mlme_message(req) { info!("Failed to handle mlme {} request: {}", method_name, e); } } Err(e) => { info!("Failure while receiving mlme request: {}", e); } } } None => bail!("MLME request stream terminated unexpectedly."), }, // Process requests from our C++ drivers. driver_event = self.driver_event_stream.next() => match driver_event { Some(event) => match event { // DriverEvent::Stop indicates a safe shutdown. DriverEvent::Stop => return Ok(()), DriverEvent::MacFrameRx { bytes, rx_info } => { self.mlme_impl.handle_mac_frame_rx(&bytes[..], rx_info); } DriverEvent::EthFrameTx { bytes } => { if let Err(e) = self.mlme_impl.handle_eth_frame_tx(&bytes[..]) { // TODO(fxbug.dev/45464): Keep a counter of these failures. 
info!("Failed to handle eth frame: {}", e); } } DriverEvent::ScanComplete { status, scan_id } => { self.mlme_impl.handle_scan_complete(status, scan_id) }, DriverEvent::TxStatusReport { tx_status } => { if let Some(minstrel) = self.minstrel.as_ref() { minstrel.lock().handle_tx_status_report(&tx_status) } } DriverEvent::Status { status } => { self.mlme_impl.access_device().set_eth_status(status) } }, None => bail!("Driver event stream terminated unexpectedly."), }, timed_event = timer_stream.select_next_some() => { self.mlme_impl.handle_timeout(timed_event.id, timed_event.event); } _minstrel_timeout = minstrel_timer_stream.select_next_some() => { if let Some(minstrel) = self.minstrel.as_ref() { minstrel.lock().handle_timeout() } } } } } } #[cfg(test)] mod test_utils { use { super::*, banjo_fuchsia_hardware_wlan_associnfo as banjo_wlan_associnfo, banjo_fuchsia_wlan_common as banjo_common, fidl::endpoints::RequestStream, std::default::Default, }; #[derive(Copy, Clone, Debug)] pub struct MockWlanRxInfo { pub rx_flags: banjo_wlan_softmac::WlanRxInfoFlags, pub valid_fields: u32, pub phy: banjo_common::WlanPhyType, pub data_rate: u32, pub channel: banjo_common::WlanChannel, pub mcs: u8, pub rssi_dbm: i8, pub snr_dbh: i16, } impl Default for MockWlanRxInfo { fn default() -> Self { Self { valid_fields: banjo_wlan_associnfo::WlanRxInfoValid::CHAN_WIDTH.0 | banjo_wlan_associnfo::WlanRxInfoValid::RSSI.0 | banjo_wlan_associnfo::WlanRxInfoValid::SNR.0, channel: banjo_common::WlanChannel { primary: 1, cbw: banjo_common::ChannelBandwidth::CBW20, secondary80: 0, }, rssi_dbm: -40, snr_dbh: 35, // Default to 0 for these fields since there are no // other reasonable values to mock. rx_flags: banjo_wlan_softmac::WlanRxInfoFlags(0), phy: banjo_common::WlanPhyType::DSSS, data_rate: 0, mcs: 0, } } } impl From<MockWlanRxInfo> for banjo_wlan_softmac::WlanRxInfo { fn from(mock_rx_info: MockWlanRxInfo) -> banjo_wlan_softmac::WlanRxInfo { banjo_wlan_softmac::WlanRxInfo { rx_flags: mock_rx_info.rx_flags, valid_fields: mock_rx_info.valid_fields, phy: mock_rx_info.phy, data_rate: mock_rx_info.data_rate, channel: mock_rx_info.channel, mcs: mock_rx_info.mcs, rssi_dbm: mock_rx_info.rssi_dbm, snr_dbh: mock_rx_info.snr_dbh, } } } pub(crate) fn fake_control_handle( // We use this unused parameter to ensure that an executor exists. 
_exec: &fuchsia_async::TestExecutor, ) -> (fidl_mlme::MlmeControlHandle, fuchsia_zircon::Channel) { let (c1, c2) = fuchsia_zircon::Channel::create().unwrap(); let async_c1 = fidl::AsyncChannel::from_channel(c1).unwrap(); let request_stream = fidl_mlme::MlmeRequestStream::from_channel(async_c1); let control_handle = request_stream.control_handle(); (control_handle, c2) } pub struct FakeMlme { device: Device, } impl MlmeImpl for FakeMlme { type Config = (); type TimerEvent = (); fn new( _config: Self::Config, device: Device, _buf_provider: buffer::BufferProvider, _scheduler: common::timer::Timer<Self::TimerEvent>, ) -> Self { Self { device } } fn handle_mlme_message(&mut self, _msg: fidl_mlme::MlmeRequest) -> Result<(), Error> { unimplemented!() } fn handle_mac_frame_rx(&mut self, _bytes: &[u8], _rx_info: banjo_wlan_softmac::WlanRxInfo) { unimplemented!() } fn handle_eth_frame_tx(&mut self, _bytes: &[u8]) -> Result<(), Error> { unimplemented!() } fn handle_scan_complete(&mut self, _status: zx::Status, _scan_id: u64) { unimplemented!() } fn handle_timeout(&mut self, _event_id: common::timer::EventId, _event: Self::TimerEvent) { unimplemented!() } fn access_device(&mut self) -> &mut Device { &mut self.device } } } #[cfg(test)] mod tests { use {super::*, crate::device::test_utils::FakeDevice}; #[fuchsia::test] fn test_mlme_handle_use_after_stop() { let mut exec = fasync::TestExecutor::new().expect("failed to create an executor"); let fake_config = (); let mut fake_device = FakeDevice::new(&mut exec); let fake_buffer_provider = buffer::FakeBufferProvider::new(); let mut handle = Mlme::<test_utils::FakeMlme>::start_test_with_executor( fake_config, fake_device.as_raw_device(), fake_buffer_provider, exec, ); handle.stop(); handle .queue_eth_frame_tx(vec![0u8; 10]) .expect_err("Shouldn't be able to queue tx after stopping MLME"); } }
{ mac_sublayer.device.tx_status_report_supported && !mac_sublayer.rate_selection_offload.supported }
identifier_body
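The comment above MINSTREL_UPDATE_INTERVAL_HW_SIM justifies the 83 ms value with the constraint 16 <= (update interval / data frame interval) < 32, i.e. each update cycle should contain at least one probe frame (one probe is generated per 16 data frames) but fewer than two. DATA_FRAME_INTERVAL_NANOS itself lives in the hw-sim test and is not shown in this file, so the 4 ms value below is a hypothetical stand-in; the sketch only works through the arithmetic of that comment:

```rust
use std::time::Duration;

// Constant taken from the file above.
const MINSTREL_UPDATE_INTERVAL_HW_SIM: Duration = Duration::from_millis(83);
// Hypothetical stand-in for DATA_FRAME_INTERVAL_NANOS (defined in the hw-sim
// test, not here); chosen only to make the ratio fall inside [16, 32).
const DATA_FRAME_INTERVAL_NANOS: u64 = 4_000_000; // 4 ms per data frame

fn main() {
    let frames_per_cycle =
        MINSTREL_UPDATE_INTERVAL_HW_SIM.as_nanos() as u64 / DATA_FRAME_INTERVAL_NANOS;
    // 83_000_000 ns / 4_000_000 ns = 20 data frames per update cycle.
    println!("data frames per minstrel update: {}", frames_per_cycle);
    // With one probe frame per 16 data frames, every cycle sees at least one
    // probe frame but fewer than two.
    assert!(frames_per_cycle >= 16 && frames_per_cycle < 32);
}
```

With the assumed 4 ms frame interval the cycle holds 20 data frames, comfortably inside the [16, 32) window; the actual interval used by the hw-sim test may differ.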
lib.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! This crate implements IEEE Std 802.11-2016 MLME as a library for hardware that supports //! SoftMAC. This is distinct from FullMAC, which is implemented by drivers and firmware. The //! implementation is broadly divided between client and AP stations, with some shared components //! and state machine infrastructure. See the [`client`] and [`ap`] modules. //! //! [`ap`]: crate::ap //! [`client`]: crate::client mod akm_algorithm; pub mod ap; pub mod auth; mod block_ack; pub mod buffer; pub mod client; mod ddk_converter; pub mod device; pub mod disconnect; pub mod error; pub mod key; mod logger; mod minstrel; #[allow(unused)] // TODO(fxbug.dev/79543): Remove annotation once used. mod probe_sequence; pub use {ddk_converter::*, wlan_common as common}; use { anyhow::{anyhow, bail, Error}, banjo_fuchsia_hardware_wlan_softmac as banjo_wlan_softmac, banjo_fuchsia_wlan_common as banjo_common, device::{Device, DeviceInterface}, fidl_fuchsia_wlan_mlme as fidl_mlme, fuchsia_async as fasync, fuchsia_zircon as zx, futures::{ channel::{mpsc, oneshot}, select, StreamExt, }, log::{error, info, warn}, parking_lot::Mutex, std::sync::Arc, std::time::Duration, }; pub trait MlmeImpl { type Config: Send; type TimerEvent; fn new( config: Self::Config, device: Device, buf_provider: buffer::BufferProvider, scheduler: common::timer::Timer<Self::TimerEvent>, ) -> Self; fn handle_mlme_message(&mut self, msg: fidl_mlme::MlmeRequest) -> Result<(), Error>; fn handle_mac_frame_rx(&mut self, bytes: &[u8], rx_info: banjo_wlan_softmac::WlanRxInfo); fn handle_eth_frame_tx(&mut self, bytes: &[u8]) -> Result<(), Error>; fn handle_scan_complete(&mut self, status: zx::Status, scan_id: u64); fn handle_timeout(&mut self, event_id: common::timer::EventId, event: Self::TimerEvent); fn access_device(&mut self) -> &mut Device; } pub struct MinstrelTimer { timer: wlan_common::timer::Timer<()>, current_timer: Option<common::timer::EventId>, } impl minstrel::TimerManager for MinstrelTimer { fn schedule(&mut self, from_now: Duration) { self.current_timer.replace(self.timer.schedule_after(from_now.into(), ())); } fn cancel(&mut self) { self.current_timer.take(); } } type MinstrelWrapper = Arc<Mutex<minstrel::MinstrelRateSelector<MinstrelTimer>>>; // We support a fake MLME internal representation that allows tests written in C++ to manually // tweak the system time. // TODO(fxbug.dev/45464): Remove when tests are all in Rust. enum MlmeHandleInternal { Real { join_handle: std::thread::JoinHandle<()>, }, Fake { executor: fasync::TestExecutor, future: std::pin::Pin<Box<dyn futures::Future<Output = ()>>>, }, } /// MlmeHandle is the only access we have to our MLME after spinning it off into its own /// event loop thread. pub struct MlmeHandle { driver_event_sink: mpsc::UnboundedSender<DriverEvent>, internal: Option<MlmeHandleInternal>, } impl MlmeHandle { pub fn stop(&mut self) { if let Err(e) = self.driver_event_sink.unbounded_send(DriverEvent::Stop) { error!("Cannot signal MLME event loop thread: {}", e); } match self.internal.take() { Some(MlmeHandleInternal::Real { join_handle }) => { // This unwrap will only fail if the thread panics. if let Err(e) = join_handle.join() { error!("MLME event loop thread panicked: {:?}", e); } } Some(MlmeHandleInternal::Fake { mut executor, mut future }) => { // Verify that our main thread would exit now. 
assert!(executor.run_until_stalled(&mut future.as_mut()).is_ready()); } None => warn!("Called stop on already stopped MLME"), } } pub fn delete(mut self) { if self.internal.is_some() { warn!("Called delete on MlmeHandle before calling stop."); self.stop() } } pub fn queue_eth_frame_tx(&mut self, bytes: Vec<u8>) -> Result<(), Error> { self.driver_event_sink .unbounded_send(DriverEvent::EthFrameTx { bytes: bytes.into() }) .map_err(|e| e.into()) } // Fns used to interact with an MLME running in test mode. // TODO(fxbug.dev/45464): Remove when tests are all in Rust. pub fn advance_fake_time(&mut self, nanos: i64) { match &mut self.internal { Some(MlmeHandleInternal::Real { .. }) => { panic!("Called advance_fake_time on a real MLME") } Some(MlmeHandleInternal::Fake { executor, future }) => { let time = executor.now(); executor.set_fake_time(time + fasync::Duration::from_nanos(nanos)); executor.wake_expired_timers(); let _ = executor.run_until_stalled(&mut future.as_mut()); } None => panic!("Called advance_fake_time on stopped MLME"), } } pub fn run_until_stalled(&mut self) { match &mut self.internal { Some(MlmeHandleInternal::Real { .. }) => { panic!("Called run_until_stalled on a real MLME") } Some(MlmeHandleInternal::Fake { executor, future }) => { let _ = executor.run_until_stalled(&mut future.as_mut()); } None => panic!("Called run_until_stalled on stopped MLME"), } } } // DriverEventSink is used by other devices to interact with our main loop thread. All // events from our ethernet device or vendor device are converted to DriverEvents // and sent through this sink, where they can then be handled serially. Multiple copies of // DriverEventSink may be safely passed between threads, including one that is used by our // vendor driver as the context for wlan_softmac_ifc_protocol_ops. struct DriverEventSink(pub mpsc::UnboundedSender<DriverEvent>); // TODO(fxbug.dev/29063): Remove copies from MacFrame and EthFrame. pub enum DriverEvent { // Indicates that the device is being removed and our main loop should exit. Stop, // TODO(fxbug.dev/43456): We need to keep stats for these events and respond to StatsQueryRequest. // Indicates receipt of a MAC frame from a peer. MacFrameRx { bytes: Vec<u8>, rx_info: banjo_wlan_softmac::WlanRxInfo }, // Requests transmission of an ethernet frame over the air. EthFrameTx { bytes: Vec<u8> }, // Reports a scan is complete. ScanComplete { status: zx::Status, scan_id: u64 }, // Reports the result of an attempted frame transmission. TxStatusReport { tx_status: banjo_common::WlanTxStatus }, // Reports the current status of the vendor driver. Status { status: u32 }, } pub struct Mlme<T: MlmeImpl> { mlme_impl: T, minstrel: Option<MinstrelWrapper>, // A stream of requests coming from the parent SME of this MLME. mlme_request_stream: fidl_mlme::MlmeRequestStream, // A stream of events initiated by C++ device drivers and then buffered here // by our MlmeHandle. 
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>, time_stream: common::timer::TimeStream<T::TimerEvent>, minstrel_time_stream: common::timer::TimeStream<()>, } fn should_enable_minstrel(mac_sublayer: &banjo_common::MacSublayerSupport) -> bool { mac_sublayer.device.tx_status_report_supported && !mac_sublayer.rate_selection_offload.supported } const MINSTREL_UPDATE_INTERVAL: std::time::Duration = std::time::Duration::from_millis(100); // Remedy for fxbug.dev/8165 (fxbug.dev/33151) // See |DATA_FRAME_INTERVAL_NANOS| // in //src/connectivity/wlan/testing/hw-sim/test/rate_selection/src/lib.rs // Ensure at least one probe frame (generated every 16 data frames) // in every cycle: // 16 <= (MINSTREL_UPDATE_INTERVAL_HW_SIM / MINSTREL_DATA_FRAME_INTERVAL_NANOS * 1e6) < 32. const MINSTREL_UPDATE_INTERVAL_HW_SIM: std::time::Duration = std::time::Duration::from_millis(83); // Require a static lifetime so we can move this MLME into an event loop task. impl<T: 'static + MlmeImpl> Mlme<T> { pub fn start( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, ) -> Result<MlmeHandle, Error> { let (driver_event_sink, driver_event_stream) = mpsc::unbounded(); // This sink is used both by the inderlying iface to forward up driver events, as well // as via the MlmeHandle to send ethernet frames and terminate MLME. let driver_event_sink_clone = driver_event_sink.clone(); let (startup_sender, startup_receiver) = oneshot::channel(); // Everything else happens in a new thread so that we can switch into an async context // without requiring all parts of MLME to impl Send. let join_handle = std::thread::spawn(move || { info!("Starting WLAN MLME main loop"); let mut executor = fasync::LocalExecutor::new().unwrap(); let future = Self::main_loop_thread( config, device, buf_provider, driver_event_sink_clone, driver_event_stream, startup_sender, ); executor.run_singlethreaded(future); }); let mut executor = fasync::LocalExecutor::new().unwrap(); let startup_result = executor.run_singlethreaded(startup_receiver); match startup_result.map_err(|e| Error::from(e)) { Ok(Ok(())) => Ok(MlmeHandle { driver_event_sink, internal: Some(MlmeHandleInternal::Real { join_handle }), }), Err(err) | Ok(Err(err)) => match join_handle.join() { Ok(()) => bail!("Failed to start the MLME event loop: {:?}", err), Err(panic_err) => { bail!("MLME event loop failed and then panicked: {}, {:?}", err, panic_err) } }, } } // Create an MLME in a test configuration. This MLME will never do anything unless it's progressed // using MlmeHandle::advance_fake_time and MlmeHandle::run_until_stalled. 
pub fn start_test( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, ) -> MlmeHandle { let executor = fasync::TestExecutor::new_with_fake_time().unwrap(); Self::start_test_with_executor(config, device, buf_provider, executor) } pub fn start_test_with_executor( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, mut executor: fasync::TestExecutor, ) -> MlmeHandle { let (driver_event_sink, driver_event_stream) = mpsc::unbounded(); let driver_event_sink_clone = driver_event_sink.clone(); let (startup_sender, mut startup_receiver) = oneshot::channel(); let mut future = Box::pin(Self::main_loop_thread( config, device, buf_provider, driver_event_sink_clone, driver_event_stream, startup_sender, )); let _ = executor.run_until_stalled(&mut future.as_mut()); startup_receiver .try_recv() .unwrap() .expect("Test MLME setup stalled.") .expect("Test MLME setup failed."); MlmeHandle { driver_event_sink, internal: Some(MlmeHandleInternal::Fake { executor, future }), } } async fn main_loop_thread( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, driver_event_sink: mpsc::UnboundedSender<DriverEvent>, driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>, startup_sender: oneshot::Sender<Result<(), Error>>, ) { let mut driver_event_sink = Box::new(DriverEventSink(driver_event_sink)); let ifc = device::WlanSoftmacIfcProtocol::new(driver_event_sink.as_mut()); // Indicate to the vendor driver that we can start sending and receiving info. Any messages received from the // driver before we start our MLME will be safely buffered in our driver_event_sink. // Note that device.start will copy relevant fields out of ifc, so dropping it after this is fine. // The returned value is the MLME server end of the channel wlanmevicemonitor created to connect MLME and SME. let mlme_protocol_handle_via_iface_creation = match device.start(&ifc) { Ok(handle) => handle, Err(e) => { // Failure to unwrap indicates a critical failure in the driver init thread. startup_sender.send(Err(anyhow!("device.start failed: {}", e))).unwrap(); return; } }; let channel = zx::Channel::from(mlme_protocol_handle_via_iface_creation); let server = fidl::endpoints::ServerEnd::<fidl_mlme::MlmeMarker>::new(channel); let (mlme_request_stream, control_handle) = match server.into_stream_and_control_handle() { Ok(res) => res, Err(e) => { // Failure to unwrap indicates a critical failure in the driver init thread. 
startup_sender .send(Err(anyhow!("Failed to get MLME request stream: {}", e))) .unwrap(); return; } }; let device_mac_sublayer_support = device.mac_sublayer_support(); let (minstrel_timer, minstrel_time_stream) = common::timer::create_timer(); let update_interval = if device_mac_sublayer_support.device.is_synthetic { MINSTREL_UPDATE_INTERVAL_HW_SIM } else { MINSTREL_UPDATE_INTERVAL }; let minstrel = if should_enable_minstrel(&device_mac_sublayer_support) { let timer_manager = MinstrelTimer { timer: minstrel_timer, current_timer: None }; let probe_sequence = probe_sequence::ProbeSequence::random_new(); Some(Arc::new(Mutex::new(minstrel::MinstrelRateSelector::new( timer_manager, update_interval, probe_sequence, )))) } else { None }; let new_device = Device::new(device, minstrel.clone(), control_handle); let (timer, time_stream) = common::timer::create_timer(); let mlme_impl = T::new(config, new_device, buf_provider, timer); let mlme = Self { mlme_impl, minstrel, mlme_request_stream, driver_event_stream, time_stream, minstrel_time_stream, }; // Startup is complete. Signal the main thread to proceed. // Failure to unwrap indicates a critical failure in the driver init thread. startup_sender.send(Ok(())).unwrap(); let result = Self::run_main_loop(mlme).await; match result { Ok(()) => info!("MLME event loop exited gracefully."), Err(e) => error!("MLME event loop exited with error: {:?}", e), } } /// Begin processing MLME events. /// Does not return until iface destruction is requested via DriverEvent::Stop, unless /// a critical error occurs. Note that MlmeHandle::stop will work in either case. pub async fn run_main_loop(mut self) -> Result<(), Error> { let mut timer_stream = common::timer::make_async_timed_event_stream(self.time_stream).fuse(); let mut minstrel_timer_stream = common::timer::make_async_timed_event_stream(self.minstrel_time_stream).fuse(); loop { select! { // Process requests from SME. mlme_request = self.mlme_request_stream.next() => match mlme_request { Some(req) => { match req { Ok(req) => { let method_name = req.method_name(); if let Err(e) = self.mlme_impl.handle_mlme_message(req) { info!("Failed to handle mlme {} request: {}", method_name, e); } } Err(e) => { info!("Failure while receiving mlme request: {}", e); } } } None => bail!("MLME request stream terminated unexpectedly."), }, // Process requests from our C++ drivers. driver_event = self.driver_event_stream.next() => match driver_event { Some(event) => match event { // DriverEvent::Stop indicates a safe shutdown. DriverEvent::Stop => return Ok(()), DriverEvent::MacFrameRx { bytes, rx_info } => { self.mlme_impl.handle_mac_frame_rx(&bytes[..], rx_info); } DriverEvent::EthFrameTx { bytes } => { if let Err(e) = self.mlme_impl.handle_eth_frame_tx(&bytes[..]) { // TODO(fxbug.dev/45464): Keep a counter of these failures. 
info!("Failed to handle eth frame: {}", e); } } DriverEvent::ScanComplete { status, scan_id } => { self.mlme_impl.handle_scan_complete(status, scan_id) }, DriverEvent::TxStatusReport { tx_status } => { if let Some(minstrel) = self.minstrel.as_ref() { minstrel.lock().handle_tx_status_report(&tx_status) } } DriverEvent::Status { status } => { self.mlme_impl.access_device().set_eth_status(status) } }, None => bail!("Driver event stream terminated unexpectedly."), }, timed_event = timer_stream.select_next_some() => { self.mlme_impl.handle_timeout(timed_event.id, timed_event.event); } _minstrel_timeout = minstrel_timer_stream.select_next_some() => { if let Some(minstrel) = self.minstrel.as_ref() { minstrel.lock().handle_timeout() } } } } } } #[cfg(test)] mod test_utils { use { super::*, banjo_fuchsia_hardware_wlan_associnfo as banjo_wlan_associnfo, banjo_fuchsia_wlan_common as banjo_common, fidl::endpoints::RequestStream, std::default::Default, }; #[derive(Copy, Clone, Debug)] pub struct MockWlanRxInfo { pub rx_flags: banjo_wlan_softmac::WlanRxInfoFlags, pub valid_fields: u32, pub phy: banjo_common::WlanPhyType, pub data_rate: u32, pub channel: banjo_common::WlanChannel, pub mcs: u8, pub rssi_dbm: i8, pub snr_dbh: i16, } impl Default for MockWlanRxInfo { fn default() -> Self { Self { valid_fields: banjo_wlan_associnfo::WlanRxInfoValid::CHAN_WIDTH.0 | banjo_wlan_associnfo::WlanRxInfoValid::RSSI.0 | banjo_wlan_associnfo::WlanRxInfoValid::SNR.0, channel: banjo_common::WlanChannel { primary: 1, cbw: banjo_common::ChannelBandwidth::CBW20, secondary80: 0, }, rssi_dbm: -40, snr_dbh: 35, // Default to 0 for these fields since there are no // other reasonable values to mock. rx_flags: banjo_wlan_softmac::WlanRxInfoFlags(0), phy: banjo_common::WlanPhyType::DSSS, data_rate: 0, mcs: 0, } } } impl From<MockWlanRxInfo> for banjo_wlan_softmac::WlanRxInfo { fn from(mock_rx_info: MockWlanRxInfo) -> banjo_wlan_softmac::WlanRxInfo { banjo_wlan_softmac::WlanRxInfo { rx_flags: mock_rx_info.rx_flags, valid_fields: mock_rx_info.valid_fields, phy: mock_rx_info.phy, data_rate: mock_rx_info.data_rate, channel: mock_rx_info.channel, mcs: mock_rx_info.mcs, rssi_dbm: mock_rx_info.rssi_dbm, snr_dbh: mock_rx_info.snr_dbh, } } } pub(crate) fn fake_control_handle( // We use this unused parameter to ensure that an executor exists. _exec: &fuchsia_async::TestExecutor, ) -> (fidl_mlme::MlmeControlHandle, fuchsia_zircon::Channel) { let (c1, c2) = fuchsia_zircon::Channel::create().unwrap(); let async_c1 = fidl::AsyncChannel::from_channel(c1).unwrap(); let request_stream = fidl_mlme::MlmeRequestStream::from_channel(async_c1); let control_handle = request_stream.control_handle(); (control_handle, c2) } pub struct FakeMlme { device: Device, } impl MlmeImpl for FakeMlme { type Config = (); type TimerEvent = (); fn new( _config: Self::Config, device: Device, _buf_provider: buffer::BufferProvider, _scheduler: common::timer::Timer<Self::TimerEvent>, ) -> Self { Self { device } } fn handle_mlme_message(&mut self, _msg: fidl_mlme::MlmeRequest) -> Result<(), Error> { unimplemented!() } fn handle_mac_frame_rx(&mut self, _bytes: &[u8], _rx_info: banjo_wlan_softmac::WlanRxInfo) { unimplemented!() } fn
(&mut self, _bytes: &[u8]) -> Result<(), Error> { unimplemented!() } fn handle_scan_complete(&mut self, _status: zx::Status, _scan_id: u64) { unimplemented!() } fn handle_timeout(&mut self, _event_id: common::timer::EventId, _event: Self::TimerEvent) { unimplemented!() } fn access_device(&mut self) -> &mut Device { &mut self.device } } } #[cfg(test)] mod tests { use {super::*, crate::device::test_utils::FakeDevice}; #[fuchsia::test] fn test_mlme_handle_use_after_stop() { let mut exec = fasync::TestExecutor::new().expect("failed to create an executor"); let fake_config = (); let mut fake_device = FakeDevice::new(&mut exec); let fake_buffer_provider = buffer::FakeBufferProvider::new(); let mut handle = Mlme::<test_utils::FakeMlme>::start_test_with_executor( fake_config, fake_device.as_raw_device(), fake_buffer_provider, exec, ); handle.stop(); handle .queue_eth_frame_tx(vec![0u8; 10]) .expect_err("Shouldn't be able to queue tx after stopping MLME"); } }
handle_eth_frame_tx
identifier_name
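The DriverEventSink comment in the file above describes the fan-in pattern: events from the ethernet and vendor devices are converted to DriverEvents, pushed through clones of an unbounded mpsc sender from any thread, and drained serially by the single main-loop consumer. Below is a self-contained sketch of that pattern using the same futures mpsc channel; the tiny Event enum, the spawned producer thread, and futures::executor::block_on (standing in for the Fuchsia executor) are illustrative and much smaller than the real DriverEvent handling:

```rust
use futures::{channel::mpsc, StreamExt};

// Illustrative event type; only the Stop and EthFrameTx shapes from the file
// above are mirrored here.
enum Event {
    Stop,
    EthFrameTx { bytes: Vec<u8> },
}

fn main() {
    let (sink, mut stream) = mpsc::unbounded::<Event>();

    // Senders can be cloned and moved to other threads, as the DriverEventSink
    // comment describes.
    let producer = sink.clone();
    let handle = std::thread::spawn(move || {
        producer.unbounded_send(Event::EthFrameTx { bytes: vec![0u8; 4] }).unwrap();
        producer.unbounded_send(Event::Stop).unwrap();
    });

    // A single consumer drains the channel, so events are handled serially.
    futures::executor::block_on(async {
        while let Some(event) = stream.next().await {
            match event {
                Event::Stop => break,
                Event::EthFrameTx { bytes } => println!("tx {} bytes", bytes.len()),
            }
        }
    });
    handle.join().unwrap();
}
```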
lib.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! This crate implements IEEE Std 802.11-2016 MLME as a library for hardware that supports //! SoftMAC. This is distinct from FullMAC, which is implemented by drivers and firmware. The //! implementation is broadly divided between client and AP stations, with some shared components //! and state machine infrastructure. See the [`client`] and [`ap`] modules. //! //! [`ap`]: crate::ap //! [`client`]: crate::client mod akm_algorithm; pub mod ap; pub mod auth; mod block_ack; pub mod buffer; pub mod client; mod ddk_converter; pub mod device; pub mod disconnect; pub mod error; pub mod key; mod logger; mod minstrel; #[allow(unused)] // TODO(fxbug.dev/79543): Remove annotation once used. mod probe_sequence; pub use {ddk_converter::*, wlan_common as common}; use { anyhow::{anyhow, bail, Error}, banjo_fuchsia_hardware_wlan_softmac as banjo_wlan_softmac, banjo_fuchsia_wlan_common as banjo_common, device::{Device, DeviceInterface}, fidl_fuchsia_wlan_mlme as fidl_mlme, fuchsia_async as fasync, fuchsia_zircon as zx, futures::{ channel::{mpsc, oneshot}, select, StreamExt, }, log::{error, info, warn}, parking_lot::Mutex, std::sync::Arc, std::time::Duration, }; pub trait MlmeImpl { type Config: Send; type TimerEvent; fn new( config: Self::Config, device: Device, buf_provider: buffer::BufferProvider, scheduler: common::timer::Timer<Self::TimerEvent>, ) -> Self; fn handle_mlme_message(&mut self, msg: fidl_mlme::MlmeRequest) -> Result<(), Error>; fn handle_mac_frame_rx(&mut self, bytes: &[u8], rx_info: banjo_wlan_softmac::WlanRxInfo); fn handle_eth_frame_tx(&mut self, bytes: &[u8]) -> Result<(), Error>; fn handle_scan_complete(&mut self, status: zx::Status, scan_id: u64); fn handle_timeout(&mut self, event_id: common::timer::EventId, event: Self::TimerEvent); fn access_device(&mut self) -> &mut Device; } pub struct MinstrelTimer { timer: wlan_common::timer::Timer<()>, current_timer: Option<common::timer::EventId>, } impl minstrel::TimerManager for MinstrelTimer { fn schedule(&mut self, from_now: Duration) { self.current_timer.replace(self.timer.schedule_after(from_now.into(), ())); } fn cancel(&mut self) { self.current_timer.take(); } } type MinstrelWrapper = Arc<Mutex<minstrel::MinstrelRateSelector<MinstrelTimer>>>; // We support a fake MLME internal representation that allows tests written in C++ to manually // tweak the system time. // TODO(fxbug.dev/45464): Remove when tests are all in Rust. enum MlmeHandleInternal { Real { join_handle: std::thread::JoinHandle<()>, }, Fake { executor: fasync::TestExecutor, future: std::pin::Pin<Box<dyn futures::Future<Output = ()>>>, }, } /// MlmeHandle is the only access we have to our MLME after spinning it off into its own /// event loop thread. pub struct MlmeHandle { driver_event_sink: mpsc::UnboundedSender<DriverEvent>, internal: Option<MlmeHandleInternal>, } impl MlmeHandle { pub fn stop(&mut self) { if let Err(e) = self.driver_event_sink.unbounded_send(DriverEvent::Stop) { error!("Cannot signal MLME event loop thread: {}", e); } match self.internal.take() { Some(MlmeHandleInternal::Real { join_handle }) => { // This unwrap will only fail if the thread panics. if let Err(e) = join_handle.join() { error!("MLME event loop thread panicked: {:?}", e); } } Some(MlmeHandleInternal::Fake { mut executor, mut future }) => { // Verify that our main thread would exit now. 
assert!(executor.run_until_stalled(&mut future.as_mut()).is_ready()); } None => warn!("Called stop on already stopped MLME"), } } pub fn delete(mut self) { if self.internal.is_some() { warn!("Called delete on MlmeHandle before calling stop."); self.stop() } } pub fn queue_eth_frame_tx(&mut self, bytes: Vec<u8>) -> Result<(), Error> { self.driver_event_sink .unbounded_send(DriverEvent::EthFrameTx { bytes: bytes.into() }) .map_err(|e| e.into()) } // Fns used to interact with an MLME running in test mode. // TODO(fxbug.dev/45464): Remove when tests are all in Rust. pub fn advance_fake_time(&mut self, nanos: i64) { match &mut self.internal { Some(MlmeHandleInternal::Real { .. }) => { panic!("Called advance_fake_time on a real MLME") } Some(MlmeHandleInternal::Fake { executor, future }) => { let time = executor.now(); executor.set_fake_time(time + fasync::Duration::from_nanos(nanos)); executor.wake_expired_timers(); let _ = executor.run_until_stalled(&mut future.as_mut()); } None => panic!("Called advance_fake_time on stopped MLME"), } } pub fn run_until_stalled(&mut self) { match &mut self.internal { Some(MlmeHandleInternal::Real { .. }) => { panic!("Called run_until_stalled on a real MLME") } Some(MlmeHandleInternal::Fake { executor, future }) => { let _ = executor.run_until_stalled(&mut future.as_mut()); } None => panic!("Called run_until_stalled on stopped MLME"), } } } // DriverEventSink is used by other devices to interact with our main loop thread. All // events from our ethernet device or vendor device are converted to DriverEvents // and sent through this sink, where they can then be handled serially. Multiple copies of // DriverEventSink may be safely passed between threads, including one that is used by our // vendor driver as the context for wlan_softmac_ifc_protocol_ops. struct DriverEventSink(pub mpsc::UnboundedSender<DriverEvent>); // TODO(fxbug.dev/29063): Remove copies from MacFrame and EthFrame. pub enum DriverEvent { // Indicates that the device is being removed and our main loop should exit. Stop, // TODO(fxbug.dev/43456): We need to keep stats for these events and respond to StatsQueryRequest. // Indicates receipt of a MAC frame from a peer. MacFrameRx { bytes: Vec<u8>, rx_info: banjo_wlan_softmac::WlanRxInfo }, // Requests transmission of an ethernet frame over the air. EthFrameTx { bytes: Vec<u8> }, // Reports a scan is complete. ScanComplete { status: zx::Status, scan_id: u64 }, // Reports the result of an attempted frame transmission. TxStatusReport { tx_status: banjo_common::WlanTxStatus }, // Reports the current status of the vendor driver. Status { status: u32 }, } pub struct Mlme<T: MlmeImpl> { mlme_impl: T, minstrel: Option<MinstrelWrapper>, // A stream of requests coming from the parent SME of this MLME. mlme_request_stream: fidl_mlme::MlmeRequestStream, // A stream of events initiated by C++ device drivers and then buffered here // by our MlmeHandle. 
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>, time_stream: common::timer::TimeStream<T::TimerEvent>, minstrel_time_stream: common::timer::TimeStream<()>, } fn should_enable_minstrel(mac_sublayer: &banjo_common::MacSublayerSupport) -> bool { mac_sublayer.device.tx_status_report_supported && !mac_sublayer.rate_selection_offload.supported } const MINSTREL_UPDATE_INTERVAL: std::time::Duration = std::time::Duration::from_millis(100); // Remedy for fxbug.dev/8165 (fxbug.dev/33151) // See |DATA_FRAME_INTERVAL_NANOS| // in //src/connectivity/wlan/testing/hw-sim/test/rate_selection/src/lib.rs // Ensure at least one probe frame (generated every 16 data frames) // in every cycle: // 16 <= (MINSTREL_UPDATE_INTERVAL_HW_SIM / MINSTREL_DATA_FRAME_INTERVAL_NANOS * 1e6) < 32. const MINSTREL_UPDATE_INTERVAL_HW_SIM: std::time::Duration = std::time::Duration::from_millis(83); // Require a static lifetime so we can move this MLME into an event loop task. impl<T: 'static + MlmeImpl> Mlme<T> { pub fn start( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, ) -> Result<MlmeHandle, Error> { let (driver_event_sink, driver_event_stream) = mpsc::unbounded(); // This sink is used both by the inderlying iface to forward up driver events, as well // as via the MlmeHandle to send ethernet frames and terminate MLME. let driver_event_sink_clone = driver_event_sink.clone(); let (startup_sender, startup_receiver) = oneshot::channel(); // Everything else happens in a new thread so that we can switch into an async context // without requiring all parts of MLME to impl Send. let join_handle = std::thread::spawn(move || { info!("Starting WLAN MLME main loop"); let mut executor = fasync::LocalExecutor::new().unwrap(); let future = Self::main_loop_thread( config, device, buf_provider, driver_event_sink_clone, driver_event_stream, startup_sender, ); executor.run_singlethreaded(future); }); let mut executor = fasync::LocalExecutor::new().unwrap(); let startup_result = executor.run_singlethreaded(startup_receiver); match startup_result.map_err(|e| Error::from(e)) { Ok(Ok(())) => Ok(MlmeHandle { driver_event_sink, internal: Some(MlmeHandleInternal::Real { join_handle }), }), Err(err) | Ok(Err(err)) => match join_handle.join() { Ok(()) => bail!("Failed to start the MLME event loop: {:?}", err), Err(panic_err) => { bail!("MLME event loop failed and then panicked: {}, {:?}", err, panic_err) } }, } } // Create an MLME in a test configuration. This MLME will never do anything unless it's progressed // using MlmeHandle::advance_fake_time and MlmeHandle::run_until_stalled. 
pub fn start_test( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, ) -> MlmeHandle { let executor = fasync::TestExecutor::new_with_fake_time().unwrap(); Self::start_test_with_executor(config, device, buf_provider, executor) } pub fn start_test_with_executor( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, mut executor: fasync::TestExecutor, ) -> MlmeHandle { let (driver_event_sink, driver_event_stream) = mpsc::unbounded(); let driver_event_sink_clone = driver_event_sink.clone(); let (startup_sender, mut startup_receiver) = oneshot::channel(); let mut future = Box::pin(Self::main_loop_thread( config, device, buf_provider, driver_event_sink_clone, driver_event_stream, startup_sender, )); let _ = executor.run_until_stalled(&mut future.as_mut()); startup_receiver .try_recv() .unwrap() .expect("Test MLME setup stalled.") .expect("Test MLME setup failed."); MlmeHandle { driver_event_sink, internal: Some(MlmeHandleInternal::Fake { executor, future }), } } async fn main_loop_thread( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, driver_event_sink: mpsc::UnboundedSender<DriverEvent>, driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>, startup_sender: oneshot::Sender<Result<(), Error>>, ) { let mut driver_event_sink = Box::new(DriverEventSink(driver_event_sink)); let ifc = device::WlanSoftmacIfcProtocol::new(driver_event_sink.as_mut()); // Indicate to the vendor driver that we can start sending and receiving info. Any messages received from the // driver before we start our MLME will be safely buffered in our driver_event_sink. // Note that device.start will copy relevant fields out of ifc, so dropping it after this is fine. // The returned value is the MLME server end of the channel wlanmevicemonitor created to connect MLME and SME. let mlme_protocol_handle_via_iface_creation = match device.start(&ifc) { Ok(handle) => handle, Err(e) =>
}; let channel = zx::Channel::from(mlme_protocol_handle_via_iface_creation); let server = fidl::endpoints::ServerEnd::<fidl_mlme::MlmeMarker>::new(channel); let (mlme_request_stream, control_handle) = match server.into_stream_and_control_handle() { Ok(res) => res, Err(e) => { // Failure to unwrap indicates a critical failure in the driver init thread. startup_sender .send(Err(anyhow!("Failed to get MLME request stream: {}", e))) .unwrap(); return; } }; let device_mac_sublayer_support = device.mac_sublayer_support(); let (minstrel_timer, minstrel_time_stream) = common::timer::create_timer(); let update_interval = if device_mac_sublayer_support.device.is_synthetic { MINSTREL_UPDATE_INTERVAL_HW_SIM } else { MINSTREL_UPDATE_INTERVAL }; let minstrel = if should_enable_minstrel(&device_mac_sublayer_support) { let timer_manager = MinstrelTimer { timer: minstrel_timer, current_timer: None }; let probe_sequence = probe_sequence::ProbeSequence::random_new(); Some(Arc::new(Mutex::new(minstrel::MinstrelRateSelector::new( timer_manager, update_interval, probe_sequence, )))) } else { None }; let new_device = Device::new(device, minstrel.clone(), control_handle); let (timer, time_stream) = common::timer::create_timer(); let mlme_impl = T::new(config, new_device, buf_provider, timer); let mlme = Self { mlme_impl, minstrel, mlme_request_stream, driver_event_stream, time_stream, minstrel_time_stream, }; // Startup is complete. Signal the main thread to proceed. // Failure to unwrap indicates a critical failure in the driver init thread. startup_sender.send(Ok(())).unwrap(); let result = Self::run_main_loop(mlme).await; match result { Ok(()) => info!("MLME event loop exited gracefully."), Err(e) => error!("MLME event loop exited with error: {:?}", e), } } /// Begin processing MLME events. /// Does not return until iface destruction is requested via DriverEvent::Stop, unless /// a critical error occurs. Note that MlmeHandle::stop will work in either case. pub async fn run_main_loop(mut self) -> Result<(), Error> { let mut timer_stream = common::timer::make_async_timed_event_stream(self.time_stream).fuse(); let mut minstrel_timer_stream = common::timer::make_async_timed_event_stream(self.minstrel_time_stream).fuse(); loop { select! { // Process requests from SME. mlme_request = self.mlme_request_stream.next() => match mlme_request { Some(req) => { match req { Ok(req) => { let method_name = req.method_name(); if let Err(e) = self.mlme_impl.handle_mlme_message(req) { info!("Failed to handle mlme {} request: {}", method_name, e); } } Err(e) => { info!("Failure while receiving mlme request: {}", e); } } } None => bail!("MLME request stream terminated unexpectedly."), }, // Process requests from our C++ drivers. driver_event = self.driver_event_stream.next() => match driver_event { Some(event) => match event { // DriverEvent::Stop indicates a safe shutdown. DriverEvent::Stop => return Ok(()), DriverEvent::MacFrameRx { bytes, rx_info } => { self.mlme_impl.handle_mac_frame_rx(&bytes[..], rx_info); } DriverEvent::EthFrameTx { bytes } => { if let Err(e) = self.mlme_impl.handle_eth_frame_tx(&bytes[..]) { // TODO(fxbug.dev/45464): Keep a counter of these failures. 
info!("Failed to handle eth frame: {}", e); } } DriverEvent::ScanComplete { status, scan_id } => { self.mlme_impl.handle_scan_complete(status, scan_id) }, DriverEvent::TxStatusReport { tx_status } => { if let Some(minstrel) = self.minstrel.as_ref() { minstrel.lock().handle_tx_status_report(&tx_status) } } DriverEvent::Status { status } => { self.mlme_impl.access_device().set_eth_status(status) } }, None => bail!("Driver event stream terminated unexpectedly."), }, timed_event = timer_stream.select_next_some() => { self.mlme_impl.handle_timeout(timed_event.id, timed_event.event); } _minstrel_timeout = minstrel_timer_stream.select_next_some() => { if let Some(minstrel) = self.minstrel.as_ref() { minstrel.lock().handle_timeout() } } } } } } #[cfg(test)] mod test_utils { use { super::*, banjo_fuchsia_hardware_wlan_associnfo as banjo_wlan_associnfo, banjo_fuchsia_wlan_common as banjo_common, fidl::endpoints::RequestStream, std::default::Default, }; #[derive(Copy, Clone, Debug)] pub struct MockWlanRxInfo { pub rx_flags: banjo_wlan_softmac::WlanRxInfoFlags, pub valid_fields: u32, pub phy: banjo_common::WlanPhyType, pub data_rate: u32, pub channel: banjo_common::WlanChannel, pub mcs: u8, pub rssi_dbm: i8, pub snr_dbh: i16, } impl Default for MockWlanRxInfo { fn default() -> Self { Self { valid_fields: banjo_wlan_associnfo::WlanRxInfoValid::CHAN_WIDTH.0 | banjo_wlan_associnfo::WlanRxInfoValid::RSSI.0 | banjo_wlan_associnfo::WlanRxInfoValid::SNR.0, channel: banjo_common::WlanChannel { primary: 1, cbw: banjo_common::ChannelBandwidth::CBW20, secondary80: 0, }, rssi_dbm: -40, snr_dbh: 35, // Default to 0 for these fields since there are no // other reasonable values to mock. rx_flags: banjo_wlan_softmac::WlanRxInfoFlags(0), phy: banjo_common::WlanPhyType::DSSS, data_rate: 0, mcs: 0, } } } impl From<MockWlanRxInfo> for banjo_wlan_softmac::WlanRxInfo { fn from(mock_rx_info: MockWlanRxInfo) -> banjo_wlan_softmac::WlanRxInfo { banjo_wlan_softmac::WlanRxInfo { rx_flags: mock_rx_info.rx_flags, valid_fields: mock_rx_info.valid_fields, phy: mock_rx_info.phy, data_rate: mock_rx_info.data_rate, channel: mock_rx_info.channel, mcs: mock_rx_info.mcs, rssi_dbm: mock_rx_info.rssi_dbm, snr_dbh: mock_rx_info.snr_dbh, } } } pub(crate) fn fake_control_handle( // We use this unused parameter to ensure that an executor exists. 
_exec: &fuchsia_async::TestExecutor, ) -> (fidl_mlme::MlmeControlHandle, fuchsia_zircon::Channel) { let (c1, c2) = fuchsia_zircon::Channel::create().unwrap(); let async_c1 = fidl::AsyncChannel::from_channel(c1).unwrap(); let request_stream = fidl_mlme::MlmeRequestStream::from_channel(async_c1); let control_handle = request_stream.control_handle(); (control_handle, c2) } pub struct FakeMlme { device: Device, } impl MlmeImpl for FakeMlme { type Config = (); type TimerEvent = (); fn new( _config: Self::Config, device: Device, _buf_provider: buffer::BufferProvider, _scheduler: common::timer::Timer<Self::TimerEvent>, ) -> Self { Self { device } } fn handle_mlme_message(&mut self, _msg: fidl_mlme::MlmeRequest) -> Result<(), Error> { unimplemented!() } fn handle_mac_frame_rx(&mut self, _bytes: &[u8], _rx_info: banjo_wlan_softmac::WlanRxInfo) { unimplemented!() } fn handle_eth_frame_tx(&mut self, _bytes: &[u8]) -> Result<(), Error> { unimplemented!() } fn handle_scan_complete(&mut self, _status: zx::Status, _scan_id: u64) { unimplemented!() } fn handle_timeout(&mut self, _event_id: common::timer::EventId, _event: Self::TimerEvent) { unimplemented!() } fn access_device(&mut self) -> &mut Device { &mut self.device } } } #[cfg(test)] mod tests { use {super::*, crate::device::test_utils::FakeDevice}; #[fuchsia::test] fn test_mlme_handle_use_after_stop() { let mut exec = fasync::TestExecutor::new().expect("failed to create an executor"); let fake_config = (); let mut fake_device = FakeDevice::new(&mut exec); let fake_buffer_provider = buffer::FakeBufferProvider::new(); let mut handle = Mlme::<test_utils::FakeMlme>::start_test_with_executor( fake_config, fake_device.as_raw_device(), fake_buffer_provider, exec, ); handle.stop(); handle .queue_eth_frame_tx(vec![0u8; 10]) .expect_err("Shouldn't be able to queue tx after stopping MLME"); } }
{ // Failure to unwrap indicates a critical failure in the driver init thread. startup_sender.send(Err(anyhow!("device.start failed: {}", e))).unwrap(); return; }
conditional_block
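The conditional block captured as this row's middle is the failure path of the startup handshake: main_loop_thread reports a device.start error through startup_sender and returns, while Mlme::start blocks on the matching receiver to decide whether the spawned thread came up. A minimal sketch of that oneshot handshake, using the anyhow and futures crates the file already imports; spawn_worker, the fail flag, and futures::executor::block_on (standing in for the Fuchsia executor) are illustrative:

```rust
use anyhow::{anyhow, Error};
use futures::channel::oneshot;

// Illustrative stand-in for the MLME init thread: it reports startup success
// or failure over a oneshot channel before it would enter its event loop.
fn spawn_worker(fail: bool) -> Result<std::thread::JoinHandle<()>, Error> {
    let (startup_sender, startup_receiver) = oneshot::channel::<Result<(), Error>>();

    let join_handle = std::thread::spawn(move || {
        if fail {
            // Mirrors the error branch above: report the failure and leave the
            // thread early instead of running the event loop.
            startup_sender.send(Err(anyhow!("device.start failed"))).unwrap();
            return;
        }
        startup_sender.send(Ok(())).unwrap();
        // ... the event loop would run here ...
    });

    // The caller blocks until the worker reports how startup went; a dropped
    // sender (Canceled) is treated like a startup failure.
    let startup_result = futures::executor::block_on(startup_receiver)
        .map_err(|canceled| anyhow!("worker exited before reporting: {}", canceled));
    match startup_result {
        Ok(Ok(())) => Ok(join_handle),
        Ok(Err(e)) | Err(e) => {
            let _ = join_handle.join();
            Err(anyhow!("failed to start worker: {:?}", e))
        }
    }
}

fn main() {
    assert!(spawn_worker(false).is_ok());
    assert!(spawn_worker(true).is_err());
}
```

As in the file, the caller joins the thread on failure so a panic or early exit surfaces to the caller instead of leaking a detached thread.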
lib.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. //! This crate implements IEEE Std 802.11-2016 MLME as a library for hardware that supports //! SoftMAC. This is distinct from FullMAC, which is implemented by drivers and firmware. The //! implementation is broadly divided between client and AP stations, with some shared components //! and state machine infrastructure. See the [`client`] and [`ap`] modules. //! //! [`ap`]: crate::ap //! [`client`]: crate::client mod akm_algorithm; pub mod ap; pub mod auth; mod block_ack; pub mod buffer; pub mod client; mod ddk_converter; pub mod device; pub mod disconnect; pub mod error; pub mod key; mod logger; mod minstrel; #[allow(unused)] // TODO(fxbug.dev/79543): Remove annotation once used. mod probe_sequence; pub use {ddk_converter::*, wlan_common as common}; use { anyhow::{anyhow, bail, Error}, banjo_fuchsia_hardware_wlan_softmac as banjo_wlan_softmac, banjo_fuchsia_wlan_common as banjo_common, device::{Device, DeviceInterface}, fidl_fuchsia_wlan_mlme as fidl_mlme, fuchsia_async as fasync, fuchsia_zircon as zx, futures::{ channel::{mpsc, oneshot}, select, StreamExt, }, log::{error, info, warn}, parking_lot::Mutex, std::sync::Arc, std::time::Duration, }; pub trait MlmeImpl { type Config: Send; type TimerEvent; fn new( config: Self::Config, device: Device, buf_provider: buffer::BufferProvider, scheduler: common::timer::Timer<Self::TimerEvent>, ) -> Self; fn handle_mlme_message(&mut self, msg: fidl_mlme::MlmeRequest) -> Result<(), Error>; fn handle_mac_frame_rx(&mut self, bytes: &[u8], rx_info: banjo_wlan_softmac::WlanRxInfo); fn handle_eth_frame_tx(&mut self, bytes: &[u8]) -> Result<(), Error>; fn handle_scan_complete(&mut self, status: zx::Status, scan_id: u64); fn handle_timeout(&mut self, event_id: common::timer::EventId, event: Self::TimerEvent); fn access_device(&mut self) -> &mut Device; } pub struct MinstrelTimer { timer: wlan_common::timer::Timer<()>, current_timer: Option<common::timer::EventId>, } impl minstrel::TimerManager for MinstrelTimer { fn schedule(&mut self, from_now: Duration) { self.current_timer.replace(self.timer.schedule_after(from_now.into(), ())); } fn cancel(&mut self) { self.current_timer.take(); } } type MinstrelWrapper = Arc<Mutex<minstrel::MinstrelRateSelector<MinstrelTimer>>>; // We support a fake MLME internal representation that allows tests written in C++ to manually // tweak the system time. // TODO(fxbug.dev/45464): Remove when tests are all in Rust. enum MlmeHandleInternal { Real { join_handle: std::thread::JoinHandle<()>, }, Fake { executor: fasync::TestExecutor, future: std::pin::Pin<Box<dyn futures::Future<Output = ()>>>, }, } /// MlmeHandle is the only access we have to our MLME after spinning it off into its own /// event loop thread. pub struct MlmeHandle { driver_event_sink: mpsc::UnboundedSender<DriverEvent>, internal: Option<MlmeHandleInternal>, } impl MlmeHandle { pub fn stop(&mut self) { if let Err(e) = self.driver_event_sink.unbounded_send(DriverEvent::Stop) { error!("Cannot signal MLME event loop thread: {}", e); } match self.internal.take() { Some(MlmeHandleInternal::Real { join_handle }) => { // This unwrap will only fail if the thread panics. if let Err(e) = join_handle.join() { error!("MLME event loop thread panicked: {:?}", e); } } Some(MlmeHandleInternal::Fake { mut executor, mut future }) => { // Verify that our main thread would exit now. 
assert!(executor.run_until_stalled(&mut future.as_mut()).is_ready()); } None => warn!("Called stop on already stopped MLME"), } } pub fn delete(mut self) { if self.internal.is_some() { warn!("Called delete on MlmeHandle before calling stop."); self.stop() } } pub fn queue_eth_frame_tx(&mut self, bytes: Vec<u8>) -> Result<(), Error> { self.driver_event_sink .unbounded_send(DriverEvent::EthFrameTx { bytes: bytes.into() }) .map_err(|e| e.into()) } // Fns used to interact with an MLME running in test mode. // TODO(fxbug.dev/45464): Remove when tests are all in Rust. pub fn advance_fake_time(&mut self, nanos: i64) { match &mut self.internal { Some(MlmeHandleInternal::Real { .. }) => { panic!("Called advance_fake_time on a real MLME") } Some(MlmeHandleInternal::Fake { executor, future }) => { let time = executor.now(); executor.set_fake_time(time + fasync::Duration::from_nanos(nanos)); executor.wake_expired_timers(); let _ = executor.run_until_stalled(&mut future.as_mut()); } None => panic!("Called advance_fake_time on stopped MLME"), } } pub fn run_until_stalled(&mut self) { match &mut self.internal { Some(MlmeHandleInternal::Real { .. }) => { panic!("Called run_until_stalled on a real MLME") } Some(MlmeHandleInternal::Fake { executor, future }) => { let _ = executor.run_until_stalled(&mut future.as_mut()); } None => panic!("Called run_until_stalled on stopped MLME"), } } } // DriverEventSink is used by other devices to interact with our main loop thread. All // events from our ethernet device or vendor device are converted to DriverEvents // and sent through this sink, where they can then be handled serially. Multiple copies of // DriverEventSink may be safely passed between threads, including one that is used by our // vendor driver as the context for wlan_softmac_ifc_protocol_ops. struct DriverEventSink(pub mpsc::UnboundedSender<DriverEvent>); // TODO(fxbug.dev/29063): Remove copies from MacFrame and EthFrame. pub enum DriverEvent { // Indicates that the device is being removed and our main loop should exit. Stop, // TODO(fxbug.dev/43456): We need to keep stats for these events and respond to StatsQueryRequest. // Indicates receipt of a MAC frame from a peer. MacFrameRx { bytes: Vec<u8>, rx_info: banjo_wlan_softmac::WlanRxInfo }, // Requests transmission of an ethernet frame over the air. EthFrameTx { bytes: Vec<u8> }, // Reports a scan is complete. ScanComplete { status: zx::Status, scan_id: u64 }, // Reports the result of an attempted frame transmission. TxStatusReport { tx_status: banjo_common::WlanTxStatus }, // Reports the current status of the vendor driver. Status { status: u32 }, } pub struct Mlme<T: MlmeImpl> { mlme_impl: T, minstrel: Option<MinstrelWrapper>, // A stream of requests coming from the parent SME of this MLME. mlme_request_stream: fidl_mlme::MlmeRequestStream, // A stream of events initiated by C++ device drivers and then buffered here // by our MlmeHandle. 
driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>, time_stream: common::timer::TimeStream<T::TimerEvent>, minstrel_time_stream: common::timer::TimeStream<()>, } fn should_enable_minstrel(mac_sublayer: &banjo_common::MacSublayerSupport) -> bool { mac_sublayer.device.tx_status_report_supported && !mac_sublayer.rate_selection_offload.supported } const MINSTREL_UPDATE_INTERVAL: std::time::Duration = std::time::Duration::from_millis(100); // Remedy for fxbug.dev/8165 (fxbug.dev/33151) // See |DATA_FRAME_INTERVAL_NANOS| // in //src/connectivity/wlan/testing/hw-sim/test/rate_selection/src/lib.rs // Ensure at least one probe frame (generated every 16 data frames) // in every cycle: // 16 <= (MINSTREL_UPDATE_INTERVAL_HW_SIM / MINSTREL_DATA_FRAME_INTERVAL_NANOS * 1e6) < 32. const MINSTREL_UPDATE_INTERVAL_HW_SIM: std::time::Duration = std::time::Duration::from_millis(83); // Require a static lifetime so we can move this MLME into an event loop task. impl<T: 'static + MlmeImpl> Mlme<T> { pub fn start( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, ) -> Result<MlmeHandle, Error> { let (driver_event_sink, driver_event_stream) = mpsc::unbounded(); // This sink is used both by the inderlying iface to forward up driver events, as well // as via the MlmeHandle to send ethernet frames and terminate MLME. let driver_event_sink_clone = driver_event_sink.clone(); let (startup_sender, startup_receiver) = oneshot::channel(); // Everything else happens in a new thread so that we can switch into an async context // without requiring all parts of MLME to impl Send. let join_handle = std::thread::spawn(move || { info!("Starting WLAN MLME main loop"); let mut executor = fasync::LocalExecutor::new().unwrap(); let future = Self::main_loop_thread( config, device, buf_provider, driver_event_sink_clone, driver_event_stream, startup_sender, ); executor.run_singlethreaded(future); }); let mut executor = fasync::LocalExecutor::new().unwrap(); let startup_result = executor.run_singlethreaded(startup_receiver); match startup_result.map_err(|e| Error::from(e)) { Ok(Ok(())) => Ok(MlmeHandle { driver_event_sink, internal: Some(MlmeHandleInternal::Real { join_handle }), }), Err(err) | Ok(Err(err)) => match join_handle.join() { Ok(()) => bail!("Failed to start the MLME event loop: {:?}", err), Err(panic_err) => { bail!("MLME event loop failed and then panicked: {}, {:?}", err, panic_err) } }, } } // Create an MLME in a test configuration. This MLME will never do anything unless it's progressed // using MlmeHandle::advance_fake_time and MlmeHandle::run_until_stalled. pub fn start_test( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, ) -> MlmeHandle { let executor = fasync::TestExecutor::new_with_fake_time().unwrap(); Self::start_test_with_executor(config, device, buf_provider, executor) } pub fn start_test_with_executor( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, mut executor: fasync::TestExecutor, ) -> MlmeHandle { let (driver_event_sink, driver_event_stream) = mpsc::unbounded(); let driver_event_sink_clone = driver_event_sink.clone(); let (startup_sender, mut startup_receiver) = oneshot::channel(); let mut future = Box::pin(Self::main_loop_thread( config, device, buf_provider, driver_event_sink_clone, driver_event_stream, startup_sender,
startup_receiver .try_recv() .unwrap() .expect("Test MLME setup stalled.") .expect("Test MLME setup failed."); MlmeHandle { driver_event_sink, internal: Some(MlmeHandleInternal::Fake { executor, future }), } } async fn main_loop_thread( config: T::Config, device: DeviceInterface, buf_provider: buffer::BufferProvider, driver_event_sink: mpsc::UnboundedSender<DriverEvent>, driver_event_stream: mpsc::UnboundedReceiver<DriverEvent>, startup_sender: oneshot::Sender<Result<(), Error>>, ) { let mut driver_event_sink = Box::new(DriverEventSink(driver_event_sink)); let ifc = device::WlanSoftmacIfcProtocol::new(driver_event_sink.as_mut()); // Indicate to the vendor driver that we can start sending and receiving info. Any messages received from the // driver before we start our MLME will be safely buffered in our driver_event_sink. // Note that device.start will copy relevant fields out of ifc, so dropping it after this is fine. // The returned value is the MLME server end of the channel wlanmevicemonitor created to connect MLME and SME. let mlme_protocol_handle_via_iface_creation = match device.start(&ifc) { Ok(handle) => handle, Err(e) => { // Failure to unwrap indicates a critical failure in the driver init thread. startup_sender.send(Err(anyhow!("device.start failed: {}", e))).unwrap(); return; } }; let channel = zx::Channel::from(mlme_protocol_handle_via_iface_creation); let server = fidl::endpoints::ServerEnd::<fidl_mlme::MlmeMarker>::new(channel); let (mlme_request_stream, control_handle) = match server.into_stream_and_control_handle() { Ok(res) => res, Err(e) => { // Failure to unwrap indicates a critical failure in the driver init thread. startup_sender .send(Err(anyhow!("Failed to get MLME request stream: {}", e))) .unwrap(); return; } }; let device_mac_sublayer_support = device.mac_sublayer_support(); let (minstrel_timer, minstrel_time_stream) = common::timer::create_timer(); let update_interval = if device_mac_sublayer_support.device.is_synthetic { MINSTREL_UPDATE_INTERVAL_HW_SIM } else { MINSTREL_UPDATE_INTERVAL }; let minstrel = if should_enable_minstrel(&device_mac_sublayer_support) { let timer_manager = MinstrelTimer { timer: minstrel_timer, current_timer: None }; let probe_sequence = probe_sequence::ProbeSequence::random_new(); Some(Arc::new(Mutex::new(minstrel::MinstrelRateSelector::new( timer_manager, update_interval, probe_sequence, )))) } else { None }; let new_device = Device::new(device, minstrel.clone(), control_handle); let (timer, time_stream) = common::timer::create_timer(); let mlme_impl = T::new(config, new_device, buf_provider, timer); let mlme = Self { mlme_impl, minstrel, mlme_request_stream, driver_event_stream, time_stream, minstrel_time_stream, }; // Startup is complete. Signal the main thread to proceed. // Failure to unwrap indicates a critical failure in the driver init thread. startup_sender.send(Ok(())).unwrap(); let result = Self::run_main_loop(mlme).await; match result { Ok(()) => info!("MLME event loop exited gracefully."), Err(e) => error!("MLME event loop exited with error: {:?}", e), } } /// Begin processing MLME events. /// Does not return until iface destruction is requested via DriverEvent::Stop, unless /// a critical error occurs. Note that MlmeHandle::stop will work in either case. 
pub async fn run_main_loop(mut self) -> Result<(), Error> { let mut timer_stream = common::timer::make_async_timed_event_stream(self.time_stream).fuse(); let mut minstrel_timer_stream = common::timer::make_async_timed_event_stream(self.minstrel_time_stream).fuse(); loop { select! { // Process requests from SME. mlme_request = self.mlme_request_stream.next() => match mlme_request { Some(req) => { match req { Ok(req) => { let method_name = req.method_name(); if let Err(e) = self.mlme_impl.handle_mlme_message(req) { info!("Failed to handle mlme {} request: {}", method_name, e); } } Err(e) => { info!("Failure while receiving mlme request: {}", e); } } } None => bail!("MLME request stream terminated unexpectedly."), }, // Process requests from our C++ drivers. driver_event = self.driver_event_stream.next() => match driver_event { Some(event) => match event { // DriverEvent::Stop indicates a safe shutdown. DriverEvent::Stop => return Ok(()), DriverEvent::MacFrameRx { bytes, rx_info } => { self.mlme_impl.handle_mac_frame_rx(&bytes[..], rx_info); } DriverEvent::EthFrameTx { bytes } => { if let Err(e) = self.mlme_impl.handle_eth_frame_tx(&bytes[..]) { // TODO(fxbug.dev/45464): Keep a counter of these failures. info!("Failed to handle eth frame: {}", e); } } DriverEvent::ScanComplete { status, scan_id } => { self.mlme_impl.handle_scan_complete(status, scan_id) }, DriverEvent::TxStatusReport { tx_status } => { if let Some(minstrel) = self.minstrel.as_ref() { minstrel.lock().handle_tx_status_report(&tx_status) } } DriverEvent::Status { status } => { self.mlme_impl.access_device().set_eth_status(status) } }, None => bail!("Driver event stream terminated unexpectedly."), }, timed_event = timer_stream.select_next_some() => { self.mlme_impl.handle_timeout(timed_event.id, timed_event.event); } _minstrel_timeout = minstrel_timer_stream.select_next_some() => { if let Some(minstrel) = self.minstrel.as_ref() { minstrel.lock().handle_timeout() } } } } } } #[cfg(test)] mod test_utils { use { super::*, banjo_fuchsia_hardware_wlan_associnfo as banjo_wlan_associnfo, banjo_fuchsia_wlan_common as banjo_common, fidl::endpoints::RequestStream, std::default::Default, }; #[derive(Copy, Clone, Debug)] pub struct MockWlanRxInfo { pub rx_flags: banjo_wlan_softmac::WlanRxInfoFlags, pub valid_fields: u32, pub phy: banjo_common::WlanPhyType, pub data_rate: u32, pub channel: banjo_common::WlanChannel, pub mcs: u8, pub rssi_dbm: i8, pub snr_dbh: i16, } impl Default for MockWlanRxInfo { fn default() -> Self { Self { valid_fields: banjo_wlan_associnfo::WlanRxInfoValid::CHAN_WIDTH.0 | banjo_wlan_associnfo::WlanRxInfoValid::RSSI.0 | banjo_wlan_associnfo::WlanRxInfoValid::SNR.0, channel: banjo_common::WlanChannel { primary: 1, cbw: banjo_common::ChannelBandwidth::CBW20, secondary80: 0, }, rssi_dbm: -40, snr_dbh: 35, // Default to 0 for these fields since there are no // other reasonable values to mock. rx_flags: banjo_wlan_softmac::WlanRxInfoFlags(0), phy: banjo_common::WlanPhyType::DSSS, data_rate: 0, mcs: 0, } } } impl From<MockWlanRxInfo> for banjo_wlan_softmac::WlanRxInfo { fn from(mock_rx_info: MockWlanRxInfo) -> banjo_wlan_softmac::WlanRxInfo { banjo_wlan_softmac::WlanRxInfo { rx_flags: mock_rx_info.rx_flags, valid_fields: mock_rx_info.valid_fields, phy: mock_rx_info.phy, data_rate: mock_rx_info.data_rate, channel: mock_rx_info.channel, mcs: mock_rx_info.mcs, rssi_dbm: mock_rx_info.rssi_dbm, snr_dbh: mock_rx_info.snr_dbh, } } } pub(crate) fn fake_control_handle( // We use this unused parameter to ensure that an executor exists. 
_exec: &fuchsia_async::TestExecutor, ) -> (fidl_mlme::MlmeControlHandle, fuchsia_zircon::Channel) { let (c1, c2) = fuchsia_zircon::Channel::create().unwrap(); let async_c1 = fidl::AsyncChannel::from_channel(c1).unwrap(); let request_stream = fidl_mlme::MlmeRequestStream::from_channel(async_c1); let control_handle = request_stream.control_handle(); (control_handle, c2) } pub struct FakeMlme { device: Device, } impl MlmeImpl for FakeMlme { type Config = (); type TimerEvent = (); fn new( _config: Self::Config, device: Device, _buf_provider: buffer::BufferProvider, _scheduler: common::timer::Timer<Self::TimerEvent>, ) -> Self { Self { device } } fn handle_mlme_message(&mut self, _msg: fidl_mlme::MlmeRequest) -> Result<(), Error> { unimplemented!() } fn handle_mac_frame_rx(&mut self, _bytes: &[u8], _rx_info: banjo_wlan_softmac::WlanRxInfo) { unimplemented!() } fn handle_eth_frame_tx(&mut self, _bytes: &[u8]) -> Result<(), Error> { unimplemented!() } fn handle_scan_complete(&mut self, _status: zx::Status, _scan_id: u64) { unimplemented!() } fn handle_timeout(&mut self, _event_id: common::timer::EventId, _event: Self::TimerEvent) { unimplemented!() } fn access_device(&mut self) -> &mut Device { &mut self.device } } } #[cfg(test)] mod tests { use {super::*, crate::device::test_utils::FakeDevice}; #[fuchsia::test] fn test_mlme_handle_use_after_stop() { let mut exec = fasync::TestExecutor::new().expect("failed to create an executor"); let fake_config = (); let mut fake_device = FakeDevice::new(&mut exec); let fake_buffer_provider = buffer::FakeBufferProvider::new(); let mut handle = Mlme::<test_utils::FakeMlme>::start_test_with_executor( fake_config, fake_device.as_raw_device(), fake_buffer_provider, exec, ); handle.stop(); handle .queue_eth_frame_tx(vec![0u8; 10]) .expect_err("Shouldn't be able to queue tx after stopping MLME"); } }
)); let _ = executor.run_until_stalled(&mut future.as_mut());
random_line_split
io_export_arm.py
# Armory Mesh Exporter # http://armory3d.org/ # # Based on Open Game Engine Exchange # http://opengex.org/ # Export plugin for Blender by Eric Lengyel # Copyright 2015, Terathon Software LLC # # This software is licensed under the Creative Commons # Attribution-ShareAlike 3.0 Unported License: # http://creativecommons.org/licenses/by-sa/3.0/deed.en_US bl_info = { "name": "Armory Mesh Exporter", "category": "Import-Export", "location": "File -> Export", "description": "Armory mesh data", "author": "Armory3D.org", "version": (2019, 6, 0), "blender": (2, 80, 0), "wiki_url": "http://armory3d.org/iron", "tracker_url": "https://github.com/armory3d/iron/issues" } from bpy_extras.io_utils import ExportHelper import os import bpy import math from mathutils import * import time import numpy as np NodeTypeNode = 0 NodeTypeBone = 1 NodeTypeMesh = 2 NodeTypeLight = 3 NodeTypeCamera = 4 NodeTypeSpeaker = 5 NodeTypeDecal = 6 NodeTypeProbe = 7 AnimationTypeSampled = 0 AnimationTypeLinear = 1 AnimationTypeBezier = 2 ExportEpsilon = 1.0e-6 structIdentifier = ["object", "bone_object", "mesh_object", "light_object", "camera_object", "speaker_object", "decal_object", "probe_object"] subtranslationName = ["xloc", "yloc", "zloc"] subrotationName = ["xrot", "yrot", "zrot"] subscaleName = ["xscl", "yscl", "zscl"] deltaSubtranslationName = ["dxloc", "dyloc", "dzloc"] deltaSubrotationName = ["dxrot", "dyrot", "dzrot"] deltaSubscaleName = ["dxscl", "dyscl", "dzscl"] axisName = ["x", "y", "z"] class ArmoryExporter(bpy.types.Operator, ExportHelper): '''Export to Armory format''' bl_idname = "export_scene.arm" bl_label = "Export Armory" filename_ext = ".arm" def write_matrix(self, matrix): return [matrix[0][0], matrix[0][1], matrix[0][2], matrix[0][3], matrix[1][0], matrix[1][1], matrix[1][2], matrix[1][3], matrix[2][0], matrix[2][1], matrix[2][2], matrix[2][3], matrix[3][0], matrix[3][1], matrix[3][2], matrix[3][3]] def write_mesh(self, bobject, o): self.output['mesh_datas'].append(o) def calc_aabb(self, bobject): aabb_center = 0.125 * sum((Vector(b) for b in bobject.bound_box), Vector()) bobject.data.arm_aabb = [ \ abs((bobject.bound_box[6][0] - bobject.bound_box[0][0]) / 2 + abs(aabb_center[0])) * 2, \ abs((bobject.bound_box[6][1] - bobject.bound_box[0][1]) / 2 + abs(aabb_center[1])) * 2, \ abs((bobject.bound_box[6][2] - bobject.bound_box[0][2]) / 2 + abs(aabb_center[2])) * 2 \ ] def export_mesh_data(self, exportMesh, bobject, o, has_armature=False): exportMesh.calc_normals_split() # exportMesh.calc_loop_triangles() loops = exportMesh.loops num_verts = len(loops) num_uv_layers = len(exportMesh.uv_layers) has_tex = num_uv_layers > 0 has_tex1 = num_uv_layers > 1 num_colors = len(exportMesh.vertex_colors) has_col = num_colors > 0 has_tang = has_tex pdata = np.empty(num_verts * 4, dtype='<f4') # p.xyz, n.z ndata = np.empty(num_verts * 2, dtype='<f4') # n.xy if has_tex: t0map = 0 # Get active uvmap t0data = np.empty(num_verts * 2, dtype='<f4') uv_layers = exportMesh.uv_layers if uv_layers != None: if 'UVMap_baked' in uv_layers: for i in range(0, len(uv_layers)): if uv_layers[i].name == 'UVMap_baked': t0map = i break else: for i in range(0, len(uv_layers)): if uv_layers[i].active_render: t0map = i break if has_tex1: t1map = 1 if t0map == 0 else 0 t1data = np.empty(num_verts * 2, dtype='<f4') # Scale for packed coords maxdim = 1.0 lay0 = uv_layers[t0map] # TODO: handle t1map for v in lay0.data: if abs(v.uv[0]) > maxdim: maxdim = abs(v.uv[0]) if abs(v.uv[1]) > maxdim: maxdim = abs(v.uv[1]) if maxdim > 1: o['scale_tex'] = 
maxdim invscale_tex = (1 / o['scale_tex']) * 32767 else: invscale_tex = 1 * 32767 if has_tang: exportMesh.calc_tangents(uvmap=lay0.name) tangdata = np.empty(num_verts * 3, dtype='<f4') if has_col: cdata = np.empty(num_verts * 3, dtype='<f4') # Scale for packed coords maxdim = max(bobject.data.arm_aabb[0], max(bobject.data.arm_aabb[1], bobject.data.arm_aabb[2])) if maxdim > 2: o['scale_pos'] = maxdim / 2 else: o['scale_pos'] = 1.0 if has_armature: # Allow up to 2x bigger bounds for skinned mesh o['scale_pos'] *= 2.0 scale_pos = o['scale_pos'] invscale_pos = (1 / scale_pos) * 32767 verts = exportMesh.vertices if has_tex: lay0 = exportMesh.uv_layers[t0map] if has_tex1: lay1 = exportMesh.uv_layers[t1map] for i, loop in enumerate(loops): v = verts[loop.vertex_index] co = v.co normal = loop.normal tang = loop.tangent i4 = i * 4 i2 = i * 2 pdata[i4 ] = co[0] pdata[i4 + 1] = co[1] pdata[i4 + 2] = co[2] pdata[i4 + 3] = normal[2] * scale_pos # Cancel scale ndata[i2 ] = normal[0] ndata[i2 + 1] = normal[1] if has_tex: uv = lay0.data[loop.index].uv t0data[i2 ] = uv[0] t0data[i2 + 1] = 1.0 - uv[1] # Reverse Y if has_tex1: uv = lay1.data[loop.index].uv t1data[i2 ] = uv[0] t1data[i2 + 1] = 1.0 - uv[1] if has_tang: i3 = i * 3 tangdata[i3 ] = tang[0] tangdata[i3 + 1] = tang[1] tangdata[i3 + 2] = tang[2] if has_col: i3 = i * 3 cdata[i3 ] = pow(v.col[0], 2.2) cdata[i3 + 1] = pow(v.col[1], 2.2) cdata[i3 + 2] = pow(v.col[2], 2.2) mats = exportMesh.materials poly_map = [] for i in range(max(len(mats), 1)): poly_map.append([]) for poly in exportMesh.polygons: poly_map[poly.material_index].append(poly) o['index_arrays'] = [] for index, polys in enumerate(poly_map): tris = 0 for poly in polys: tris += poly.loop_total - 2 if tris == 0: # No face assigned continue prim = np.empty(tris * 3, dtype='<i4') i = 0 for poly in polys: first = poly.loop_start total = poly.loop_total if total == 3: prim[i ] = loops[first ].index prim[i + 1] = loops[first + 1].index prim[i + 2] = loops[first + 2].index i += 3 else: for j in range(total - 2): prim[i ] = loops[first + total - 1].index prim[i + 1] = loops[first + j ].index prim[i + 2] = loops[first + j + 1 ].index i += 3 ia = {} ia['values'] = prim ia['material'] = 0 if len(mats) > 1: for i in range(len(mats)): # Multi-mat mesh if (mats[i] == mats[index]): # Default material for empty slots ia['material'] = i break o['index_arrays'].append(ia) # Pack pdata *= invscale_pos ndata *= 32767 pdata = np.array(pdata, dtype='<i2') ndata = np.array(ndata, dtype='<i2') if has_tex: t0data *= invscale_tex t0data = np.array(t0data, dtype='<i2') if has_tex1: t1data *= invscale_tex t1data = np.array(t1data, dtype='<i2') if has_col: cdata *= 32767 cdata = np.array(cdata, dtype='<i2') if has_tang: tangdata *= 32767 tangdata = np.array(tangdata, dtype='<i2') # Output o['vertex_arrays'] = [] o['vertex_arrays'].append({ 'attrib': 'pos', 'values': pdata }) o['vertex_arrays'].append({ 'attrib': 'nor', 'values': ndata }) if has_tex: o['vertex_arrays'].append({ 'attrib': 'tex', 'values': t0data }) if has_tex1: o['vertex_arrays'].append({ 'attrib': 'tex1', 'values': t1data }) if has_col: o['vertex_arrays'].append({ 'attrib': 'col', 'values': cdata }) if has_tang: o['vertex_arrays'].append({ 'attrib': 'tang', 'values': tangdata }) def export_mesh(self, bobject, scene): # This function exports a single mesh object print('Exporting mesh ' + bobject.data.name) o = {} o['name'] = bobject.name mesh = bobject.data armature = bobject.find_armature() apply_modifiers = not armature bobject_eval = 
bobject.evaluated_get(self.depsgraph) if apply_modifiers else bobject exportMesh = bobject_eval.to_mesh() self.calc_aabb(bobject) self.export_mesh_data(exportMesh, bobject, o, has_armature=armature != None) # if armature: # self.export_skin(bobject, armature, exportMesh, o) self.write_mesh(bobject, o) bobject_eval.to_mesh_clear() def export_objects(self, scene): meshes = [] self.output['mesh_datas'] = []; for o in scene.objects: if o.type == 'MESH' and o.data != None and o.data not in meshes: meshes.append(o.data) self.export_mesh(o, scene) def write_arm(self, filepath, output): with open(filepath, 'wb') as f: f.write(packb(output)) def execute(self, context): profile_time = time.time() self.depsgraph = context.evaluated_depsgraph_get() self.output = {} self.export_objects(context.scene) self.write_arm(self.filepath, self.output) print('Scene exported in ' + str(time.time() - profile_time)) return {'FINISHED'} def menu_func(self, context): self.layout.operator(ArmoryExporter.bl_idname, text="Armory (.arm)") def register(): bpy.utils.register_class(ArmoryExporter) bpy.types.TOPBAR_MT_file_export.append(menu_func) def unregister(): bpy.types.TOPBAR_MT_file_export.remove(menu_func) bpy.utils.unregister_class(ArmoryExporter) if __name__ == "__main__": register() # Msgpack parser with typed arrays # Based on u-msgpack-python v2.4.1 - v at sergeev.io # https://github.com/vsergeev/u-msgpack-python # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
# import struct import io import numpy as np def _pack_integer(obj, fp): if obj < 0: if obj >= -32: fp.write(struct.pack("b", obj)) elif obj >= -2**(8 - 1): fp.write(b"\xd0" + struct.pack("b", obj)) elif obj >= -2**(16 - 1): fp.write(b"\xd1" + struct.pack("<h", obj)) elif obj >= -2**(32 - 1): fp.write(b"\xd2" + struct.pack("<i", obj)) elif obj >= -2**(64 - 1): fp.write(b"\xd3" + struct.pack("<q", obj)) else: raise Exception("huge signed int") else: if obj <= 127: fp.write(struct.pack("B", obj)) elif obj <= 2**8 - 1: fp.write(b"\xcc" + struct.pack("B", obj)) elif obj <= 2**16 - 1: fp.write(b"\xcd" + struct.pack("<H", obj)) elif obj <= 2**32 - 1: fp.write(b"\xce" + struct.pack("<I", obj)) elif obj <= 2**64 - 1: fp.write(b"\xcf" + struct.pack("<Q", obj)) else: raise Exception("huge unsigned int") def _pack_nil(obj, fp): fp.write(b"\xc0") def _pack_boolean(obj, fp): fp.write(b"\xc3" if obj else b"\xc2") def _pack_float(obj, fp): # NOTE: forced 32-bit floats for Armory # fp.write(b"\xcb" + struct.pack("<d", obj)) # Double fp.write(b"\xca" + struct.pack("<f", obj)) def _pack_string(obj, fp): obj = obj.encode('utf-8') if len(obj) <= 31: fp.write(struct.pack("B", 0xa0 | len(obj)) + obj) elif len(obj) <= 2**8 - 1: fp.write(b"\xd9" + struct.pack("B", len(obj)) + obj) elif len(obj) <= 2**16 - 1: fp.write(b"\xda" + struct.pack("<H", len(obj)) + obj) elif len(obj) <= 2**32 - 1: fp.write(b"\xdb" + struct.pack("<I", len(obj)) + obj) else: raise Exception("huge string") def _pack_binary(obj, fp): if len(obj) <= 2**8 - 1: fp.write(b"\xc4" + struct.pack("B", len(obj)) + obj) elif len(obj) <= 2**16 - 1: fp.write(b"\xc5" + struct.pack("<H", len(obj)) + obj) elif len(obj) <= 2**32 - 1: fp.write(b"\xc6" + struct.pack("<I", len(obj)) + obj) else: raise Exception("huge binary string") def _pack_array(obj, fp): if len(obj) <= 15: fp.write(struct.pack("B", 0x90 | len(obj))) elif len(obj) <= 2**16 - 1: fp.write(b"\xdc" + struct.pack("<H", len(obj))) elif len(obj) <= 2**32 - 1: fp.write(b"\xdd" + struct.pack("<I", len(obj))) else: raise Exception("huge array") if len(obj) > 0 and isinstance(obj[0], float): fp.write(b"\xca") for e in obj: fp.write(struct.pack("<f", e)) elif len(obj) > 0 and isinstance(obj[0], bool): for e in obj: pack(e, fp) elif len(obj) > 0 and isinstance(obj[0], int): fp.write(b"\xd2") for e in obj: fp.write(struct.pack("<i", e)) # Float32 elif len(obj) > 0 and isinstance(obj[0], np.float32): fp.write(b"\xca") fp.write(obj.tobytes()) # Int32 elif len(obj) > 0 and isinstance(obj[0], np.int32): fp.write(b"\xd2") fp.write(obj.tobytes()) # Int16 elif len(obj) > 0 and isinstance(obj[0], np.int16): fp.write(b"\xd1") fp.write(obj.tobytes()) # Regular else: for e in obj: pack(e, fp) def
(obj, fp): if len(obj) <= 15: fp.write(struct.pack("B", 0x80 | len(obj))) elif len(obj) <= 2**16 - 1: fp.write(b"\xde" + struct.pack("<H", len(obj))) elif len(obj) <= 2**32 - 1: fp.write(b"\xdf" + struct.pack("<I", len(obj))) else: raise Exception("huge array") for k, v in obj.items(): pack(k, fp) pack(v, fp) def pack(obj, fp): if obj is None: _pack_nil(obj, fp) elif isinstance(obj, bool): _pack_boolean(obj, fp) elif isinstance(obj, int): _pack_integer(obj, fp) elif isinstance(obj, float): _pack_float(obj, fp) elif isinstance(obj, str): _pack_string(obj, fp) elif isinstance(obj, bytes): _pack_binary(obj, fp) elif isinstance(obj, list) or isinstance(obj, tuple) or isinstance(obj, np.ndarray): _pack_array(obj, fp) elif isinstance(obj, dict): _pack_map(obj, fp) else: raise Exception("unsupported type: %s" % str(type(obj))) def packb(obj): fp = io.BytesIO() pack(obj, fp) return fp.getvalue()
_pack_map
identifier_name
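A minimal sketch (hypothetical helper names, not part of io_export_arm.py) of the int16 position packing used in export_mesh_data above: coordinates are divided by scale_pos, scaled onto the 32767 range, and reconstructed later with the inverse rescale. It assumes the same scale_pos rule as the exporter (maxdim / 2 when maxdim > 2, else 1.0).

import numpy as np

def quantize_positions(coords, maxdim):
    # Mirror of the exporter's packing: scale_pos picks the range, invscale maps onto int16.
    scale_pos = maxdim / 2 if maxdim > 2 else 1.0
    invscale_pos = (1 / scale_pos) * 32767
    packed = np.array(np.asarray(coords, dtype='<f4') * invscale_pos, dtype='<i2')
    return packed, scale_pos

def dequantize_positions(packed, scale_pos):
    # Runtime-side reconstruction: undo the 32767 mapping, then reapply scale_pos.
    return packed.astype('<f4') / 32767 * scale_pos

packed, scale_pos = quantize_positions([1.5, -0.25, 1.9], maxdim=4.0)
restored = dequantize_positions(packed, scale_pos)
assert np.allclose(restored, [1.5, -0.25, 1.9], atol=1e-3)  # quantization error stays well below 1e-3 here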
io_export_arm.py
# Armory Mesh Exporter # http://armory3d.org/ # # Based on Open Game Engine Exchange # http://opengex.org/ # Export plugin for Blender by Eric Lengyel # Copyright 2015, Terathon Software LLC # # This software is licensed under the Creative Commons # Attribution-ShareAlike 3.0 Unported License: # http://creativecommons.org/licenses/by-sa/3.0/deed.en_US bl_info = { "name": "Armory Mesh Exporter", "category": "Import-Export", "location": "File -> Export", "description": "Armory mesh data", "author": "Armory3D.org", "version": (2019, 6, 0), "blender": (2, 80, 0), "wiki_url": "http://armory3d.org/iron", "tracker_url": "https://github.com/armory3d/iron/issues" } from bpy_extras.io_utils import ExportHelper import os import bpy import math from mathutils import * import time import numpy as np NodeTypeNode = 0 NodeTypeBone = 1 NodeTypeMesh = 2 NodeTypeLight = 3 NodeTypeCamera = 4 NodeTypeSpeaker = 5 NodeTypeDecal = 6 NodeTypeProbe = 7 AnimationTypeSampled = 0 AnimationTypeLinear = 1 AnimationTypeBezier = 2 ExportEpsilon = 1.0e-6 structIdentifier = ["object", "bone_object", "mesh_object", "light_object", "camera_object", "speaker_object", "decal_object", "probe_object"] subtranslationName = ["xloc", "yloc", "zloc"] subrotationName = ["xrot", "yrot", "zrot"] subscaleName = ["xscl", "yscl", "zscl"] deltaSubtranslationName = ["dxloc", "dyloc", "dzloc"] deltaSubrotationName = ["dxrot", "dyrot", "dzrot"] deltaSubscaleName = ["dxscl", "dyscl", "dzscl"] axisName = ["x", "y", "z"] class ArmoryExporter(bpy.types.Operator, ExportHelper): '''Export to Armory format''' bl_idname = "export_scene.arm" bl_label = "Export Armory" filename_ext = ".arm" def write_matrix(self, matrix): return [matrix[0][0], matrix[0][1], matrix[0][2], matrix[0][3], matrix[1][0], matrix[1][1], matrix[1][2], matrix[1][3], matrix[2][0], matrix[2][1], matrix[2][2], matrix[2][3], matrix[3][0], matrix[3][1], matrix[3][2], matrix[3][3]] def write_mesh(self, bobject, o): self.output['mesh_datas'].append(o) def calc_aabb(self, bobject): aabb_center = 0.125 * sum((Vector(b) for b in bobject.bound_box), Vector()) bobject.data.arm_aabb = [ \ abs((bobject.bound_box[6][0] - bobject.bound_box[0][0]) / 2 + abs(aabb_center[0])) * 2, \ abs((bobject.bound_box[6][1] - bobject.bound_box[0][1]) / 2 + abs(aabb_center[1])) * 2, \ abs((bobject.bound_box[6][2] - bobject.bound_box[0][2]) / 2 + abs(aabb_center[2])) * 2 \ ] def export_mesh_data(self, exportMesh, bobject, o, has_armature=False): exportMesh.calc_normals_split() # exportMesh.calc_loop_triangles() loops = exportMesh.loops num_verts = len(loops) num_uv_layers = len(exportMesh.uv_layers) has_tex = num_uv_layers > 0 has_tex1 = num_uv_layers > 1 num_colors = len(exportMesh.vertex_colors) has_col = num_colors > 0 has_tang = has_tex pdata = np.empty(num_verts * 4, dtype='<f4') # p.xyz, n.z ndata = np.empty(num_verts * 2, dtype='<f4') # n.xy if has_tex: t0map = 0 # Get active uvmap t0data = np.empty(num_verts * 2, dtype='<f4') uv_layers = exportMesh.uv_layers if uv_layers != None: if 'UVMap_baked' in uv_layers: for i in range(0, len(uv_layers)): if uv_layers[i].name == 'UVMap_baked': t0map = i break else: for i in range(0, len(uv_layers)): if uv_layers[i].active_render: t0map = i break if has_tex1: t1map = 1 if t0map == 0 else 0 t1data = np.empty(num_verts * 2, dtype='<f4') # Scale for packed coords maxdim = 1.0 lay0 = uv_layers[t0map] # TODO: handle t1map for v in lay0.data: if abs(v.uv[0]) > maxdim: maxdim = abs(v.uv[0]) if abs(v.uv[1]) > maxdim: maxdim = abs(v.uv[1]) if maxdim > 1: o['scale_tex'] = 
maxdim invscale_tex = (1 / o['scale_tex']) * 32767 else: invscale_tex = 1 * 32767 if has_tang: exportMesh.calc_tangents(uvmap=lay0.name) tangdata = np.empty(num_verts * 3, dtype='<f4') if has_col: cdata = np.empty(num_verts * 3, dtype='<f4') # Scale for packed coords maxdim = max(bobject.data.arm_aabb[0], max(bobject.data.arm_aabb[1], bobject.data.arm_aabb[2])) if maxdim > 2: o['scale_pos'] = maxdim / 2 else: o['scale_pos'] = 1.0 if has_armature: # Allow up to 2x bigger bounds for skinned mesh o['scale_pos'] *= 2.0 scale_pos = o['scale_pos'] invscale_pos = (1 / scale_pos) * 32767 verts = exportMesh.vertices if has_tex: lay0 = exportMesh.uv_layers[t0map] if has_tex1: lay1 = exportMesh.uv_layers[t1map] for i, loop in enumerate(loops): v = verts[loop.vertex_index] co = v.co normal = loop.normal tang = loop.tangent i4 = i * 4 i2 = i * 2 pdata[i4 ] = co[0] pdata[i4 + 1] = co[1] pdata[i4 + 2] = co[2] pdata[i4 + 3] = normal[2] * scale_pos # Cancel scale ndata[i2 ] = normal[0] ndata[i2 + 1] = normal[1] if has_tex: uv = lay0.data[loop.index].uv t0data[i2 ] = uv[0] t0data[i2 + 1] = 1.0 - uv[1] # Reverse Y if has_tex1: uv = lay1.data[loop.index].uv t1data[i2 ] = uv[0] t1data[i2 + 1] = 1.0 - uv[1] if has_tang: i3 = i * 3 tangdata[i3 ] = tang[0] tangdata[i3 + 1] = tang[1] tangdata[i3 + 2] = tang[2] if has_col: i3 = i * 3 cdata[i3 ] = pow(v.col[0], 2.2) cdata[i3 + 1] = pow(v.col[1], 2.2) cdata[i3 + 2] = pow(v.col[2], 2.2) mats = exportMesh.materials poly_map = [] for i in range(max(len(mats), 1)): poly_map.append([]) for poly in exportMesh.polygons: poly_map[poly.material_index].append(poly) o['index_arrays'] = [] for index, polys in enumerate(poly_map): tris = 0 for poly in polys: tris += poly.loop_total - 2 if tris == 0: # No face assigned continue prim = np.empty(tris * 3, dtype='<i4') i = 0 for poly in polys: first = poly.loop_start total = poly.loop_total if total == 3: prim[i ] = loops[first ].index prim[i + 1] = loops[first + 1].index prim[i + 2] = loops[first + 2].index i += 3 else: for j in range(total - 2): prim[i ] = loops[first + total - 1].index prim[i + 1] = loops[first + j ].index prim[i + 2] = loops[first + j + 1 ].index i += 3 ia = {} ia['values'] = prim ia['material'] = 0 if len(mats) > 1: for i in range(len(mats)): # Multi-mat mesh if (mats[i] == mats[index]): # Default material for empty slots ia['material'] = i break o['index_arrays'].append(ia) # Pack pdata *= invscale_pos ndata *= 32767 pdata = np.array(pdata, dtype='<i2') ndata = np.array(ndata, dtype='<i2') if has_tex: t0data *= invscale_tex t0data = np.array(t0data, dtype='<i2') if has_tex1: t1data *= invscale_tex t1data = np.array(t1data, dtype='<i2') if has_col: cdata *= 32767 cdata = np.array(cdata, dtype='<i2') if has_tang: tangdata *= 32767 tangdata = np.array(tangdata, dtype='<i2') # Output o['vertex_arrays'] = [] o['vertex_arrays'].append({ 'attrib': 'pos', 'values': pdata }) o['vertex_arrays'].append({ 'attrib': 'nor', 'values': ndata }) if has_tex: o['vertex_arrays'].append({ 'attrib': 'tex', 'values': t0data }) if has_tex1: o['vertex_arrays'].append({ 'attrib': 'tex1', 'values': t1data }) if has_col: o['vertex_arrays'].append({ 'attrib': 'col', 'values': cdata }) if has_tang: o['vertex_arrays'].append({ 'attrib': 'tang', 'values': tangdata }) def export_mesh(self, bobject, scene): # This function exports a single mesh object print('Exporting mesh ' + bobject.data.name) o = {} o['name'] = bobject.name mesh = bobject.data armature = bobject.find_armature() apply_modifiers = not armature bobject_eval = 
bobject.evaluated_get(self.depsgraph) if apply_modifiers else bobject exportMesh = bobject_eval.to_mesh() self.calc_aabb(bobject) self.export_mesh_data(exportMesh, bobject, o, has_armature=armature != None) # if armature: # self.export_skin(bobject, armature, exportMesh, o) self.write_mesh(bobject, o) bobject_eval.to_mesh_clear() def export_objects(self, scene): meshes = [] self.output['mesh_datas'] = []; for o in scene.objects: if o.type == 'MESH' and o.data != None and o.data not in meshes: meshes.append(o.data) self.export_mesh(o, scene) def write_arm(self, filepath, output): with open(filepath, 'wb') as f: f.write(packb(output)) def execute(self, context): profile_time = time.time() self.depsgraph = context.evaluated_depsgraph_get() self.output = {} self.export_objects(context.scene) self.write_arm(self.filepath, self.output) print('Scene exported in ' + str(time.time() - profile_time)) return {'FINISHED'} def menu_func(self, context): self.layout.operator(ArmoryExporter.bl_idname, text="Armory (.arm)") def register(): bpy.utils.register_class(ArmoryExporter) bpy.types.TOPBAR_MT_file_export.append(menu_func) def unregister(): bpy.types.TOPBAR_MT_file_export.remove(menu_func) bpy.utils.unregister_class(ArmoryExporter) if __name__ == "__main__": register() # Msgpack parser with typed arrays # Based on u-msgpack-python v2.4.1 - v at sergeev.io # https://github.com/vsergeev/u-msgpack-python # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
# import struct import io import numpy as np def _pack_integer(obj, fp): if obj < 0: if obj >= -32: fp.write(struct.pack("b", obj)) elif obj >= -2**(8 - 1): fp.write(b"\xd0" + struct.pack("b", obj)) elif obj >= -2**(16 - 1): fp.write(b"\xd1" + struct.pack("<h", obj)) elif obj >= -2**(32 - 1): fp.write(b"\xd2" + struct.pack("<i", obj)) elif obj >= -2**(64 - 1): fp.write(b"\xd3" + struct.pack("<q", obj)) else: raise Exception("huge signed int") else: if obj <= 127: fp.write(struct.pack("B", obj)) elif obj <= 2**8 - 1: fp.write(b"\xcc" + struct.pack("B", obj)) elif obj <= 2**16 - 1: fp.write(b"\xcd" + struct.pack("<H", obj)) elif obj <= 2**32 - 1: fp.write(b"\xce" + struct.pack("<I", obj)) elif obj <= 2**64 - 1: fp.write(b"\xcf" + struct.pack("<Q", obj)) else: raise Exception("huge unsigned int") def _pack_nil(obj, fp): fp.write(b"\xc0") def _pack_boolean(obj, fp): fp.write(b"\xc3" if obj else b"\xc2") def _pack_float(obj, fp): # NOTE: forced 32-bit floats for Armory # fp.write(b"\xcb" + struct.pack("<d", obj)) # Double fp.write(b"\xca" + struct.pack("<f", obj)) def _pack_string(obj, fp): obj = obj.encode('utf-8') if len(obj) <= 31: fp.write(struct.pack("B", 0xa0 | len(obj)) + obj) elif len(obj) <= 2**8 - 1: fp.write(b"\xd9" + struct.pack("B", len(obj)) + obj) elif len(obj) <= 2**16 - 1: fp.write(b"\xda" + struct.pack("<H", len(obj)) + obj) elif len(obj) <= 2**32 - 1: fp.write(b"\xdb" + struct.pack("<I", len(obj)) + obj) else: raise Exception("huge string") def _pack_binary(obj, fp): if len(obj) <= 2**8 - 1: fp.write(b"\xc4" + struct.pack("B", len(obj)) + obj) elif len(obj) <= 2**16 - 1: fp.write(b"\xc5" + struct.pack("<H", len(obj)) + obj) elif len(obj) <= 2**32 - 1: fp.write(b"\xc6" + struct.pack("<I", len(obj)) + obj) else: raise Exception("huge binary string") def _pack_array(obj, fp): if len(obj) <= 15: fp.write(struct.pack("B", 0x90 | len(obj))) elif len(obj) <= 2**16 - 1: fp.write(b"\xdc" + struct.pack("<H", len(obj))) elif len(obj) <= 2**32 - 1: fp.write(b"\xdd" + struct.pack("<I", len(obj))) else: raise Exception("huge array") if len(obj) > 0 and isinstance(obj[0], float): fp.write(b"\xca") for e in obj: fp.write(struct.pack("<f", e)) elif len(obj) > 0 and isinstance(obj[0], bool): for e in obj: pack(e, fp) elif len(obj) > 0 and isinstance(obj[0], int): fp.write(b"\xd2") for e in obj: fp.write(struct.pack("<i", e)) # Float32 elif len(obj) > 0 and isinstance(obj[0], np.float32): fp.write(b"\xca") fp.write(obj.tobytes()) # Int32 elif len(obj) > 0 and isinstance(obj[0], np.int32): fp.write(b"\xd2") fp.write(obj.tobytes()) # Int16 elif len(obj) > 0 and isinstance(obj[0], np.int16): fp.write(b"\xd1") fp.write(obj.tobytes()) # Regular else: for e in obj: pack(e, fp) def _pack_map(obj, fp): if len(obj) <= 15: fp.write(struct.pack("B", 0x80 | len(obj))) elif len(obj) <= 2**16 - 1: fp.write(b"\xde" + struct.pack("<H", len(obj))) elif len(obj) <= 2**32 - 1: fp.write(b"\xdf" + struct.pack("<I", len(obj))) else: raise Exception("huge array") for k, v in obj.items(): pack(k, fp) pack(v, fp) def pack(obj, fp): if obj is None: _pack_nil(obj, fp) elif isinstance(obj, bool): _pack_boolean(obj, fp) elif isinstance(obj, int): _pack_integer(obj, fp) elif isinstance(obj, float): _pack_float(obj, fp) elif isinstance(obj, str): _pack_string(obj, fp) elif isinstance(obj, bytes): _pack_binary(obj, fp) elif isinstance(obj, list) or isinstance(obj, tuple) or isinstance(obj, np.ndarray): _pack_array(obj, fp) elif isinstance(obj, dict):
else: raise Exception("unsupported type: %s" % str(type(obj))) def packb(obj): fp = io.BytesIO() pack(obj, fp) return fp.getvalue()
_pack_map(obj, fp)
conditional_block
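A minimal usage sketch of the typed-array msgpack writer defined above: packing a small vertex-array dict with packb and checking the leading bytes. It assumes Python 3.7+ dict ordering and that numpy int16 scalars are not instances of the built-in int, so _pack_array takes its np.int16 branch.

import numpy as np

data = {'attrib': 'pos', 'values': np.array([1, 2, 3], dtype='<i2')}
blob = packb(data)

# 0x82          -> fixmap with 2 entries
# 0xa6 'attrib' -> fixstr key, 0xa3 'pos' -> fixstr value
# 0xa6 'values' -> fixstr key
# 0x93 0xd1 ... -> fixarray(3), int16 type marker, then the raw little-endian element bytes
assert blob.startswith(b'\x82\xa6attrib\xa3pos\xa6values\x93\xd1')
assert blob.endswith(b'\x01\x00\x02\x00\x03\x00')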
io_export_arm.py
# Armory Mesh Exporter # http://armory3d.org/ # # Based on Open Game Engine Exchange # http://opengex.org/ # Export plugin for Blender by Eric Lengyel # Copyright 2015, Terathon Software LLC # # This software is licensed under the Creative Commons # Attribution-ShareAlike 3.0 Unported License: # http://creativecommons.org/licenses/by-sa/3.0/deed.en_US bl_info = { "name": "Armory Mesh Exporter", "category": "Import-Export", "location": "File -> Export", "description": "Armory mesh data", "author": "Armory3D.org", "version": (2019, 6, 0), "blender": (2, 80, 0), "wiki_url": "http://armory3d.org/iron", "tracker_url": "https://github.com/armory3d/iron/issues" } from bpy_extras.io_utils import ExportHelper import os import bpy import math from mathutils import * import time import numpy as np NodeTypeNode = 0 NodeTypeBone = 1 NodeTypeMesh = 2 NodeTypeLight = 3 NodeTypeCamera = 4 NodeTypeSpeaker = 5 NodeTypeDecal = 6 NodeTypeProbe = 7 AnimationTypeSampled = 0 AnimationTypeLinear = 1 AnimationTypeBezier = 2 ExportEpsilon = 1.0e-6 structIdentifier = ["object", "bone_object", "mesh_object", "light_object", "camera_object", "speaker_object", "decal_object", "probe_object"] subtranslationName = ["xloc", "yloc", "zloc"] subrotationName = ["xrot", "yrot", "zrot"] subscaleName = ["xscl", "yscl", "zscl"] deltaSubtranslationName = ["dxloc", "dyloc", "dzloc"] deltaSubrotationName = ["dxrot", "dyrot", "dzrot"] deltaSubscaleName = ["dxscl", "dyscl", "dzscl"] axisName = ["x", "y", "z"] class ArmoryExporter(bpy.types.Operator, ExportHelper): '''Export to Armory format''' bl_idname = "export_scene.arm" bl_label = "Export Armory" filename_ext = ".arm" def write_matrix(self, matrix): return [matrix[0][0], matrix[0][1], matrix[0][2], matrix[0][3], matrix[1][0], matrix[1][1], matrix[1][2], matrix[1][3], matrix[2][0], matrix[2][1], matrix[2][2], matrix[2][3], matrix[3][0], matrix[3][1], matrix[3][2], matrix[3][3]] def write_mesh(self, bobject, o): self.output['mesh_datas'].append(o) def calc_aabb(self, bobject): aabb_center = 0.125 * sum((Vector(b) for b in bobject.bound_box), Vector()) bobject.data.arm_aabb = [ \ abs((bobject.bound_box[6][0] - bobject.bound_box[0][0]) / 2 + abs(aabb_center[0])) * 2, \ abs((bobject.bound_box[6][1] - bobject.bound_box[0][1]) / 2 + abs(aabb_center[1])) * 2, \ abs((bobject.bound_box[6][2] - bobject.bound_box[0][2]) / 2 + abs(aabb_center[2])) * 2 \ ] def export_mesh_data(self, exportMesh, bobject, o, has_armature=False): exportMesh.calc_normals_split() # exportMesh.calc_loop_triangles() loops = exportMesh.loops num_verts = len(loops) num_uv_layers = len(exportMesh.uv_layers) has_tex = num_uv_layers > 0 has_tex1 = num_uv_layers > 1 num_colors = len(exportMesh.vertex_colors) has_col = num_colors > 0 has_tang = has_tex pdata = np.empty(num_verts * 4, dtype='<f4') # p.xyz, n.z ndata = np.empty(num_verts * 2, dtype='<f4') # n.xy if has_tex: t0map = 0 # Get active uvmap t0data = np.empty(num_verts * 2, dtype='<f4') uv_layers = exportMesh.uv_layers if uv_layers != None: if 'UVMap_baked' in uv_layers: for i in range(0, len(uv_layers)): if uv_layers[i].name == 'UVMap_baked': t0map = i break else: for i in range(0, len(uv_layers)): if uv_layers[i].active_render: t0map = i break if has_tex1: t1map = 1 if t0map == 0 else 0 t1data = np.empty(num_verts * 2, dtype='<f4') # Scale for packed coords maxdim = 1.0 lay0 = uv_layers[t0map] # TODO: handle t1map for v in lay0.data: if abs(v.uv[0]) > maxdim: maxdim = abs(v.uv[0]) if abs(v.uv[1]) > maxdim: maxdim = abs(v.uv[1]) if maxdim > 1: o['scale_tex'] = 
maxdim invscale_tex = (1 / o['scale_tex']) * 32767 else: invscale_tex = 1 * 32767 if has_tang: exportMesh.calc_tangents(uvmap=lay0.name) tangdata = np.empty(num_verts * 3, dtype='<f4') if has_col: cdata = np.empty(num_verts * 3, dtype='<f4') # Scale for packed coords maxdim = max(bobject.data.arm_aabb[0], max(bobject.data.arm_aabb[1], bobject.data.arm_aabb[2])) if maxdim > 2: o['scale_pos'] = maxdim / 2 else: o['scale_pos'] = 1.0 if has_armature: # Allow up to 2x bigger bounds for skinned mesh o['scale_pos'] *= 2.0 scale_pos = o['scale_pos'] invscale_pos = (1 / scale_pos) * 32767 verts = exportMesh.vertices if has_tex: lay0 = exportMesh.uv_layers[t0map] if has_tex1: lay1 = exportMesh.uv_layers[t1map] for i, loop in enumerate(loops): v = verts[loop.vertex_index] co = v.co normal = loop.normal tang = loop.tangent i4 = i * 4 i2 = i * 2 pdata[i4 ] = co[0] pdata[i4 + 1] = co[1] pdata[i4 + 2] = co[2] pdata[i4 + 3] = normal[2] * scale_pos # Cancel scale ndata[i2 ] = normal[0] ndata[i2 + 1] = normal[1] if has_tex: uv = lay0.data[loop.index].uv t0data[i2 ] = uv[0] t0data[i2 + 1] = 1.0 - uv[1] # Reverse Y if has_tex1: uv = lay1.data[loop.index].uv t1data[i2 ] = uv[0] t1data[i2 + 1] = 1.0 - uv[1] if has_tang: i3 = i * 3 tangdata[i3 ] = tang[0] tangdata[i3 + 1] = tang[1] tangdata[i3 + 2] = tang[2] if has_col: i3 = i * 3 cdata[i3 ] = pow(v.col[0], 2.2) cdata[i3 + 1] = pow(v.col[1], 2.2) cdata[i3 + 2] = pow(v.col[2], 2.2) mats = exportMesh.materials poly_map = [] for i in range(max(len(mats), 1)): poly_map.append([]) for poly in exportMesh.polygons: poly_map[poly.material_index].append(poly) o['index_arrays'] = [] for index, polys in enumerate(poly_map): tris = 0 for poly in polys: tris += poly.loop_total - 2 if tris == 0: # No face assigned continue prim = np.empty(tris * 3, dtype='<i4') i = 0 for poly in polys: first = poly.loop_start total = poly.loop_total if total == 3: prim[i ] = loops[first ].index prim[i + 1] = loops[first + 1].index prim[i + 2] = loops[first + 2].index i += 3 else: for j in range(total - 2): prim[i ] = loops[first + total - 1].index prim[i + 1] = loops[first + j ].index prim[i + 2] = loops[first + j + 1 ].index i += 3 ia = {} ia['values'] = prim ia['material'] = 0 if len(mats) > 1: for i in range(len(mats)): # Multi-mat mesh if (mats[i] == mats[index]): # Default material for empty slots ia['material'] = i break o['index_arrays'].append(ia) # Pack pdata *= invscale_pos ndata *= 32767 pdata = np.array(pdata, dtype='<i2') ndata = np.array(ndata, dtype='<i2') if has_tex: t0data *= invscale_tex t0data = np.array(t0data, dtype='<i2') if has_tex1: t1data *= invscale_tex t1data = np.array(t1data, dtype='<i2') if has_col: cdata *= 32767 cdata = np.array(cdata, dtype='<i2') if has_tang: tangdata *= 32767 tangdata = np.array(tangdata, dtype='<i2') # Output o['vertex_arrays'] = [] o['vertex_arrays'].append({ 'attrib': 'pos', 'values': pdata }) o['vertex_arrays'].append({ 'attrib': 'nor', 'values': ndata }) if has_tex: o['vertex_arrays'].append({ 'attrib': 'tex', 'values': t0data }) if has_tex1: o['vertex_arrays'].append({ 'attrib': 'tex1', 'values': t1data }) if has_col: o['vertex_arrays'].append({ 'attrib': 'col', 'values': cdata }) if has_tang: o['vertex_arrays'].append({ 'attrib': 'tang', 'values': tangdata }) def export_mesh(self, bobject, scene): # This function exports a single mesh object print('Exporting mesh ' + bobject.data.name) o = {} o['name'] = bobject.name mesh = bobject.data armature = bobject.find_armature() apply_modifiers = not armature bobject_eval = 
bobject.evaluated_get(self.depsgraph) if apply_modifiers else bobject exportMesh = bobject_eval.to_mesh() self.calc_aabb(bobject) self.export_mesh_data(exportMesh, bobject, o, has_armature=armature != None) # if armature: # self.export_skin(bobject, armature, exportMesh, o) self.write_mesh(bobject, o) bobject_eval.to_mesh_clear() def export_objects(self, scene): meshes = [] self.output['mesh_datas'] = []; for o in scene.objects: if o.type == 'MESH' and o.data != None and o.data not in meshes: meshes.append(o.data) self.export_mesh(o, scene) def write_arm(self, filepath, output): with open(filepath, 'wb') as f: f.write(packb(output)) def execute(self, context): profile_time = time.time() self.depsgraph = context.evaluated_depsgraph_get() self.output = {} self.export_objects(context.scene) self.write_arm(self.filepath, self.output) print('Scene exported in ' + str(time.time() - profile_time)) return {'FINISHED'} def menu_func(self, context): self.layout.operator(ArmoryExporter.bl_idname, text="Armory (.arm)") def register(): bpy.utils.register_class(ArmoryExporter) bpy.types.TOPBAR_MT_file_export.append(menu_func) def unregister(): bpy.types.TOPBAR_MT_file_export.remove(menu_func) bpy.utils.unregister_class(ArmoryExporter) if __name__ == "__main__": register() # Msgpack parser with typed arrays # Based on u-msgpack-python v2.4.1 - v at sergeev.io # https://github.com/vsergeev/u-msgpack-python # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # import struct import io import numpy as np def _pack_integer(obj, fp): if obj < 0: if obj >= -32: fp.write(struct.pack("b", obj)) elif obj >= -2**(8 - 1): fp.write(b"\xd0" + struct.pack("b", obj)) elif obj >= -2**(16 - 1): fp.write(b"\xd1" + struct.pack("<h", obj)) elif obj >= -2**(32 - 1): fp.write(b"\xd2" + struct.pack("<i", obj)) elif obj >= -2**(64 - 1): fp.write(b"\xd3" + struct.pack("<q", obj)) else: raise Exception("huge signed int") else: if obj <= 127: fp.write(struct.pack("B", obj)) elif obj <= 2**8 - 1: fp.write(b"\xcc" + struct.pack("B", obj)) elif obj <= 2**16 - 1: fp.write(b"\xcd" + struct.pack("<H", obj)) elif obj <= 2**32 - 1: fp.write(b"\xce" + struct.pack("<I", obj)) elif obj <= 2**64 - 1: fp.write(b"\xcf" + struct.pack("<Q", obj)) else: raise Exception("huge unsigned int") def _pack_nil(obj, fp): fp.write(b"\xc0") def _pack_boolean(obj, fp): fp.write(b"\xc3" if obj else b"\xc2") def _pack_float(obj, fp): # NOTE: forced 32-bit floats for Armory # fp.write(b"\xcb" + struct.pack("<d", obj)) # Double
def _pack_string(obj, fp): obj = obj.encode('utf-8') if len(obj) <= 31: fp.write(struct.pack("B", 0xa0 | len(obj)) + obj) elif len(obj) <= 2**8 - 1: fp.write(b"\xd9" + struct.pack("B", len(obj)) + obj) elif len(obj) <= 2**16 - 1: fp.write(b"\xda" + struct.pack("<H", len(obj)) + obj) elif len(obj) <= 2**32 - 1: fp.write(b"\xdb" + struct.pack("<I", len(obj)) + obj) else: raise Exception("huge string") def _pack_binary(obj, fp): if len(obj) <= 2**8 - 1: fp.write(b"\xc4" + struct.pack("B", len(obj)) + obj) elif len(obj) <= 2**16 - 1: fp.write(b"\xc5" + struct.pack("<H", len(obj)) + obj) elif len(obj) <= 2**32 - 1: fp.write(b"\xc6" + struct.pack("<I", len(obj)) + obj) else: raise Exception("huge binary string") def _pack_array(obj, fp): if len(obj) <= 15: fp.write(struct.pack("B", 0x90 | len(obj))) elif len(obj) <= 2**16 - 1: fp.write(b"\xdc" + struct.pack("<H", len(obj))) elif len(obj) <= 2**32 - 1: fp.write(b"\xdd" + struct.pack("<I", len(obj))) else: raise Exception("huge array") if len(obj) > 0 and isinstance(obj[0], float): fp.write(b"\xca") for e in obj: fp.write(struct.pack("<f", e)) elif len(obj) > 0 and isinstance(obj[0], bool): for e in obj: pack(e, fp) elif len(obj) > 0 and isinstance(obj[0], int): fp.write(b"\xd2") for e in obj: fp.write(struct.pack("<i", e)) # Float32 elif len(obj) > 0 and isinstance(obj[0], np.float32): fp.write(b"\xca") fp.write(obj.tobytes()) # Int32 elif len(obj) > 0 and isinstance(obj[0], np.int32): fp.write(b"\xd2") fp.write(obj.tobytes()) # Int16 elif len(obj) > 0 and isinstance(obj[0], np.int16): fp.write(b"\xd1") fp.write(obj.tobytes()) # Regular else: for e in obj: pack(e, fp) def _pack_map(obj, fp): if len(obj) <= 15: fp.write(struct.pack("B", 0x80 | len(obj))) elif len(obj) <= 2**16 - 1: fp.write(b"\xde" + struct.pack("<H", len(obj))) elif len(obj) <= 2**32 - 1: fp.write(b"\xdf" + struct.pack("<I", len(obj))) else: raise Exception("huge array") for k, v in obj.items(): pack(k, fp) pack(v, fp) def pack(obj, fp): if obj is None: _pack_nil(obj, fp) elif isinstance(obj, bool): _pack_boolean(obj, fp) elif isinstance(obj, int): _pack_integer(obj, fp) elif isinstance(obj, float): _pack_float(obj, fp) elif isinstance(obj, str): _pack_string(obj, fp) elif isinstance(obj, bytes): _pack_binary(obj, fp) elif isinstance(obj, list) or isinstance(obj, tuple) or isinstance(obj, np.ndarray): _pack_array(obj, fp) elif isinstance(obj, dict): _pack_map(obj, fp) else: raise Exception("unsupported type: %s" % str(type(obj))) def packb(obj): fp = io.BytesIO() pack(obj, fp) return fp.getvalue()
fp.write(b"\xca" + struct.pack("<f", obj))
identifier_body
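A minimal check of the forced 32-bit float behaviour noted in _pack_float above, using the same packb helper; the byte layout follows this file's little-endian '<f' packing rather than standard big-endian msgpack.

import struct

blob = packb(3.14159265358979)
assert blob[0:1] == b'\xca' and len(blob) == 5  # one marker byte plus four float32 bytes
value = struct.unpack('<f', blob[1:])[0]
assert abs(value - 3.14159265358979) < 1e-6     # double precision is lost, float32 precision kept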