repository_name (string, 7-55 chars) | func_path_in_repository (string, 4-223 chars) | func_name (string, 1-134 chars) | whole_func_string (string, 75-104k chars) | language (1 class: python) | func_code_string (string, 75-104k chars) | func_code_tokens (sequence, 19-28.4k tokens) | func_documentation_string (string, 1-46.9k chars) | func_documentation_tokens (sequence, 1-1.97k tokens) | split_name (1 class: train) | func_code_url (string, 87-315 chars) |
---|---|---|---|---|---|---|---|---|---|---|
jleinonen/pytmatrix | pytmatrix/radar.py | Kdp | def Kdp(scatterer):
"""
Specific differential phase (K_dp) for the current setup.
Args:
scatterer: a Scatterer instance.
Returns:
K_dp [deg/km].
NOTE: This only returns the correct value if the particle diameter and
wavelength are given in [mm]. The scatterer object should be set to
forward scattering geometry before calling this function.
"""
if (scatterer.thet0 != scatterer.thet) or \
(scatterer.phi0 != scatterer.phi):
raise ValueError("A forward scattering geometry is needed to " + \
"compute the specific differential phase.")
S = scatterer.get_S()
return 1e-3 * (180.0/np.pi) * scatterer.wavelength * (S[1,1]-S[0,0]).real | python | def Kdp(scatterer):
"""
Specific differential phase (K_dp) for the current setup.
Args:
scatterer: a Scatterer instance.
Returns:
K_dp [deg/km].
NOTE: This only returns the correct value if the particle diameter and
wavelength are given in [mm]. The scatterer object should be set to
forward scattering geometry before calling this function.
"""
if (scatterer.thet0 != scatterer.thet) or \
(scatterer.phi0 != scatterer.phi):
raise ValueError("A forward scattering geometry is needed to " + \
"compute the specific differential phase.")
S = scatterer.get_S()
return 1e-3 * (180.0/np.pi) * scatterer.wavelength * (S[1,1]-S[0,0]).real | [
"def",
"Kdp",
"(",
"scatterer",
")",
":",
"if",
"(",
"scatterer",
".",
"thet0",
"!=",
"scatterer",
".",
"thet",
")",
"or",
"(",
"scatterer",
".",
"phi0",
"!=",
"scatterer",
".",
"phi",
")",
":",
"raise",
"ValueError",
"(",
"\"A forward scattering geometry is needed to \"",
"+",
"\"compute the specific differential phase.\"",
")",
"S",
"=",
"scatterer",
".",
"get_S",
"(",
")",
"return",
"1e-3",
"*",
"(",
"180.0",
"/",
"np",
".",
"pi",
")",
"*",
"scatterer",
".",
"wavelength",
"*",
"(",
"S",
"[",
"1",
",",
"1",
"]",
"-",
"S",
"[",
"0",
",",
"0",
"]",
")",
".",
"real"
] | Specific differential phase (K_dp) for the current setup.
Args:
scatterer: a Scatterer instance.
Returns:
K_dp [deg/km].
NOTE: This only returns the correct value if the particle diameter and
wavelength are given in [mm]. The scatterer object should be set to
forward scattering geometry before calling this function. | [
"Specific",
"differential",
"phase",
"(",
"K_dp",
")",
"for",
"the",
"current",
"setup",
"."
] | train | https://github.com/jleinonen/pytmatrix/blob/8803507fe5332786feab105fa74acf63e7121718/pytmatrix/radar.py#L112-L133 |
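A minimal usage sketch for `Kdp`. The `Scatterer` keyword arguments, the `wl_C` wavelength constant, and the `geom_horiz_forw` geometry tuple follow pytmatrix's documented helpers; the drop size, axis ratio, and refractive index below are illustrative, not taken from this dataset row:

```python
from pytmatrix.tmatrix import Scatterer
from pytmatrix import radar, tmatrix_aux

# Diameter and wavelength must both be in mm for Kdp to be correct.
scatterer = Scatterer(radius=2.0, wavelength=tmatrix_aux.wl_C,
                      m=complex(8.6, 1.4), axis_ratio=1.0/0.6)
# Kdp requires a forward scattering geometry.
scatterer.set_geometry(tmatrix_aux.geom_horiz_forw)
print(radar.Kdp(scatterer))  # specific differential phase [deg/km]
```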
jleinonen/pytmatrix | pytmatrix/refractive.py | mg_refractive | def mg_refractive(m, mix):
"""Maxwell-Garnett EMA for the refractive index.
Args:
m: Tuple of the complex refractive indices of the media.
mix: Tuple of the volume fractions of the media, len(mix)==len(m)
(if sum(mix)!=1, these are taken relative to sum(mix))
Returns:
The Maxwell-Garnett approximation for the complex refractive index of
the effective medium.
If len(m)==2, the first element is taken as the matrix and the second as
the inclusion. If len(m)>2, the media are mixed recursively so that the
last element is used as the inclusion and the second to last as the
matrix, then this mixture is used as the last element on the next
iteration, and so on.
"""
if len(m) == 2:
cF = float(mix[1]) / (mix[0]+mix[1]) * \
(m[1]**2-m[0]**2) / (m[1]**2+2*m[0]**2)
er = m[0]**2 * (1.0+2.0*cF) / (1.0-cF)
m = np.sqrt(er)
else:
m_last = mg_refractive(m[-2:], mix[-2:])
mix_last = mix[-2] + mix[-1]
m = mg_refractive(m[:-2] + (m_last,), mix[:-2] + (mix_last,))
return m | python | def mg_refractive(m, mix):
"""Maxwell-Garnett EMA for the refractive index.
Args:
m: Tuple of the complex refractive indices of the media.
mix: Tuple of the volume fractions of the media, len(mix)==len(m)
(if sum(mix)!=1, these are taken relative to sum(mix))
Returns:
The Maxwell-Garnett approximation for the complex refractive index of
the effective medium.
If len(m)==2, the first element is taken as the matrix and the second as
the inclusion. If len(m)>2, the media are mixed recursively so that the
last element is used as the inclusion and the second to last as the
matrix, then this mixture is used as the last element on the next
iteration, and so on.
"""
if len(m) == 2:
cF = float(mix[1]) / (mix[0]+mix[1]) * \
(m[1]**2-m[0]**2) / (m[1]**2+2*m[0]**2)
er = m[0]**2 * (1.0+2.0*cF) / (1.0-cF)
m = np.sqrt(er)
else:
m_last = mg_refractive(m[-2:], mix[-2:])
mix_last = mix[-2] + mix[-1]
m = mg_refractive(m[:-2] + (m_last,), mix[:-2] + (mix_last,))
return m | [
"def",
"mg_refractive",
"(",
"m",
",",
"mix",
")",
":",
"if",
"len",
"(",
"m",
")",
"==",
"2",
":",
"cF",
"=",
"float",
"(",
"mix",
"[",
"1",
"]",
")",
"/",
"(",
"mix",
"[",
"0",
"]",
"+",
"mix",
"[",
"1",
"]",
")",
"*",
"(",
"m",
"[",
"1",
"]",
"**",
"2",
"-",
"m",
"[",
"0",
"]",
"**",
"2",
")",
"/",
"(",
"m",
"[",
"1",
"]",
"**",
"2",
"+",
"2",
"*",
"m",
"[",
"0",
"]",
"**",
"2",
")",
"er",
"=",
"m",
"[",
"0",
"]",
"**",
"2",
"*",
"(",
"1.0",
"+",
"2.0",
"*",
"cF",
")",
"/",
"(",
"1.0",
"-",
"cF",
")",
"m",
"=",
"np",
".",
"sqrt",
"(",
"er",
")",
"else",
":",
"m_last",
"=",
"mg_refractive",
"(",
"m",
"[",
"-",
"2",
":",
"]",
",",
"mix",
"[",
"-",
"2",
":",
"]",
")",
"mix_last",
"=",
"mix",
"[",
"-",
"2",
"]",
"+",
"mix",
"[",
"-",
"1",
"]",
"m",
"=",
"mg_refractive",
"(",
"m",
"[",
":",
"-",
"2",
"]",
"+",
"(",
"m_last",
",",
")",
",",
"mix",
"[",
":",
"-",
"2",
"]",
"+",
"(",
"mix_last",
",",
")",
")",
"return",
"m"
] | Maxwell-Garnett EMA for the refractive index.
Args:
m: Tuple of the complex refractive indices of the media.
mix: Tuple of the volume fractions of the media, len(mix)==len(m)
(if sum(mix)!=1, these are taken relative to sum(mix))
Returns:
The Maxwell-Garnett approximation for the complex refractive index of
the effective medium.
If len(m)==2, the first element is taken as the matrix and the second as
the inclusion. If len(m)>2, the media are mixed recursively so that the
last element is used as the inclusion and the second to last as the
matrix, then this mixture is used as the last element on the next
iteration, and so on. | [
"Maxwell",
"-",
"Garnett",
"EMA",
"for",
"the",
"refractive",
"index",
"."
] | train | https://github.com/jleinonen/pytmatrix/blob/8803507fe5332786feab105fa74acf63e7121718/pytmatrix/refractive.py#L29-L57 |
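A short sketch of calling `mg_refractive` for a two-component and a three-component mixture; the refractive indices and volume fractions are illustrative:

```python
from pytmatrix.refractive import mg_refractive

m_ice, m_air = complex(1.78, 0.004), complex(1.0, 0.0)
# Two components: ice matrix with 30% air inclusions.
m_eff = mg_refractive((m_ice, m_air), (0.7, 0.3))
# Three components: mixed recursively, last element as the inclusion.
m_water = complex(8.6, 1.4)
m_eff3 = mg_refractive((m_water, m_ice, m_air), (0.2, 0.5, 0.3))
```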
jleinonen/pytmatrix | pytmatrix/refractive.py | bruggeman_refractive | def bruggeman_refractive(m, mix):
"""Bruggeman EMA for the refractive index.
For instructions, see mg_refractive in this module, except this routine
only works for two components.
"""
f1 = mix[0]/sum(mix)
f2 = mix[1]/sum(mix)
e1 = m[0]**2
e2 = m[1]**2
a = -2*(f1+f2)
b = (2*f1*e1 - f1*e2 + 2*f2*e2 - f2*e1)
c = (f1+f2)*e1*e2
e_eff = (-b - np.sqrt(b**2-4*a*c))/(2*a)
return np.sqrt(e_eff) | python | def bruggeman_refractive(m, mix):
"""Bruggeman EMA for the refractive index.
For instructions, see mg_refractive in this module, except this routine
only works for two components.
"""
f1 = mix[0]/sum(mix)
f2 = mix[1]/sum(mix)
e1 = m[0]**2
e2 = m[1]**2
a = -2*(f1+f2)
b = (2*f1*e1 - f1*e2 + 2*f2*e2 - f2*e1)
c = (f1+f2)*e1*e2
e_eff = (-b - np.sqrt(b**2-4*a*c))/(2*a)
return np.sqrt(e_eff) | [
"def",
"bruggeman_refractive",
"(",
"m",
",",
"mix",
")",
":",
"f1",
"=",
"mix",
"[",
"0",
"]",
"/",
"sum",
"(",
"mix",
")",
"f2",
"=",
"mix",
"[",
"1",
"]",
"/",
"sum",
"(",
"mix",
")",
"e1",
"=",
"m",
"[",
"0",
"]",
"**",
"2",
"e2",
"=",
"m",
"[",
"1",
"]",
"**",
"2",
"a",
"=",
"-",
"2",
"*",
"(",
"f1",
"+",
"f2",
")",
"b",
"=",
"(",
"2",
"*",
"f1",
"*",
"e1",
"-",
"f1",
"*",
"e2",
"+",
"2",
"*",
"f2",
"*",
"e2",
"-",
"f2",
"*",
"e1",
")",
"c",
"=",
"(",
"f1",
"+",
"f2",
")",
"*",
"e1",
"*",
"e2",
"e_eff",
"=",
"(",
"-",
"b",
"-",
"np",
".",
"sqrt",
"(",
"b",
"**",
"2",
"-",
"4",
"*",
"a",
"*",
"c",
")",
")",
"/",
"(",
"2",
"*",
"a",
")",
"return",
"np",
".",
"sqrt",
"(",
"e_eff",
")"
] | Bruggeman EMA for the refractive index.
For instructions, see mg_refractive in this module, except this routine
only works for two components. | [
"Bruggeman",
"EMA",
"for",
"the",
"refractive",
"index",
"."
] | train | https://github.com/jleinonen/pytmatrix/blob/8803507fe5332786feab105fa74acf63e7121718/pytmatrix/refractive.py#L60-L74 |
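`bruggeman_refractive` takes the same two-component arguments; a minimal sketch with illustrative values:

```python
from pytmatrix.refractive import bruggeman_refractive

# Unlike the Maxwell-Garnett formula, the Bruggeman rule treats both
# media symmetrically, so neither component is singled out as the matrix.
m_eff = bruggeman_refractive((complex(1.78, 0.004), complex(1.0, 0.0)),
                             (0.7, 0.3))
```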
jleinonen/pytmatrix | pytmatrix/refractive.py | ice_refractive | def ice_refractive(file):
"""
Interpolator for the refractive indices of ice.
Inputs:
File to read the refractive index lookup table from.
This is supplied as "ice_refr.dat", retrieved from
http://www.atmos.washington.edu/ice_optical_constants/
Returns:
A callable object that takes as parameters the wavelength [mm]
and the snow density [g/cm^3].
"""
D = np.loadtxt(file)
log_wl = np.log10(D[:,0]/1000)
re = D[:,1]
log_im = np.log10(D[:,2])
iobj_re = interpolate.interp1d(log_wl, re)
iobj_log_im = interpolate.interp1d(log_wl, log_im)
def ref(wl, snow_density):
lwl = np.log10(wl)
try:
len(lwl)
except TypeError:
mi_sqr = complex(iobj_re(lwl), 10**iobj_log_im(lwl))**2
else:
mi_sqr = np.array([complex(a,b) for (a,b) in zip(iobj_re(lwl),
10**iobj_log_im(lwl))])**2
c = (mi_sqr-1)/(mi_sqr+2) * snow_density/ice_density
return np.sqrt( (1+2*c) / (1-c) )
return ref | python | def ice_refractive(file):
"""
Interpolator for the refractive indices of ice.
Inputs:
File to read the refractive index lookup table from.
This is supplied as "ice_refr.dat", retrieved from
http://www.atmos.washington.edu/ice_optical_constants/
Returns:
A callable object that takes as parameters the wavelength [mm]
and the snow density [g/cm^3].
"""
D = np.loadtxt(file)
log_wl = np.log10(D[:,0]/1000)
re = D[:,1]
log_im = np.log10(D[:,2])
iobj_re = interpolate.interp1d(log_wl, re)
iobj_log_im = interpolate.interp1d(log_wl, log_im)
def ref(wl, snow_density):
lwl = np.log10(wl)
try:
len(lwl)
except TypeError:
mi_sqr = complex(iobj_re(lwl), 10**iobj_log_im(lwl))**2
else:
mi_sqr = np.array([complex(a,b) for (a,b) in zip(iobj_re(lwl),
10**iobj_log_im(lwl))])**2
c = (mi_sqr-1)/(mi_sqr+2) * snow_density/ice_density
return np.sqrt( (1+2*c) / (1-c) )
return ref | [
"def",
"ice_refractive",
"(",
"file",
")",
":",
"D",
"=",
"np",
".",
"loadtxt",
"(",
"file",
")",
"log_wl",
"=",
"np",
".",
"log10",
"(",
"D",
"[",
":",
",",
"0",
"]",
"/",
"1000",
")",
"re",
"=",
"D",
"[",
":",
",",
"1",
"]",
"log_im",
"=",
"np",
".",
"log10",
"(",
"D",
"[",
":",
",",
"2",
"]",
")",
"iobj_re",
"=",
"interpolate",
".",
"interp1d",
"(",
"log_wl",
",",
"re",
")",
"iobj_log_im",
"=",
"interpolate",
".",
"interp1d",
"(",
"log_wl",
",",
"log_im",
")",
"def",
"ref",
"(",
"wl",
",",
"snow_density",
")",
":",
"lwl",
"=",
"np",
".",
"log10",
"(",
"wl",
")",
"try",
":",
"len",
"(",
"lwl",
")",
"except",
"TypeError",
":",
"mi_sqr",
"=",
"complex",
"(",
"iobj_re",
"(",
"lwl",
")",
",",
"10",
"**",
"iobj_log_im",
"(",
"lwl",
")",
")",
"**",
"2",
"else",
":",
"mi_sqr",
"=",
"np",
".",
"array",
"(",
"[",
"complex",
"(",
"a",
",",
"b",
")",
"for",
"(",
"a",
",",
"b",
")",
"in",
"zip",
"(",
"iobj_re",
"(",
"lwl",
")",
",",
"10",
"**",
"iobj_log_im",
"(",
"lwl",
")",
")",
"]",
")",
"**",
"2",
"c",
"=",
"(",
"mi_sqr",
"-",
"1",
")",
"/",
"(",
"mi_sqr",
"+",
"2",
")",
"*",
"snow_density",
"/",
"ice_density",
"return",
"np",
".",
"sqrt",
"(",
"(",
"1",
"+",
"2",
"*",
"c",
")",
"/",
"(",
"1",
"-",
"c",
")",
")",
"return",
"ref"
] | Interpolator for the refractive indices of ice.
Inputs:
File to read the refractive index lookup table from.
This is supplied as "ice_refr.dat", retrieved from
http://www.atmos.washington.edu/ice_optical_constants/
Returns:
A callable object that takes as parameters the wavelength [mm]
and the snow density [g/cm^3]. | [
"Interpolator",
"for",
"the",
"refractive",
"indices",
"of",
"ice",
"."
] | train | https://github.com/jleinonen/pytmatrix/blob/8803507fe5332786feab105fa74acf63e7121718/pytmatrix/refractive.py#L107-L142 |
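A usage sketch for `ice_refractive`. The filename matches the one named in the docstring, but how the file is located on disk is an assumption left to the caller:

```python
from pytmatrix import refractive

# Build the interpolator once, then evaluate it for any wavelength [mm]
# and snow density [g/cm^3]; scalar and array wavelengths both work.
ref = refractive.ice_refractive("ice_refr.dat")
m_snow = ref(8.57, 0.2)  # e.g. a Ka-band wavelength, 0.2 g/cm^3 snow
```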
jleinonen/pytmatrix | pytmatrix/tmatrix_aux.py | dsr_thurai_2007 | def dsr_thurai_2007(D_eq):
"""
Drop shape relationship function from Thurai2007
(http://dx.doi.org/10.1175/JTECH2051.1) paper.
Arguments:
D_eq: Drop volume-equivalent diameter (mm)
Returns:
r: The vertical-to-horizontal drop axis ratio. Note: the Scatterer class
expects horizontal to vertical, so you should pass 1/dsr_thurai_2007
"""
if D_eq < 0.7:
return 1.0
elif D_eq < 1.5:
return 1.173 - 0.5165*D_eq + 0.4698*D_eq**2 - 0.1317*D_eq**3 - \
8.5e-3*D_eq**4
else:
return 1.065 - 6.25e-2*D_eq - 3.99e-3*D_eq**2 + 7.66e-4*D_eq**3 - \
4.095e-5*D_eq**4 | python | def dsr_thurai_2007(D_eq):
"""
Drop shape relationship function from Thurai2007
(http://dx.doi.org/10.1175/JTECH2051.1) paper.
Arguments:
D_eq: Drop volume-equivalent diameter (mm)
Returns:
r: The vertical-to-horizontal drop axis ratio. Note: the Scatterer class
expects horizontal to vertical, so you should pass 1/dsr_thurai_2007
"""
if D_eq < 0.7:
return 1.0
elif D_eq < 1.5:
return 1.173 - 0.5165*D_eq + 0.4698*D_eq**2 - 0.1317*D_eq**3 - \
8.5e-3*D_eq**4
else:
return 1.065 - 6.25e-2*D_eq - 3.99e-3*D_eq**2 + 7.66e-4*D_eq**3 - \
4.095e-5*D_eq**4 | [
"def",
"dsr_thurai_2007",
"(",
"D_eq",
")",
":",
"if",
"D_eq",
"<",
"0.7",
":",
"return",
"1.0",
"elif",
"D_eq",
"<",
"1.5",
":",
"return",
"1.173",
"-",
"0.5165",
"*",
"D_eq",
"+",
"0.4698",
"*",
"D_eq",
"**",
"2",
"-",
"0.1317",
"*",
"D_eq",
"**",
"3",
"-",
"8.5e-3",
"*",
"D_eq",
"**",
"4",
"else",
":",
"return",
"1.065",
"-",
"6.25e-2",
"*",
"D_eq",
"-",
"3.99e-3",
"*",
"D_eq",
"**",
"2",
"+",
"7.66e-4",
"*",
"D_eq",
"**",
"3",
"-",
"4.095e-5",
"*",
"D_eq",
"**",
"4"
] | Drop shape relationship function from Thurai2007
(http://dx.doi.org/10.1175/JTECH2051.1) paper.
Arguments:
D_eq: Drop volume-equivalent diameter (mm)
Returns:
r: The vertical-to-horizontal drop axis ratio. Note: the Scatterer class
expects horizontal to vertical, so you should pass 1/dsr_thurai_2007 | [
"Drop",
"shape",
"relationship",
"function",
"from",
"Thurai2007",
"(",
"http",
":",
"//",
"dx",
".",
"doi",
".",
"org",
"/",
"10",
".",
"1175",
"/",
"JTECH2051",
".",
"1",
")",
"paper",
".",
"Arguments",
":",
"D_eq",
":",
"Drop",
"volume",
"-",
"equivalent",
"diameter",
"(",
"mm",
")"
] | train | https://github.com/jleinonen/pytmatrix/blob/8803507fe5332786feab105fa74acf63e7121718/pytmatrix/tmatrix_aux.py#L47-L66 |
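A sketch showing how the docstring's note plays out in practice: the returned ratio is inverted before being handed to `Scatterer`. The `Scatterer` arguments and `wl_X` constant follow pytmatrix's documented API; the numeric values are illustrative:

```python
from pytmatrix.tmatrix import Scatterer
from pytmatrix import tmatrix_aux

D_eq = 2.5  # drop volume-equivalent diameter [mm]
scatterer = Scatterer(
    radius=0.5 * D_eq, wavelength=tmatrix_aux.wl_X, m=complex(8.6, 1.4),
    # dsr_thurai_2007 returns vertical-to-horizontal; Scatterer expects
    # horizontal-to-vertical, hence the inverse.
    axis_ratio=1.0 / tmatrix_aux.dsr_thurai_2007(D_eq))
```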
edublancas/sklearn-evaluation | sklearn_evaluation/compute.py | feature_importances | def feature_importances(data, top_n=None, feature_names=None):
"""
Get and order feature importances from a scikit-learn model
or from an array-like structure.
If data is a scikit-learn model with sub-estimators (e.g. RandomForest,
AdaBoost) the function will compute the standard deviation of each
feature.
Parameters
----------
data : sklearn model or array-like structure
Object to get the data from.
top_n : int
Only get results for the top_n features.
feature_names : array-like
Feature names
Returns
-------
numpy structured array
Returns a numpy structured array with the data. Columns are
feature_name, importance and std_ if an sklearn model with
sub-estimators was passed in data.
"""
# data can be either a sklearn estimator or an iterator with
# the actual importances values, try to get the values
try:
imp = data.feature_importances_
except:
imp = np.array(data)
# in case the user passed an estimator, it may have an estimators_
# attribute, which includes importances for every sub-estimator
# get them if possible
try:
sub_imp = np.array([e.feature_importances_ for e in data.estimators_])
# calculate std
std = np.std(sub_imp, axis=0)
except:
std = None
# get the number of features
n_features = len(imp)
# check that the data has the correct format
if top_n and top_n > n_features:
raise ValueError(('top_n ({}) cannot be greater than the number of'
' features ({})'.format(top_n, n_features)))
if top_n and top_n < 1:
raise ValueError('top_n cannot be less than 1')
if feature_names and len(feature_names) != n_features:
raise ValueError(('feature_names ({}) must match the number of'
' features ({})'.format(len(feature_names),
n_features)))
# if the user did not pass feature names create generic names
if feature_names is None:
feature_names = ['Feature {}'.format(n) for n in range(1, n_features+1)]
feature_names = np.array(feature_names)
else:
feature_names = np.array(feature_names)
# order the data according to the importance for the feature
idx = np.argsort(imp)[::-1]
imp = imp[idx]
feature_names = feature_names[idx]
if std is not None:
std = std[idx]
# build the structured array
if std is not None:
names = 'feature_name,importance,std_'
res = np.core.records.fromarrays([feature_names, imp, std],
names=names)
else:
names = 'feature_name,importance'
res = np.core.records.fromarrays([feature_names, imp],
names=names)
# get subset if top_n is not None
if top_n:
res = res[:top_n]
return res | python | def feature_importances(data, top_n=None, feature_names=None):
"""
Get and order feature importances from a scikit-learn model
or from an array-like structure.
If data is a scikit-learn model with sub-estimators (e.g. RandomForest,
AdaBoost) the function will compute the standard deviation of each
feature.
Parameters
----------
data : sklearn model or array-like structure
Object to get the data from.
top_n : int
Only get results for the top_n features.
feature_names : array-like
Feature names
Returns
-------
numpy structured array
Returns a numpy structured array with the data. Columns are
feature_name, importance and std_ if an sklearn model with
sub-estimators was passed in data.
"""
# data can be either a sklearn estimator or an iterator with
# the actual importances values, try to get the values
try:
imp = data.feature_importances_
except:
imp = np.array(data)
# in case the user passed an estimator, it may have an estimators_
# attribute, which includes importances for every sub-estimator
# get them if possible
try:
sub_imp = np.array([e.feature_importances_ for e in data.estimators_])
# calculate std
std = np.std(sub_imp, axis=0)
except:
std = None
# get the number of features
n_features = len(imp)
# check that the data has the correct format
if top_n and top_n > n_features:
raise ValueError(('top_n ({}) cannot be greater than the number of'
' features ({})'.format(top_n, n_features)))
if top_n and top_n < 1:
raise ValueError('top_n cannot be less than 1')
if feature_names and len(feature_names) != n_features:
raise ValueError(('feature_names ({}) must match the number of'
' features ({})'.format(len(feature_names),
n_features)))
# if the user did not pass feature names create generic names
if feature_names is None:
feature_names = ['Feature {}'.format(n) for n in range(1, n_features+1)]
feature_names = np.array(feature_names)
else:
feature_names = np.array(feature_names)
# order the data according to the importance for the feature
idx = np.argsort(imp)[::-1]
imp = imp[idx]
feature_names = feature_names[idx]
if std is not None:
std = std[idx]
# build the structured array
if std is not None:
names = 'feature_name,importance,std_'
res = np.core.records.fromarrays([feature_names, imp, std],
names=names)
else:
names = 'feature_name,importance'
res = np.core.records.fromarrays([feature_names, imp],
names=names)
# get subset if top_n is not None
if top_n:
res = res[:top_n]
return res | [
"def",
"feature_importances",
"(",
"data",
",",
"top_n",
"=",
"None",
",",
"feature_names",
"=",
"None",
")",
":",
"# data can be either a sklearn estimator or an iterator with",
"# the actual importances values, try to get the values",
"try",
":",
"imp",
"=",
"data",
".",
"feature_importances_",
"except",
":",
"imp",
"=",
"np",
".",
"array",
"(",
"data",
")",
"# in case the user passed an estimator, it may have an estimators_",
"# attribute, which includes importnaces for every sub-estimator",
"# get them if possible",
"try",
":",
"sub_imp",
"=",
"np",
".",
"array",
"(",
"[",
"e",
".",
"feature_importances_",
"for",
"e",
"in",
"data",
".",
"estimators_",
"]",
")",
"# calculate std",
"std",
"=",
"np",
".",
"std",
"(",
"sub_imp",
",",
"axis",
"=",
"0",
")",
"except",
":",
"std",
"=",
"None",
"# get the number of features",
"n_features",
"=",
"len",
"(",
"imp",
")",
"# check that the data has the correct format",
"if",
"top_n",
"and",
"top_n",
">",
"n_features",
":",
"raise",
"ValueError",
"(",
"(",
"'top_n ({}) cannot be greater than the number of'",
"' features ({})'",
".",
"format",
"(",
"top_n",
",",
"n_features",
")",
")",
")",
"if",
"top_n",
"and",
"top_n",
"<",
"1",
":",
"raise",
"ValueError",
"(",
"'top_n cannot be less than 1'",
")",
"if",
"feature_names",
"and",
"len",
"(",
"feature_names",
")",
"!=",
"n_features",
":",
"raise",
"ValueError",
"(",
"(",
"'feature_names ({}) must match the number of'",
"' features ({})'",
".",
"format",
"(",
"len",
"(",
"feature_names",
")",
",",
"n_features",
")",
")",
")",
"# if the user did not pass feature names create generic names",
"if",
"feature_names",
"is",
"None",
":",
"feature_names",
"=",
"[",
"'Feature {}'",
".",
"format",
"(",
"n",
")",
"for",
"n",
"in",
"range",
"(",
"1",
",",
"n_features",
"+",
"1",
")",
"]",
"feature_names",
"=",
"np",
".",
"array",
"(",
"feature_names",
")",
"else",
":",
"feature_names",
"=",
"np",
".",
"array",
"(",
"feature_names",
")",
"# order the data according to the importance for the feature",
"idx",
"=",
"np",
".",
"argsort",
"(",
"imp",
")",
"[",
":",
":",
"-",
"1",
"]",
"imp",
"=",
"imp",
"[",
"idx",
"]",
"feature_names",
"=",
"feature_names",
"[",
"idx",
"]",
"if",
"std",
"is",
"not",
"None",
":",
"std",
"=",
"std",
"[",
"idx",
"]",
"# build the structured array",
"if",
"std",
"is",
"not",
"None",
":",
"names",
"=",
"'feature_name,importance,std_'",
"res",
"=",
"np",
".",
"core",
".",
"records",
".",
"fromarrays",
"(",
"[",
"feature_names",
",",
"imp",
",",
"std",
"]",
",",
"names",
"=",
"names",
")",
"else",
":",
"names",
"=",
"'feature_name,importance'",
"res",
"=",
"np",
".",
"core",
".",
"records",
".",
"fromarrays",
"(",
"[",
"feature_names",
",",
"imp",
"]",
",",
"names",
"=",
"names",
")",
"# get subset if top_n is not none",
"if",
"top_n",
":",
"res",
"=",
"res",
"[",
":",
"top_n",
"]",
"return",
"res"
] | Get and order feature importances from a scikit-learn model
or from an array-like structure.
If data is a scikit-learn model with sub-estimators (e.g. RandomForest,
AdaBoost) the function will compute the standard deviation of each
feature.
Parameters
----------
data : sklearn model or array-like structure
Object to get the data from.
top_n : int
Only get results for the top_n features.
feature_names : array-like
Feature names
Returns
-------
numpy structured array
Returns a numpy structured array with the data. Columns are
feature_name, importance and std_ if an sklearn model with
sub-estimators was passed in data. | [
"Get",
"and",
"order",
"feature",
"importances",
"from",
"a",
"scikit",
"-",
"learn",
"model",
"or",
"from",
"an",
"array",
"-",
"like",
"structure",
"."
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/compute.py#L4-L89 |
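A minimal sketch of `feature_importances` with a model that has sub-estimators, so the `std_` column is populated; the dataset and hyperparameters are illustrative:

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn_evaluation.compute import feature_importances

X, y = make_classification(n_samples=200, n_features=5, random_state=0)
model = RandomForestClassifier(n_estimators=20, random_state=0).fit(X, y)

# Structured array ordered by importance; RandomForest exposes
# estimators_, so std_ is computed from the per-tree importances.
res = feature_importances(model, top_n=3,
                          feature_names=['a', 'b', 'c', 'd', 'e'])
print(res.feature_name, res.importance, res.std_)
```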
edublancas/sklearn-evaluation | sklearn_evaluation/util.py | _group_by | def _group_by(data, criteria):
"""
Group objects in data using a function or a key
"""
if isinstance(criteria, str):
criteria_str = criteria
def criteria(x):
return x[criteria_str]
res = defaultdict(list)
for element in data:
key = criteria(element)
res[key].append(element)
return res | python | def _group_by(data, criteria):
"""
Group objects in data using a function or a key
"""
if isinstance(criteria, str):
criteria_str = criteria
def criteria(x):
return x[criteria_str]
res = defaultdict(list)
for element in data:
key = criteria(element)
res[key].append(element)
return res | [
"def",
"_group_by",
"(",
"data",
",",
"criteria",
")",
":",
"if",
"isinstance",
"(",
"criteria",
",",
"str",
")",
":",
"criteria_str",
"=",
"criteria",
"def",
"criteria",
"(",
"x",
")",
":",
"return",
"x",
"[",
"criteria_str",
"]",
"res",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"element",
"in",
"data",
":",
"key",
"=",
"criteria",
"(",
"element",
")",
"res",
"[",
"key",
"]",
".",
"append",
"(",
"element",
")",
"return",
"res"
] | Group objects in data using a function or a key | [
"Group",
"objects",
"in",
"data",
"using",
"a",
"function",
"or",
"a",
"key"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/util.py#L37-L51 |
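A short sketch of `_group_by` with both kinds of criteria, a key string and a callable:

```python
from sklearn_evaluation.util import _group_by

rows = [{'model': 'rf', 'score': 0.90},
        {'model': 'svm', 'score': 0.80},
        {'model': 'rf', 'score': 0.85}]

by_model = _group_by(rows, 'model')                      # key lookup
by_band = _group_by(rows, lambda r: r['score'] >= 0.85)  # callable
# by_model -> {'rf': [first and third dicts], 'svm': [second dict]}
```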
edublancas/sklearn-evaluation | sklearn_evaluation/util.py | _get_params_value | def _get_params_value(params):
"""
Given an iterator (k1, k2), returns a function that when called
with an object obj returns a tuple of the form:
((k1, obj.parameters[k1]), (k2, obj.parameters[k2]))
"""
# sort params for consistency
ord_params = sorted(params)
def fn(obj):
l = []
for p in ord_params:
try:
l.append((p, obj.parameters[p]))
except:
raise ValueError('{} is not a valid parameter'.format(p))
return tuple(l)
return fn | python | def _get_params_value(params):
"""
Given an iterator (k1, k2), returns a function that when called
with an object obj returns a tuple of the form:
((k1, obj.parameters[k1]), (k2, obj.parameters[k2]))
"""
# sort params for consistency
ord_params = sorted(params)
def fn(obj):
l = []
for p in ord_params:
try:
l.append((p, obj.parameters[p]))
except:
raise ValueError('{} is not a valid parameter'.format(p))
return tuple(l)
return fn | [
"def",
"_get_params_value",
"(",
"params",
")",
":",
"# sort params for consistency",
"ord_params",
"=",
"sorted",
"(",
"params",
")",
"def",
"fn",
"(",
"obj",
")",
":",
"l",
"=",
"[",
"]",
"for",
"p",
"in",
"ord_params",
":",
"try",
":",
"l",
".",
"append",
"(",
"(",
"p",
",",
"obj",
".",
"parameters",
"[",
"p",
"]",
")",
")",
"except",
":",
"raise",
"ValueError",
"(",
"'{} is not a valid parameter'",
".",
"format",
"(",
"p",
")",
")",
"return",
"tuple",
"(",
"l",
")",
"return",
"fn"
] | Given an iterator (k1, k2), returns a function that when called
with an object obj returns a tuple of the form:
((k1, obj.parameters[k1]), (k2, obj.parameters[k2])) | [
"Given",
"an",
"iterator",
"(",
"k1",
"k2",
")",
"returns",
"a",
"function",
"that",
"when",
"called",
"with",
"an",
"object",
"obj",
"returns",
"a",
"tuple",
"of",
"the",
"form",
":",
"((",
"k1",
"obj",
".",
"parameters",
"[",
"k1",
"]",
")",
"(",
"k2",
"obj",
".",
"parameters",
"[",
"k2",
"]",
"))"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/util.py#L54-L71 |
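A sketch of `_get_params_value`; the `Result` class here is a hypothetical stand-in for any object exposing a `parameters` mapping:

```python
from sklearn_evaluation.util import _get_params_value

class Result:  # hypothetical: anything with a `parameters` dict works
    parameters = {'n_estimators': 10, 'max_depth': 3}

key_fn = _get_params_value(['n_estimators', 'max_depth'])
# Params are sorted for consistency, so the output order is fixed:
key_fn(Result())  # -> (('max_depth', 3), ('n_estimators', 10))
```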
edublancas/sklearn-evaluation | sklearn_evaluation/util.py | _product | def _product(k, v):
"""
Perform the product between two objects
even if they don't support iteration
"""
if not _can_iterate(k):
k = [k]
if not _can_iterate(v):
v = [v]
return list(product(k, v)) | python | def _product(k, v):
"""
Perform the product between two objects
even if they don't support iteration
"""
if not _can_iterate(k):
k = [k]
if not _can_iterate(v):
v = [v]
return list(product(k, v)) | [
"def",
"_product",
"(",
"k",
",",
"v",
")",
":",
"if",
"not",
"_can_iterate",
"(",
"k",
")",
":",
"k",
"=",
"[",
"k",
"]",
"if",
"not",
"_can_iterate",
"(",
"v",
")",
":",
"v",
"=",
"[",
"v",
"]",
"return",
"list",
"(",
"product",
"(",
"k",
",",
"v",
")",
")"
] | Perform the product between two objects
even if they don't support iteration | [
"Perform",
"the",
"product",
"between",
"two",
"objects",
"even",
"if",
"they",
"don",
"t",
"support",
"iteration"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/util.py#L80-L89 |
edublancas/sklearn-evaluation | sklearn_evaluation/util.py | _mapping_to_tuple_pairs | def _mapping_to_tuple_pairs(d):
"""
Convert a mapping object (such as a dictionary) to tuple pairs,
using its keys and values to generate the pairs and then generating
all possible combinations between those
e.g. {1: (1,2,3)} -> (((1, 1),), ((1, 2),), ((1, 3),))
"""
# order the keys, this will prevent different implementations of Python,
# return different results from the same dictionary since the order of
# iteration depends on it
t = []
ord_keys = sorted(d.keys())
for k in ord_keys:
t.append(_product(k, d[k]))
return tuple(product(*t)) | python | def _mapping_to_tuple_pairs(d):
"""
Convert a mapping object (such as a dictionary) to tuple pairs,
using its keys and values to generate the pairs and then generating
all possible combinations between those
e.g. {1: (1,2,3)} -> (((1, 1),), ((1, 2),), ((1, 3),))
"""
# order the keys, this will prevent different implementations of Python,
# return different results from the same dictionary since the order of
# iteration depends on it
t = []
ord_keys = sorted(d.keys())
for k in ord_keys:
t.append(_product(k, d[k]))
return tuple(product(*t)) | [
"def",
"_mapping_to_tuple_pairs",
"(",
"d",
")",
":",
"# order the keys, this will prevent different implementations of Python,",
"# return different results from the same dictionary since the order of",
"# iteration depends on it",
"t",
"=",
"[",
"]",
"ord_keys",
"=",
"sorted",
"(",
"d",
".",
"keys",
"(",
")",
")",
"for",
"k",
"in",
"ord_keys",
":",
"t",
".",
"append",
"(",
"_product",
"(",
"k",
",",
"d",
"[",
"k",
"]",
")",
")",
"return",
"tuple",
"(",
"product",
"(",
"*",
"t",
")",
")"
] | Convert a mapping object (such as a dictionary) to tuple pairs,
using its keys and values to generate the pairs and then generating
all possible combinations between those
e.g. {1: (1,2,3)} -> (((1, 1),), ((1, 2),), ((1, 3),)) | [
"Convert",
"a",
"mapping",
"object",
"(",
"such",
"as",
"a",
"dictionary",
")",
"to",
"tuple",
"pairs",
"using",
"its",
"keys",
"and",
"values",
"to",
"generate",
"the",
"pairs",
"and",
"then",
"generating",
"all",
"possible",
"combinations",
"between",
"those",
"e",
".",
"g",
".",
"{",
"1",
":",
"(",
"1",
"2",
"3",
")",
"}",
"-",
">",
"(((",
"1",
"1",
")",
")",
"((",
"1",
"2",
")",
")",
"((",
"1",
"3",
")",
"))"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/util.py#L92-L106 |
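The docstring's example, spelled out for a two-key mapping; scalar values are wrapped by `_product`, so they need not be iterable:

```python
from sklearn_evaluation.util import _mapping_to_tuple_pairs

_mapping_to_tuple_pairs({'a': (1, 2), 'b': 3})
# -> ((('a', 1), ('b', 3)), (('a', 2), ('b', 3)))
```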
edublancas/sklearn-evaluation | sklearn_evaluation/plot/learning_curve.py | learning_curve | def learning_curve(train_scores, test_scores, train_sizes, ax=None):
"""Plot a learning curve
Plot a metric vs number of examples for the training and test set
Parameters
----------
train_scores : array-like
Scores for the training set
test_scores : array-like
Scores for the test set
train_sizes : array-like
Relative or absolute numbers of training examples used to generate
the learning curve
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/learning_curve.py
"""
if ax is None:
ax = plt.gca()
ax.grid()
ax.set_title("Learning Curve")
ax.set_xlabel("Training examples")
ax.set_ylabel("Score mean")
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
ax.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
ax.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
ax.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
ax.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
ax.legend(loc="best")
ax.margins(0.05)
return ax | python | def learning_curve(train_scores, test_scores, train_sizes, ax=None):
"""Plot a learning curve
Plot a metric vs number of examples for the training and test set
Parameters
----------
train_scores : array-like
Scores for the training set
test_scores : array-like
Scores for the test set
train_sizes : array-like
Relative or absolute numbers of training examples used to generate
the learning curve
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/learning_curve.py
"""
if ax is None:
ax = plt.gca()
ax.grid()
ax.set_title("Learning Curve")
ax.set_xlabel("Training examples")
ax.set_ylabel("Score mean")
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
ax.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
ax.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
ax.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
ax.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
ax.legend(loc="best")
ax.margins(0.05)
return ax | [
"def",
"learning_curve",
"(",
"train_scores",
",",
"test_scores",
",",
"train_sizes",
",",
"ax",
"=",
"None",
")",
":",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"ax",
".",
"grid",
"(",
")",
"ax",
".",
"set_title",
"(",
"\"Learning Curve\"",
")",
"ax",
".",
"set_xlabel",
"(",
"\"Training examples\"",
")",
"ax",
".",
"set_ylabel",
"(",
"\"Score mean\"",
")",
"train_scores_mean",
"=",
"np",
".",
"mean",
"(",
"train_scores",
",",
"axis",
"=",
"1",
")",
"train_scores_std",
"=",
"np",
".",
"std",
"(",
"train_scores",
",",
"axis",
"=",
"1",
")",
"test_scores_mean",
"=",
"np",
".",
"mean",
"(",
"test_scores",
",",
"axis",
"=",
"1",
")",
"test_scores_std",
"=",
"np",
".",
"std",
"(",
"test_scores",
",",
"axis",
"=",
"1",
")",
"ax",
".",
"fill_between",
"(",
"train_sizes",
",",
"train_scores_mean",
"-",
"train_scores_std",
",",
"train_scores_mean",
"+",
"train_scores_std",
",",
"alpha",
"=",
"0.1",
",",
"color",
"=",
"\"r\"",
")",
"ax",
".",
"fill_between",
"(",
"train_sizes",
",",
"test_scores_mean",
"-",
"test_scores_std",
",",
"test_scores_mean",
"+",
"test_scores_std",
",",
"alpha",
"=",
"0.1",
",",
"color",
"=",
"\"g\"",
")",
"ax",
".",
"plot",
"(",
"train_sizes",
",",
"train_scores_mean",
",",
"'o-'",
",",
"color",
"=",
"\"r\"",
",",
"label",
"=",
"\"Training score\"",
")",
"ax",
".",
"plot",
"(",
"train_sizes",
",",
"test_scores_mean",
",",
"'o-'",
",",
"color",
"=",
"\"g\"",
",",
"label",
"=",
"\"Cross-validation score\"",
")",
"ax",
".",
"legend",
"(",
"loc",
"=",
"\"best\"",
")",
"ax",
".",
"margins",
"(",
"0.05",
")",
"return",
"ax"
] | Plot a learning curve
Plot a metric vs number of examples for the training and test set
Parameters
----------
train_scores : array-like
Scores for the training set
test_scores : array-like
Scores for the test set
train_sizes : array-like
Relative or absolute numbers of training examples used to generate
the learning curve
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/learning_curve.py | [
"Plot",
"a",
"learning",
"curve"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/plot/learning_curve.py#L5-L59 |
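A sketch that feeds scikit-learn's cross-validated scores into this plotting function; the estimator and dataset are illustrative:

```python
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import learning_curve as compute_lc
from sklearn.tree import DecisionTreeClassifier
from sklearn_evaluation.plot.learning_curve import learning_curve

X, y = load_iris(return_X_y=True)
train_sizes, train_scores, test_scores = compute_lc(
    DecisionTreeClassifier(random_state=0), X, y, cv=5)
learning_curve(train_scores, test_scores, train_sizes)
plt.show()
```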
edublancas/sklearn-evaluation | sklearn_evaluation/plot/precision_recall.py | precision_recall | def precision_recall(y_true, y_score, ax=None):
"""
Plot precision-recall curve.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples] or [n_samples, 2] for binary
classification or [n_samples, n_classes] for multiclass
Target scores (estimator predictions).
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Notes
-----
It is assumed that the y_score parameter columns are in order. For example,
if ``y_true = [2, 2, 1, 0, 0, 1, 2]``, then the first column in y_score
must contain the scores for class 0, second column for class 1 and so on.
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/precision_recall.py
"""
if any((val is None for val in (y_true, y_score))):
raise ValueError('y_true and y_score are needed to plot '
'Precision-Recall')
if ax is None:
ax = plt.gca()
# get the number of classes from y_score
y_score_is_vector = is_column_vector(y_score) or is_row_vector(y_score)
if y_score_is_vector:
n_classes = 2
else:
_, n_classes = y_score.shape
# check data shape?
if n_classes > 2:
# convert y_true to binary format
y_true_bin = label_binarize(y_true, classes=np.unique(y_true))
_precision_recall_multi(y_true_bin, y_score, ax=ax)
for i in range(n_classes):
_precision_recall(y_true_bin[:, i], y_score[:, i], ax=ax)
else:
if y_score_is_vector:
_precision_recall(y_true, y_score, ax)
else:
_precision_recall(y_true, y_score[:, 1], ax)
# raise error if n_classes = 1?
return ax | python | def precision_recall(y_true, y_score, ax=None):
"""
Plot precision-recall curve.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples] or [n_samples, 2] for binary
classification or [n_samples, n_classes] for multiclass
Target scores (estimator predictions).
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Notes
-----
It is assumed that the y_score parameter columns are in order. For example,
if ``y_true = [2, 2, 1, 0, 0, 1, 2]``, then the first column in y_score
must contain the scores for class 0, second column for class 1 and so on.
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/precision_recall.py
"""
if any((val is None for val in (y_true, y_score))):
raise ValueError('y_true and y_score are needed to plot '
'Precision-Recall')
if ax is None:
ax = plt.gca()
# get the number of classes from y_score
y_score_is_vector = is_column_vector(y_score) or is_row_vector(y_score)
if y_score_is_vector:
n_classes = 2
else:
_, n_classes = y_score.shape
# check data shape?
if n_classes > 2:
# convert y_true to binary format
y_true_bin = label_binarize(y_true, classes=np.unique(y_true))
_precision_recall_multi(y_true_bin, y_score, ax=ax)
for i in range(n_classes):
_precision_recall(y_true_bin[:, i], y_score[:, i], ax=ax)
else:
if y_score_is_vector:
_precision_recall(y_true, y_score, ax)
else:
_precision_recall(y_true, y_score[:, 1], ax)
# raise error if n_classes = 1?
return ax | [
"def",
"precision_recall",
"(",
"y_true",
",",
"y_score",
",",
"ax",
"=",
"None",
")",
":",
"if",
"any",
"(",
"(",
"val",
"is",
"None",
"for",
"val",
"in",
"(",
"y_true",
",",
"y_score",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"'y_true and y_score are needed to plot '",
"'Precision-Recall'",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"# get the number of classes from y_score",
"y_score_is_vector",
"=",
"is_column_vector",
"(",
"y_score",
")",
"or",
"is_row_vector",
"(",
"y_score",
")",
"if",
"y_score_is_vector",
":",
"n_classes",
"=",
"2",
"else",
":",
"_",
",",
"n_classes",
"=",
"y_score",
".",
"shape",
"# check data shape?",
"if",
"n_classes",
">",
"2",
":",
"# convert y_true to binary format",
"y_true_bin",
"=",
"label_binarize",
"(",
"y_true",
",",
"classes",
"=",
"np",
".",
"unique",
"(",
"y_true",
")",
")",
"_precision_recall_multi",
"(",
"y_true_bin",
",",
"y_score",
",",
"ax",
"=",
"ax",
")",
"for",
"i",
"in",
"range",
"(",
"n_classes",
")",
":",
"_precision_recall",
"(",
"y_true_bin",
"[",
":",
",",
"i",
"]",
",",
"y_score",
"[",
":",
",",
"i",
"]",
",",
"ax",
"=",
"ax",
")",
"else",
":",
"if",
"y_score_is_vector",
":",
"_precision_recall",
"(",
"y_true",
",",
"y_score",
",",
"ax",
")",
"else",
":",
"_precision_recall",
"(",
"y_true",
",",
"y_score",
"[",
":",
",",
"1",
"]",
",",
"ax",
")",
"# raise error if n_classes = 1?",
"return",
"ax"
] | Plot precision-recall curve.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples] or [n_samples, 2] for binary
classification or [n_samples, n_classes] for multiclass
Target scores (estimator predictions).
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Notes
-----
It is assumed that the y_score parameter columns are in order. For example,
if ``y_true = [2, 2, 1, 0, 0, 1, 2]``, then the first column in y_score
must contain the scores for class 0, second column for class 1 and so on.
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/precision_recall.py | [
"Plot",
"precision",
"-",
"recall",
"curve",
"."
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/plot/precision_recall.py#L9-L70 |
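A sketch for the binary case, passing the full `[n_samples, 2]` score matrix; the dataset and estimator are illustrative:

```python
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn_evaluation.plot.precision_recall import precision_recall

X, y = make_classification(n_samples=300, random_state=0)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
y_score = LogisticRegression().fit(X_tr, y_tr).predict_proba(X_te)
precision_recall(y_te, y_score)  # column order must match class order
plt.show()
```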
edublancas/sklearn-evaluation | sklearn_evaluation/plot/precision_recall.py | _precision_recall | def _precision_recall(y_true, y_score, ax=None):
"""
Plot precision-recall curve.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples]
Target scores (estimator predictions).
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
"""
precision, recall, _ = precision_recall_curve(y_true, y_score)
average_precision = average_precision_score(y_true, y_score)
if ax is None:
ax = plt.gca()
ax.plot(recall, precision, label=('Precision-Recall curve: AUC={0:0.2f}'
.format(average_precision)))
_set_ax_settings(ax)
return ax | python | def _precision_recall(y_true, y_score, ax=None):
"""
Plot precision-recall curve.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples]
Target scores (estimator predictions).
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
"""
precision, recall, _ = precision_recall_curve(y_true, y_score)
average_precision = average_precision_score(y_true, y_score)
if ax is None:
ax = plt.gca()
ax.plot(recall, precision, label=('Precision-Recall curve: AUC={0:0.2f}'
.format(average_precision)))
_set_ax_settings(ax)
return ax | [
"def",
"_precision_recall",
"(",
"y_true",
",",
"y_score",
",",
"ax",
"=",
"None",
")",
":",
"precision",
",",
"recall",
",",
"_",
"=",
"precision_recall_curve",
"(",
"y_true",
",",
"y_score",
")",
"average_precision",
"=",
"average_precision_score",
"(",
"y_true",
",",
"y_score",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"ax",
".",
"plot",
"(",
"recall",
",",
"precision",
",",
"label",
"=",
"(",
"'Precision-Recall curve: AUC={0:0.2f}'",
".",
"format",
"(",
"average_precision",
")",
")",
")",
"_set_ax_settings",
"(",
"ax",
")",
"return",
"ax"
] | Plot precision-recall curve.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples]
Target scores (estimator predictions).
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot | [
"Plot",
"precision",
"-",
"recall",
"curve",
"."
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/plot/precision_recall.py#L73-L101 |
edublancas/sklearn-evaluation | sklearn_evaluation/plot/precision_recall.py | _precision_recall_multi | def _precision_recall_multi(y_true, y_score, ax=None):
"""
Plot precision-recall curve.
Parameters
----------
y_true : array-like, shape = [n_samples, n_classes]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples, n_classes]
Target scores (estimator predictions).
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
"""
# Compute micro-average ROC curve and ROC area
precision, recall, _ = precision_recall_curve(y_true.ravel(),
y_score.ravel())
avg_prec = average_precision_score(y_true, y_score, average="micro")
if ax is None:
ax = plt.gca()
ax.plot(recall, precision,
label=('micro-average Precision-recall curve (area = {0:0.2f})'
.format(avg_prec)))
_set_ax_settings(ax)
return ax | python | def _precision_recall_multi(y_true, y_score, ax=None):
"""
Plot precision-recall curve.
Parameters
----------
y_true : array-like, shape = [n_samples, n_classes]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples, n_classes]
Target scores (estimator predictions).
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
"""
# Compute micro-average ROC curve and ROC area
precision, recall, _ = precision_recall_curve(y_true.ravel(),
y_score.ravel())
avg_prec = average_precision_score(y_true, y_score, average="micro")
if ax is None:
ax = plt.gca()
ax.plot(recall, precision,
label=('micro-average Precision-recall curve (area = {0:0.2f})'
.format(avg_prec)))
_set_ax_settings(ax)
return ax | [
"def",
"_precision_recall_multi",
"(",
"y_true",
",",
"y_score",
",",
"ax",
"=",
"None",
")",
":",
"# Compute micro-average ROC curve and ROC area",
"precision",
",",
"recall",
",",
"_",
"=",
"precision_recall_curve",
"(",
"y_true",
".",
"ravel",
"(",
")",
",",
"y_score",
".",
"ravel",
"(",
")",
")",
"avg_prec",
"=",
"average_precision_score",
"(",
"y_true",
",",
"y_score",
",",
"average",
"=",
"\"micro\"",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"ax",
".",
"plot",
"(",
"recall",
",",
"precision",
",",
"label",
"=",
"(",
"'micro-average Precision-recall curve (area = {0:0.2f})'",
".",
"format",
"(",
"avg_prec",
")",
")",
")",
"_set_ax_settings",
"(",
"ax",
")",
"return",
"ax"
] | Plot precision-recall curve.
Parameters
----------
y_true : array-like, shape = [n_samples, n_classes]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples, n_classes]
Target scores (estimator predictions).
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot | [
"Plot",
"precision",
"-",
"recall",
"curve",
"."
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/plot/precision_recall.py#L104-L136 |
edublancas/sklearn-evaluation | sklearn_evaluation/metrics.py | precision_at | def precision_at(y_true, y_score, proportion, ignore_nas=False):
'''
Calculates precision at a given proportion.
Only supports binary classification.
'''
# Sort scores in descending order
scores_sorted = np.sort(y_score)[::-1]
# Based on the proportion, get the index to split the data
# if value is negative, return 0
cutoff_index = max(int(len(y_true) * proportion) - 1, 0)
# Get the cutoff value
cutoff_value = scores_sorted[cutoff_index]
# Convert scores to binary, by comparing them with the cutoff value
scores_binary = np.array([int(y >= cutoff_value) for y in y_score])
# Calculate precision using sklearn function
if ignore_nas:
precision = __precision(y_true, scores_binary)
else:
precision = precision_score(y_true, scores_binary)
return precision, cutoff_value | python | def precision_at(y_true, y_score, proportion, ignore_nas=False):
'''
Calculates precision at a given proportion.
Only supports binary classification.
'''
# Sort scores in descending order
scores_sorted = np.sort(y_score)[::-1]
# Based on the proportion, get the index to split the data
# if value is negative, return 0
cutoff_index = max(int(len(y_true) * proportion) - 1, 0)
# Get the cutoff value
cutoff_value = scores_sorted[cutoff_index]
# Convert scores to binary, by comparing them with the cutoff value
scores_binary = np.array([int(y >= cutoff_value) for y in y_score])
# Calculate precision using sklearn function
if ignore_nas:
precision = __precision(y_true, scores_binary)
else:
precision = precision_score(y_true, scores_binary)
return precision, cutoff_value | [
"def",
"precision_at",
"(",
"y_true",
",",
"y_score",
",",
"proportion",
",",
"ignore_nas",
"=",
"False",
")",
":",
"# Sort scores in descending order",
"scores_sorted",
"=",
"np",
".",
"sort",
"(",
"y_score",
")",
"[",
":",
":",
"-",
"1",
"]",
"# Based on the proportion, get the index to split the data",
"# if value is negative, return 0",
"cutoff_index",
"=",
"max",
"(",
"int",
"(",
"len",
"(",
"y_true",
")",
"*",
"proportion",
")",
"-",
"1",
",",
"0",
")",
"# Get the cutoff value",
"cutoff_value",
"=",
"scores_sorted",
"[",
"cutoff_index",
"]",
"# Convert scores to binary, by comparing them with the cutoff value",
"scores_binary",
"=",
"np",
".",
"array",
"(",
"[",
"int",
"(",
"y",
">=",
"cutoff_value",
")",
"for",
"y",
"in",
"y_score",
"]",
")",
"# Calculate precision using sklearn function",
"if",
"ignore_nas",
":",
"precision",
"=",
"__precision",
"(",
"y_true",
",",
"scores_binary",
")",
"else",
":",
"precision",
"=",
"precision_score",
"(",
"y_true",
",",
"scores_binary",
")",
"return",
"precision",
",",
"cutoff_value"
] | Calculates precision at a given proportion.
Only supports binary classification. | [
"Calculates",
"precision",
"at",
"a",
"given",
"proportion",
".",
"Only",
"supports",
"binary",
"classification",
"."
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/metrics.py#L7-L29 |
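A worked example of `precision_at`; the arithmetic in the comment follows directly from the code above:

```python
import numpy as np
from sklearn_evaluation.metrics import precision_at

y_true = np.array([1, 1, 0, 1, 0, 0])
y_score = np.array([0.9, 0.8, 0.7, 0.6, 0.3, 0.2])

precision, cutoff = precision_at(y_true, y_score, proportion=0.5)
# top 50% -> cutoff 0.7, binarized scores [1, 1, 1, 0, 0, 0],
# precision = 2 true positives / 3 predicted positives = 0.666...
```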
edublancas/sklearn-evaluation | sklearn_evaluation/metrics.py | __precision | def __precision(y_true, y_pred):
'''
Precision metric tolerant to unlabeled data in y_true,
NA values are ignored for the precision calculation
'''
# make copies of the arrays to avoid modifying the original ones
y_true = np.copy(y_true)
y_pred = np.copy(y_pred)
# precision = tp/(tp+fp)
# True negatives do not affect precision value, so for every missing
# value in y_true, replace it with 0 and also replace the value
# in y_pred with 0
is_nan = np.isnan(y_true)
y_true[is_nan] = 0
y_pred[is_nan] = 0
precision = precision_score(y_true, y_pred)
return precision | python | def __precision(y_true, y_pred):
'''
Precision metric tolerant to unlabeled data in y_true,
NA values are ignored for the precision calculation
'''
# make copies of the arrays to avoid modifying the original ones
y_true = np.copy(y_true)
y_pred = np.copy(y_pred)
# precision = tp/(tp+fp)
# True negatives do not affect precision value, so for every missing
# value in y_true, replace it with 0 and also replace the value
# in y_pred with 0
is_nan = np.isnan(y_true)
y_true[is_nan] = 0
y_pred[is_nan] = 0
precision = precision_score(y_true, y_pred)
return precision | [
"def",
"__precision",
"(",
"y_true",
",",
"y_pred",
")",
":",
"# make copies of the arrays to avoid modifying the original ones",
"y_true",
"=",
"np",
".",
"copy",
"(",
"y_true",
")",
"y_pred",
"=",
"np",
".",
"copy",
"(",
"y_pred",
")",
"# precision = tp/(tp+fp)",
"# True nehatives do not affect precision value, so for every missing",
"# value in y_true, replace it with 0 and also replace the value",
"# in y_pred with 0",
"is_nan",
"=",
"np",
".",
"isnan",
"(",
"y_true",
")",
"y_true",
"[",
"is_nan",
"]",
"=",
"0",
"y_pred",
"[",
"is_nan",
"]",
"=",
"0",
"precision",
"=",
"precision_score",
"(",
"y_true",
",",
"y_pred",
")",
"return",
"precision"
] | Precision metric tolerant to unlabeled data in y_true,
NA values are ignored for the precision calculation | [
"Precision",
"metric",
"tolerant",
"to",
"unlabeled",
"data",
"in",
"y_true",
"NA",
"values",
"are",
"ignored",
"for",
"the",
"precision",
"calculation"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/metrics.py#L51-L68 |
edublancas/sklearn-evaluation | sklearn_evaluation/metrics.py | labels_at | def labels_at(y_true, y_score, proportion, normalize=False):
'''
Return the number of labels encountered in the top X proportion
'''
# Get indexes of scores sorted in descending order
indexes = np.argsort(y_score)[::-1]
# Sort true values in the same order
y_true_sorted = y_true[indexes]
# Grab top x proportion of true values
cutoff_index = max(int(len(y_true_sorted) * proportion) - 1, 0)
# add one to index to grab values including that index
y_true_top = y_true_sorted[:cutoff_index+1]
# Count the number of non-nas in the top x proportion
# we are returning a count so it should be an int
values = int((~np.isnan(y_true_top)).sum())
if normalize:
values = float(values)/(~np.isnan(y_true)).sum()
return values | python | def labels_at(y_true, y_score, proportion, normalize=False):
'''
Return the number of labels encountered in the top X proportion
'''
# Get indexes of scores sorted in descending order
indexes = np.argsort(y_score)[::-1]
# Sort true values in the same order
y_true_sorted = y_true[indexes]
# Grab top x proportion of true values
cutoff_index = max(int(len(y_true_sorted) * proportion) - 1, 0)
# add one to index to grab values including that index
y_true_top = y_true_sorted[:cutoff_index+1]
# Count the number of non-nas in the top x proportion
# we are returning a count so it should be an int
values = int((~np.isnan(y_true_top)).sum())
if normalize:
values = float(values)/(~np.isnan(y_true)).sum()
return values | [
"def",
"labels_at",
"(",
"y_true",
",",
"y_score",
",",
"proportion",
",",
"normalize",
"=",
"False",
")",
":",
"# Get indexes of scores sorted in descending order",
"indexes",
"=",
"np",
".",
"argsort",
"(",
"y_score",
")",
"[",
":",
":",
"-",
"1",
"]",
"# Sort true values in the same order",
"y_true_sorted",
"=",
"y_true",
"[",
"indexes",
"]",
"# Grab top x proportion of true values",
"cutoff_index",
"=",
"max",
"(",
"int",
"(",
"len",
"(",
"y_true_sorted",
")",
"*",
"proportion",
")",
"-",
"1",
",",
"0",
")",
"# add one to index to grab values including that index",
"y_true_top",
"=",
"y_true_sorted",
"[",
":",
"cutoff_index",
"+",
"1",
"]",
"# Count the number of non-nas in the top x proportion",
"# we are returning a count so it should be an int",
"values",
"=",
"int",
"(",
"(",
"~",
"np",
".",
"isnan",
"(",
"y_true_top",
")",
")",
".",
"sum",
"(",
")",
")",
"if",
"normalize",
":",
"values",
"=",
"float",
"(",
"values",
")",
"/",
"(",
"~",
"np",
".",
"isnan",
"(",
"y_true",
")",
")",
".",
"sum",
"(",
")",
"return",
"values"
] | Return the number of labels encountered in the top X proportion | [
"Return",
"the",
"number",
"of",
"labels",
"encountered",
"in",
"the",
"top",
"X",
"proportion"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/metrics.py#L100-L122 |
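A worked example of `labels_at` with partially labeled data, where NaN marks an unlabeled row:

```python
import numpy as np
from sklearn_evaluation.metrics import labels_at

y_true = np.array([1.0, np.nan, 0.0, np.nan, 1.0])
y_score = np.array([0.9, 0.8, 0.7, 0.4, 0.2])

labels_at(y_true, y_score, proportion=0.6)                  # -> 2
labels_at(y_true, y_score, proportion=0.6, normalize=True)  # -> 2/3
```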
edublancas/sklearn-evaluation | sklearn_evaluation/plot/validation_curve.py | validation_curve | def validation_curve(train_scores, test_scores, param_range, param_name=None,
semilogx=False, ax=None):
"""Plot a validation curve
Plot a metric vs hyperparameter values for the training and test set
Parameters
----------
train_scores : array-like
Scores for the training set
test_scores : array-like
Scores for the test set
param_range : array-like
Hyperparameter values used to generate the curve
param_name : str
Hyperparameter name
semilogx : bool
Sets a log scale on the x axis
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/validation_curve.py
"""
if ax is None:
ax = plt.gca()
if semilogx:
ax.set_xscale('log')
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
ax.set_title("Validation Curve")
ax.set_ylabel("Score mean")
if param_name:
ax.set_xlabel(param_name)
ax.plot(param_range, train_scores_mean, label="Training score", color="r")
ax.plot(param_range, test_scores_mean, label="Cross-validation score",
color="g")
ax.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
ax.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
ax.legend(loc="best")
ax.margins(0.05)
return ax | python | def validation_curve(train_scores, test_scores, param_range, param_name=None,
semilogx=False, ax=None):
"""Plot a validation curve
Plot a metric vs hyperparameter values for the training and test set
Parameters
----------
train_scores : array-like
Scores for the training set
test_scores : array-like
Scores for the test set
param_range : array-like
Hyperparameter values used to generate the curve
param_name : str
Hyperparameter name
semilogx : bool
Sets a log scale on the x axis
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/validation_curve.py
"""
if ax is None:
ax = plt.gca()
if semilogx:
ax.set_xscale('log')
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
ax.set_title("Validation Curve")
ax.set_ylabel("Score mean")
if param_name:
ax.set_xlabel(param_name)
ax.plot(param_range, train_scores_mean, label="Training score", color="r")
ax.plot(param_range, test_scores_mean, label="Cross-validation score",
color="g")
ax.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
ax.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
ax.legend(loc="best")
ax.margins(0.05)
return ax | [
"def",
"validation_curve",
"(",
"train_scores",
",",
"test_scores",
",",
"param_range",
",",
"param_name",
"=",
"None",
",",
"semilogx",
"=",
"False",
",",
"ax",
"=",
"None",
")",
":",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"if",
"semilogx",
":",
"ax",
".",
"set_xscale",
"(",
"'log'",
")",
"train_scores_mean",
"=",
"np",
".",
"mean",
"(",
"train_scores",
",",
"axis",
"=",
"1",
")",
"train_scores_std",
"=",
"np",
".",
"std",
"(",
"train_scores",
",",
"axis",
"=",
"1",
")",
"test_scores_mean",
"=",
"np",
".",
"mean",
"(",
"test_scores",
",",
"axis",
"=",
"1",
")",
"test_scores_std",
"=",
"np",
".",
"std",
"(",
"test_scores",
",",
"axis",
"=",
"1",
")",
"ax",
".",
"set_title",
"(",
"\"Validation Curve\"",
")",
"ax",
".",
"set_ylabel",
"(",
"\"Score mean\"",
")",
"if",
"param_name",
":",
"ax",
".",
"set_xlabel",
"(",
"param_name",
")",
"ax",
".",
"plot",
"(",
"param_range",
",",
"train_scores_mean",
",",
"label",
"=",
"\"Training score\"",
",",
"color",
"=",
"\"r\"",
")",
"ax",
".",
"plot",
"(",
"param_range",
",",
"test_scores_mean",
",",
"label",
"=",
"\"Cross-validation score\"",
",",
"color",
"=",
"\"g\"",
")",
"ax",
".",
"fill_between",
"(",
"param_range",
",",
"train_scores_mean",
"-",
"train_scores_std",
",",
"train_scores_mean",
"+",
"train_scores_std",
",",
"alpha",
"=",
"0.2",
",",
"color",
"=",
"\"r\"",
")",
"ax",
".",
"fill_between",
"(",
"param_range",
",",
"test_scores_mean",
"-",
"test_scores_std",
",",
"test_scores_mean",
"+",
"test_scores_std",
",",
"alpha",
"=",
"0.2",
",",
"color",
"=",
"\"g\"",
")",
"ax",
".",
"legend",
"(",
"loc",
"=",
"\"best\"",
")",
"ax",
".",
"margins",
"(",
"0.05",
")",
"return",
"ax"
] | Plot a validation curve
Plot a metric vs. hyperparameter values for the training and test set
Parameters
----------
train_scores : array-like
Scores for the training set
test_scores : array-like
Scores for the test set
param_range : array-like
Hyperparameter values used to generate the curve
param_name : str
Hyperparameter name
semilogx : bool
Sets a log scale on the x axis
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/validation_curve.py | [
"Plot",
"a",
"validation",
"curve"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/plot/validation_curve.py#L5-L66 |
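For reference, a hedged end-to-end sketch: scikit-learn's `sklearn.model_selection.validation_curve` produces the score arrays this plotting helper expects (the `from sklearn_evaluation import plot` entry point is assumed):

```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import validation_curve as sk_validation_curve
from sklearn.tree import DecisionTreeClassifier
from sklearn_evaluation import plot  # entry point assumed

X, y = load_iris(return_X_y=True)
param_range = np.array([1, 2, 4, 8])

# Each returned array has shape (len(param_range), n_cv_folds)
train_scores, test_scores = sk_validation_curve(
    DecisionTreeClassifier(), X, y,
    param_name='max_depth', param_range=param_range, cv=5)

plot.validation_curve(train_scores, test_scores, param_range,
                      param_name='max_depth', semilogx=False)
plt.show()
```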
edublancas/sklearn-evaluation | sklearn_evaluation/plot/classification.py | confusion_matrix | def confusion_matrix(y_true, y_pred, target_names=None, normalize=False,
cmap=None, ax=None):
"""
Plot confusion matrix.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_pred : array-like, shape = [n_samples]
Target predicted classes (estimator predictions).
target_names : list
List containing the names of the target classes. List must be in order
e.g. ``['Label for class 0', 'Label for class 1']``. If ``None``,
generic labels will be generated e.g. ``['Class 0', 'Class 1']``
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
normalize : bool
Normalize the confusion matrix
cmap : matplotlib Colormap
If ``None`` uses a modified version of matplotlib's OrRd colormap.
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/confusion_matrix.py
"""
if any((val is None for val in (y_true, y_pred))):
raise ValueError("y_true and y_pred are needed to plot confusion "
"matrix")
# calculate how many names you expect
values = set(y_true).union(set(y_pred))
expected_len = len(values)
if target_names and (expected_len != len(target_names)):
raise ValueError(('Data contains {} different values, but target'
' names contains {} values.'.format(expected_len,
len(target_names)
)))
# if the user didn't pass target_names, create generic ones
if not target_names:
values = list(values)
values.sort()
target_names = ['Class {}'.format(v) for v in values]
cm = sk_confusion_matrix(y_true, y_pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
np.set_printoptions(precision=2)
if ax is None:
ax = plt.gca()
# this (y, x) may sound counterintuitive. The reason is that
# in a matrix cell (i, j) is in row=i and col=j, translating that
# to an x, y plane (which matplotlib uses to plot), we need to use
# i as the y coordinate (how many steps down) and j as the x coordinate
# how many steps to the right.
for (y, x), v in np.ndenumerate(cm):
try:
label = '{:.2}'.format(v)
except:
label = v
ax.text(x, y, label, horizontalalignment='center',
verticalalignment='center')
if cmap is None:
cmap = default_heatmap()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
plt.colorbar(im, ax=ax)
tick_marks = np.arange(len(target_names))
ax.set_xticks(tick_marks)
ax.set_xticklabels(target_names)
ax.set_yticks(tick_marks)
ax.set_yticklabels(target_names)
title = 'Confusion matrix'
if normalize:
title += ' (normalized)'
ax.set_title(title)
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
return ax | python | def confusion_matrix(y_true, y_pred, target_names=None, normalize=False,
cmap=None, ax=None):
"""
Plot confusion matrix.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_pred : array-like, shape = [n_samples]
Target predicted classes (estimator predictions).
target_names : list
List containing the names of the target classes. List must be in order
e.g. ``['Label for class 0', 'Label for class 1']``. If ``None``,
generic labels will be generated e.g. ``['Class 0', 'Class 1']``
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
normalize : bool
Normalize the confusion matrix
cmap : matplotlib Colormap
If ``None`` uses a modified version of matplotlib's OrRd colormap.
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/confusion_matrix.py
"""
if any((val is None for val in (y_true, y_pred))):
raise ValueError("y_true and y_pred are needed to plot confusion "
"matrix")
# calculate how many names you expect
values = set(y_true).union(set(y_pred))
expected_len = len(values)
if target_names and (expected_len != len(target_names)):
raise ValueError(('Data contains {} different values, but target'
' names contains {} values.'.format(expected_len,
len(target_names)
)))
# if the user didn't pass target_names, create generic ones
if not target_names:
values = list(values)
values.sort()
target_names = ['Class {}'.format(v) for v in values]
cm = sk_confusion_matrix(y_true, y_pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
np.set_printoptions(precision=2)
if ax is None:
ax = plt.gca()
# this (y, x) may sound counterintuitive. The reason is that
# in a matrix cell (i, j) is in row=i and col=j, translating that
# to an x, y plane (which matplotlib uses to plot), we need to use
# i as the y coordinate (how many steps down) and j as the x coordinate
# how many steps to the right.
for (y, x), v in np.ndenumerate(cm):
try:
label = '{:.2}'.format(v)
except:
label = v
ax.text(x, y, label, horizontalalignment='center',
verticalalignment='center')
if cmap is None:
cmap = default_heatmap()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
plt.colorbar(im, ax=ax)
tick_marks = np.arange(len(target_names))
ax.set_xticks(tick_marks)
ax.set_xticklabels(target_names)
ax.set_yticks(tick_marks)
ax.set_yticklabels(target_names)
title = 'Confusion matrix'
if normalize:
title += ' (normalized)'
ax.set_title(title)
ax.set_ylabel('True label')
ax.set_xlabel('Predicted label')
return ax | [
"def",
"confusion_matrix",
"(",
"y_true",
",",
"y_pred",
",",
"target_names",
"=",
"None",
",",
"normalize",
"=",
"False",
",",
"cmap",
"=",
"None",
",",
"ax",
"=",
"None",
")",
":",
"if",
"any",
"(",
"(",
"val",
"is",
"None",
"for",
"val",
"in",
"(",
"y_true",
",",
"y_pred",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"y_true and y_pred are needed to plot confusion \"",
"\"matrix\"",
")",
"# calculate how many names you expect",
"values",
"=",
"set",
"(",
"y_true",
")",
".",
"union",
"(",
"set",
"(",
"y_pred",
")",
")",
"expected_len",
"=",
"len",
"(",
"values",
")",
"if",
"target_names",
"and",
"(",
"expected_len",
"!=",
"len",
"(",
"target_names",
")",
")",
":",
"raise",
"ValueError",
"(",
"(",
"'Data cointains {} different values, but target'",
"' names contains {} values.'",
".",
"format",
"(",
"expected_len",
",",
"len",
"(",
"target_names",
")",
")",
")",
")",
"# if the user didn't pass target_names, create generic ones",
"if",
"not",
"target_names",
":",
"values",
"=",
"list",
"(",
"values",
")",
"values",
".",
"sort",
"(",
")",
"target_names",
"=",
"[",
"'Class {}'",
".",
"format",
"(",
"v",
")",
"for",
"v",
"in",
"values",
"]",
"cm",
"=",
"sk_confusion_matrix",
"(",
"y_true",
",",
"y_pred",
")",
"if",
"normalize",
":",
"cm",
"=",
"cm",
".",
"astype",
"(",
"'float'",
")",
"/",
"cm",
".",
"sum",
"(",
"axis",
"=",
"1",
")",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"np",
".",
"set_printoptions",
"(",
"precision",
"=",
"2",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"# this (y, x) may sound counterintuitive. The reason is that",
"# in a matrix cell (i, j) is in row=i and col=j, translating that",
"# to an x, y plane (which matplotlib uses to plot), we need to use",
"# i as the y coordinate (how many steps down) and j as the x coordinate",
"# how many steps to the right.",
"for",
"(",
"y",
",",
"x",
")",
",",
"v",
"in",
"np",
".",
"ndenumerate",
"(",
"cm",
")",
":",
"try",
":",
"label",
"=",
"'{:.2}'",
".",
"format",
"(",
"v",
")",
"except",
":",
"label",
"=",
"v",
"ax",
".",
"text",
"(",
"x",
",",
"y",
",",
"label",
",",
"horizontalalignment",
"=",
"'center'",
",",
"verticalalignment",
"=",
"'center'",
")",
"if",
"cmap",
"is",
"None",
":",
"cmap",
"=",
"default_heatmap",
"(",
")",
"im",
"=",
"ax",
".",
"imshow",
"(",
"cm",
",",
"interpolation",
"=",
"'nearest'",
",",
"cmap",
"=",
"cmap",
")",
"plt",
".",
"colorbar",
"(",
"im",
",",
"ax",
"=",
"ax",
")",
"tick_marks",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"target_names",
")",
")",
"ax",
".",
"set_xticks",
"(",
"tick_marks",
")",
"ax",
".",
"set_xticklabels",
"(",
"target_names",
")",
"ax",
".",
"set_yticks",
"(",
"tick_marks",
")",
"ax",
".",
"set_yticklabels",
"(",
"target_names",
")",
"title",
"=",
"'Confusion matrix'",
"if",
"normalize",
":",
"title",
"+=",
"' (normalized)'",
"ax",
".",
"set_title",
"(",
"title",
")",
"ax",
".",
"set_ylabel",
"(",
"'True label'",
")",
"ax",
".",
"set_xlabel",
"(",
"'Predicted label'",
")",
"return",
"ax"
] | Plot confusion matrix.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_pred : array-like, shape = [n_samples]
Target predicted classes (estimator predictions).
target_names : list
List containing the names of the target classes. List must be in order
e.g. ``['Label for class 0', 'Label for class 1']``. If ``None``,
generic labels will be generated e.g. ``['Class 0', 'Class 1']``
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
normalize : bool
Normalize the confusion matrix
cmap : matplotlib Colormap
If ``None`` uses a modified version of matplotlib's OrRd colormap.
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/confusion_matrix.py | [
"Plot",
"confustion",
"matrix",
"."
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/plot/classification.py#L13-L105 |
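A short usage sketch of the plot (entry point assumed, labels illustrative); note `target_names` must have one entry per distinct class found in `y_true`/`y_pred`:

```python
import matplotlib.pyplot as plt
from sklearn_evaluation import plot  # entry point assumed

y_true = [0, 0, 1, 1, 2, 2, 2]
y_pred = [0, 1, 1, 1, 2, 2, 0]

# Three distinct classes, so three target names are required
plot.confusion_matrix(y_true, y_pred,
                      target_names=['setosa', 'versicolor', 'virginica'],
                      normalize=True)
plt.show()
```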
edublancas/sklearn-evaluation | sklearn_evaluation/plot/classification.py | feature_importances | def feature_importances(data, top_n=None, feature_names=None, ax=None):
"""
Get and order feature importances from a scikit-learn model
or from an array-like structure. If data is a scikit-learn model with
sub-estimators (e.g. RandomForest, AdaBoost) the function will compute the
standard deviation of each feature.
Parameters
----------
data : sklearn model or array-like structure
Object to get the data from.
top_n : int
Only get results for the top_n features.
feature_names : array-like
Feature names
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/feature_importances.py
"""
if data is None:
raise ValueError('data is needed to plot feature importances. '
'When plotting using the evaluator you need to pass '
'an estimator ')
# If no feature_names is provided, assign numbers
res = compute.feature_importances(data, top_n, feature_names)
# number of features returned
n_feats = len(res)
if ax is None:
ax = plt.gca()
ax.set_title("Feature importances")
try:
ax.bar(range(n_feats), res.importance, yerr=res.std_, color='red',
align="center")
except:
ax.bar(range(n_feats), res.importance, color='red',
align="center")
ax.set_xticks(range(n_feats))
ax.set_xticklabels(res.feature_name)
ax.set_xlim([-1, n_feats])
return ax | python | def feature_importances(data, top_n=None, feature_names=None, ax=None):
"""
Get and order feature importances from a scikit-learn model
or from an array-like structure. If data is a scikit-learn model with
sub-estimators (e.g. RandomForest, AdaBoost) the function will compute the
standard deviation of each feature.
Parameters
----------
data : sklearn model or array-like structure
Object to get the data from.
top_n : int
Only get results for the top_n features.
feature_names : array-like
Feature names
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/feature_importances.py
"""
if data is None:
raise ValueError('data is needed to plot feature importances. '
'When plotting using the evaluator you need to pass '
'an estimator ')
# If no feature_names is provided, assign numbers
res = compute.feature_importances(data, top_n, feature_names)
# number of features returned
n_feats = len(res)
if ax is None:
ax = plt.gca()
ax.set_title("Feature importances")
try:
ax.bar(range(n_feats), res.importance, yerr=res.std_, color='red',
align="center")
except:
ax.bar(range(n_feats), res.importance, color='red',
align="center")
ax.set_xticks(range(n_feats))
ax.set_xticklabels(res.feature_name)
ax.set_xlim([-1, n_feats])
return ax | [
"def",
"feature_importances",
"(",
"data",
",",
"top_n",
"=",
"None",
",",
"feature_names",
"=",
"None",
",",
"ax",
"=",
"None",
")",
":",
"if",
"data",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'data is needed to plot feature importances. '",
"'When plotting using the evaluator you need to pass '",
"'an estimator '",
")",
"# If no feature_names is provided, assign numbers",
"res",
"=",
"compute",
".",
"feature_importances",
"(",
"data",
",",
"top_n",
",",
"feature_names",
")",
"# number of features returned",
"n_feats",
"=",
"len",
"(",
"res",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"ax",
".",
"set_title",
"(",
"\"Feature importances\"",
")",
"try",
":",
"ax",
".",
"bar",
"(",
"range",
"(",
"n_feats",
")",
",",
"res",
".",
"importance",
",",
"yerr",
"=",
"res",
".",
"std_",
",",
"color",
"=",
"'red'",
",",
"align",
"=",
"\"center\"",
")",
"except",
":",
"ax",
".",
"bar",
"(",
"range",
"(",
"n_feats",
")",
",",
"res",
".",
"importance",
",",
"color",
"=",
"'red'",
",",
"align",
"=",
"\"center\"",
")",
"ax",
".",
"set_xticks",
"(",
"range",
"(",
"n_feats",
")",
")",
"ax",
".",
"set_xticklabels",
"(",
"res",
".",
"feature_name",
")",
"ax",
".",
"set_xlim",
"(",
"[",
"-",
"1",
",",
"n_feats",
"]",
")",
"return",
"ax"
] | Get and order feature importances from a scikit-learn model
or from an array-like structure. If data is a scikit-learn model with
sub-estimators (e.g. RandomForest, AdaBoost) the function will compute the
standard deviation of each feature.
Parameters
----------
data : sklearn model or array-like structure
Object to get the data from.
top_n : int
Only get results for the top_n features.
feature_names : array-like
Feature names
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/feature_importances.py | [
"Get",
"and",
"order",
"feature",
"importances",
"from",
"a",
"scikit",
"-",
"learn",
"model",
"or",
"from",
"an",
"array",
"-",
"like",
"structure",
".",
"If",
"data",
"is",
"a",
"scikit",
"-",
"learn",
"model",
"with",
"sub",
"-",
"estimators",
"(",
"e",
".",
"g",
".",
"RandomForest",
"AdaBoost",
")",
"the",
"function",
"will",
"compute",
"the",
"standard",
"deviation",
"of",
"each",
"feature",
"."
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/plot/classification.py#L113-L166 |
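A hedged sketch: with an ensemble such as `RandomForestClassifier`, the `try` branch above draws error bars from the per-sub-estimator standard deviation:

```python
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn_evaluation import plot  # entry point assumed

X, y = make_classification(n_samples=200, n_features=6, random_state=0)
model = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)

# top_n limits the number of bars; feature names are illustrative
plot.feature_importances(model, top_n=5,
                         feature_names=['f%d' % i for i in range(6)])
plt.show()
```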
edublancas/sklearn-evaluation | sklearn_evaluation/plot/classification.py | precision_at_proportions | def precision_at_proportions(y_true, y_score, ax=None):
"""
Plot precision values at different proportions.
Parameters
----------
y_true : array-like
Correct target values (ground truth).
y_score : array-like
Target scores (estimator predictions).
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
"""
if any((val is None for val in (y_true, y_score))):
raise ValueError('y_true and y_score are needed to plot precision at '
'proportions')
if ax is None:
ax = plt.gca()
y_score_is_vector = is_column_vector(y_score) or is_row_vector(y_score)
if not y_score_is_vector:
y_score = y_score[:, 1]
# Calculate points
proportions = [0.01 * i for i in range(1, 101)]
precs_and_cutoffs = [precision_at(y_true, y_score, p) for p in proportions]
precs, cutoffs = zip(*precs_and_cutoffs)
# Plot and set nice defaults for title and axis labels
ax.plot(proportions, precs)
ax.set_title('Precision at various proportions')
ax.set_ylabel('Precision')
ax.set_xlabel('Proportion')
ticks = [0.1 * i for i in range(1, 11)]
ax.set_xticks(ticks)
ax.set_xticklabels(ticks)
ax.set_yticks(ticks)
ax.set_yticklabels(ticks)
ax.set_ylim([0, 1.0])
ax.set_xlim([0, 1.0])
return ax | python | def precision_at_proportions(y_true, y_score, ax=None):
"""
Plot precision values at different proportions.
Parameters
----------
y_true : array-like
Correct target values (ground truth).
y_score : array-like
Target scores (estimator predictions).
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
"""
if any((val is None for val in (y_true, y_score))):
raise ValueError('y_true and y_score are needed to plot precision at '
'proportions')
if ax is None:
ax = plt.gca()
y_score_is_vector = is_column_vector(y_score) or is_row_vector(y_score)
if not y_score_is_vector:
y_score = y_score[:, 1]
# Calculate points
proportions = [0.01 * i for i in range(1, 101)]
precs_and_cutoffs = [precision_at(y_true, y_score, p) for p in proportions]
precs, cutoffs = zip(*precs_and_cutoffs)
# Plot and set nice defaults for title and axis labels
ax.plot(proportions, precs)
ax.set_title('Precision at various proportions')
ax.set_ylabel('Precision')
ax.set_xlabel('Proportion')
ticks = [0.1 * i for i in range(1, 11)]
ax.set_xticks(ticks)
ax.set_xticklabels(ticks)
ax.set_yticks(ticks)
ax.set_yticklabels(ticks)
ax.set_ylim([0, 1.0])
ax.set_xlim([0, 1.0])
return ax | [
"def",
"precision_at_proportions",
"(",
"y_true",
",",
"y_score",
",",
"ax",
"=",
"None",
")",
":",
"if",
"any",
"(",
"(",
"val",
"is",
"None",
"for",
"val",
"in",
"(",
"y_true",
",",
"y_score",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"'y_true and y_score are needed to plot precision at '",
"'proportions'",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"y_score_is_vector",
"=",
"is_column_vector",
"(",
"y_score",
")",
"or",
"is_row_vector",
"(",
"y_score",
")",
"if",
"not",
"y_score_is_vector",
":",
"y_score",
"=",
"y_score",
"[",
":",
",",
"1",
"]",
"# Calculate points",
"proportions",
"=",
"[",
"0.01",
"*",
"i",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"101",
")",
"]",
"precs_and_cutoffs",
"=",
"[",
"precision_at",
"(",
"y_true",
",",
"y_score",
",",
"p",
")",
"for",
"p",
"in",
"proportions",
"]",
"precs",
",",
"cutoffs",
"=",
"zip",
"(",
"*",
"precs_and_cutoffs",
")",
"# Plot and set nice defaults for title and axis labels",
"ax",
".",
"plot",
"(",
"proportions",
",",
"precs",
")",
"ax",
".",
"set_title",
"(",
"'Precision at various proportions'",
")",
"ax",
".",
"set_ylabel",
"(",
"'Precision'",
")",
"ax",
".",
"set_xlabel",
"(",
"'Proportion'",
")",
"ticks",
"=",
"[",
"0.1",
"*",
"i",
"for",
"i",
"in",
"range",
"(",
"1",
",",
"11",
")",
"]",
"ax",
".",
"set_xticks",
"(",
"ticks",
")",
"ax",
".",
"set_xticklabels",
"(",
"ticks",
")",
"ax",
".",
"set_yticks",
"(",
"ticks",
")",
"ax",
".",
"set_yticklabels",
"(",
"ticks",
")",
"ax",
".",
"set_ylim",
"(",
"[",
"0",
",",
"1.0",
"]",
")",
"ax",
".",
"set_xlim",
"(",
"[",
"0",
",",
"1.0",
"]",
")",
"return",
"ax"
] | Plot precision values at different proportions.
Parameters
----------
y_true : array-like
Correct target values (ground truth).
y_score : array-like
Target scores (estimator predictions).
ax : matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot | [
"Plot",
"precision",
"values",
"at",
"different",
"proportions",
"."
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/plot/classification.py#L169-L216 |
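A sketch with synthetic binary data (the direct import path is assumed); scores mildly correlated with the labels yield a precision curve that decays toward the base rate as the proportion grows:

```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn_evaluation.plot import precision_at_proportions  # path assumed

rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, size=500)
# Scores correlated with the labels, plus uniform noise
y_score = 0.5 * y_true + 0.5 * rng.uniform(size=500)

precision_at_proportions(y_true, y_score)
plt.show()
```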
edublancas/sklearn-evaluation | sklearn_evaluation/table.py | feature_importances | def feature_importances(data, top_n=None, feature_names=None):
"""
Get and order feature importances from a scikit-learn model
or from an array-like structure.
If data is a scikit-learn model with sub-estimators (e.g. RandomForest,
AdaBoost) the function will compute the standard deviation of each
feature.
Parameters
----------
data : sklearn model or array-like structure
Object to get the data from.
top_n : int
Only get results for the top_n features.
feature_names : array-like
Feature names
Returns
-------
table
Table object with the data. Columns are
feature_name, importance (`std_` only included for models with
sub-estimators)
"""
if data is None:
raise ValueError('data is needed to tabulate feature importances. '
'When plotting using the evaluator you need to pass '
'an estimator ')
res = compute.feature_importances(data, top_n, feature_names)
return Table(res, res.dtype.names) | python | def feature_importances(data, top_n=None, feature_names=None):
"""
Get and order feature importances from a scikit-learn model
or from an array-like structure.
If data is a scikit-learn model with sub-estimators (e.g. RandomForest,
AdaBoost) the function will compute the standard deviation of each
feature.
Parameters
----------
data : sklearn model or array-like structure
Object to get the data from.
top_n : int
Only get results for the top_n features.
feature_names : array-like
Feature names
Returns
-------
table
Table object with the data. Columns are
feature_name, importance (`std_` only included for models with
sub-estimators)
"""
if data is None:
raise ValueError('data is needed to tabulate feature importances. '
'When plotting using the evaluator you need to pass '
'an estimator ')
res = compute.feature_importances(data, top_n, feature_names)
return Table(res, res.dtype.names) | [
"def",
"feature_importances",
"(",
"data",
",",
"top_n",
"=",
"None",
",",
"feature_names",
"=",
"None",
")",
":",
"if",
"data",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"'data is needed to tabulate feature importances. '",
"'When plotting using the evaluator you need to pass '",
"'an estimator '",
")",
"res",
"=",
"compute",
".",
"feature_importances",
"(",
"data",
",",
"top_n",
",",
"feature_names",
")",
"return",
"Table",
"(",
"res",
",",
"res",
".",
"dtype",
".",
"names",
")"
] | Get and order feature importances from a scikit-learn model
or from an array-like structure.
If data is a scikit-learn model with sub-estimators (e.g. RandomForest,
AdaBoost) the function will compute the standard deviation of each
feature.
Parameters
----------
data : sklearn model or array-like structure
Object to get the data from.
top_n : int
Only get results for the top_n features.
feature_names : array-like
Feature names
Returns
-------
table
Table object with the data. Columns are
feature_name, importance (`std_` only included for models with
sub-estimators) | [
"Get",
"and",
"order",
"feature",
"importances",
"from",
"a",
"scikit",
"-",
"learn",
"model",
"or",
"from",
"an",
"array",
"-",
"like",
"structure",
"."
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/table.py#L30-L62 |
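The tabular counterpart of the importance plot; a hedged sketch (the `table` entry point and a printable `Table` repr are assumed):

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier
from sklearn_evaluation import table  # entry point assumed

X, y = make_classification(n_samples=100, n_features=4, random_state=1)
model = AdaBoostClassifier(n_estimators=20, random_state=1).fit(X, y)

# Columns: feature_name, importance, std_ (std_ appears because
# AdaBoost exposes sub-estimators)
t = table.feature_importances(model, feature_names=['a', 'b', 'c', 'd'])
print(t)
```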
edublancas/sklearn-evaluation | docs/sphinxext/ipython_sphinxext/ipython_directive.py | block_parser | def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# Here is where we assume there is, at most, one decorator.
# Might need to rethink this.
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
# The default ipython_rgx* treat the space following the colon as optional.
# However, if the space is there we must consume it or code
# employing the cython_magic extension will fail to execute.
#
# This works with the default ipython_rgx* patterns,
# If you modify them, YMMV.
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block | python | def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# Here is where we assume there is, at most, one decorator.
# Might need to rethink this.
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
# The default ipython_rgx* treat the space following the colon as optional.
# However, if the space is there we must consume it or code
# employing the cython_magic extension will fail to execute.
#
# This works with the default ipython_rgx* patterns,
# If you modify them, YMMV.
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block | [
"def",
"block_parser",
"(",
"part",
",",
"rgxin",
",",
"rgxout",
",",
"fmtin",
",",
"fmtout",
")",
":",
"block",
"=",
"[",
"]",
"lines",
"=",
"part",
".",
"split",
"(",
"'\\n'",
")",
"N",
"=",
"len",
"(",
"lines",
")",
"i",
"=",
"0",
"decorator",
"=",
"None",
"while",
"1",
":",
"if",
"i",
"==",
"N",
":",
"# nothing left to parse -- the last line",
"break",
"line",
"=",
"lines",
"[",
"i",
"]",
"i",
"+=",
"1",
"line_stripped",
"=",
"line",
".",
"strip",
"(",
")",
"if",
"line_stripped",
".",
"startswith",
"(",
"'#'",
")",
":",
"block",
".",
"append",
"(",
"(",
"COMMENT",
",",
"line",
")",
")",
"continue",
"if",
"line_stripped",
".",
"startswith",
"(",
"'@'",
")",
":",
"# Here is where we assume there is, at most, one decorator.",
"# Might need to rethink this.",
"decorator",
"=",
"line_stripped",
"continue",
"# does this look like an input line?",
"matchin",
"=",
"rgxin",
".",
"match",
"(",
"line",
")",
"if",
"matchin",
":",
"lineno",
",",
"inputline",
"=",
"int",
"(",
"matchin",
".",
"group",
"(",
"1",
")",
")",
",",
"matchin",
".",
"group",
"(",
"2",
")",
"# the ....: continuation string",
"continuation",
"=",
"' %s:'",
"%",
"''",
".",
"join",
"(",
"[",
"'.'",
"]",
"*",
"(",
"len",
"(",
"str",
"(",
"lineno",
")",
")",
"+",
"2",
")",
")",
"Nc",
"=",
"len",
"(",
"continuation",
")",
"# input lines can continue on for more than one line, if",
"# we have a '\\' line continuation char or a function call",
"# echo line 'print'. The input line can only be",
"# terminated by the end of the block or an output line, so",
"# we parse out the rest of the input line if it is",
"# multiline as well as any echo text",
"rest",
"=",
"[",
"]",
"while",
"i",
"<",
"N",
":",
"# look ahead; if the next line is blank, or a comment, or",
"# an output line, we're done",
"nextline",
"=",
"lines",
"[",
"i",
"]",
"matchout",
"=",
"rgxout",
".",
"match",
"(",
"nextline",
")",
"#print \"nextline=%s, continuation=%s, starts=%s\"%(nextline, continuation, nextline.startswith(continuation))",
"if",
"matchout",
"or",
"nextline",
".",
"startswith",
"(",
"'#'",
")",
":",
"break",
"elif",
"nextline",
".",
"startswith",
"(",
"continuation",
")",
":",
"# The default ipython_rgx* treat the space following the colon as optional.",
"# However, If the space is there we must consume it or code",
"# employing the cython_magic extension will fail to execute.",
"#",
"# This works with the default ipython_rgx* patterns,",
"# If you modify them, YMMV.",
"nextline",
"=",
"nextline",
"[",
"Nc",
":",
"]",
"if",
"nextline",
"and",
"nextline",
"[",
"0",
"]",
"==",
"' '",
":",
"nextline",
"=",
"nextline",
"[",
"1",
":",
"]",
"inputline",
"+=",
"'\\n'",
"+",
"nextline",
"else",
":",
"rest",
".",
"append",
"(",
"nextline",
")",
"i",
"+=",
"1",
"block",
".",
"append",
"(",
"(",
"INPUT",
",",
"(",
"decorator",
",",
"inputline",
",",
"'\\n'",
".",
"join",
"(",
"rest",
")",
")",
")",
")",
"continue",
"# if it looks like an output line grab all the text to the end",
"# of the block",
"matchout",
"=",
"rgxout",
".",
"match",
"(",
"line",
")",
"if",
"matchout",
":",
"lineno",
",",
"output",
"=",
"int",
"(",
"matchout",
".",
"group",
"(",
"1",
")",
")",
",",
"matchout",
".",
"group",
"(",
"2",
")",
"if",
"i",
"<",
"N",
"-",
"1",
":",
"output",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"output",
"]",
"+",
"lines",
"[",
"i",
":",
"]",
")",
"block",
".",
"append",
"(",
"(",
"OUTPUT",
",",
"output",
")",
")",
"break",
"return",
"block"
] | part is a string of ipython text, comprised of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line | [
"part",
"is",
"a",
"string",
"of",
"ipython",
"text",
"comprised",
"of",
"at",
"most",
"one",
"input",
"one",
"output",
"comments",
"and",
"blank",
"lines",
".",
"The",
"block",
"parser",
"parses",
"the",
"text",
"into",
"a",
"list",
"of",
"::"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/docs/sphinxext/ipython_sphinxext/ipython_directive.py#L165-L266 |
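To make the parsing contract concrete, a self-contained sketch that matches prompt lines with regexes of the shape `block_parser` expects; the exact `ipython_rgx*` patterns in the extension may differ, so these are assumptions:

```python
import re

# Assumed prompt patterns: group 1 is the execution count, group 2 the code
rgxin = re.compile(r'In \[(\d+)\]:\s?(.*)\s*')
rgxout = re.compile(r'Out\[(\d+)\]:\s?(.*)\s*')

part = """# a comment line
In [1]: x = 2 + 2

In [2]: x
Out[2]: 4"""

for line in part.split('\n'):
    m_in, m_out = rgxin.match(line), rgxout.match(line)
    if line.strip().startswith('#'):
        print('COMMENT', line)
    elif m_in:
        print('INPUT  ', m_in.groups())   # e.g. ('1', 'x = 2 + 2')
    elif m_out:
        print('OUTPUT ', m_out.groups())  # ('2', '4')
```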
edublancas/sklearn-evaluation | docs/sphinxext/ipython_sphinxext/ipython_directive.py | EmbeddedSphinxShell.process_block | def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
found_input = False
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
found_input = True
(out_data, input_lines, output, is_doctest,
decorator, image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
if not found_input:
TAB = ' ' * 4
linenumber = 0
source = 'Unavailable'
content = 'Unavailable'
if self.directive:
linenumber = self.directive.state.document.current_line
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
e = ('\n\nInvalid block: Block contains an output prompt '
'without an input prompt.\n\n'
'Document source: {0}\n\n'
'Content begins at line {1}: \n\n{2}\n\n'
'Problematic block within content: \n\n{TAB}{3}\n\n')
e = e.format(source, linenumber, content, block, TAB=TAB)
# Write, rather than include in exception, since Sphinx
# will truncate tracebacks.
sys.stdout.write(e)
raise RuntimeError('An invalid block was detected.')
out_data = \
self.process_output(data, output_prompt, input_lines,
output, is_doctest, decorator,
image_file)
if out_data:
# Then there was user submitted output in verbatim mode.
# We need to remove the last element of `ret` that was
# added in `process_input`, as it is '' and would introduce
# an undesirable newline.
assert(ret[-1] == '')
del ret[-1]
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive | python | def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
found_input = False
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
found_input = True
(out_data, input_lines, output, is_doctest,
decorator, image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
if not found_input:
TAB = ' ' * 4
linenumber = 0
source = 'Unavailable'
content = 'Unavailable'
if self.directive:
linenumber = self.directive.state.document.current_line
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
e = ('\n\nInvalid block: Block contains an output prompt '
'without an input prompt.\n\n'
'Document source: {0}\n\n'
'Content begins at line {1}: \n\n{2}\n\n'
'Problematic block within content: \n\n{TAB}{3}\n\n')
e = e.format(source, linenumber, content, block, TAB=TAB)
# Write, rather than include in exception, since Sphinx
# will truncate tracebacks.
sys.stdout.write(e)
raise RuntimeError('An invalid block was detected.')
out_data = \
self.process_output(data, output_prompt, input_lines,
output, is_doctest, decorator,
image_file)
if out_data:
# Then there was user submitted output in verbatim mode.
# We need to remove the last element of `ret` that was
# added in `process_input`, as it is '' and would introduce
# an undesirable newline.
assert(ret[-1] == '')
del ret[-1]
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive | [
"def",
"process_block",
"(",
"self",
",",
"block",
")",
":",
"ret",
"=",
"[",
"]",
"output",
"=",
"None",
"input_lines",
"=",
"None",
"lineno",
"=",
"self",
".",
"IP",
".",
"execution_count",
"input_prompt",
"=",
"self",
".",
"promptin",
"%",
"lineno",
"output_prompt",
"=",
"self",
".",
"promptout",
"%",
"lineno",
"image_file",
"=",
"None",
"image_directive",
"=",
"None",
"found_input",
"=",
"False",
"for",
"token",
",",
"data",
"in",
"block",
":",
"if",
"token",
"==",
"COMMENT",
":",
"out_data",
"=",
"self",
".",
"process_comment",
"(",
"data",
")",
"elif",
"token",
"==",
"INPUT",
":",
"found_input",
"=",
"True",
"(",
"out_data",
",",
"input_lines",
",",
"output",
",",
"is_doctest",
",",
"decorator",
",",
"image_file",
",",
"image_directive",
")",
"=",
"self",
".",
"process_input",
"(",
"data",
",",
"input_prompt",
",",
"lineno",
")",
"elif",
"token",
"==",
"OUTPUT",
":",
"if",
"not",
"found_input",
":",
"TAB",
"=",
"' '",
"*",
"4",
"linenumber",
"=",
"0",
"source",
"=",
"'Unavailable'",
"content",
"=",
"'Unavailable'",
"if",
"self",
".",
"directive",
":",
"linenumber",
"=",
"self",
".",
"directive",
".",
"state",
".",
"document",
".",
"current_line",
"source",
"=",
"self",
".",
"directive",
".",
"state",
".",
"document",
".",
"current_source",
"content",
"=",
"self",
".",
"directive",
".",
"content",
"# Add tabs and join into a single string.",
"content",
"=",
"'\\n'",
".",
"join",
"(",
"[",
"TAB",
"+",
"line",
"for",
"line",
"in",
"content",
"]",
")",
"e",
"=",
"(",
"'\\n\\nInvalid block: Block contains an output prompt '",
"'without an input prompt.\\n\\n'",
"'Document source: {0}\\n\\n'",
"'Content begins at line {1}: \\n\\n{2}\\n\\n'",
"'Problematic block within content: \\n\\n{TAB}{3}\\n\\n'",
")",
"e",
"=",
"e",
".",
"format",
"(",
"source",
",",
"linenumber",
",",
"content",
",",
"block",
",",
"TAB",
"=",
"TAB",
")",
"# Write, rather than include in exception, since Sphinx",
"# will truncate tracebacks.",
"sys",
".",
"stdout",
".",
"write",
"(",
"e",
")",
"raise",
"RuntimeError",
"(",
"'An invalid block was detected.'",
")",
"out_data",
"=",
"self",
".",
"process_output",
"(",
"data",
",",
"output_prompt",
",",
"input_lines",
",",
"output",
",",
"is_doctest",
",",
"decorator",
",",
"image_file",
")",
"if",
"out_data",
":",
"# Then there was user submitted output in verbatim mode.",
"# We need to remove the last element of `ret` that was",
"# added in `process_input`, as it is '' and would introduce",
"# an undesirable newline.",
"assert",
"(",
"ret",
"[",
"-",
"1",
"]",
"==",
"''",
")",
"del",
"ret",
"[",
"-",
"1",
"]",
"if",
"out_data",
":",
"ret",
".",
"extend",
"(",
"out_data",
")",
"# save the image files",
"if",
"image_file",
"is",
"not",
"None",
":",
"self",
".",
"save_image",
"(",
"image_file",
")",
"return",
"ret",
",",
"image_directive"
] | process block from the block_parser and return a list of processed lines | [
"process",
"block",
"from",
"the",
"block_parser",
"and",
"return",
"a",
"list",
"of",
"processed",
"lines"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/docs/sphinxext/ipython_sphinxext/ipython_directive.py#L640-L708 |
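A toy walk over a parsed block, mirroring how `process_block` dispatches on token type and builds numbered prompts; the token constants and prompt templates below are stand-ins for the module-level values:

```python
# Stand-ins for the module-level sentinels and IPython prompt templates
COMMENT, INPUT, OUTPUT = 'comment', 'input', 'output'
promptin, promptout = 'In [%d]: ', 'Out[%d]: '
lineno = 1

block = [(COMMENT, '# squares'),
         (INPUT, (None, 'x = 2 ** 2', '')),  # (decorator, input_line, rest)
         (OUTPUT, '4')]

for token, data in block:
    if token == COMMENT:
        print(data)
    elif token == INPUT:
        print(promptin % lineno + data[1])
    elif token == OUTPUT:
        print(promptout % lineno + data)
```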
edublancas/sklearn-evaluation | sklearn_evaluation/plot/grid_search.py | grid_search | def grid_search(grid_scores, change, subset=None, kind='line', cmap=None,
ax=None):
"""
Plot results from a sklearn grid search by changing two parameters at most.
Parameters
----------
grid_scores : list of named tuples
Results from a sklearn grid search (get them using the
`grid_scores_` parameter)
change : str or iterable with len<=2
Parameter to change
subset : dictionary-like
parameter-value(s) pairs to subset from grid_scores.
(e.g. ``{'n_estimators': [1, 10]}``), if None all combinations will be
used.
kind : ['line', 'bar']
This only applies when change is a single parameter. Changes the
type of plot
cmap : matplotlib Colormap
This only applies when change are two parameters. Colormap used for
the matrix. If None uses a modified version of matplotlib's OrRd
colormap.
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/grid_search.py
"""
if change is None:
raise ValueError(('change can\'t be None, you need to select at least'
' one value to make the plot.'))
if ax is None:
ax = plt.gca()
if cmap is None:
cmap = default_heatmap()
if isinstance(change, string_types) or len(change) == 1:
return _grid_search_single(grid_scores, change, subset, kind, ax)
elif len(change) == 2:
return _grid_search_double(grid_scores, change, subset, cmap, ax)
else:
raise ValueError('change must have length 1 or 2 or be a string') | python | def grid_search(grid_scores, change, subset=None, kind='line', cmap=None,
ax=None):
"""
Plot results from a sklearn grid search by changing two parameters at most.
Parameters
----------
grid_scores : list of named tuples
Results from a sklearn grid search (get them using the
`grid_scores_` parameter)
change : str or iterable with len<=2
Parameter to change
subset : dictionary-like
parameter-value(s) pairs to subset from grid_scores.
(e.g. ``{'n_estimators': [1, 10]}``), if None all combinations will be
used.
kind : ['line', 'bar']
This only applies when change is a single parameter. Changes the
type of plot
cmap : matplotlib Colormap
This only applies when change are two parameters. Colormap used for
the matrix. If None uses a modified version of matplotlib's OrRd
colormap.
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/grid_search.py
"""
if change is None:
raise ValueError(('change can\'t be None, you need to select at least'
' one value to make the plot.'))
if ax is None:
ax = plt.gca()
if cmap is None:
cmap = default_heatmap()
if isinstance(change, string_types) or len(change) == 1:
return _grid_search_single(grid_scores, change, subset, kind, ax)
elif len(change) == 2:
return _grid_search_double(grid_scores, change, subset, cmap, ax)
else:
raise ValueError('change must have length 1 or 2 or be a string') | [
"def",
"grid_search",
"(",
"grid_scores",
",",
"change",
",",
"subset",
"=",
"None",
",",
"kind",
"=",
"'line'",
",",
"cmap",
"=",
"None",
",",
"ax",
"=",
"None",
")",
":",
"if",
"change",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"(",
"'change can\\'t be None, you need to select at least'",
"' one value to make the plot.'",
")",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"if",
"cmap",
"is",
"None",
":",
"cmap",
"=",
"default_heatmap",
"(",
")",
"if",
"isinstance",
"(",
"change",
",",
"string_types",
")",
"or",
"len",
"(",
"change",
")",
"==",
"1",
":",
"return",
"_grid_search_single",
"(",
"grid_scores",
",",
"change",
",",
"subset",
",",
"kind",
",",
"ax",
")",
"elif",
"len",
"(",
"change",
")",
"==",
"2",
":",
"return",
"_grid_search_double",
"(",
"grid_scores",
",",
"change",
",",
"subset",
",",
"cmap",
",",
"ax",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'change must have length 1 or 2 or be a string'",
")"
] | Plot results from a sklearn grid search by changing two parameters at most.
Parameters
----------
grid_scores : list of named tuples
Results from a sklearn grid search (get them using the
`grid_scores_` parameter)
change : str or iterable with len<=2
Parameter to change
subset : dictionary-like
parameter-value(s) pairs to subset from grid_scores.
(e.g. ``{'n_estimators': [1, 10]}``), if None all combinations will be
used.
kind : ['line', 'bar']
This only applies when change is a single parameter. Changes the
type of plot
cmap : matplotlib Colormap
This only applies when change are two parameters. Colormap used for
the matrix. If None uses a modified version of matplotlib's OrRd
colormap.
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/grid_search.py | [
"Plot",
"results",
"from",
"a",
"sklearn",
"grid",
"search",
"by",
"changing",
"two",
"parameters",
"at",
"most",
"."
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/plot/grid_search.py#L12-L64 |
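A hedged sketch of calling the plot without running a real grid search: the entries follow the field names of the legacy `grid_scores_` named tuples the docstring references (newer scikit-learn exposes `cv_results_` instead, so the tuples are faked here):

```python
import numpy as np
import matplotlib.pyplot as plt
from collections import namedtuple
from sklearn_evaluation import plot  # entry point assumed

Score = namedtuple('Score', ['parameters', 'mean_validation_score',
                             'cv_validation_scores'])
grid_scores = [
    Score({'n_estimators': 1}, 0.81, np.array([0.80, 0.82])),
    Score({'n_estimators': 10}, 0.90, np.array([0.89, 0.91])),
    Score({'n_estimators': 50}, 0.92, np.array([0.91, 0.93])),
]

plot.grid_search(grid_scores, change='n_estimators', kind='bar')
plt.show()
```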
edublancas/sklearn-evaluation | sklearn_evaluation/evaluator.py | ClassifierEvaluator.confusion_matrix | def confusion_matrix(self):
"""Confusion matrix plot
"""
return plot.confusion_matrix(self.y_true, self.y_pred,
self.target_names, ax=_gen_ax()) | python | def confusion_matrix(self):
"""Confusion matrix plot
"""
return plot.confusion_matrix(self.y_true, self.y_pred,
self.target_names, ax=_gen_ax()) | [
"def",
"confusion_matrix",
"(",
"self",
")",
":",
"return",
"plot",
".",
"confusion_matrix",
"(",
"self",
".",
"y_true",
",",
"self",
".",
"y_pred",
",",
"self",
".",
"target_names",
",",
"ax",
"=",
"_gen_ax",
"(",
")",
")"
] | Confusion matrix plot | [
"Confusion",
"matrix",
"plot"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/evaluator.py#L85-L89 |
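This accessor and the ones that follow (roc, precision_recall, feature_importances, precision_at_proportions) each draw onto a fresh axis. A hedged construction sketch; the keyword names are inferred from the attributes used above, and whether the accessors are plain methods or properties is not visible here, so property access is assumed:

```python
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn_evaluation import ClassifierEvaluator  # top-level export assumed

X, y = make_classification(n_samples=300, random_state=0)
model = RandomForestClassifier(random_state=0).fit(X, y)

ce = ClassifierEvaluator(estimator=model, y_true=y,
                         y_pred=model.predict(X),
                         y_score=model.predict_proba(X),
                         estimator_name='rf')
ax = ce.confusion_matrix  # matplotlib Axes (property access assumed)
```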
edublancas/sklearn-evaluation | sklearn_evaluation/evaluator.py | ClassifierEvaluator.roc | def roc(self):
"""ROC plot
"""
return plot.roc(self.y_true, self.y_score, ax=_gen_ax()) | python | def roc(self):
"""ROC plot
"""
return plot.roc(self.y_true, self.y_score, ax=_gen_ax()) | [
"def",
"roc",
"(",
"self",
")",
":",
"return",
"plot",
".",
"roc",
"(",
"self",
".",
"y_true",
",",
"self",
".",
"y_score",
",",
"ax",
"=",
"_gen_ax",
"(",
")",
")"
] | ROC plot | [
"ROC",
"plot"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/evaluator.py#L92-L95 |
edublancas/sklearn-evaluation | sklearn_evaluation/evaluator.py | ClassifierEvaluator.precision_recall | def precision_recall(self):
"""Precision-recall plot
"""
return plot.precision_recall(self.y_true, self.y_score, ax=_gen_ax()) | python | def precision_recall(self):
"""Precision-recall plot
"""
return plot.precision_recall(self.y_true, self.y_score, ax=_gen_ax()) | [
"def",
"precision_recall",
"(",
"self",
")",
":",
"return",
"plot",
".",
"precision_recall",
"(",
"self",
".",
"y_true",
",",
"self",
".",
"y_score",
",",
"ax",
"=",
"_gen_ax",
"(",
")",
")"
] | Precision-recall plot | [
"Precision",
"-",
"recall",
"plot"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/evaluator.py#L98-L101 |
edublancas/sklearn-evaluation | sklearn_evaluation/evaluator.py | ClassifierEvaluator.feature_importances | def feature_importances(self):
"""Feature importances plot
"""
return plot.feature_importances(self.estimator,
feature_names=self.feature_names,
ax=_gen_ax()) | python | def feature_importances(self):
"""Feature importances plot
"""
return plot.feature_importances(self.estimator,
feature_names=self.feature_names,
ax=_gen_ax()) | [
"def",
"feature_importances",
"(",
"self",
")",
":",
"return",
"plot",
".",
"feature_importances",
"(",
"self",
".",
"estimator",
",",
"feature_names",
"=",
"self",
".",
"feature_names",
",",
"ax",
"=",
"_gen_ax",
"(",
")",
")"
] | Feature importances plot | [
"Feature",
"importances",
"plot"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/evaluator.py#L104-L109 |
edublancas/sklearn-evaluation | sklearn_evaluation/evaluator.py | ClassifierEvaluator.feature_importances_table | def feature_importances_table(self):
"""Feature importances table
"""
from . import table
return table.feature_importances(self.estimator,
feature_names=self.feature_names) | python | def feature_importances_table(self):
"""Feature importances table
"""
from . import table
return table.feature_importances(self.estimator,
feature_names=self.feature_names) | [
"def",
"feature_importances_table",
"(",
"self",
")",
":",
"from",
".",
"import",
"table",
"return",
"table",
".",
"feature_importances",
"(",
"self",
".",
"estimator",
",",
"feature_names",
"=",
"self",
".",
"feature_names",
")"
] | Feature importances table | [
"Feature",
"importances",
"table"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/evaluator.py#L112-L118 |
edublancas/sklearn-evaluation | sklearn_evaluation/evaluator.py | ClassifierEvaluator.precision_at_proportions | def precision_at_proportions(self):
"""Precision at proportions plot
"""
return plot.precision_at_proportions(self.y_true, self.y_score,
ax=_gen_ax()) | python | def precision_at_proportions(self):
"""Precision at proportions plot
"""
return plot.precision_at_proportions(self.y_true, self.y_score,
ax=_gen_ax()) | [
"def",
"precision_at_proportions",
"(",
"self",
")",
":",
"return",
"plot",
".",
"precision_at_proportions",
"(",
"self",
".",
"y_true",
",",
"self",
".",
"y_score",
",",
"ax",
"=",
"_gen_ax",
"(",
")",
")"
] | Precision at proportions plot | [
"Precision",
"at",
"proportions",
"plot"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/evaluator.py#L121-L125 |
edublancas/sklearn-evaluation | sklearn_evaluation/evaluator.py | ClassifierEvaluator.generate_report | def generate_report(self, template, path=None, style=None):
"""
Generate HTML report
Parameters
----------
template : markdown-formatted string or path to the template
file used for rendering the report. Any attribute of this
object can be included in the report using the {tag} format.
e.g.'# Report{estimator_name}{roc}{precision_recall}'.
Apart from every attribute, you can also use {date} and {date_utc}
tags to include the date for the report generation using local
and UTC timezones respectively.
path : str
Path to save the HTML report. If None, the function will return
the HTML code.
style: str
Path to a css file to apply style to the report. If None, no
style will be applied
Returns
-------
report: str
Returns the contents of the report if path is None.
"""
from .report import generate
return generate(self, template, path, style) | python | def generate_report(self, template, path=None, style=None):
"""
Generate HTML report
Parameters
----------
template : markdown-formatted string or path to the template
file used for rendering the report. Any attribute of this
object can be included in the report using the {tag} format.
e.g.'# Report{estimator_name}{roc}{precision_recall}'.
Apart from every attribute, you can also use {date} and {date_utc}
tags to include the date for the report generation using local
and UTC timezones respectively.
path : str
Path to save the HTML report. If None, the function will return
the HTML code.
style: str
Path to a css file to apply style to the report. If None, no
style will be applied
Returns
-------
report: str
Returns the contents of the report if path is None.
"""
from .report import generate
return generate(self, template, path, style) | [
"def",
"generate_report",
"(",
"self",
",",
"template",
",",
"path",
"=",
"None",
",",
"style",
"=",
"None",
")",
":",
"from",
".",
"report",
"import",
"generate",
"return",
"generate",
"(",
"self",
",",
"template",
",",
"path",
",",
"style",
")"
] | Generate HTML report
Parameters
----------
template : markdown-formatted string or path to the template
file used for rendering the report. Any attribute of this
object can be included in the report using the {tag} format.
e.g.'# Report{estimator_name}{roc}{precision_recall}'.
Apart from every attribute, you can also use {date} and {date_utc}
tags to include the date for the report generation using local
and UTC timezones respectively.
path : str
Path to save the HTML report. If None, the function will return
the HTML code.
style: str
Path to a css file to apply style to the report. If None, no
style will be applied
Returns
-------
report: str
Returns the contents of the report if path is None. | [
"Generate",
"HTML",
"report"
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/evaluator.py#L127-L157 |
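A template sketch for `generate_report`, reusing the `ce` evaluator from the sketch above; per the docstring, any evaluator attribute plus `{date}`/`{date_utc}` can appear as a tag:

```python
template = '''# Classification report - {estimator_name}
Generated on {date}

{confusion_matrix}
{roc}
{precision_recall}
'''

# Writes report.html; returns the HTML string instead if path is None
ce.generate_report(template, path='report.html')
```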
edublancas/sklearn-evaluation | sklearn_evaluation/plot/roc.py | roc | def roc(y_true, y_score, ax=None):
"""
Plot ROC curve.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples] or [n_samples, 2] for binary
classification or [n_samples, n_classes] for multiclass
Target scores (estimator predictions).
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Notes
-----
It is assumed that the y_score parameter columns are in order. For example,
if ``y_true = [2, 2, 1, 0, 0, 1, 2]``, then the first column in y_score
must contain the scores for class 0, second column for class 1 and so on.
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/roc.py
"""
if any((val is None for val in (y_true, y_score))):
raise ValueError("y_true and y_score are needed to plot ROC")
if ax is None:
ax = plt.gca()
# get the number of classes based on the shape of y_score
y_score_is_vector = is_column_vector(y_score) or is_row_vector(y_score)
if y_score_is_vector:
n_classes = 2
else:
_, n_classes = y_score.shape
# check data shape?
if n_classes > 2:
# convert y_true to binary format
y_true_bin = label_binarize(y_true, classes=np.unique(y_true))
_roc_multi(y_true_bin, y_score, ax=ax)
for i in range(n_classes):
_roc(y_true_bin[:, i], y_score[:, i], ax=ax)
else:
if y_score_is_vector:
_roc(y_true, y_score, ax)
else:
_roc(y_true, y_score[:, 1], ax)
# raise error if n_classes = 1?
return ax | python | def roc(y_true, y_score, ax=None):
"""
Plot ROC curve.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples] or [n_samples, 2] for binary
classification or [n_samples, n_classes] for multiclass
Target scores (estimator predictions).
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Notes
-----
It is assumed that the y_score parameter columns are in order. For example,
if ``y_true = [2, 2, 1, 0, 0, 1, 2]``, then the first column in y_score
must contain the scores for class 0, second column for class 1 and so on.
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/roc.py
"""
if any((val is None for val in (y_true, y_score))):
raise ValueError("y_true and y_score are needed to plot ROC")
if ax is None:
ax = plt.gca()
# get the number of classes based on the shape of y_score
y_score_is_vector = is_column_vector(y_score) or is_row_vector(y_score)
if y_score_is_vector:
n_classes = 2
else:
_, n_classes = y_score.shape
# check data shape?
if n_classes > 2:
# convert y_true to binary format
y_true_bin = label_binarize(y_true, classes=np.unique(y_true))
_roc_multi(y_true_bin, y_score, ax=ax)
for i in range(n_classes):
_roc(y_true_bin[:, i], y_score[:, i], ax=ax)
else:
if y_score_is_vector:
_roc(y_true, y_score, ax)
else:
_roc(y_true, y_score[:, 1], ax)
# raise error if n_classes = 1?
return ax | [
"def",
"roc",
"(",
"y_true",
",",
"y_score",
",",
"ax",
"=",
"None",
")",
":",
"if",
"any",
"(",
"(",
"val",
"is",
"None",
"for",
"val",
"in",
"(",
"y_true",
",",
"y_score",
")",
")",
")",
":",
"raise",
"ValueError",
"(",
"\"y_true and y_score are needed to plot ROC\"",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"# get the number of classes based on the shape of y_score",
"y_score_is_vector",
"=",
"is_column_vector",
"(",
"y_score",
")",
"or",
"is_row_vector",
"(",
"y_score",
")",
"if",
"y_score_is_vector",
":",
"n_classes",
"=",
"2",
"else",
":",
"_",
",",
"n_classes",
"=",
"y_score",
".",
"shape",
"# check data shape?",
"if",
"n_classes",
">",
"2",
":",
"# convert y_true to binary format",
"y_true_bin",
"=",
"label_binarize",
"(",
"y_true",
",",
"classes",
"=",
"np",
".",
"unique",
"(",
"y_true",
")",
")",
"_roc_multi",
"(",
"y_true_bin",
",",
"y_score",
",",
"ax",
"=",
"ax",
")",
"for",
"i",
"in",
"range",
"(",
"n_classes",
")",
":",
"_roc",
"(",
"y_true_bin",
"[",
":",
",",
"i",
"]",
",",
"y_score",
"[",
":",
",",
"i",
"]",
",",
"ax",
"=",
"ax",
")",
"else",
":",
"if",
"y_score_is_vector",
":",
"_roc",
"(",
"y_true",
",",
"y_score",
",",
"ax",
")",
"else",
":",
"_roc",
"(",
"y_true",
",",
"y_score",
"[",
":",
",",
"1",
"]",
",",
"ax",
")",
"# raise error if n_classes = 1?",
"return",
"ax"
] | Plot ROC curve.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples] or [n_samples, 2] for binary
classification or [n_samples, n_classes] for multiclass
Target scores (estimator predictions).
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Notes
-----
It is assumed that the y_score parameter columns are in order. For example,
if ``y_true = [2, 2, 1, 0, 0, 1, 2]``, then the first column in y_score
must contain the scores for class 0, second column for class 1 and so on.
Returns
-------
ax: matplotlib Axes
Axes containing the plot
Examples
--------
.. plot:: ../../examples/roc.py | [
"Plot",
"ROC",
"curve",
"."
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/plot/roc.py#L9-L69 |
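
For context, a runnable sketch of calling this function through the public interface (assuming the package exposes it as sklearn_evaluation.plot, as its examples do):

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn_evaluation import plot

X, y = make_classification(n_samples=300, n_classes=3, n_informative=5,
                           random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(max_iter=1000).fit(X_train, y_train)
# predict_proba returns columns ordered by clf.classes_, which satisfies the
# "columns are in order" requirement from the notes above
ax = plot.roc(y_test, clf.predict_proba(X_test))
ax.figure.savefig('roc.png')
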
edublancas/sklearn-evaluation | sklearn_evaluation/plot/roc.py | _roc | def _roc(y_true, y_score, ax=None):
"""
Plot ROC curve for binary classification.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples]
Target scores (estimator predictions).
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
"""
# check dimensions
fpr, tpr, _ = roc_curve(y_true, y_score)
roc_auc = auc(fpr, tpr)
ax.plot(fpr, tpr, label=('ROC curve (area = {0:0.2f})'.format(roc_auc)))
_set_ax_settings(ax)
return ax | python | def _roc(y_true, y_score, ax=None):
"""
Plot ROC curve for binary classification.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples]
Target scores (estimator predictions).
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
"""
# check dimensions
fpr, tpr, _ = roc_curve(y_true, y_score)
roc_auc = auc(fpr, tpr)
ax.plot(fpr, tpr, label=('ROC curve (area = {0:0.2f})'.format(roc_auc)))
_set_ax_settings(ax)
return ax | [
"def",
"_roc",
"(",
"y_true",
",",
"y_score",
",",
"ax",
"=",
"None",
")",
":",
"# check dimensions",
"fpr",
",",
"tpr",
",",
"_",
"=",
"roc_curve",
"(",
"y_true",
",",
"y_score",
")",
"roc_auc",
"=",
"auc",
"(",
"fpr",
",",
"tpr",
")",
"ax",
".",
"plot",
"(",
"fpr",
",",
"tpr",
",",
"label",
"=",
"(",
"'ROC curve (area = {0:0.2f})'",
".",
"format",
"(",
"roc_auc",
")",
")",
")",
"_set_ax_settings",
"(",
"ax",
")",
"return",
"ax"
] | Plot ROC curve for binary classification.
Parameters
----------
y_true : array-like, shape = [n_samples]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples]
Target scores (estimator predictions).
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot | [
"Plot",
"ROC",
"curve",
"for",
"binary",
"classification",
"."
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/plot/roc.py#L72-L98 |
edublancas/sklearn-evaluation | sklearn_evaluation/plot/roc.py | _roc_multi | def _roc_multi(y_true, y_score, ax=None):
"""
Plot ROC curve for multiclass classification.
Parameters
----------
y_true : array-like, shape = [n_samples, n_classes]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples, n_classes]
Target scores (estimator predictions).
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
"""
# Compute micro-average ROC curve and ROC area
fpr, tpr, _ = roc_curve(y_true.ravel(), y_score.ravel())
roc_auc = auc(fpr, tpr)
if ax is None:
ax = plt.gca()
ax.plot(fpr, tpr, label=('micro-average ROC curve (area = {0:0.2f})'
.format(roc_auc)))
_set_ax_settings(ax)
return ax | python | def _roc_multi(y_true, y_score, ax=None):
"""
Plot ROC curve for multiclass classification.
Parameters
----------
y_true : array-like, shape = [n_samples, n_classes]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples, n_classes]
Target scores (estimator predictions).
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot
"""
# Compute micro-average ROC curve and ROC area
fpr, tpr, _ = roc_curve(y_true.ravel(), y_score.ravel())
roc_auc = auc(fpr, tpr)
if ax is None:
ax = plt.gca()
ax.plot(fpr, tpr, label=('micro-average ROC curve (area = {0:0.2f})'
.format(roc_auc)))
_set_ax_settings(ax)
return ax | [
"def",
"_roc_multi",
"(",
"y_true",
",",
"y_score",
",",
"ax",
"=",
"None",
")",
":",
"# Compute micro-average ROC curve and ROC area",
"fpr",
",",
"tpr",
",",
"_",
"=",
"roc_curve",
"(",
"y_true",
".",
"ravel",
"(",
")",
",",
"y_score",
".",
"ravel",
"(",
")",
")",
"roc_auc",
"=",
"auc",
"(",
"fpr",
",",
"tpr",
")",
"if",
"ax",
"is",
"None",
":",
"ax",
"=",
"plt",
".",
"gca",
"(",
")",
"ax",
".",
"plot",
"(",
"fpr",
",",
"tpr",
",",
"label",
"=",
"(",
"'micro-average ROC curve (area = {0:0.2f})'",
".",
"format",
"(",
"roc_auc",
")",
")",
")",
"_set_ax_settings",
"(",
"ax",
")",
"return",
"ax"
] | Plot ROC curve for multiclass classification.
Parameters
----------
y_true : array-like, shape = [n_samples, n_classes]
Correct target values (ground truth).
y_score : array-like, shape = [n_samples, n_classes]
Target scores (estimator predictions).
ax: matplotlib Axes
Axes object to draw the plot onto, otherwise uses current Axes
Returns
-------
ax: matplotlib Axes
Axes containing the plot | [
"Plot",
"ROC",
"curve",
"for",
"multi",
"classification",
"."
] | train | https://github.com/edublancas/sklearn-evaluation/blob/79ee6e4dfe911b5a5a9b78a5caaed7c73eef6f39/sklearn_evaluation/plot/roc.py#L101-L130 |
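
The micro-averaging step above can be reproduced standalone: binarize the labels, then flatten both arrays so every (sample, class) pair is scored as one binary decision. A minimal sketch:

import numpy as np
from sklearn.metrics import auc, roc_curve
from sklearn.preprocessing import label_binarize

y_true = np.array([0, 1, 2, 2, 1, 0])
y_score = np.array([[0.7, 0.2, 0.1],
                    [0.2, 0.5, 0.3],
                    [0.1, 0.2, 0.7],
                    [0.2, 0.2, 0.6],
                    [0.3, 0.6, 0.1],
                    [0.8, 0.1, 0.1]])
# One row per sample, one column per class, exactly as _roc_multi expects
y_bin = label_binarize(y_true, classes=np.unique(y_true))
fpr, tpr, _ = roc_curve(y_bin.ravel(), y_score.ravel())
print('micro-average AUC:', auc(fpr, tpr))
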
hubo1016/vlcp | vlcp/utils/redisclient.py | RedisClient.get_connection | async def get_connection(self, container):
'''
Get an exclusive connection, useful for blocking commands and transactions.
You must call release or shutdown (not recommended) to return the connection after use.
:param container: routine container
:returns: RedisClientBase object, with some commands the same as RedisClient, like execute_command,
batch_execute, register_script, etc.
'''
if self._connpool:
conn = self._connpool.pop()
return RedisClientBase(conn, self)
else:
conn = self._create_client(container)
await RedisClientBase._get_connection(self, container, conn)
await self._protocol.send_command(conn, container, 'SELECT', str(self.db))
return RedisClientBase(conn, self) | python | async def get_connection(self, container):
'''
Get an exclusive connection, useful for blocking commands and transactions.
You must call release or shutdown (not recommended) to return the connection after use.
:param container: routine container
:returns: RedisClientBase object, with some commands the same as RedisClient, like execute_command,
batch_execute, register_script, etc.
'''
if self._connpool:
conn = self._connpool.pop()
return RedisClientBase(conn, self)
else:
conn = self._create_client(container)
await RedisClientBase._get_connection(self, container, conn)
await self._protocol.send_command(conn, container, 'SELECT', str(self.db))
return RedisClientBase(conn, self) | [
"async",
"def",
"get_connection",
"(",
"self",
",",
"container",
")",
":",
"if",
"self",
".",
"_connpool",
":",
"conn",
"=",
"self",
".",
"_connpool",
".",
"pop",
"(",
")",
"return",
"RedisClientBase",
"(",
"conn",
",",
"self",
")",
"else",
":",
"conn",
"=",
"self",
".",
"_create_client",
"(",
"container",
")",
"await",
"RedisClientBase",
".",
"_get_connection",
"(",
"self",
",",
"container",
",",
"conn",
")",
"await",
"self",
".",
"_protocol",
".",
"send_command",
"(",
"conn",
",",
"container",
",",
"'SELECT'",
",",
"str",
"(",
"self",
".",
"db",
")",
")",
"return",
"RedisClientBase",
"(",
"conn",
",",
"self",
")"
] | Get an exclusive connection, useful for blocking commands and transactions.
You must call release or shutdown (not recommended) to return the connection after use.
:param container: routine container
:returns: RedisClientBase object, with some commands the same as RedisClient, like execute_command,
batch_execute, register_script, etc. | [
"Get",
"an",
"exclusive",
"connection",
"useful",
"for",
"blocked",
"commands",
"and",
"transactions",
".",
"You",
"must",
"call",
"release",
"or",
"shutdown",
"(",
"not",
"recommanded",
")",
"to",
"return",
"the",
"connection",
"after",
"use",
".",
":",
"param",
"container",
":",
"routine",
"container",
":",
"returns",
":",
"RedisClientBase",
"object",
"with",
"some",
"commands",
"same",
"as",
"RedisClient",
"like",
"execute_command",
"batch_execute",
"register_script",
"etc",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/redisclient.py#L257-L275 |
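
The pooling behaviour above reduces to a pop-on-acquire, append-on-release cycle. A synchronous illustration of that life cycle (names here are illustrative, not vlcp API):

class PoolSketch:
    def __init__(self, connect):
        self._connpool = []               # idle, already-connected clients
        self._connect = connect
    def acquire(self):
        if self._connpool:
            return self._connpool.pop()   # reuse an idle connection
        return self._connect()            # otherwise dial a new one
    def release(self, conn):
        self._connpool.append(conn)       # hand it back for later reuse

pool = PoolSketch(connect=lambda: object())
conn = pool.acquire()
pool.release(conn)                        # forgetting this leaks a connection
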
hubo1016/vlcp | vlcp/utils/redisclient.py | RedisClient.execute_command | async def execute_command(self, container, *args):
'''
Execute command on Redis server:
- For (P)SUBSCRIBE/(P)UNSUBSCRIBE, the command is sent to the subscribe connection.
It is recommended to use (p)subscribe/(p)unsubscribe methods instead of calling the command directly
- For BLPOP, BRPOP, BRPOPLPUSH, the command is sent to a separate connection. The connection is
recycled after the command returns.
- For other commands, the command is sent to the default connection.
'''
if args:
cmd = _str(args[0]).upper()
if cmd in ('SUBSCRIBE', 'UNSUBSCRIBE', 'PSUBSCRIBE', 'PUNSUBSCRIBE'):
await self._get_subscribe_connection(container)
return await self._protocol.execute_command(self._subscribeconn, container, *args)
elif cmd in ('BLPOP', 'BRPOP', 'BRPOPLPUSH'):
c = await self.get_connection(container)
with c.context(container):
return await c.execute_command(container, *args)
return await RedisClientBase.execute_command(self, container, *args) | python | async def execute_command(self, container, *args):
'''
Execute command on Redis server:
- For (P)SUBSCRIBE/(P)UNSUBSCRIBE, the command is sent to the subscribe connection.
It is recommended to use (p)subscribe/(p)unsubscribe methods instead of calling the command directly
- For BLPOP, BRPOP, BRPOPLPUSH, the command is sent to a separate connection. The connection is
recycled after the command returns.
- For other commands, the command is sent to the default connection.
'''
if args:
cmd = _str(args[0]).upper()
if cmd in ('SUBSCRIBE', 'UNSUBSCRIBE', 'PSUBSCRIBE', 'PUNSUBSCRIBE'):
await self._get_subscribe_connection(container)
return await self._protocol.execute_command(self._subscribeconn, container, *args)
elif cmd in ('BLPOP', 'BRPOP', 'BRPOPLPUSH'):
c = await self.get_connection(container)
with c.context(container):
return await c.execute_command(container, *args)
return await RedisClientBase.execute_command(self, container, *args) | [
"async",
"def",
"execute_command",
"(",
"self",
",",
"container",
",",
"*",
"args",
")",
":",
"if",
"args",
":",
"cmd",
"=",
"_str",
"(",
"args",
"[",
"0",
"]",
")",
".",
"upper",
"(",
")",
"if",
"cmd",
"in",
"(",
"'SUBSCRIBE'",
",",
"'UNSUBSCRIBE'",
",",
"'PSUBSCRIBE'",
",",
"'PUNSUBSCRIBE'",
")",
":",
"await",
"self",
".",
"_get_subscribe_connection",
"(",
"container",
")",
"return",
"await",
"self",
".",
"_protocol",
".",
"execute_command",
"(",
"self",
".",
"_subscribeconn",
",",
"container",
",",
"*",
"args",
")",
"elif",
"cmd",
"in",
"(",
"'BLPOP'",
",",
"'BRPOP'",
",",
"'BRPOPLPUSH'",
")",
":",
"c",
"=",
"await",
"self",
".",
"get_connection",
"(",
"container",
")",
"with",
"c",
".",
"context",
"(",
"container",
")",
":",
"return",
"await",
"c",
".",
"execute_command",
"(",
"container",
",",
"*",
"args",
")",
"return",
"await",
"RedisClientBase",
".",
"execute_command",
"(",
"self",
",",
"container",
",",
"*",
"args",
")"
] | Execute command on Redis server:
- For (P)SUBSCRIBE/(P)UNSUBSCRIBE, the command is sent to the subscribe connection.
It is recommended to use (p)subscribe/(p)unsubscribe methods instead of calling the command directly
- For BLPOP, BRPOP, BRPOPLPUSH, the command is sent to a separate connection. The connection is
recycled after the command returns.
- For other commands, the command is sent to the default connection. | [
"Execute",
"command",
"on",
"Redis",
"server",
":",
"-",
"For",
"(",
"P",
")",
"SUBSCRIBE",
"/",
"(",
"P",
")",
"UNSUBSCRIBE",
"the",
"command",
"is",
"sent",
"to",
"the",
"subscribe",
"connection",
".",
"It",
"is",
"recommended",
"to",
"use",
"(",
"p",
")",
"subscribe",
"/",
"(",
"p",
")",
"unsubscribe",
"method",
"instead",
"of",
"directly",
"call",
"the",
"command",
"-",
"For",
"BLPOP",
"BRPOP",
"BRPOPLPUSH",
"the",
"command",
"is",
"sent",
"to",
"a",
"separated",
"connection",
".",
"The",
"connection",
"is",
"recycled",
"after",
"command",
"returns",
".",
"-",
"For",
"other",
"commands",
"the",
"command",
"is",
"sent",
"to",
"the",
"default",
"connection",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/redisclient.py#L296-L314 |
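
The routing rule above, condensed into a standalone sketch (the attribute names are illustrative, not vlcp API): subscribe-family commands share the pub/sub connection, blocking list commands take a pooled connection so they cannot stall other traffic, and everything else uses the default connection.

SUBSCRIBE_CMDS = {'SUBSCRIBE', 'UNSUBSCRIBE', 'PSUBSCRIBE', 'PUNSUBSCRIBE'}
BLOCKING_CMDS = {'BLPOP', 'BRPOP', 'BRPOPLPUSH'}

def pick_connection(client, cmd):
    cmd = cmd.upper()
    if cmd in SUBSCRIBE_CMDS:
        return client.subscribe_conn      # long-lived pub/sub channel
    if cmd in BLOCKING_CMDS:
        return client.pool_acquire()      # may block for a long time
    return client.default_conn            # ordinary request/response traffic
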
hubo1016/vlcp | vlcp/utils/redisclient.py | RedisClient.subscribe | async def subscribe(self, container, *keys):
'''
Subscribe to specified channels
:param container: routine container
:param *keys: subscribed channels
:returns: list of event matchers for the specified channels
'''
await self._get_subscribe_connection(container)
realkeys = []
for k in keys:
count = self._subscribecounter.get(k, 0)
if count == 0:
realkeys.append(k)
self._subscribecounter[k] = count + 1
if realkeys:
await self._protocol.execute_command(self._subscribeconn, container, 'SUBSCRIBE', *realkeys)
return [self._protocol.subscribematcher(self._subscribeconn, k, None, RedisSubscribeMessageEvent.MESSAGE) for k in keys] | python | async def subscribe(self, container, *keys):
'''
Subscribe to specified channels
:param container: routine container
:param *keys: subscribed channels
:returns: list of event matchers for the specified channels
'''
await self._get_subscribe_connection(container)
realkeys = []
for k in keys:
count = self._subscribecounter.get(k, 0)
if count == 0:
realkeys.append(k)
self._subscribecounter[k] = count + 1
if realkeys:
await self._protocol.execute_command(self._subscribeconn, container, 'SUBSCRIBE', *realkeys)
return [self._protocol.subscribematcher(self._subscribeconn, k, None, RedisSubscribeMessageEvent.MESSAGE) for k in keys] | [
"async",
"def",
"subscribe",
"(",
"self",
",",
"container",
",",
"*",
"keys",
")",
":",
"await",
"self",
".",
"_get_subscribe_connection",
"(",
"container",
")",
"realkeys",
"=",
"[",
"]",
"for",
"k",
"in",
"keys",
":",
"count",
"=",
"self",
".",
"_subscribecounter",
".",
"get",
"(",
"k",
",",
"0",
")",
"if",
"count",
"==",
"0",
":",
"realkeys",
".",
"append",
"(",
"k",
")",
"self",
".",
"_subscribecounter",
"[",
"k",
"]",
"=",
"count",
"+",
"1",
"if",
"realkeys",
":",
"await",
"self",
".",
"_protocol",
".",
"execute_command",
"(",
"self",
".",
"_subscribeconn",
",",
"container",
",",
"'SUBSCRIBE'",
",",
"*",
"realkeys",
")",
"return",
"[",
"self",
".",
"_protocol",
".",
"subscribematcher",
"(",
"self",
".",
"_subscribeconn",
",",
"k",
",",
"None",
",",
"RedisSubscribeMessageEvent",
".",
"MESSAGE",
")",
"for",
"k",
"in",
"keys",
"]"
] | Subscribe to specified channels
:param container: routine container
:param *keys: subscribed channels
:returns: list of event matchers for the specified channels | [
"Subscribe",
"to",
"specified",
"channels",
":",
"param",
"container",
":",
"routine",
"container",
":",
"param",
"*",
"keys",
":",
"subscribed",
"channels",
":",
"returns",
":",
"list",
"of",
"event",
"matchers",
"for",
"the",
"specified",
"channels"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/redisclient.py#L316-L335 |
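
The reference counting in subscribe (and its mirror in unsubscribe below) guarantees at most one SUBSCRIBE and one UNSUBSCRIBE per channel on the wire. Isolated into a runnable sketch with a stubbed network call:

_counter = {}

def _send(*args):                      # stub standing in for the protocol call
    print('->', *args)

def subscribe_sketch(*keys):
    new = [k for k in keys if _counter.get(k, 0) == 0]
    for k in keys:
        _counter[k] = _counter.get(k, 0) + 1
    if new:
        _send('SUBSCRIBE', *new)       # only first subscribers hit the wire

def unsubscribe_sketch(*keys):
    gone = [k for k in keys if _counter.get(k, 0) <= 1]
    for k in keys:
        if _counter.get(k, 0) <= 1:
            _counter.pop(k, None)
        else:
            _counter[k] -= 1
    if gone:
        _send('UNSUBSCRIBE', *gone)    # only last unsubscribers hit the wire

subscribe_sketch('a', 'b')             # -> SUBSCRIBE a b
subscribe_sketch('a')                  # no network traffic
unsubscribe_sketch('a')                # no network traffic
unsubscribe_sketch('a', 'b')           # -> UNSUBSCRIBE a b
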
hubo1016/vlcp | vlcp/utils/redisclient.py | RedisClient.unsubscribe | async def unsubscribe(self, container, *keys):
'''
Unsubscribe specified channels. Every subscribed key should be unsubscribed exactly once, even if it was subscribed multiple times.
:param container: routine container
:param \*keys: subscribed channels
'''
await self._get_subscribe_connection(container)
realkeys = []
for k in keys:
count = self._subscribecounter.get(k, 0)
if count <= 1:
realkeys.append(k)
try:
del self._subscribecounter[k]
except KeyError:
pass
else:
self._subscribecounter[k] = count - 1
if realkeys:
await self._protocol.execute_command(self._subscribeconn, container, 'UNSUBSCRIBE', *realkeys) | python | async def unsubscribe(self, container, *keys):
'''
Unsubscribe specified channels. Every subscribed key should be unsubscribed exactly once, even if it was subscribed multiple times.
:param container: routine container
:param \*keys: subscribed channels
'''
await self._get_subscribe_connection(container)
realkeys = []
for k in keys:
count = self._subscribecounter.get(k, 0)
if count <= 1:
realkeys.append(k)
try:
del self._subscribecounter[k]
except KeyError:
pass
else:
self._subscribecounter[k] = count - 1
if realkeys:
await self._protocol.execute_command(self._subscribeconn, container, 'UNSUBSCRIBE', *realkeys) | [
"async",
"def",
"unsubscribe",
"(",
"self",
",",
"container",
",",
"*",
"keys",
")",
":",
"await",
"self",
".",
"_get_subscribe_connection",
"(",
"container",
")",
"realkeys",
"=",
"[",
"]",
"for",
"k",
"in",
"keys",
":",
"count",
"=",
"self",
".",
"_subscribecounter",
".",
"get",
"(",
"k",
",",
"0",
")",
"if",
"count",
"<=",
"1",
":",
"realkeys",
".",
"append",
"(",
"k",
")",
"try",
":",
"del",
"self",
".",
"_subscribecounter",
"[",
"k",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"self",
".",
"_subscribecounter",
"[",
"k",
"]",
"=",
"count",
"-",
"1",
"if",
"realkeys",
":",
"await",
"self",
".",
"_protocol",
".",
"execute_command",
"(",
"self",
".",
"_subscribeconn",
",",
"container",
",",
"'UNSUBSCRIBE'",
",",
"*",
"realkeys",
")"
] | Unsubscribe specified channels. Every subscribed key should be unsubscribed exactly once, even if it was subscribed multiple times.
:param container: routine container
:param \*keys: subscribed channels | [
"Unsubscribe",
"specified",
"channels",
".",
"Every",
"subscribed",
"key",
"should",
"be",
"unsubscribed",
"exactly",
"once",
"even",
"if",
"duplicated",
"subscribed",
".",
":",
"param",
"container",
":",
"routine",
"container",
":",
"param",
"\\",
"*",
"keys",
":",
"subscribed",
"channels"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/redisclient.py#L337-L358 |
hubo1016/vlcp | vlcp/utils/redisclient.py | RedisClient.psubscribe | async def psubscribe(self, container, *keys):
'''
Subscribe to specified globs
:param container: routine container
:param \*keys: subscribed globs
:returns: list of event matchers for the specified globs
'''
await self._get_subscribe_connection(container)
realkeys = []
for k in keys:
count = self._psubscribecounter.get(k, 0)
if count == 0:
realkeys.append(k)
self._psubscribecounter[k] = count + 1
await self._protocol.execute_command(self._subscribeconn, container, 'PSUBSCRIBE', *realkeys)
return [self._protocol.subscribematcher(self._subscribeconn, k, None, RedisSubscribeMessageEvent.PMESSAGE) for k in keys] | python | async def psubscribe(self, container, *keys):
'''
Subscribe to specified globs
:param container: routine container
:param \*keys: subscribed globs
:returns: list of event matchers for the specified globs
'''
await self._get_subscribe_connection(container)
realkeys = []
for k in keys:
count = self._psubscribecounter.get(k, 0)
if count == 0:
realkeys.append(k)
self._psubscribecounter[k] = count + 1
await self._protocol.execute_command(self._subscribeconn, container, 'PSUBSCRIBE', *realkeys)
return [self._protocol.subscribematcher(self._subscribeconn, k, None, RedisSubscribeMessageEvent.PMESSAGE) for k in keys] | [
"async",
"def",
"psubscribe",
"(",
"self",
",",
"container",
",",
"*",
"keys",
")",
":",
"await",
"self",
".",
"_get_subscribe_connection",
"(",
"container",
")",
"realkeys",
"=",
"[",
"]",
"for",
"k",
"in",
"keys",
":",
"count",
"=",
"self",
".",
"_psubscribecounter",
".",
"get",
"(",
"k",
",",
"0",
")",
"if",
"count",
"==",
"0",
":",
"realkeys",
".",
"append",
"(",
"k",
")",
"self",
".",
"_psubscribecounter",
"[",
"k",
"]",
"=",
"count",
"+",
"1",
"await",
"self",
".",
"_protocol",
".",
"execute_command",
"(",
"self",
".",
"_subscribeconn",
",",
"container",
",",
"'PSUBSCRIBE'",
",",
"*",
"realkeys",
")",
"return",
"[",
"self",
".",
"_protocol",
".",
"subscribematcher",
"(",
"self",
".",
"_subscribeconn",
",",
"k",
",",
"None",
",",
"RedisSubscribeMessageEvent",
".",
"PMESSAGE",
")",
"for",
"k",
"in",
"keys",
"]"
] | Subscribe to specified globs
:param container: routine container
:param \*keys: subscribed globs
:returns: list of event matchers for the specified globs | [
"Subscribe",
"to",
"specified",
"globs",
":",
"param",
"container",
":",
"routine",
"container",
":",
"param",
"\\",
"*",
"keys",
":",
"subscribed",
"globs",
":",
"returns",
":",
"list",
"of",
"event",
"matchers",
"for",
"the",
"specified",
"globs"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/redisclient.py#L360-L378 |
hubo1016/vlcp | vlcp/utils/redisclient.py | RedisClient.shutdown | async def shutdown(self, container, force=False):
'''
Shutdown all connections. Exclusive connections created by get_connection will shut down after release()
'''
p = self._connpool
self._connpool = []
self._shutdown = True
if self._defaultconn:
p.append(self._defaultconn)
self._defaultconn = None
if self._subscribeconn:
p.append(self._subscribeconn)
self._subscribeconn = None
await container.execute_all([self._shutdown_conn(container, o, force)
for o in p]) | python | async def shutdown(self, container, force=False):
'''
Shutdown all connections. Exclusive connections created by get_connection will shut down after release()
'''
p = self._connpool
self._connpool = []
self._shutdown = True
if self._defaultconn:
p.append(self._defaultconn)
self._defaultconn = None
if self._subscribeconn:
p.append(self._subscribeconn)
self._subscribeconn = None
await container.execute_all([self._shutdown_conn(container, o, force)
for o in p]) | [
"async",
"def",
"shutdown",
"(",
"self",
",",
"container",
",",
"force",
"=",
"False",
")",
":",
"p",
"=",
"self",
".",
"_connpool",
"self",
".",
"_connpool",
"=",
"[",
"]",
"self",
".",
"_shutdown",
"=",
"True",
"if",
"self",
".",
"_defaultconn",
":",
"p",
".",
"append",
"(",
"self",
".",
"_defaultconn",
")",
"self",
".",
"_defaultconn",
"=",
"None",
"if",
"self",
".",
"_subscribeconn",
":",
"p",
".",
"append",
"(",
"self",
".",
"_subscribeconn",
")",
"self",
".",
"_subscribeconn",
"=",
"None",
"await",
"container",
".",
"execute_all",
"(",
"[",
"self",
".",
"_shutdown_conn",
"(",
"container",
",",
"o",
",",
"force",
")",
"for",
"o",
"in",
"p",
"]",
")"
] | Shutdown all connections. Exclusive connections created by get_connection will shut down after release() | [
"Shutdown",
"all",
"connections",
".",
"Exclusive",
"connections",
"created",
"by",
"get_connection",
"will",
"shutdown",
"after",
"release",
"()"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/redisclient.py#L399-L413 |
hubo1016/vlcp | vlcp/utils/redisclient.py | RedisClient.subscribe_state_matcher | def subscribe_state_matcher(self, container, connected = True):
'''
Return a matcher to match the subscribe connection status.
:param container: a routine container. NOTICE: this method is not a routine.
:param connected: if True, the matcher matches connection up. If False, the matcher matches
connection down.
:returns: an event matcher.
'''
if not self._subscribeconn:
self._subscribeconn = self._create_client(container)
return RedisConnectionStateEvent.createMatcher(
RedisConnectionStateEvent.CONNECTION_UP if connected else RedisConnectionStateEvent.CONNECTION_DOWN,
self._subscribeconn
) | python | def subscribe_state_matcher(self, container, connected = True):
'''
Return a matcher to match the subscribe connection status.
:param container: a routine container. NOTICE: this method is not a routine.
:param connected: if True, the matcher matches connection up. If False, the matcher matches
connection down.
:returns: an event matcher.
'''
if not self._subscribeconn:
self._subscribeconn = self._create_client(container)
return RedisConnectionStateEvent.createMatcher(
RedisConnectionStateEvent.CONNECTION_UP if connected else RedisConnectionStateEvent.CONNECTION_DOWN,
self._subscribeconn
) | [
"def",
"subscribe_state_matcher",
"(",
"self",
",",
"container",
",",
"connected",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"_subscribeconn",
":",
"self",
".",
"_subscribeconn",
"=",
"self",
".",
"_create_client",
"(",
"container",
")",
"return",
"RedisConnectionStateEvent",
".",
"createMatcher",
"(",
"RedisConnectionStateEvent",
".",
"CONNECTION_UP",
"if",
"connected",
"else",
"RedisConnectionStateEvent",
".",
"CONNECTION_DOWN",
",",
"self",
".",
"_subscribeconn",
")"
] | Return a matcher to match the subscribe connection status.
:param container: a routine container. NOTICE: this method is not a routine.
:param connected: if True, the matcher matches connection up. If False, the matcher matches
connection down.
:returns: an event matcher. | [
"Return",
"a",
"matcher",
"to",
"match",
"the",
"subscribe",
"connection",
"status",
".",
":",
"param",
"container",
":",
"a",
"routine",
"container",
".",
"NOTICE",
":",
"this",
"method",
"is",
"not",
"a",
"routine",
".",
":",
"param",
"connected",
":",
"if",
"True",
"the",
"matcher",
"matches",
"connection",
"up",
".",
"If",
"False",
"the",
"matcher",
"matches",
"connection",
"down",
".",
":",
"returns",
":",
"an",
"event",
"matcher",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/redisclient.py#L438-L454 |
hubo1016/vlcp | vlcp/service/connection/zookeeperdb.py | _escape_path | def _escape_path(key):
'''
Replace '/', '\\' in key
'''
return _tobytes(key).replace(b'$', b'$_').replace(b'/', b'$+').replace(b'\\', b'$$') | python | def _escape_path(key):
'''
Replace '/', '\\' in key
'''
return _tobytes(key).replace(b'$', b'$_').replace(b'/', b'$+').replace(b'\\', b'$$') | [
"def",
"_escape_path",
"(",
"key",
")",
":",
"return",
"_tobytes",
"(",
"key",
")",
".",
"replace",
"(",
"b'$'",
",",
"b'$_'",
")",
".",
"replace",
"(",
"b'/'",
",",
"b'$+'",
")",
".",
"replace",
"(",
"b'\\\\'",
",",
"b'$$'",
")"
] | Replace '/', '\\' in key | [
"Replace",
"/",
"\\\\",
"in",
"key"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/service/connection/zookeeperdb.py#L56-L60 |
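
Because '$' is itself escaped (to '$_'), the mapping is injective and can be reversed by undoing the substitutions in the opposite order. A round-trip sketch (the unescape helper is an illustration, not part of vlcp):

def escape_path_sketch(key):
    return key.replace(b'$', b'$_').replace(b'/', b'$+').replace(b'\\', b'$$')

def unescape_path_sketch(key):
    return key.replace(b'$$', b'\\').replace(b'$+', b'/').replace(b'$_', b'$')

assert unescape_path_sketch(escape_path_sketch(b'a/b\\c$d')) == b'a/b\\c$d'
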
hubo1016/vlcp | vlcp/utils/dhcp.py | reassemble_options | def reassemble_options(payload):
'''
Reassemble partial options to options, returns a list of dhcp_option
DHCP options are basically a `|tag|length|value|` structure. When an
option is longer than 255 bytes, it can be split into multiple
structures with the same tag. The split structures must be
joined back to get the original option.
`dhcp_option_partial` is used to represent the split options,
and `dhcp_option` is used for the reassembled option.
'''
options = []
option_indices = {}
def process_option_list(partials):
for p in partials:
if p.tag == OPTION_END:
break
if p.tag == OPTION_PAD:
continue
if p.tag in option_indices:
# Reassemble the data
options[option_indices[p.tag]][1].append(p.data)
else:
options.append((p.tag, [p.data]))
option_indices[p.tag] = len(options) - 1
# First process options field
process_option_list(payload.options)
if OPTION_OVERLOAD in option_indices:
# There is an overload option
data = b''.join(options[option_indices[OPTION_OVERLOAD]][1])
overload_option = dhcp_overload.create(data)
if overload_option & OVERLOAD_FILE:
process_option_list(dhcp_option_partial[0].create(payload.file))
if overload_option & OVERLOAD_SNAME:
process_option_list(dhcp_option_partial[0].create(payload.sname))
def _create_dhcp_option(tag, data):
opt = dhcp_option(tag = tag)
opt._setextra(data)
opt._autosubclass()
return opt
return [_create_dhcp_option(tag, b''.join(data)) for tag,data in options] | python | def reassemble_options(payload):
'''
Reassemble partial options to options, returns a list of dhcp_option
DHCP options are basically a `|tag|length|value|` structure. When an
option is longer than 255 bytes, it can be split into multiple
structures with the same tag. The split structures must be
joined back to get the original option.
`dhcp_option_partial` is used to represent the split options,
and `dhcp_option` is used for the reassembled option.
'''
options = []
option_indices = {}
def process_option_list(partials):
for p in partials:
if p.tag == OPTION_END:
break
if p.tag == OPTION_PAD:
continue
if p.tag in option_indices:
# Reassemble the data
options[option_indices[p.tag]][1].append(p.data)
else:
options.append((p.tag, [p.data]))
option_indices[p.tag] = len(options) - 1
# First process options field
process_option_list(payload.options)
if OPTION_OVERLOAD in option_indices:
# There is an overload option
data = b''.join(options[option_indices[OPTION_OVERLOAD]][1])
overload_option = dhcp_overload.create(data)
if overload_option & OVERLOAD_FILE:
process_option_list(dhcp_option_partial[0].create(payload.file))
if overload_option & OVERLOAD_SNAME:
process_option_list(dhcp_option_partial[0].create(payload.sname))
def _create_dhcp_option(tag, data):
opt = dhcp_option(tag = tag)
opt._setextra(data)
opt._autosubclass()
return opt
return [_create_dhcp_option(tag, b''.join(data)) for tag,data in options] | [
"def",
"reassemble_options",
"(",
"payload",
")",
":",
"options",
"=",
"[",
"]",
"option_indices",
"=",
"{",
"}",
"def",
"process_option_list",
"(",
"partials",
")",
":",
"for",
"p",
"in",
"partials",
":",
"if",
"p",
".",
"tag",
"==",
"OPTION_END",
":",
"break",
"if",
"p",
".",
"tag",
"==",
"OPTION_PAD",
":",
"continue",
"if",
"p",
".",
"tag",
"in",
"option_indices",
":",
"# Reassemble the data",
"options",
"[",
"option_indices",
"[",
"p",
".",
"tag",
"]",
"]",
"[",
"1",
"]",
".",
"append",
"(",
"p",
".",
"data",
")",
"else",
":",
"options",
".",
"append",
"(",
"(",
"p",
".",
"tag",
",",
"[",
"p",
".",
"data",
"]",
")",
")",
"option_indices",
"[",
"p",
".",
"tag",
"]",
"=",
"len",
"(",
"options",
")",
"-",
"1",
"# First process options field",
"process_option_list",
"(",
"payload",
".",
"options",
")",
"if",
"OPTION_OVERLOAD",
"in",
"option_indices",
":",
"# There is an overload option",
"data",
"=",
"b''",
".",
"join",
"(",
"options",
"[",
"option_indices",
"[",
"OPTION_OVERLOAD",
"]",
"]",
"[",
"1",
"]",
")",
"overload_option",
"=",
"dhcp_overload",
".",
"create",
"(",
"data",
")",
"if",
"overload_option",
"&",
"OVERLOAD_FILE",
":",
"process_option_list",
"(",
"dhcp_option_partial",
"[",
"0",
"]",
".",
"create",
"(",
"payload",
".",
"file",
")",
")",
"if",
"overload_option",
"&",
"OVERLOAD_SNAME",
":",
"process_option_list",
"(",
"dhcp_option_partial",
"[",
"0",
"]",
".",
"create",
"(",
"payload",
".",
"sname",
")",
")",
"def",
"_create_dhcp_option",
"(",
"tag",
",",
"data",
")",
":",
"opt",
"=",
"dhcp_option",
"(",
"tag",
"=",
"tag",
")",
"opt",
".",
"_setextra",
"(",
"data",
")",
"opt",
".",
"_autosubclass",
"(",
")",
"return",
"opt",
"return",
"[",
"_create_dhcp_option",
"(",
"tag",
",",
"b''",
".",
"join",
"(",
"data",
")",
")",
"for",
"tag",
",",
"data",
"in",
"options",
"]"
] | Reassemble partial options to options, returns a list of dhcp_option
DHCP options are basically a `|tag|length|value|` structure. When an
option is longer than 255 bytes, it can be split into multiple
structures with the same tag. The split structures must be
joined back to get the original option.
`dhcp_option_partial` is used to represent the split options,
and `dhcp_option` is used for the reassembled option.
"Reassemble",
"partial",
"options",
"to",
"options",
"returns",
"a",
"list",
"of",
"dhcp_option",
"DHCP",
"options",
"are",
"basically",
"|tag|length|value|",
"structure",
".",
"When",
"an",
"option",
"is",
"longer",
"than",
"255",
"bytes",
"it",
"can",
"be",
"splitted",
"into",
"multiple",
"structures",
"with",
"the",
"same",
"tag",
".",
"The",
"splitted",
"structures",
"must",
"be",
"joined",
"back",
"to",
"get",
"the",
"original",
"option",
".",
"dhcp_option_partial",
"is",
"used",
"to",
"present",
"the",
"splitted",
"options",
"and",
"dhcp_option",
"is",
"used",
"for",
"reassembled",
"option",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/dhcp.py#L248-L289 |
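
The reassembly idea in isolation: concatenate the data of every partial option that shares a tag, preserving first-seen tag order. This sketch works on plain (tag, data) pairs instead of vlcp's packet classes:

def reassemble_sketch(partials, OPTION_PAD=0, OPTION_END=255):
    merged = {}                        # tag -> list of data chunks
    order = []                         # first-seen order of tags
    for tag, data in partials:
        if tag == OPTION_END:
            break
        if tag == OPTION_PAD:
            continue
        if tag not in merged:
            merged[tag] = []
            order.append(tag)
        merged[tag].append(data)
    return [(tag, b''.join(merged[tag])) for tag in order]

# Two partials with tag 55 merge back into a single option:
print(reassemble_sketch([(55, b'abc'), (12, b'host'), (55, b'def'), (255, b'')]))
# -> [(55, b'abcdef'), (12, b'host')]
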
hubo1016/vlcp | vlcp/utils/dhcp.py | build_options | def build_options(payload, options, maxsize = 576, overload = OVERLOAD_FILE | OVERLOAD_SNAME, allowpartial = True):
'''
Split a list of options
This is the reverse operation of `reassemble_options`: it splits `dhcp_option` into
`dhcp_option_partial` if necessary, and sets the overload option if field overloading is
used.
:param options: a list of `dhcp_option`
:param maxsize: Limit the maximum DHCP message size. If options cannot fit into the DHCP
message, specified fields are overloaded for options. If options cannot
fit after overloading, extra options are DROPPED if allowpartial = True.
It is important to sort the dhcp options by priority.
:param overload: fields that are allowed to be overloaded
:param allowpartial: When options cannot fit into the DHCP message, allow the remaining options
to be dropped.
:return: Number of options that are dropped, i.e. `options[-return_value:]` are dropped
'''
if maxsize < 576:
maxsize = 576
max_options_size = maxsize - 240
# Ignore OPTION_PAD and OPTION_END
options = [o for o in options if o.tag not in (OPTION_PAD, OPTION_END)]
# Only preserve data
option_data = [(o.tag, o._tobytes()[2:]) for o in options]
def split_options(option_data, limits):
"""
Split options into multiple fields
:param option_data: list of (tag, data) pair
:param limits: list of int for limit of each field (excluding PAD and END)
:return: (list of fields, number of options that are dropped)
"""
# List of (dhcp_option_partial, option_not_finished)
partial_options = []
buffers = [0]
if not options:
return ([], 0)
def create_result():
# Remove any unfinished partial options
while partial_options and partial_options[-1][1]:
partial_options.pop()
buffers.append(len(partial_options))
r = [[po for po,_ in partial_options[buffers[i]:buffers[i+1]]] for i in range(0, len(buffers) - 1)]
# Remove empty fields
while r and not r[-1]:
r.pop()
return r
# Current field used size
current_size = 0
limit_iter = iter(limits)
try:
next_limit = next(limit_iter)
except (StopIteration, GeneratorExit):
return ([], False)
for i, (tag, data) in enumerate(option_data):
# Current used data size
data_size = 0
# Do not split very small options on a boundary, this may prevent some broken DHCP clients/servers
# from causing problems
nosplit = (len(data) <= 32)
while True:
# next partial option size should be:
# 1. no more than the current field limit (minus 1-byte tag and 1-byte length)
# 2. no more than the single dhcp_option_partial data limit (255 due to single byte length)
# 3. no more than the rest data size
next_size = min(next_limit - current_size - 2, 255, len(data) - data_size)
if next_size < 0 or (next_size == 0 and data_size < len(data)) \
or (next_size < len(data) - data_size and nosplit):
# Cannot put this part of data on the current field, find the next field
try:
next_limit = next(limit_iter)
except (StopIteration, GeneratorExit):
return (create_result(), len(option_data) - i)
# Record field boundary
buffers.append(len(partial_options))
current_size = 0
else:
# Put this partial option on current field
partial_options.append((dhcp_option_partial(tag = tag, data = data[data_size : data_size + next_size]),
(next_size < len(data) - data_size)))
data_size += next_size
current_size += next_size + 2
if data_size >= len(data):
# finished current option
break
return (create_result(), 0)
# First try to fit all options in options field
# preserve a byte for OPTION_END
result, not_finished = split_options(option_data, [max_options_size - 1])
if not_finished:
if overload & (OVERLOAD_FILE | OVERLOAD_SNAME):
# Try overload
# minus an overload option (1-byte tag, 1-byte length, 1-byte dhcp_overload) and 1-byte OPTION_END
limits = [max_options_size - 4]
if overload & OVERLOAD_FILE:
# preserve a byte for OPTION_END
limits.append(127)
if overload & OVERLOAD_SNAME:
# preserve a byte for OPTION_END
limits.append(63)
result2, not_finished2 = split_options(option_data, limits)
# Only overload if we have a better result
if len(result2) > 1:
result = result2
not_finished = not_finished2
if not allowpartial and not_finished:
raise ValueError("%d options cannot fit into a DHCP message" % (not_finished,))
if not result:
return not_finished
elif len(result) <= 1:
# No overload
payload.options = result[0] + [dhcp_option_partial(tag = OPTION_END)]
else:
overload_option = 0
if len(result) >= 2 and result[1]:
overload_option |= OVERLOAD_FILE
# overload file field
payload.file = dhcp_option_partial[0].tobytes(result[1] + [dhcp_option_partial(tag = OPTION_END)])
if len(result) >= 3 and result[2]:
overload_option |= OVERLOAD_SNAME
# overload sname field
payload.sname = dhcp_option_partial[0].tobytes(result[2] + [dhcp_option_partial(tag = OPTION_END)])
# Put an overload option before any other options
payload.options = [dhcp_option_partial(tag = OPTION_OVERLOAD, data = dhcp_overload.tobytes(overload_option))] \
+ result[0] + [dhcp_option_partial(tag = OPTION_END)]
return not_finished | python | def build_options(payload, options, maxsize = 576, overload = OVERLOAD_FILE | OVERLOAD_SNAME, allowpartial = True):
'''
Split a list of options
This is the reverse operation of `reassemble_options`: it splits `dhcp_option` into
`dhcp_option_partial` if necessary, and sets the overload option if field overloading is
used.
:param options: a list of `dhcp_option`
:param maxsize: Limit the maximum DHCP message size. If options cannot fit into the DHCP
message, specified fields are overloaded for options. If options cannot
fit after overloading, extra options are DROPPED if allowpartial = True.
It is important to sort the dhcp options by priority.
:param overload: fields that are allowed to be overloaded
:param allowpartial: When options cannot fit into the DHCP message, allow the remaining options
to be dropped.
:return: Number of options that are dropped, i.e. `options[-return_value:]` are dropped
'''
if maxsize < 576:
maxsize = 576
max_options_size = maxsize - 240
# Ignore OPTION_PAD and OPTION_END
options = [o for o in options if o.tag not in (OPTION_PAD, OPTION_END)]
# Only preserve data
option_data = [(o.tag, o._tobytes()[2:]) for o in options]
def split_options(option_data, limits):
"""
Split options into multiple fields
:param option_data: list of (tag, data) pair
:param limits: list of int for limit of each field (excluding PAD and END)
:return: (list of fields, number of options that are dropped)
"""
# List of (dhcp_option_partial, option_not_finished)
partial_options = []
buffers = [0]
if not options:
return ([], 0)
def create_result():
# Remove any unfinished partial options
while partial_options and partial_options[-1][1]:
partial_options.pop()
buffers.append(len(partial_options))
r = [[po for po,_ in partial_options[buffers[i]:buffers[i+1]]] for i in range(0, len(buffers) - 1)]
# Remove empty fields
while r and not r[-1]:
r.pop()
return r
# Current field used size
current_size = 0
limit_iter = iter(limits)
try:
next_limit = next(limit_iter)
except (StopIteration, GeneratorExit):
return ([], False)
for i, (tag, data) in enumerate(option_data):
# Current used data size
data_size = 0
# Do not split very small options on a boundary, this may prevent some broken DHCP clients/servers
# from causing problems
nosplit = (len(data) <= 32)
while True:
# next partial option size should be:
# 1. no more than the current field limit (minus 1-byte tag and 1-byte length)
# 2. no more than the single dhcp_option_partial data limit (255 due to single byte length)
# 3. no more than the rest data size
next_size = min(next_limit - current_size - 2, 255, len(data) - data_size)
if next_size < 0 or (next_size == 0 and data_size < len(data)) \
or (next_size < len(data) - data_size and nosplit):
# Cannot put this part of data on the current field, find the next field
try:
next_limit = next(limit_iter)
except (StopIteration, GeneratorExit):
return (create_result(), len(option_data) - i)
# Record field boundary
buffers.append(len(partial_options))
current_size = 0
else:
# Put this partial option on current field
partial_options.append((dhcp_option_partial(tag = tag, data = data[data_size : data_size + next_size]),
(next_size < len(data) - data_size)))
data_size += next_size
current_size += next_size + 2
if data_size >= len(data):
# finished current option
break
return (create_result(), 0)
# First try to fit all options in options field
# preserve a byte for OPTION_END
result, not_finished = split_options(option_data, [max_options_size - 1])
if not_finished:
if overload & (OVERLOAD_FILE | OVERLOAD_SNAME):
# Try overload
# minus an overload option (1-byte tag, 1-byte length, 1-byte dhcp_overload) and 1-byte OPTION_END
limits = [max_options_size - 4]
if overload & OVERLOAD_FILE:
# preserve a byte for OPTION_END
limits.append(127)
if overload & OVERLOAD_SNAME:
# preserve a byte for OPTION_END
limits.append(63)
result2, not_finished2 = split_options(option_data, limits)
# Only overload if we have a better result
if len(result2) > 1:
result = result2
not_finished = not_finished2
if not allowpartial and not_finished:
raise ValueError("%d options cannot fit into a DHCP message" % (not_finished,))
if not result:
return not_finished
elif len(result) <= 1:
# No overload
payload.options = result[0] + [dhcp_option_partial(tag = OPTION_END)]
else:
overload_option = 0
if len(result) >= 2 and result[1]:
overload_option |= OVERLOAD_FILE
# overload file field
payload.file = dhcp_option_partial[0].tobytes(result[1] + [dhcp_option_partial(tag = OPTION_END)])
if len(result) >= 3 and result[2]:
overload_option |= OVERLOAD_SNAME
# overload sname field
payload.sname = dhcp_option_partial[0].tobytes(result[2] + [dhcp_option_partial(tag = OPTION_END)])
# Put an overload option before any other options
payload.options = [dhcp_option_partial(tag = OPTION_OVERLOAD, data = dhcp_overload.tobytes(overload_option))] \
+ result[0] + [dhcp_option_partial(tag = OPTION_END)]
return not_finished | [
"def",
"build_options",
"(",
"payload",
",",
"options",
",",
"maxsize",
"=",
"576",
",",
"overload",
"=",
"OVERLOAD_FILE",
"|",
"OVERLOAD_SNAME",
",",
"allowpartial",
"=",
"True",
")",
":",
"if",
"maxsize",
"<",
"576",
":",
"maxsize",
"=",
"576",
"max_options_size",
"=",
"maxsize",
"-",
"240",
"# Ignore OPTION_PAD and OPTION_END",
"options",
"=",
"[",
"o",
"for",
"o",
"in",
"options",
"if",
"o",
".",
"tag",
"not",
"in",
"(",
"OPTION_PAD",
",",
"OPTION_END",
")",
"]",
"# Only preserve data",
"option_data",
"=",
"[",
"(",
"o",
".",
"tag",
",",
"o",
".",
"_tobytes",
"(",
")",
"[",
"2",
":",
"]",
")",
"for",
"o",
"in",
"options",
"]",
"def",
"split_options",
"(",
"option_data",
",",
"limits",
")",
":",
"\"\"\"\n Split options into multiple fields\n \n :param option_data: list of (tag, data) pair\n \n :param limits: list of int for limit of each field (excluding PAD and END)\n \n :return: number of options that are dropped\n \"\"\"",
"# List of (dhcp_option_partial, option_not_finished)",
"partial_options",
"=",
"[",
"]",
"buffers",
"=",
"[",
"0",
"]",
"if",
"not",
"options",
":",
"return",
"(",
"[",
"]",
",",
"0",
")",
"def",
"create_result",
"(",
")",
":",
"# Remove any unfinished partial options",
"while",
"partial_options",
"and",
"partial_options",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
":",
"partial_options",
".",
"pop",
"(",
")",
"buffers",
".",
"append",
"(",
"len",
"(",
"partial_options",
")",
")",
"r",
"=",
"[",
"[",
"po",
"for",
"po",
",",
"_",
"in",
"partial_options",
"[",
"buffers",
"[",
"i",
"]",
":",
"buffers",
"[",
"i",
"+",
"1",
"]",
"]",
"]",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"buffers",
")",
"-",
"1",
")",
"]",
"# Remove empty fields",
"while",
"r",
"and",
"not",
"r",
"[",
"-",
"1",
"]",
":",
"r",
".",
"pop",
"(",
")",
"return",
"r",
"# Current field used size",
"current_size",
"=",
"0",
"limit_iter",
"=",
"iter",
"(",
"limits",
")",
"try",
":",
"next_limit",
"=",
"next",
"(",
"limit_iter",
")",
"except",
"(",
"StopIteration",
",",
"GeneratorExit",
")",
":",
"return",
"(",
"[",
"]",
",",
"False",
")",
"for",
"i",
",",
"(",
"tag",
",",
"data",
")",
"in",
"enumerate",
"(",
"option_data",
")",
":",
"# Current used data size",
"data_size",
"=",
"0",
"# Do not split very small options on boundary, this may prevent some broken DHCP clients/servers",
"# to cause problem",
"nosplit",
"=",
"(",
"len",
"(",
"data",
")",
"<=",
"32",
")",
"while",
"True",
":",
"# next partial option size should be:",
"# 1. no more than the current field limit (minus 1-byte tag and 1-byte length)",
"# 2. no more than the single dhcp_option_partial data limit (255 due to single byte length)",
"# 3. no more than the rest data size",
"next_size",
"=",
"min",
"(",
"next_limit",
"-",
"current_size",
"-",
"2",
",",
"255",
",",
"len",
"(",
"data",
")",
"-",
"data_size",
")",
"if",
"next_size",
"<",
"0",
"or",
"(",
"next_size",
"==",
"0",
"and",
"data_size",
"<",
"len",
"(",
"data",
")",
")",
"or",
"(",
"next_size",
"<",
"len",
"(",
"data",
")",
"-",
"data_size",
"and",
"nosplit",
")",
":",
"# Cannot put this part of data on the current field, find the next field",
"try",
":",
"next_limit",
"=",
"next",
"(",
"limit_iter",
")",
"except",
"(",
"StopIteration",
",",
"GeneratorExit",
")",
":",
"return",
"(",
"create_result",
"(",
")",
",",
"len",
"(",
"option_data",
")",
"-",
"i",
")",
"# Record field boundary",
"buffers",
".",
"append",
"(",
"len",
"(",
"partial_options",
")",
")",
"current_size",
"=",
"0",
"else",
":",
"# Put this partial option on current field",
"partial_options",
".",
"append",
"(",
"(",
"dhcp_option_partial",
"(",
"tag",
"=",
"tag",
",",
"data",
"=",
"data",
"[",
"data_size",
":",
"data_size",
"+",
"next_size",
"]",
")",
",",
"(",
"next_size",
"<",
"len",
"(",
"data",
")",
"-",
"data_size",
")",
")",
")",
"data_size",
"+=",
"next_size",
"current_size",
"+=",
"next_size",
"+",
"2",
"if",
"data_size",
">=",
"len",
"(",
"data",
")",
":",
"# finished current option",
"break",
"return",
"(",
"create_result",
"(",
")",
",",
"0",
")",
"# First try to fit all options in options field",
"# preserve a byte for OPTION_END",
"result",
",",
"not_finished",
"=",
"split_options",
"(",
"option_data",
",",
"[",
"max_options_size",
"-",
"1",
"]",
")",
"if",
"not_finished",
":",
"if",
"overload",
"&",
"(",
"OVERLOAD_FILE",
"|",
"OVERLOAD_SNAME",
")",
":",
"# Try overload",
"# minus a overload option (1-byte tag, 1-byte lenght, 1-byte dhcp_overload) and 1-byte OPTION_END",
"limits",
"=",
"[",
"max_options_size",
"-",
"4",
"]",
"if",
"overload",
"&",
"OVERLOAD_FILE",
":",
"# preserve a byte for OPTION_END",
"limits",
".",
"append",
"(",
"127",
")",
"if",
"overload",
"&",
"OVERLOAD_SNAME",
":",
"# preserve a byte for OPTION_END",
"limits",
".",
"append",
"(",
"63",
")",
"result2",
",",
"not_finished2",
"=",
"split_options",
"(",
"option_data",
",",
"limits",
")",
"# Only overload if we have a better result",
"if",
"len",
"(",
"result2",
")",
">",
"1",
":",
"result",
"=",
"result2",
"not_finished",
"=",
"not_finished2",
"if",
"not",
"allowpartial",
"and",
"not_finished",
":",
"raise",
"ValueError",
"(",
"\"%d options cannot fit into a DHCP message\"",
"%",
"(",
"not_finished",
",",
")",
")",
"if",
"not",
"result",
":",
"return",
"not_finished",
"elif",
"len",
"(",
"result",
")",
"<=",
"1",
":",
"# No overload",
"payload",
".",
"options",
"=",
"result",
"[",
"0",
"]",
"+",
"[",
"dhcp_option_partial",
"(",
"tag",
"=",
"OPTION_END",
")",
"]",
"else",
":",
"overload_option",
"=",
"0",
"if",
"len",
"(",
"result",
")",
">=",
"2",
"and",
"result",
"[",
"1",
"]",
":",
"overload_option",
"|=",
"OVERLOAD_FILE",
"# overload file field",
"payload",
".",
"file",
"=",
"dhcp_option_partial",
"[",
"0",
"]",
".",
"tobytes",
"(",
"result",
"[",
"1",
"]",
"+",
"[",
"dhcp_option_partial",
"(",
"tag",
"=",
"OPTION_END",
")",
"]",
")",
"if",
"len",
"(",
"result",
")",
">=",
"3",
"and",
"result",
"[",
"2",
"]",
":",
"overload_option",
"|=",
"OVERLOAD_SNAME",
"# overload sname field",
"payload",
".",
"sname",
"=",
"dhcp_option_partial",
"[",
"0",
"]",
".",
"tobytes",
"(",
"result",
"[",
"2",
"]",
"+",
"[",
"dhcp_option_partial",
"(",
"tag",
"=",
"OPTION_END",
")",
"]",
")",
"# Put an overload option before any other options",
"payload",
".",
"options",
"=",
"[",
"dhcp_option_partial",
"(",
"tag",
"=",
"OPTION_OVERLOAD",
",",
"data",
"=",
"dhcp_overload",
".",
"tobytes",
"(",
"overload_option",
")",
")",
"]",
"+",
"result",
"[",
"0",
"]",
"+",
"[",
"dhcp_option_partial",
"(",
"tag",
"=",
"OPTION_END",
")",
"]",
"return",
"not_finished"
] | Split a list of options
This is the reverse operation of `reassemble_options`: it splits `dhcp_option` into
`dhcp_option_partial` if necessary, and sets the overload option if field overloading is
used.
:param options: a list of `dhcp_option`
:param maxsize: Limit the maximum DHCP message size. If options cannot fit into the DHCP
message, specified fields are overloaded for options. If options cannot
fit after overloading, extra options are DROPPED if allowpartial = True.
It is important to sort the dhcp options by priority.
:param overload: fields that are allowed to be overloaded
:param allowpartial: When options cannot fit into the DHCP message, allow the remaining options
to be dropped.
:return: Number of options that are dropped, i.e. `options[-return_value:]` are dropped | [
"Split",
"a",
"list",
"of",
"options",
"This",
"is",
"the",
"reverse",
"operation",
"of",
"reassemble_options",
"it",
"splits",
"dhcp_option",
"into",
"dhcp_option_partial",
"if",
"necessary",
"and",
"set",
"overload",
"option",
"if",
"field",
"overloading",
"is",
"used",
".",
":",
"param",
"options",
":",
"a",
"list",
"of",
"dhcp_option",
":",
"param",
"maxsize",
":",
"Limit",
"the",
"maximum",
"DHCP",
"message",
"size",
".",
"If",
"options",
"cannot",
"fit",
"into",
"the",
"DHCP",
"message",
"specified",
"fields",
"are",
"overloaded",
"for",
"options",
".",
"If",
"options",
"cannot",
"fit",
"after",
"overloading",
"extra",
"options",
"are",
"DROPPED",
"if",
"allowpartial",
"=",
"True",
".",
"It",
"is",
"important",
"to",
"sort",
"the",
"dhcp",
"options",
"by",
"priority",
".",
":",
"param",
"overload",
":",
"fields",
"that",
"are",
"allowed",
"to",
"be",
"overloaded",
":",
"param",
"allowpartial",
":",
"When",
"options",
"cannot",
"fit",
"into",
"the",
"DHCP",
"message",
"allow",
"the",
"rest",
"options",
"to",
"be",
"dropped",
".",
":",
"return",
":",
"Number",
"of",
"options",
"that",
"are",
"dropped",
"i",
".",
"e",
".",
"options",
"[",
":",
"-",
"return_value",
"]",
"are",
"dropped"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/dhcp.py#L291-L424 |
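
The size arithmetic at the heart of split_options, isolated: each partial option costs len(chunk) + 2 bytes (1-byte tag plus 1-byte length), a single chunk carries at most 255 bytes, and a field holds at most `limit` bytes. A minimal sketch:

def chunk_sizes(data_len, limit):
    sizes, used = [], 0
    while data_len > 0:
        nxt = min(limit - used - 2, 255, data_len)
        if nxt <= 0:
            break                      # field is full; caller moves on
        sizes.append(nxt)
        used += nxt + 2
        data_len -= nxt
    return sizes, data_len             # chunks placed, bytes left over

print(chunk_sizes(600, 312))           # -> ([255, 53], 292)
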
hubo1016/vlcp | vlcp/utils/dhcp.py | create_option_from_value | def create_option_from_value(tag, value):
"""
Create a DHCP option from a human friendly value
"""
dhcp_option.parser()
fake_opt = dhcp_option(tag = tag)
for c in dhcp_option.subclasses:
if c.criteria(fake_opt):
if hasattr(c, '_parse_from_value'):
return c(tag = tag, value = c._parse_from_value(value))
else:
raise ValueError('Invalid DHCP option ' + str(tag) + ": " + repr(value))
else:
fake_opt._setextra(_tobytes(value))
return fake_opt | python | def create_option_from_value(tag, value):
"""
Create a DHCP option from a human friendly value
"""
dhcp_option.parser()
fake_opt = dhcp_option(tag = tag)
for c in dhcp_option.subclasses:
if c.criteria(fake_opt):
if hasattr(c, '_parse_from_value'):
return c(tag = tag, value = c._parse_from_value(value))
else:
raise ValueError('Invalid DHCP option ' + str(tag) + ": " + repr(value))
else:
fake_opt._setextra(_tobytes(value))
return fake_opt | [
"def",
"create_option_from_value",
"(",
"tag",
",",
"value",
")",
":",
"dhcp_option",
".",
"parser",
"(",
")",
"fake_opt",
"=",
"dhcp_option",
"(",
"tag",
"=",
"tag",
")",
"for",
"c",
"in",
"dhcp_option",
".",
"subclasses",
":",
"if",
"c",
".",
"criteria",
"(",
"fake_opt",
")",
":",
"if",
"hasattr",
"(",
"c",
",",
"'_parse_from_value'",
")",
":",
"return",
"c",
"(",
"tag",
"=",
"tag",
",",
"value",
"=",
"c",
".",
"_parse_from_value",
"(",
"value",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Invalid DHCP option '",
"+",
"str",
"(",
"tag",
")",
"+",
"\": \"",
"+",
"repr",
"(",
"value",
")",
")",
"else",
":",
"fake_opt",
".",
"_setextra",
"(",
"_tobytes",
"(",
"value",
")",
")",
"return",
"fake_opt"
] | Create a DHCP option from a human friendly value | [
"Set",
"DHCP",
"option",
"with",
"human",
"friendly",
"value"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/dhcp.py#L427-L441 |
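A short sketch of create_option_from_value in use. Tag 51 (IP address lease time) is a standard DHCP tag; that its registered dhcp_option subclass accepts a bare integer here is an assumption, while the raw-bytes fallback for an unregistered tag follows directly from the _setextra branch above:

    from vlcp.utils.dhcp import create_option_from_value

    lease_opt = create_option_from_value(51, 3600)       # known tag: parsed by its dhcp_option subclass
    vendor_opt = create_option_from_value(224, b'\x01')  # unregistered tag: stored raw via _setextra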
hubo1016/vlcp | vlcp/utils/dhcp.py | create_dhcp_options | def create_dhcp_options(input_dict, ignoreError = False, generateNone = False):
"""
    Try our best to create dhcp_options from human friendly values, ignoring
invalid values
"""
retdict = {}
for k,v in dict(input_dict).items():
try:
if generateNone and v is None:
retdict[k] = None
else:
try:
retdict[k] = create_option_from_value(k, v)
except _EmptyOptionException:
if generateNone:
retdict[k] = None
except Exception:
if ignoreError:
continue
else:
raise
return retdict | python | def create_dhcp_options(input_dict, ignoreError = False, generateNone = False):
"""
    Try our best to create dhcp_options from human friendly values, ignoring
invalid values
"""
retdict = {}
for k,v in dict(input_dict).items():
try:
if generateNone and v is None:
retdict[k] = None
else:
try:
retdict[k] = create_option_from_value(k, v)
except _EmptyOptionException:
if generateNone:
retdict[k] = None
except Exception:
if ignoreError:
continue
else:
raise
return retdict | [
"def",
"create_dhcp_options",
"(",
"input_dict",
",",
"ignoreError",
"=",
"False",
",",
"generateNone",
"=",
"False",
")",
":",
"retdict",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"dict",
"(",
"input_dict",
")",
".",
"items",
"(",
")",
":",
"try",
":",
"if",
"generateNone",
"and",
"v",
"is",
"None",
":",
"retdict",
"[",
"k",
"]",
"=",
"None",
"else",
":",
"try",
":",
"retdict",
"[",
"k",
"]",
"=",
"create_option_from_value",
"(",
"k",
",",
"v",
")",
"except",
"_EmptyOptionException",
":",
"if",
"generateNone",
":",
"retdict",
"[",
"k",
"]",
"=",
"None",
"except",
"Exception",
":",
"if",
"ignoreError",
":",
"continue",
"else",
":",
"raise",
"return",
"retdict"
] | Try our best to create dhcp_options from human friendly values, ignoring
invalid values | [
"Try",
"best",
"to",
"create",
"dhcp_options",
"from",
"human",
"friendly",
"values",
"ignoring",
"invalid",
"values"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/dhcp.py#L444-L465 |
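A sketch of create_dhcp_options on a small input dict; whether tag 3 (router) accepts a dotted-quad string depends on the registered parsers, so treat the concrete values as assumptions:

    opts = create_dhcp_options({51: 3600, 3: '192.168.1.1'}, ignoreError=True)
    # opts maps each valid tag to a dhcp_option; entries that failed to parse were skipped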
hubo1016/vlcp | vlcp/event/event.py | with_indices | def with_indices(*args):
'''
Create indices for an event class. Every event class must be decorated with this decorator.
'''
def decorator(cls):
for c in cls.__bases__:
if hasattr(c, '_indicesNames'):
cls._classnameIndex = c._classnameIndex + 1
for i in range(0, cls._classnameIndex):
setattr(cls, '_classname' + str(i), getattr(c, '_classname' + str(i)))
setattr(cls, '_classname' + str(cls._classnameIndex), cls._getTypename())
cls._indicesNames = c._indicesNames + ('_classname' + str(cls._classnameIndex),) + args
cls._generateTemplate()
return cls
cls._classnameIndex = -1
cls._indicesNames = args
cls._generateTemplate()
return cls
return decorator | python | def with_indices(*args):
'''
Create indices for an event class. Every event class must be decorated with this decorator.
'''
def decorator(cls):
for c in cls.__bases__:
if hasattr(c, '_indicesNames'):
cls._classnameIndex = c._classnameIndex + 1
for i in range(0, cls._classnameIndex):
setattr(cls, '_classname' + str(i), getattr(c, '_classname' + str(i)))
setattr(cls, '_classname' + str(cls._classnameIndex), cls._getTypename())
cls._indicesNames = c._indicesNames + ('_classname' + str(cls._classnameIndex),) + args
cls._generateTemplate()
return cls
cls._classnameIndex = -1
cls._indicesNames = args
cls._generateTemplate()
return cls
return decorator | [
"def",
"with_indices",
"(",
"*",
"args",
")",
":",
"def",
"decorator",
"(",
"cls",
")",
":",
"for",
"c",
"in",
"cls",
".",
"__bases__",
":",
"if",
"hasattr",
"(",
"c",
",",
"'_indicesNames'",
")",
":",
"cls",
".",
"_classnameIndex",
"=",
"c",
".",
"_classnameIndex",
"+",
"1",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"cls",
".",
"_classnameIndex",
")",
":",
"setattr",
"(",
"cls",
",",
"'_classname'",
"+",
"str",
"(",
"i",
")",
",",
"getattr",
"(",
"c",
",",
"'_classname'",
"+",
"str",
"(",
"i",
")",
")",
")",
"setattr",
"(",
"cls",
",",
"'_classname'",
"+",
"str",
"(",
"cls",
".",
"_classnameIndex",
")",
",",
"cls",
".",
"_getTypename",
"(",
")",
")",
"cls",
".",
"_indicesNames",
"=",
"c",
".",
"_indicesNames",
"+",
"(",
"'_classname'",
"+",
"str",
"(",
"cls",
".",
"_classnameIndex",
")",
",",
")",
"+",
"args",
"cls",
".",
"_generateTemplate",
"(",
")",
"return",
"cls",
"cls",
".",
"_classnameIndex",
"=",
"-",
"1",
"cls",
".",
"_indicesNames",
"=",
"args",
"cls",
".",
"_generateTemplate",
"(",
")",
"return",
"cls",
"return",
"decorator"
] | Create indices for an event class. Every event class must be decorated with this decorator. | [
"Create",
"indices",
"for",
"an",
"event",
"class",
".",
"Every",
"event",
"class",
"must",
"be",
"decorated",
"with",
"this",
"decorator",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/event.py#L208-L226 |
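A sketch of the decorator in use, assuming the usual vlcp convention that Event's constructor fills the declared indices positionally; the class and index names are made up:

    from vlcp.event.event import Event, with_indices

    @with_indices('connection', 'state')
    class ConnStateEvent(Event):
        pass

    ev = ConnStateEvent(conn, 'up')   # 'connection' and 'state' become matchable indices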
hubo1016/vlcp | vlcp/event/event.py | Diff_.two_way_difference | def two_way_difference(self, b, extra_add = (), extra_remove = ()):
"""
Return (self - b, b - self)
"""
if self is b:
return ((), ())
if isinstance(b, DiffRef_):
extra_remove = extra_remove + b.add
b = b.origin
if extra_add == extra_remove:
extra_add = extra_remove = ()
if isinstance(b, Diff_):
if self.base is b.base:
first = self.add + b.remove
second = self.remove + b.add
elif self.base is b:
first = self.add
second = self.remove
elif b.base is self:
first = b.remove
second = b.add
else:
first = self
second = b
else:
first = self
second = b
if not first and not extra_add:
return ((), tuple(second) + tuple(extra_remove))
elif not second and not extra_remove:
return (tuple(first) + tuple(extra_add), ())
else:
first = set(first)
first.update(extra_add)
second = set(second)
second.update(extra_remove)
return tuple(first.difference(second)), tuple(second.difference(first)) | python | def two_way_difference(self, b, extra_add = (), extra_remove = ()):
"""
Return (self - b, b - self)
"""
if self is b:
return ((), ())
if isinstance(b, DiffRef_):
extra_remove = extra_remove + b.add
b = b.origin
if extra_add == extra_remove:
extra_add = extra_remove = ()
if isinstance(b, Diff_):
if self.base is b.base:
first = self.add + b.remove
second = self.remove + b.add
elif self.base is b:
first = self.add
second = self.remove
elif b.base is self:
first = b.remove
second = b.add
else:
first = self
second = b
else:
first = self
second = b
if not first and not extra_add:
return ((), tuple(second) + tuple(extra_remove))
elif not second and not extra_remove:
return (tuple(first) + tuple(extra_add), ())
else:
first = set(first)
first.update(extra_add)
second = set(second)
second.update(extra_remove)
return tuple(first.difference(second)), tuple(second.difference(first)) | [
"def",
"two_way_difference",
"(",
"self",
",",
"b",
",",
"extra_add",
"=",
"(",
")",
",",
"extra_remove",
"=",
"(",
")",
")",
":",
"if",
"self",
"is",
"b",
":",
"return",
"(",
"(",
")",
",",
"(",
")",
")",
"if",
"isinstance",
"(",
"b",
",",
"DiffRef_",
")",
":",
"extra_remove",
"=",
"extra_remove",
"+",
"b",
".",
"add",
"b",
"=",
"b",
".",
"origin",
"if",
"extra_add",
"==",
"extra_remove",
":",
"extra_add",
"=",
"extra_remove",
"=",
"(",
")",
"if",
"isinstance",
"(",
"b",
",",
"Diff_",
")",
":",
"if",
"self",
".",
"base",
"is",
"b",
".",
"base",
":",
"first",
"=",
"self",
".",
"add",
"+",
"b",
".",
"remove",
"second",
"=",
"self",
".",
"remove",
"+",
"b",
".",
"add",
"elif",
"self",
".",
"base",
"is",
"b",
":",
"first",
"=",
"self",
".",
"add",
"second",
"=",
"self",
".",
"remove",
"elif",
"b",
".",
"base",
"is",
"self",
":",
"first",
"=",
"b",
".",
"remove",
"second",
"=",
"b",
".",
"add",
"else",
":",
"first",
"=",
"self",
"second",
"=",
"b",
"else",
":",
"first",
"=",
"self",
"second",
"=",
"b",
"if",
"not",
"first",
"and",
"not",
"extra_add",
":",
"return",
"(",
"(",
")",
",",
"tuple",
"(",
"second",
")",
"+",
"tuple",
"(",
"extra_remove",
")",
")",
"elif",
"not",
"second",
"and",
"not",
"extra_remove",
":",
"return",
"(",
"tuple",
"(",
"first",
")",
"+",
"tuple",
"(",
"extra_add",
")",
",",
"(",
")",
")",
"else",
":",
"first",
"=",
"set",
"(",
"first",
")",
"first",
".",
"update",
"(",
"extra_add",
")",
"second",
"=",
"set",
"(",
"second",
")",
"second",
".",
"update",
"(",
"extra_remove",
")",
"return",
"tuple",
"(",
"first",
".",
"difference",
"(",
"second",
")",
")",
",",
"tuple",
"(",
"second",
".",
"difference",
"(",
"first",
")",
")"
] | Return (self - b, b - self) | [
"Return",
"(",
"self",
"-",
"b",
"b",
"-",
"self",
")"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/event.py#L166-L202 |
hubo1016/vlcp | vlcp/event/event.py | Event.getTypename | def getTypename(cls):
'''
        :returns: the proper name to match
'''
if cls is Event:
return None
else:
for c in cls.__bases__:
if issubclass(c, Event):
if c is Event:
return cls._getTypename()
else:
return c.getTypename() | python | def getTypename(cls):
'''
        :returns: the proper name to match
'''
if cls is Event:
return None
else:
for c in cls.__bases__:
if issubclass(c, Event):
if c is Event:
return cls._getTypename()
else:
return c.getTypename() | [
"def",
"getTypename",
"(",
"cls",
")",
":",
"if",
"cls",
"is",
"Event",
":",
"return",
"None",
"else",
":",
"for",
"c",
"in",
"cls",
".",
"__bases__",
":",
"if",
"issubclass",
"(",
"c",
",",
"Event",
")",
":",
"if",
"c",
"is",
"Event",
":",
"return",
"cls",
".",
"_getTypename",
"(",
")",
"else",
":",
"return",
"c",
".",
"getTypename",
"(",
")"
] | :returns: the proper name to match | [
":",
"returns",
":",
"return",
"the",
"proper",
"name",
"to",
"match"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/event.py#L292-L304 |
hubo1016/vlcp | vlcp/event/event.py | Event.createMatcher | def createMatcher(cls, *args, **kwargs):
'''
:param _ismatch: user-defined function ismatch(event) for matching test
:param \*args: indices
:param \*\*kwargs: index_name=index_value for matching criteria
'''
if kwargs and not args:
return EventMatcher(tuple(getattr(cls, ind) if ind[:10] == '_classname' else kwargs.get(ind) for ind in cls.indicesNames()), kwargs.get('_ismatch'))
else:
return EventMatcher(tuple(cls._generateIndices(args)), kwargs.get('_ismatch')) | python | def createMatcher(cls, *args, **kwargs):
'''
:param _ismatch: user-defined function ismatch(event) for matching test
:param \*args: indices
:param \*\*kwargs: index_name=index_value for matching criteria
'''
if kwargs and not args:
return EventMatcher(tuple(getattr(cls, ind) if ind[:10] == '_classname' else kwargs.get(ind) for ind in cls.indicesNames()), kwargs.get('_ismatch'))
else:
return EventMatcher(tuple(cls._generateIndices(args)), kwargs.get('_ismatch')) | [
"def",
"createMatcher",
"(",
"cls",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"kwargs",
"and",
"not",
"args",
":",
"return",
"EventMatcher",
"(",
"tuple",
"(",
"getattr",
"(",
"cls",
",",
"ind",
")",
"if",
"ind",
"[",
":",
"10",
"]",
"==",
"'_classname'",
"else",
"kwargs",
".",
"get",
"(",
"ind",
")",
"for",
"ind",
"in",
"cls",
".",
"indicesNames",
"(",
")",
")",
",",
"kwargs",
".",
"get",
"(",
"'_ismatch'",
")",
")",
"else",
":",
"return",
"EventMatcher",
"(",
"tuple",
"(",
"cls",
".",
"_generateIndices",
"(",
"args",
")",
")",
",",
"kwargs",
".",
"get",
"(",
"'_ismatch'",
")",
")"
] | :param _ismatch: user-defined function ismatch(event) for matching test
:param \*args: indices
:param \*\*kwargs: index_name=index_value for matching criteria | [
":",
"param",
"_ismatch",
":",
"user",
"-",
"defined",
"function",
"ismatch",
"(",
"event",
")",
"for",
"matching",
"test",
":",
"param",
"\\",
"*",
"args",
":",
"indices",
":",
"param",
"\\",
"*",
"\\",
"*",
"kwargs",
":",
"index_name",
"=",
"index_value",
"for",
"matching",
"criteria"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/event.py#L331-L340 |
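Continuing the hypothetical ConnStateEvent from the with_indices sketch above, both matcher styles look like this; treating None as a positional wildcard is an assumption about EventMatcher:

    m1 = ConnStateEvent.createMatcher(state='up')   # keyword form: any connection, state == 'up'
    m2 = ConnStateEvent.createMatcher(None, 'up')   # positional form with a wildcard first index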
hubo1016/vlcp | vlcp/event/ratelimiter.py | RateLimiter.limit | async def limit(self, use = 1):
"""
Acquire "resources", wait until enough "resources" are acquired. For each loop,
`limit` number of "resources" are permitted.
        :param use: number of "resources" to be used.
        :return: True if it is limited
"""
c = self._counter
self._counter = c + use
if self._task is None:
self._task = self._container.subroutine(self._limiter_task(), False)
if c >= self._bottom_line:
# Limited
await RateLimitingEvent.createMatcher(self, c // self._limit)
return True
else:
return False | python | async def limit(self, use = 1):
"""
Acquire "resources", wait until enough "resources" are acquired. For each loop,
`limit` number of "resources" are permitted.
        :param use: number of "resources" to be used.
        :return: True if it is limited
"""
c = self._counter
self._counter = c + use
if self._task is None:
self._task = self._container.subroutine(self._limiter_task(), False)
if c >= self._bottom_line:
# Limited
await RateLimitingEvent.createMatcher(self, c // self._limit)
return True
else:
return False | [
"async",
"def",
"limit",
"(",
"self",
",",
"use",
"=",
"1",
")",
":",
"c",
"=",
"self",
".",
"_counter",
"self",
".",
"_counter",
"=",
"c",
"+",
"use",
"if",
"self",
".",
"_task",
"is",
"None",
":",
"self",
".",
"_task",
"=",
"self",
".",
"_container",
".",
"subroutine",
"(",
"self",
".",
"_limiter_task",
"(",
")",
",",
"False",
")",
"if",
"c",
">=",
"self",
".",
"_bottom_line",
":",
"# Limited",
"await",
"RateLimitingEvent",
".",
"createMatcher",
"(",
"self",
",",
"c",
"//",
"self",
".",
"_limit",
")",
"return",
"True",
"else",
":",
"return",
"False"
] | Acquire "resources", wait until enough "resources" are acquired. For each loop,
`limit` number of "resources" are permitted.
        :param use: number of "resources" to be used.
        :return: True if it is limited | [
"Acquire",
"resources",
"wait",
"until",
"enough",
"resources",
"are",
"acquired",
".",
"For",
"each",
"loop",
"limit",
"number",
"of",
"resources",
"are",
"permitted",
".",
":",
"param",
"use",
":",
"number",
"of",
"resouces",
"to",
"be",
"used",
".",
":",
"return",
":",
"True",
"if",
"is",
"limited"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/ratelimiter.py#L55-L73 |
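A sketch of the intended throttling pattern; the RateLimiter constructor arguments shown here are assumptions:

    limiter = RateLimiter(limit=100, container=my_container)

    async def handle_one(request):
        limited = await limiter.limit()   # True means this call was deferred to a later loop
        process(request)                  # hypothetical handler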
hubo1016/vlcp | vlcp/utils/connector.py | TaskPool.run_task | async def run_task(self, container, task, newthread = False):
"Run task() in task pool. Raise an exception or return the return value"
e = TaskEvent(self, task=task, newthread = newthread)
await container.wait_for_send(e)
ev = await TaskDoneEvent.createMatcher(e)
if hasattr(ev, 'exception'):
raise ev.exception
else:
return ev.result | python | async def run_task(self, container, task, newthread = False):
"Run task() in task pool. Raise an exception or return the return value"
e = TaskEvent(self, task=task, newthread = newthread)
await container.wait_for_send(e)
ev = await TaskDoneEvent.createMatcher(e)
if hasattr(ev, 'exception'):
raise ev.exception
else:
return ev.result | [
"async",
"def",
"run_task",
"(",
"self",
",",
"container",
",",
"task",
",",
"newthread",
"=",
"False",
")",
":",
"e",
"=",
"TaskEvent",
"(",
"self",
",",
"task",
"=",
"task",
",",
"newthread",
"=",
"newthread",
")",
"await",
"container",
".",
"wait_for_send",
"(",
"e",
")",
"ev",
"=",
"await",
"TaskDoneEvent",
".",
"createMatcher",
"(",
"e",
")",
"if",
"hasattr",
"(",
"ev",
",",
"'exception'",
")",
":",
"raise",
"ev",
".",
"exception",
"else",
":",
"return",
"ev",
".",
"result"
] | Run task() in task pool. Raise an exception or return the return value | [
"Run",
"task",
"()",
"in",
"task",
"pool",
".",
"Raise",
"an",
"exception",
"or",
"return",
"the",
"return",
"value"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/connector.py#L450-L458 |
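A sketch of offloading a blocking call through run_task; task_pool, container and expensive_io are hypothetical names, but the call shape follows the signature above:

    result = await task_pool.run_task(container, lambda: expensive_io(path))
    # re-raises inside the routine if the pooled task raised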
hubo1016/vlcp | vlcp/utils/connector.py | TaskPool.run_gen_task | async def run_gen_task(self, container, gentask, newthread = True):
"Run generator gentask() in task pool, yield customized events"
e = TaskEvent(self, gen_task = gentask, newthread = newthread)
await container.wait_for_send(e)
ev = await TaskDoneEvent.createMatcher(e)
if hasattr(ev, 'exception'):
raise ev.exception
else:
return ev.result | python | async def run_gen_task(self, container, gentask, newthread = True):
"Run generator gentask() in task pool, yield customized events"
e = TaskEvent(self, gen_task = gentask, newthread = newthread)
await container.wait_for_send(e)
ev = await TaskDoneEvent.createMatcher(e)
if hasattr(ev, 'exception'):
raise ev.exception
else:
return ev.result | [
"async",
"def",
"run_gen_task",
"(",
"self",
",",
"container",
",",
"gentask",
",",
"newthread",
"=",
"True",
")",
":",
"e",
"=",
"TaskEvent",
"(",
"self",
",",
"gen_task",
"=",
"gentask",
",",
"newthread",
"=",
"newthread",
")",
"await",
"container",
".",
"wait_for_send",
"(",
"e",
")",
"ev",
"=",
"await",
"TaskDoneEvent",
".",
"createMatcher",
"(",
"e",
")",
"if",
"hasattr",
"(",
"ev",
",",
"'exception'",
")",
":",
"raise",
"ev",
".",
"exception",
"else",
":",
"return",
"ev",
".",
"result"
] | Run generator gentask() in task pool, yield customized events | [
"Run",
"generator",
"gentask",
"()",
"in",
"task",
"pool",
"yield",
"customized",
"events"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/connector.py#L462-L470 |
hubo1016/vlcp | vlcp/utils/connector.py | TaskPool.run_async_task | async def run_async_task(self, container, asynctask, newthread = True):
"Run asynctask(sender) in task pool, call sender(events) to send customized events, return result"
e = TaskEvent(self, async_task = asynctask, newthread = newthread)
await container.wait_for_send(e)
ev = await TaskDoneEvent.createMatcher(e)
if hasattr(ev, 'exception'):
raise ev.exception
else:
return ev.result | python | async def run_async_task(self, container, asynctask, newthread = True):
"Run asynctask(sender) in task pool, call sender(events) to send customized events, return result"
e = TaskEvent(self, async_task = asynctask, newthread = newthread)
await container.wait_for_send(e)
ev = await TaskDoneEvent.createMatcher(e)
if hasattr(ev, 'exception'):
raise ev.exception
else:
return ev.result | [
"async",
"def",
"run_async_task",
"(",
"self",
",",
"container",
",",
"asynctask",
",",
"newthread",
"=",
"True",
")",
":",
"e",
"=",
"TaskEvent",
"(",
"self",
",",
"async_task",
"=",
"asynctask",
",",
"newthread",
"=",
"newthread",
")",
"await",
"container",
".",
"wait_for_send",
"(",
"e",
")",
"ev",
"=",
"await",
"TaskDoneEvent",
".",
"createMatcher",
"(",
"e",
")",
"if",
"hasattr",
"(",
"ev",
",",
"'exception'",
")",
":",
"raise",
"ev",
".",
"exception",
"else",
":",
"return",
"ev",
".",
"result"
] | Run asynctask(sender) in task pool, call sender(events) to send customized events, return result | [
"Run",
"asynctask",
"(",
"sender",
")",
"in",
"task",
"pool",
"call",
"sender",
"(",
"events",
")",
"to",
"send",
"customized",
"events",
"return",
"result"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/connector.py#L474-L482 |
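A sketch of the sender pattern for run_async_task; read_chunks and DataChunkEvent are hypothetical names:

    def poll_in_thread(sender):
        for chunk in read_chunks():            # blocking work in the pool thread
            sender([DataChunkEvent(chunk)])    # push customized events back to the scheduler
        return 'done'

    result = await task_pool.run_async_task(container, poll_in_thread)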
hubo1016/vlcp | vlcp/event/lock.py | Lock.lock | async def lock(self, container = None):
"Wait for lock acquire"
if container is None:
container = RoutineContainer.get_container(self.scheduler)
if self.locked:
pass
elif self.lockroutine:
await LockedEvent.createMatcher(self)
else:
await container.wait_for_send(LockEvent(self.context, self.key, self))
self.locked = True | python | async def lock(self, container = None):
"Wait for lock acquire"
if container is None:
container = RoutineContainer.get_container(self.scheduler)
if self.locked:
pass
elif self.lockroutine:
await LockedEvent.createMatcher(self)
else:
await container.wait_for_send(LockEvent(self.context, self.key, self))
self.locked = True | [
"async",
"def",
"lock",
"(",
"self",
",",
"container",
"=",
"None",
")",
":",
"if",
"container",
"is",
"None",
":",
"container",
"=",
"RoutineContainer",
".",
"get_container",
"(",
"self",
".",
"scheduler",
")",
"if",
"self",
".",
"locked",
":",
"pass",
"elif",
"self",
".",
"lockroutine",
":",
"await",
"LockedEvent",
".",
"createMatcher",
"(",
"self",
")",
"else",
":",
"await",
"container",
".",
"wait_for_send",
"(",
"LockEvent",
"(",
"self",
".",
"context",
",",
"self",
".",
"key",
",",
"self",
")",
")",
"self",
".",
"locked",
"=",
"True"
] | Wait for lock acquire | [
"Wait",
"for",
"lock",
"acquire"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/lock.py#L54-L64 |
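A sketch of the lock/unlock pattern; the Lock constructor arguments are assumptions:

    lock = Lock(('mytable', record_id), container.scheduler)
    await lock.lock(container)
    try:
        ...   # critical section guarded by the key
    finally:
        lock.unlock()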
hubo1016/vlcp | vlcp/event/lock.py | Lock.trylock | def trylock(self):
"Try to acquire lock and return True; if cannot acquire the lock at this moment, return False."
if self.locked:
return True
if self.lockroutine:
return False
waiter = self.scheduler.send(LockEvent(self.context, self.key, self))
if waiter:
return False
else:
self.locked = True
return True | python | def trylock(self):
"Try to acquire lock and return True; if cannot acquire the lock at this moment, return False."
if self.locked:
return True
if self.lockroutine:
return False
waiter = self.scheduler.send(LockEvent(self.context, self.key, self))
if waiter:
return False
else:
self.locked = True
return True | [
"def",
"trylock",
"(",
"self",
")",
":",
"if",
"self",
".",
"locked",
":",
"return",
"True",
"if",
"self",
".",
"lockroutine",
":",
"return",
"False",
"waiter",
"=",
"self",
".",
"scheduler",
".",
"send",
"(",
"LockEvent",
"(",
"self",
".",
"context",
",",
"self",
".",
"key",
",",
"self",
")",
")",
"if",
"waiter",
":",
"return",
"False",
"else",
":",
"self",
".",
"locked",
"=",
"True",
"return",
"True"
] | Try to acquire lock and return True; if it cannot acquire the lock at this moment, return False. | [
"Try",
"to",
"acquire",
"lock",
"and",
"return",
"True",
";",
"if",
"cannot",
"acquire",
"the",
"lock",
"at",
"this",
"moment",
"return",
"False",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/lock.py#L65-L76 |
hubo1016/vlcp | vlcp/event/lock.py | Lock.beginlock | def beginlock(self, container):
"Start to acquire lock in another routine. Call trylock or lock later to acquire the lock. Call unlock to cancel the lock routine"
if self.locked:
return True
if self.lockroutine:
return False
self.lockroutine = container.subroutine(self._lockroutine(container), False)
return self.locked | python | def beginlock(self, container):
"Start to acquire lock in another routine. Call trylock or lock later to acquire the lock. Call unlock to cancel the lock routine"
if self.locked:
return True
if self.lockroutine:
return False
self.lockroutine = container.subroutine(self._lockroutine(container), False)
return self.locked | [
"def",
"beginlock",
"(",
"self",
",",
"container",
")",
":",
"if",
"self",
".",
"locked",
":",
"return",
"True",
"if",
"self",
".",
"lockroutine",
":",
"return",
"False",
"self",
".",
"lockroutine",
"=",
"container",
".",
"subroutine",
"(",
"self",
".",
"_lockroutine",
"(",
"container",
")",
",",
"False",
")",
"return",
"self",
".",
"locked"
] | Start to acquire lock in another routine. Call trylock or lock later to acquire the lock. Call unlock to cancel the lock routine | [
"Start",
"to",
"acquire",
"lock",
"in",
"another",
"routine",
".",
"Call",
"trylock",
"or",
"lock",
"later",
"to",
"acquire",
"the",
"lock",
".",
"Call",
"unlock",
"to",
"cancel",
"the",
"lock",
"routine"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/lock.py#L80-L87 |
hubo1016/vlcp | vlcp/event/lock.py | Lock.unlock | def unlock(self):
"Unlock the key"
if self.lockroutine:
self.lockroutine.close()
self.lockroutine = None
if self.locked:
self.locked = False
self.scheduler.ignore(LockEvent.createMatcher(self.context, self.key, self)) | python | def unlock(self):
"Unlock the key"
if self.lockroutine:
self.lockroutine.close()
self.lockroutine = None
if self.locked:
self.locked = False
self.scheduler.ignore(LockEvent.createMatcher(self.context, self.key, self)) | [
"def",
"unlock",
"(",
"self",
")",
":",
"if",
"self",
".",
"lockroutine",
":",
"self",
".",
"lockroutine",
".",
"close",
"(",
")",
"self",
".",
"lockroutine",
"=",
"None",
"if",
"self",
".",
"locked",
":",
"self",
".",
"locked",
"=",
"False",
"self",
".",
"scheduler",
".",
"ignore",
"(",
"LockEvent",
".",
"createMatcher",
"(",
"self",
".",
"context",
",",
"self",
".",
"key",
",",
"self",
")",
")"
] | Unlock the key | [
"Unlock",
"the",
"key"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/lock.py#L88-L95 |
hubo1016/vlcp | vlcp/event/lock.py | Semaphore.create | def create(self):
"""
Create the subqueue to change the default behavior of Lock to semaphore.
"""
self.queue = self.scheduler.queue.addSubQueue(self.priority, LockEvent.createMatcher(self.context, self.key),
maxdefault = self.size, defaultQueueClass = CBQueue.AutoClassQueue.initHelper('locker', subqueuelimit = 1)) | python | def create(self):
"""
Create the subqueue to change the default behavior of Lock to semaphore.
"""
self.queue = self.scheduler.queue.addSubQueue(self.priority, LockEvent.createMatcher(self.context, self.key),
maxdefault = self.size, defaultQueueClass = CBQueue.AutoClassQueue.initHelper('locker', subqueuelimit = 1)) | [
"def",
"create",
"(",
"self",
")",
":",
"self",
".",
"queue",
"=",
"self",
".",
"scheduler",
".",
"queue",
".",
"addSubQueue",
"(",
"self",
".",
"priority",
",",
"LockEvent",
".",
"createMatcher",
"(",
"self",
".",
"context",
",",
"self",
".",
"key",
")",
",",
"maxdefault",
"=",
"self",
".",
"size",
",",
"defaultQueueClass",
"=",
"CBQueue",
".",
"AutoClassQueue",
".",
"initHelper",
"(",
"'locker'",
",",
"subqueuelimit",
"=",
"1",
")",
")"
] | Create the subqueue to change the default behavior of Lock to semaphore. | [
"Create",
"the",
"subqueue",
"to",
"change",
"the",
"default",
"behavior",
"of",
"Lock",
"to",
"semaphore",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/lock.py#L141-L146 |
hubo1016/vlcp | vlcp/event/lock.py | Semaphore.destroy | async def destroy(self, container = None):
"""
Destroy the created subqueue to change the behavior back to Lock
"""
if container is None:
container = RoutineContainer(self.scheduler)
if self.queue is not None:
await container.syscall_noreturn(syscall_removequeue(self.scheduler.queue, self.queue))
self.queue = None | python | async def destroy(self, container = None):
"""
Destroy the created subqueue to change the behavior back to Lock
"""
if container is None:
container = RoutineContainer(self.scheduler)
if self.queue is not None:
await container.syscall_noreturn(syscall_removequeue(self.scheduler.queue, self.queue))
self.queue = None | [
"async",
"def",
"destroy",
"(",
"self",
",",
"container",
"=",
"None",
")",
":",
"if",
"container",
"is",
"None",
":",
"container",
"=",
"RoutineContainer",
"(",
"self",
".",
"scheduler",
")",
"if",
"self",
".",
"queue",
"is",
"not",
"None",
":",
"await",
"container",
".",
"syscall_noreturn",
"(",
"syscall_removequeue",
"(",
"self",
".",
"scheduler",
".",
"queue",
",",
"self",
".",
"queue",
")",
")",
"self",
".",
"queue",
"=",
"None"
] | Destroy the created subqueue to change the behavior back to Lock | [
"Destroy",
"the",
"created",
"subqueue",
"to",
"change",
"the",
"behavior",
"back",
"to",
"Lock"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/lock.py#L147-L155 |
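A sketch tying Semaphore.create and destroy together; the constructor argument order is an assumption:

    sem = Semaphore(('mytable', record_id), 4, container.scheduler)
    sem.create()                   # Lock objects on this key now admit up to 4 holders
    ...
    await sem.destroy(container)   # revert to plain mutual exclusion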
hubo1016/vlcp | vlcp/event/connection.py | Connection.shutdown | async def shutdown(self, force = False, connmark = -1):
'''
Can call without delegate
'''
if connmark is None:
connmark = self.connmark
self.scheduler.emergesend(ConnectionControlEvent(self, ConnectionControlEvent.SHUTDOWN, force, connmark)) | python | async def shutdown(self, force = False, connmark = -1):
'''
Can call without delegate
'''
if connmark is None:
connmark = self.connmark
self.scheduler.emergesend(ConnectionControlEvent(self, ConnectionControlEvent.SHUTDOWN, force, connmark)) | [
"async",
"def",
"shutdown",
"(",
"self",
",",
"force",
"=",
"False",
",",
"connmark",
"=",
"-",
"1",
")",
":",
"if",
"connmark",
"is",
"None",
":",
"connmark",
"=",
"self",
".",
"connmark",
"self",
".",
"scheduler",
".",
"emergesend",
"(",
"ConnectionControlEvent",
"(",
"self",
",",
"ConnectionControlEvent",
".",
"SHUTDOWN",
",",
"force",
",",
"connmark",
")",
")"
] | Can call without delegate | [
"Can",
"call",
"without",
"delegate"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/connection.py#L367-L373 |
hubo1016/vlcp | vlcp/event/connection.py | Connection.reconnect | async def reconnect(self, force = True, connmark = None):
'''
Can call without delegate
'''
if connmark is None:
connmark = self.connmark
self.scheduler.emergesend(ConnectionControlEvent(self, ConnectionControlEvent.RECONNECT, force, connmark)) | python | async def reconnect(self, force = True, connmark = None):
'''
Can call without delegate
'''
if connmark is None:
connmark = self.connmark
self.scheduler.emergesend(ConnectionControlEvent(self, ConnectionControlEvent.RECONNECT, force, connmark)) | [
"async",
"def",
"reconnect",
"(",
"self",
",",
"force",
"=",
"True",
",",
"connmark",
"=",
"None",
")",
":",
"if",
"connmark",
"is",
"None",
":",
"connmark",
"=",
"self",
".",
"connmark",
"self",
".",
"scheduler",
".",
"emergesend",
"(",
"ConnectionControlEvent",
"(",
"self",
",",
"ConnectionControlEvent",
".",
"RECONNECT",
",",
"force",
",",
"connmark",
")",
")"
] | Can call without delegate | [
"Can",
"call",
"without",
"delegate"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/connection.py#L374-L380 |
hubo1016/vlcp | vlcp/event/connection.py | Connection.reset | async def reset(self, force = True, connmark = None):
'''
Can call without delegate
'''
if connmark is None:
connmark = self.connmark
self.scheduler.emergesend(ConnectionControlEvent(self, ConnectionControlEvent.RESET, force, connmark)) | python | async def reset(self, force = True, connmark = None):
'''
Can call without delegate
'''
if connmark is None:
connmark = self.connmark
self.scheduler.emergesend(ConnectionControlEvent(self, ConnectionControlEvent.RESET, force, connmark)) | [
"async",
"def",
"reset",
"(",
"self",
",",
"force",
"=",
"True",
",",
"connmark",
"=",
"None",
")",
":",
"if",
"connmark",
"is",
"None",
":",
"connmark",
"=",
"self",
".",
"connmark",
"self",
".",
"scheduler",
".",
"emergesend",
"(",
"ConnectionControlEvent",
"(",
"self",
",",
"ConnectionControlEvent",
".",
"RESET",
",",
"force",
",",
"connmark",
")",
")"
] | Can call without delegate | [
"Can",
"call",
"without",
"delegate"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/connection.py#L381-L387 |
hubo1016/vlcp | vlcp/event/connection.py | Connection.write | async def write(self, event, ignoreException = True):
'''
Can call without delegate
'''
connmark = self.connmark
if self.connected:
def _until():
if not self.connected or self.connmark != connmark:
return True
r = await self.wait_for_send(event, until=_until)
if r:
if ignoreException:
return
else:
                    raise ConnectionResetException
else:
if not ignoreException:
raise ConnectionResetException | python | async def write(self, event, ignoreException = True):
'''
Can call without delegate
'''
connmark = self.connmark
if self.connected:
def _until():
if not self.connected or self.connmark != connmark:
return True
r = await self.wait_for_send(event, until=_until)
if r:
if ignoreException:
return
else:
                    raise ConnectionResetException
else:
if not ignoreException:
raise ConnectionResetException | [
"async",
"def",
"write",
"(",
"self",
",",
"event",
",",
"ignoreException",
"=",
"True",
")",
":",
"connmark",
"=",
"self",
".",
"connmark",
"if",
"self",
".",
"connected",
":",
"def",
"_until",
"(",
")",
":",
"if",
"not",
"self",
".",
"connected",
"or",
"self",
".",
"connmark",
"!=",
"connmark",
":",
"return",
"True",
"r",
"=",
"await",
"self",
".",
"wait_for_send",
"(",
"event",
",",
"until",
"=",
"_until",
")",
"if",
"r",
":",
"if",
"ignoreException",
":",
"return",
"else",
":",
"raise",
"else",
":",
"if",
"not",
"ignoreException",
":",
"raise",
"ConnectionResetException"
] | Can call without delegate | [
"Can",
"call",
"without",
"delegate"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/connection.py#L388-L405 |
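A sketch of write with exceptions enabled, matching the ConnectionResetException path in the code above; conn and data_event are hypothetical:

    try:
        await conn.write(data_event, ignoreException=False)
    except ConnectionResetException:
        ...   # the connection dropped while the event was queued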
hubo1016/vlcp | vlcp/event/connection.py | TcpServer.shutdown | async def shutdown(self, connmark = -1):
'''
Can call without delegate
'''
if connmark is None:
connmark = self.connmark
self.scheduler.emergesend(ConnectionControlEvent(self, ConnectionControlEvent.SHUTDOWN, True, connmark)) | python | async def shutdown(self, connmark = -1):
'''
Can call without delegate
'''
if connmark is None:
connmark = self.connmark
self.scheduler.emergesend(ConnectionControlEvent(self, ConnectionControlEvent.SHUTDOWN, True, connmark)) | [
"async",
"def",
"shutdown",
"(",
"self",
",",
"connmark",
"=",
"-",
"1",
")",
":",
"if",
"connmark",
"is",
"None",
":",
"connmark",
"=",
"self",
".",
"connmark",
"self",
".",
"scheduler",
".",
"emergesend",
"(",
"ConnectionControlEvent",
"(",
"self",
",",
"ConnectionControlEvent",
".",
"SHUTDOWN",
",",
"True",
",",
"connmark",
")",
")"
] | Can call without delegate | [
"Can",
"call",
"without",
"delegate"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/connection.py#L1056-L1062 |
hubo1016/vlcp | vlcp/event/connection.py | TcpServer.stoplisten | async def stoplisten(self, connmark = -1):
'''
Can call without delegate
'''
if connmark is None:
connmark = self.connmark
self.scheduler.emergesend(ConnectionControlEvent(self, ConnectionControlEvent.STOPLISTEN, True, connmark)) | python | async def stoplisten(self, connmark = -1):
'''
Can call without delegate
'''
if connmark is None:
connmark = self.connmark
self.scheduler.emergesend(ConnectionControlEvent(self, ConnectionControlEvent.STOPLISTEN, True, connmark)) | [
"async",
"def",
"stoplisten",
"(",
"self",
",",
"connmark",
"=",
"-",
"1",
")",
":",
"if",
"connmark",
"is",
"None",
":",
"connmark",
"=",
"self",
".",
"connmark",
"self",
".",
"scheduler",
".",
"emergesend",
"(",
"ConnectionControlEvent",
"(",
"self",
",",
"ConnectionControlEvent",
".",
"STOPLISTEN",
",",
"True",
",",
"connmark",
")",
")"
] | Can call without delegate | [
"Can",
"call",
"without",
"delegate"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/connection.py#L1064-L1070 |
hubo1016/vlcp | vlcp/event/connection.py | TcpServer.startlisten | async def startlisten(self, connmark = -1):
'''
Can call without delegate
'''
if connmark is None:
connmark = self.connmark
self.scheduler.emergesend(ConnectionControlEvent(self, ConnectionControlEvent.STARTLISTEN, True, connmark)) | python | async def startlisten(self, connmark = -1):
'''
Can call without delegate
'''
if connmark is None:
connmark = self.connmark
self.scheduler.emergesend(ConnectionControlEvent(self, ConnectionControlEvent.STARTLISTEN, True, connmark)) | [
"async",
"def",
"startlisten",
"(",
"self",
",",
"connmark",
"=",
"-",
"1",
")",
":",
"if",
"connmark",
"is",
"None",
":",
"connmark",
"=",
"self",
".",
"connmark",
"self",
".",
"scheduler",
".",
"emergesend",
"(",
"ConnectionControlEvent",
"(",
"self",
",",
"ConnectionControlEvent",
".",
"STARTLISTEN",
",",
"True",
",",
"connmark",
")",
")"
] | Can call without delegate | [
"Can",
"call",
"without",
"delegate"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/connection.py#L1072-L1078 |
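A sketch pairing stoplisten and startlisten for a maintenance window; server is a hypothetical TcpServer instance:

    await server.stoplisten()    # stop accepting new connections
    ...                          # e.g. swap certificates or reload configuration
    await server.startlisten()   # resume accepting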
hubo1016/vlcp | vlcp/start.py | default_start | def default_start():
"""
Use `sys.argv` for starting parameters. This is the entry-point of `vlcp-start`
"""
(config, daemon, pidfile, startup, fork) = parsearg()
if config is None:
if os.path.isfile('/etc/vlcp.conf'):
config = '/etc/vlcp.conf'
else:
print('/etc/vlcp.conf is not found; start without configurations.')
elif not config:
config = None
main(config, startup, daemon, pidfile, fork) | python | def default_start():
"""
Use `sys.argv` for starting parameters. This is the entry-point of `vlcp-start`
"""
(config, daemon, pidfile, startup, fork) = parsearg()
if config is None:
if os.path.isfile('/etc/vlcp.conf'):
config = '/etc/vlcp.conf'
else:
print('/etc/vlcp.conf is not found; start without configurations.')
elif not config:
config = None
main(config, startup, daemon, pidfile, fork) | [
"def",
"default_start",
"(",
")",
":",
"(",
"config",
",",
"daemon",
",",
"pidfile",
",",
"startup",
",",
"fork",
")",
"=",
"parsearg",
"(",
")",
"if",
"config",
"is",
"None",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"'/etc/vlcp.conf'",
")",
":",
"config",
"=",
"'/etc/vlcp.conf'",
"else",
":",
"print",
"(",
"'/etc/vlcp.conf is not found; start without configurations.'",
")",
"elif",
"not",
"config",
":",
"config",
"=",
"None",
"main",
"(",
"config",
",",
"startup",
",",
"daemon",
",",
"pidfile",
",",
"fork",
")"
] | Use `sys.argv` for starting parameters. This is the entry-point of `vlcp-start` | [
"Use",
"sys",
".",
"argv",
"for",
"starting",
"parameters",
".",
"This",
"is",
"the",
"entry",
"-",
"point",
"of",
"vlcp",
"-",
"start"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/start.py#L61-L73 |
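A sketch of calling main directly, with the same positional argument order default_start uses; the None/False defaults are assumptions:

    from vlcp.start import main

    # roughly equivalent to `vlcp-start` with an explicit config file,
    # no daemonization, no pid file and no pre-fork
    main('/etc/vlcp.conf', None, False, None, None)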
hubo1016/vlcp | vlcp/utils/webclient.py | Response.close | def close(self):
"Stop the output stream, but further download will still perform"
if self.stream:
self.stream.close(self.scheduler)
self.stream = None | python | def close(self):
"Stop the output stream, but further download will still perform"
if self.stream:
self.stream.close(self.scheduler)
self.stream = None | [
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"stream",
":",
"self",
".",
"stream",
".",
"close",
"(",
"self",
".",
"scheduler",
")",
"self",
".",
"stream",
"=",
"None"
] | Stop the output stream, but further download will still proceed | [
"Stop",
"the",
"output",
"stream",
"but",
"further",
"download",
"will",
"still",
"perform"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/webclient.py#L314-L318 |
hubo1016/vlcp | vlcp/utils/webclient.py | Response.shutdown | async def shutdown(self):
"Force stop the output stream, if there are more data to download, shutdown the connection"
if self.stream:
if not self.stream.dataeof and not self.stream.dataerror:
self.stream.close(self.scheduler)
await self.connection.shutdown()
else:
self.stream.close(self.scheduler)
self.stream = None | python | async def shutdown(self):
"Force stop the output stream, if there are more data to download, shutdown the connection"
if self.stream:
if not self.stream.dataeof and not self.stream.dataerror:
self.stream.close(self.scheduler)
await self.connection.shutdown()
else:
self.stream.close(self.scheduler)
self.stream = None | [
"async",
"def",
"shutdown",
"(",
"self",
")",
":",
"if",
"self",
".",
"stream",
":",
"if",
"not",
"self",
".",
"stream",
".",
"dataeof",
"and",
"not",
"self",
".",
"stream",
".",
"dataerror",
":",
"self",
".",
"stream",
".",
"close",
"(",
"self",
".",
"scheduler",
")",
"await",
"self",
".",
"connection",
".",
"shutdown",
"(",
")",
"else",
":",
"self",
".",
"stream",
".",
"close",
"(",
"self",
".",
"scheduler",
")",
"self",
".",
"stream",
"=",
"None"
] | Force stop the output stream, if there are more data to download, shut down the connection | [
"Force",
"stop",
"the",
"output",
"stream",
"if",
"there",
"are",
"more",
"data",
"to",
"download",
"shutdown",
"the",
"connection"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/webclient.py#L319-L327 |
hubo1016/vlcp | vlcp/utils/networkplugin.py | createphysicalnetwork | def createphysicalnetwork(type, create_processor = partial(default_processor, excluding=('id', 'type')),
reorder_dict = default_iterate_dict):
"""
:param type: physical network type
:param create_processor: create_processor(physicalnetwork, walk, write, \*, parameters)
"""
# create an new physical network
def walker(walk, write, timestamp, parameters_dict):
for key, parameters in reorder_dict(parameters_dict):
try:
value = walk(key)
except KeyError:
pass
else:
id_ = parameters['id']
new_network = create_new(PhysicalNetwork, value, id_)
new_network.type = type
create_processor(new_network, walk, write, parameters=parameters)
write(key, new_network)
new_networkmap = PhysicalNetworkMap.create_instance(id_)
new_networkmap.network = new_network.create_weakreference()
write(new_networkmap.getkey(), new_networkmap)
# Save into network set
try:
physet = walk(PhysicalNetworkSet.default_key())
except KeyError:
pass
else:
physet.set.dataset().add(new_network.create_weakreference())
write(physet.getkey(), physet)
return walker | python | def createphysicalnetwork(type, create_processor = partial(default_processor, excluding=('id', 'type')),
reorder_dict = default_iterate_dict):
"""
:param type: physical network type
:param create_processor: create_processor(physicalnetwork, walk, write, \*, parameters)
"""
# create an new physical network
def walker(walk, write, timestamp, parameters_dict):
for key, parameters in reorder_dict(parameters_dict):
try:
value = walk(key)
except KeyError:
pass
else:
id_ = parameters['id']
new_network = create_new(PhysicalNetwork, value, id_)
new_network.type = type
create_processor(new_network, walk, write, parameters=parameters)
write(key, new_network)
new_networkmap = PhysicalNetworkMap.create_instance(id_)
new_networkmap.network = new_network.create_weakreference()
write(new_networkmap.getkey(), new_networkmap)
# Save into network set
try:
physet = walk(PhysicalNetworkSet.default_key())
except KeyError:
pass
else:
physet.set.dataset().add(new_network.create_weakreference())
write(physet.getkey(), physet)
return walker | [
"def",
"createphysicalnetwork",
"(",
"type",
",",
"create_processor",
"=",
"partial",
"(",
"default_processor",
",",
"excluding",
"=",
"(",
"'id'",
",",
"'type'",
")",
")",
",",
"reorder_dict",
"=",
"default_iterate_dict",
")",
":",
"# create an new physical network",
"def",
"walker",
"(",
"walk",
",",
"write",
",",
"timestamp",
",",
"parameters_dict",
")",
":",
"for",
"key",
",",
"parameters",
"in",
"reorder_dict",
"(",
"parameters_dict",
")",
":",
"try",
":",
"value",
"=",
"walk",
"(",
"key",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"id_",
"=",
"parameters",
"[",
"'id'",
"]",
"new_network",
"=",
"create_new",
"(",
"PhysicalNetwork",
",",
"value",
",",
"id_",
")",
"new_network",
".",
"type",
"=",
"type",
"create_processor",
"(",
"new_network",
",",
"walk",
",",
"write",
",",
"parameters",
"=",
"parameters",
")",
"write",
"(",
"key",
",",
"new_network",
")",
"new_networkmap",
"=",
"PhysicalNetworkMap",
".",
"create_instance",
"(",
"id_",
")",
"new_networkmap",
".",
"network",
"=",
"new_network",
".",
"create_weakreference",
"(",
")",
"write",
"(",
"new_networkmap",
".",
"getkey",
"(",
")",
",",
"new_networkmap",
")",
"# Save into network set",
"try",
":",
"physet",
"=",
"walk",
"(",
"PhysicalNetworkSet",
".",
"default_key",
"(",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"physet",
".",
"set",
".",
"dataset",
"(",
")",
".",
"add",
"(",
"new_network",
".",
"create_weakreference",
"(",
")",
")",
"write",
"(",
"physet",
".",
"getkey",
"(",
")",
",",
"physet",
")",
"return",
"walker"
] | :param type: physical network type
:param create_processor: create_processor(physicalnetwork, walk, write, \*, parameters) | [
":",
"param",
"type",
":",
"physical",
"network",
"type",
":",
"param",
"create_processor",
":",
"create_processor",
"(",
"physicalnetwork",
"walk",
"write",
"\\",
"*",
"parameters",
")"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/networkplugin.py#L43-L76 |
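A sketch of building the walker and the parameters_dict it expects; the 'vlanrange' attribute and the transaction driver are assumptions about the vlan network driver and the object-DB layer:

    walker = createphysicalnetwork('vlan')
    parameters_dict = {
        PhysicalNetwork.default_key('phy1'): {'id': 'phy1', 'vlanrange': [(100, 200)]},
    }
    # the object-DB transaction layer then drives it as:
    #     walker(walk, write, timestamp, parameters_dict)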
hubo1016/vlcp | vlcp/utils/networkplugin.py | deletephysicalnetwork | def deletephysicalnetwork(check_processor = default_physicalnetwork_delete_check,
reorder_dict = default_iterate_dict):
"""
:param check_processor: check_processor(physicalnetwork, physicalnetworkmap, walk, write, \*, parameters)
"""
def walker(walk, write, timestamp, parameters_dict):
for key, parameters in reorder_dict(parameters_dict):
try:
value = walk(key)
except KeyError:
pass
else:
id_ = parameters['id']
try:
phy_map = walk(PhysicalNetworkMap.default_key(id_))
except KeyError:
pass
else:
check_processor(value, phy_map, walk, write, parameters=parameters)
write(phy_map.getkey(), None)
try:
phynetset = walk(PhysicalNetworkSet.default_key())
except KeyError:
pass
else:
phynetset.set.dataset().discard(value.create_weakreference())
write(phynetset.getkey(), phynetset)
write(key, None)
return walker | python | def deletephysicalnetwork(check_processor = default_physicalnetwork_delete_check,
reorder_dict = default_iterate_dict):
"""
:param check_processor: check_processor(physicalnetwork, physicalnetworkmap, walk, write, \*, parameters)
"""
def walker(walk, write, timestamp, parameters_dict):
for key, parameters in reorder_dict(parameters_dict):
try:
value = walk(key)
except KeyError:
pass
else:
id_ = parameters['id']
try:
phy_map = walk(PhysicalNetworkMap.default_key(id_))
except KeyError:
pass
else:
check_processor(value, phy_map, walk, write, parameters=parameters)
write(phy_map.getkey(), None)
try:
phynetset = walk(PhysicalNetworkSet.default_key())
except KeyError:
pass
else:
phynetset.set.dataset().discard(value.create_weakreference())
write(phynetset.getkey(), phynetset)
write(key, None)
return walker | [
"def",
"deletephysicalnetwork",
"(",
"check_processor",
"=",
"default_physicalnetwork_delete_check",
",",
"reorder_dict",
"=",
"default_iterate_dict",
")",
":",
"def",
"walker",
"(",
"walk",
",",
"write",
",",
"timestamp",
",",
"parameters_dict",
")",
":",
"for",
"key",
",",
"parameters",
"in",
"reorder_dict",
"(",
"parameters_dict",
")",
":",
"try",
":",
"value",
"=",
"walk",
"(",
"key",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"id_",
"=",
"parameters",
"[",
"'id'",
"]",
"try",
":",
"phy_map",
"=",
"walk",
"(",
"PhysicalNetworkMap",
".",
"default_key",
"(",
"id_",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"check_processor",
"(",
"value",
",",
"phy_map",
",",
"walk",
",",
"write",
",",
"parameters",
"=",
"parameters",
")",
"write",
"(",
"phy_map",
".",
"getkey",
"(",
")",
",",
"None",
")",
"try",
":",
"phynetset",
"=",
"walk",
"(",
"PhysicalNetworkSet",
".",
"default_key",
"(",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"phynetset",
".",
"set",
".",
"dataset",
"(",
")",
".",
"discard",
"(",
"value",
".",
"create_weakreference",
"(",
")",
")",
"write",
"(",
"phynetset",
".",
"getkey",
"(",
")",
",",
"phynetset",
")",
"write",
"(",
"key",
",",
"None",
")",
"return",
"walker"
] | :param check_processor: check_processor(physicalnetwork, physicalnetworkmap, walk, write, \*, parameters) | [
":",
"param",
"check_processor",
":",
"check_processor",
"(",
"physicalnetwork",
"physicalnetworkmap",
"walk",
"write",
"\\",
"*",
"parameters",
")"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/networkplugin.py#L104-L132 |
hubo1016/vlcp | vlcp/utils/networkplugin.py | createphysicalport | def createphysicalport(create_processor = partial(default_processor, excluding=('vhost', 'systemid',
'bridge', 'name',
'physicalnetwork')),
reorder_dict = default_iterate_dict):
"""
:param create_processor: create_processor(physicalport, physicalnetwork, physicalnetworkmap, walk, write, \*, parameters)
"""
def walker(walk, write, timestamp, parameters_dict):
for key, parameters in reorder_dict(parameters_dict):
try:
value = walk(key)
except KeyError:
pass
else:
p = create_new(PhysicalPort, value, parameters['vhost'], parameters['systemid'],
parameters['bridge'], parameters['name'])
try:
physicalnetwork = walk(PhysicalNetwork.default_key(parameters['physicalnetwork']))
except KeyError:
pass
else:
                    # Should already have been checked from outside
p.physicalnetwork = physicalnetwork.create_reference()
try:
phymap = walk(PhysicalNetworkMap._network.leftkey(physicalnetwork))
except KeyError:
pass
else:
create_processor(p, physicalnetwork, phymap, walk, write, parameters=parameters)
phymap.ports.dataset().add(p.create_weakreference())
write(phymap.getkey(), phymap)
try:
phyportset = walk(PhysicalPortSet.default_key())
except KeyError:
pass
else:
phyportset.set.dataset().add(p.create_weakreference())
write(phyportset.getkey(), phyportset)
write(p.getkey(), p)
return walker | python | def createphysicalport(create_processor = partial(default_processor, excluding=('vhost', 'systemid',
'bridge', 'name',
'physicalnetwork')),
reorder_dict = default_iterate_dict):
"""
:param create_processor: create_processor(physicalport, physicalnetwork, physicalnetworkmap, walk, write, \*, parameters)
"""
def walker(walk, write, timestamp, parameters_dict):
for key, parameters in reorder_dict(parameters_dict):
try:
value = walk(key)
except KeyError:
pass
else:
p = create_new(PhysicalPort, value, parameters['vhost'], parameters['systemid'],
parameters['bridge'], parameters['name'])
try:
physicalnetwork = walk(PhysicalNetwork.default_key(parameters['physicalnetwork']))
except KeyError:
pass
else:
                    # Should already have been checked from outside
p.physicalnetwork = physicalnetwork.create_reference()
try:
phymap = walk(PhysicalNetworkMap._network.leftkey(physicalnetwork))
except KeyError:
pass
else:
create_processor(p, physicalnetwork, phymap, walk, write, parameters=parameters)
phymap.ports.dataset().add(p.create_weakreference())
write(phymap.getkey(), phymap)
try:
phyportset = walk(PhysicalPortSet.default_key())
except KeyError:
pass
else:
phyportset.set.dataset().add(p.create_weakreference())
write(phyportset.getkey(), phyportset)
write(p.getkey(), p)
return walker | [
"def",
"createphysicalport",
"(",
"create_processor",
"=",
"partial",
"(",
"default_processor",
",",
"excluding",
"=",
"(",
"'vhost'",
",",
"'systemid'",
",",
"'bridge'",
",",
"'name'",
",",
"'physicalnetwork'",
")",
")",
",",
"reorder_dict",
"=",
"default_iterate_dict",
")",
":",
"def",
"walker",
"(",
"walk",
",",
"write",
",",
"timestamp",
",",
"parameters_dict",
")",
":",
"for",
"key",
",",
"parameters",
"in",
"reorder_dict",
"(",
"parameters_dict",
")",
":",
"try",
":",
"value",
"=",
"walk",
"(",
"key",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"p",
"=",
"create_new",
"(",
"PhysicalPort",
",",
"value",
",",
"parameters",
"[",
"'vhost'",
"]",
",",
"parameters",
"[",
"'systemid'",
"]",
",",
"parameters",
"[",
"'bridge'",
"]",
",",
"parameters",
"[",
"'name'",
"]",
")",
"try",
":",
"physicalnetwork",
"=",
"walk",
"(",
"PhysicalNetwork",
".",
"default_key",
"(",
"parameters",
"[",
"'physicalnetwork'",
"]",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"# Should already been check from outside",
"p",
".",
"physicalnetwork",
"=",
"physicalnetwork",
".",
"create_reference",
"(",
")",
"try",
":",
"phymap",
"=",
"walk",
"(",
"PhysicalNetworkMap",
".",
"_network",
".",
"leftkey",
"(",
"physicalnetwork",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"create_processor",
"(",
"p",
",",
"physicalnetwork",
",",
"phymap",
",",
"walk",
",",
"write",
",",
"parameters",
"=",
"parameters",
")",
"phymap",
".",
"ports",
".",
"dataset",
"(",
")",
".",
"add",
"(",
"p",
".",
"create_weakreference",
"(",
")",
")",
"write",
"(",
"phymap",
".",
"getkey",
"(",
")",
",",
"phymap",
")",
"try",
":",
"phyportset",
"=",
"walk",
"(",
"PhysicalPortSet",
".",
"default_key",
"(",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"phyportset",
".",
"set",
".",
"dataset",
"(",
")",
".",
"add",
"(",
"p",
".",
"create_weakreference",
"(",
")",
")",
"write",
"(",
"phyportset",
".",
"getkey",
"(",
")",
",",
"phyportset",
")",
"write",
"(",
"p",
".",
"getkey",
"(",
")",
",",
"p",
")",
"return",
"walker"
] | :param create_processor: create_processor(physicalport, physicalnetwork, physicalnetworkmap, walk, write, \*, parameters) | [
":",
"param",
"create_processor",
":",
"create_processor",
"(",
"physicalport",
"physicalnetwork",
"physicalnetworkmap",
"walk",
"write",
"\\",
"*",
"parameters",
")"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/networkplugin.py#L141-L180 |
hubo1016/vlcp | vlcp/utils/networkplugin.py | updatephysicalport | def updatephysicalport(update_processor = partial(default_processor, excluding=('vhost', 'systemid',
'bridge', 'name'),
disabled=('physicalnetwork',)),
reorder_dict = default_iterate_dict
):
"""
    :param update_processor: update_processor(physicalport, walk, write, \*, parameters)
"""
def walker(walk, write, timestamp, parameters_dict):
for key, parameters in reorder_dict(parameters_dict):
try:
value = walk(key)
except KeyError:
pass
else:
if update_processor(value, walk, write, parameters=parameters):
write(key, value)
return walker | python | def updatephysicalport(update_processor = partial(default_processor, excluding=('vhost', 'systemid',
'bridge', 'name'),
disabled=('physicalnetwork',)),
reorder_dict = default_iterate_dict
):
"""
    :param update_processor: update_processor(physicalport, walk, write, \*, parameters)
"""
def walker(walk, write, timestamp, parameters_dict):
for key, parameters in reorder_dict(parameters_dict):
try:
value = walk(key)
except KeyError:
pass
else:
if update_processor(value, walk, write, parameters=parameters):
write(key, value)
return walker | [
"def",
"updatephysicalport",
"(",
"update_processor",
"=",
"partial",
"(",
"default_processor",
",",
"excluding",
"=",
"(",
"'vhost'",
",",
"'systemid'",
",",
"'bridge'",
",",
"'name'",
")",
",",
"disabled",
"=",
"(",
"'physicalnetwork'",
",",
")",
")",
",",
"reorder_dict",
"=",
"default_iterate_dict",
")",
":",
"def",
"walker",
"(",
"walk",
",",
"write",
",",
"timestamp",
",",
"parameters_dict",
")",
":",
"for",
"key",
",",
"parameters",
"in",
"reorder_dict",
"(",
"parameters_dict",
")",
":",
"try",
":",
"value",
"=",
"walk",
"(",
"key",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"if",
"update_processor",
"(",
"value",
",",
"walk",
",",
"write",
",",
"parameters",
"=",
"parameters",
")",
":",
"write",
"(",
"key",
",",
"value",
")",
"return",
"walker"
] | :param update_processor: update_processor(physicalport, walk, write, \*, parameters) | [
":",
"param",
"update_processor",
":",
"update_processor",
"(",
"physcialport",
"walk",
"write",
"\\",
"*",
"parameters",
")"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/networkplugin.py#L183-L200 |
hubo1016/vlcp | vlcp/utils/networkplugin.py | deletephysicalport | def deletephysicalport(check_processor=_false_processor,
reorder_dict = default_iterate_dict):
"""
:param check_processor: check_processor(physicalport, physicalnetwork, physicalnetworkmap,
                           walk, write, \*, parameters)
"""
def walker(walk, write, timestamp, parameters_dict):
for key, parameters in reorder_dict(parameters_dict):
try:
value = walk(key)
except KeyError:
pass
else:
try:
phynet = walk(value.physicalnetwork.getkey())
except KeyError:
pass
else:
try:
phymap = walk(PhysicalNetworkMap._network.leftkey(phynet))
except KeyError:
pass
else:
check_processor(value, phynet, phymap, walk, write, parameters=parameters)
phymap.ports.dataset().discard(value.create_weakreference())
write(phymap.getkey(), phymap)
try:
physet = walk(PhysicalPortSet.default_key())
except KeyError:
pass
else:
physet.set.dataset().discard(value.create_weakreference())
write(physet.getkey(), physet)
write(key, None)
return walker | python | def deletephysicalport(check_processor=_false_processor,
reorder_dict = default_iterate_dict):
"""
:param check_processor: check_processor(physicalport, physicalnetwork, physicalnetworkmap,
                           walk, write, \*, parameters)
"""
def walker(walk, write, timestamp, parameters_dict):
for key, parameters in reorder_dict(parameters_dict):
try:
value = walk(key)
except KeyError:
pass
else:
try:
phynet = walk(value.physicalnetwork.getkey())
except KeyError:
pass
else:
try:
phymap = walk(PhysicalNetworkMap._network.leftkey(phynet))
except KeyError:
pass
else:
check_processor(value, phynet, phymap, walk, write, parameters=parameters)
phymap.ports.dataset().discard(value.create_weakreference())
write(phymap.getkey(), phymap)
try:
physet = walk(PhysicalPortSet.default_key())
except KeyError:
pass
else:
physet.set.dataset().discard(value.create_weakreference())
write(physet.getkey(), physet)
write(key, None)
return walker | [
"def",
"deletephysicalport",
"(",
"check_processor",
"=",
"_false_processor",
",",
"reorder_dict",
"=",
"default_iterate_dict",
")",
":",
"def",
"walker",
"(",
"walk",
",",
"write",
",",
"timestamp",
",",
"parameters_dict",
")",
":",
"for",
"key",
",",
"parameters",
"in",
"reorder_dict",
"(",
"parameters_dict",
")",
":",
"try",
":",
"value",
"=",
"walk",
"(",
"key",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"try",
":",
"phynet",
"=",
"walk",
"(",
"value",
".",
"physicalnetwork",
".",
"getkey",
"(",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"try",
":",
"phymap",
"=",
"walk",
"(",
"PhysicalNetworkMap",
".",
"_network",
".",
"leftkey",
"(",
"phynet",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"check_processor",
"(",
"value",
",",
"phynet",
",",
"phymap",
",",
"walk",
",",
"write",
",",
"parameters",
"=",
"parameters",
")",
"phymap",
".",
"ports",
".",
"dataset",
"(",
")",
".",
"discard",
"(",
"value",
".",
"create_weakreference",
"(",
")",
")",
"write",
"(",
"phymap",
".",
"getkey",
"(",
")",
",",
"phymap",
")",
"try",
":",
"physet",
"=",
"walk",
"(",
"PhysicalPortSet",
".",
"default_key",
"(",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"physet",
".",
"set",
".",
"dataset",
"(",
")",
".",
"discard",
"(",
"value",
".",
"create_weakreference",
"(",
")",
")",
"write",
"(",
"physet",
".",
"getkey",
"(",
")",
",",
"physet",
")",
"write",
"(",
"key",
",",
"None",
")",
"return",
"walker"
] | :param check_processor: check_processor(physicalport, physicalnetwork, physicalnetworkmap,
walk, write, \*, parameters) | [
":",
"param",
"check_processor",
":",
"check_processor",
"(",
"physicalport",
"physicalnetwork",
"physicalnetworkmap",
"walk",
"write",
"\\",
"*",
"parameters",
")"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/networkplugin.py#L203-L237 |
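A `check_processor` for this delete walker runs before the port is detached from its maps, so it can veto the delete by raising; a sketch, where the `in_use` flag and the `ValueError` convention are assumptions rather than source facts::

    def my_check_processor(physicalport, physicalnetwork, physicalnetworkmap,
                           walk, write, *, parameters):
        # Raise to abort the transaction; return normally to allow the delete
        if getattr(physicalport, 'in_use', False):    # hypothetical flag
            raise ValueError('physical port is still in use')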
hubo1016/vlcp | vlcp/utils/networkplugin.py | createlogicalnetwork | def createlogicalnetwork(create_processor = partial(default_processor, excluding=('id', 'physicalnetwork')),
reorder_dict = default_iterate_dict):
"""
:param create_processor: create_processor(logicalnetwork, logicalnetworkmap, physicalnetwork,
physicalnetworkmap, walk, write, \*, parameters)
"""
def walker(walk, write, timestamp, parameters_dict):
for key, parameters in reorder_dict(parameters_dict):
try:
value = walk(key)
except KeyError:
pass
else:
id_ = parameters['id']
lognet = create_new(LogicalNetwork, value, id_)
logmap = LogicalNetworkMap.create_instance(id_)
logmap.network = lognet.create_reference()
try:
phynet = walk(PhysicalNetwork.default_key(parameters['physicalnetwork']))
except KeyError:
pass
else:
lognet.physicalnetwork = phynet.create_reference()
try:
phymap = walk(PhysicalNetworkMap._network.leftkey(phynet))
except KeyError:
pass
else:
create_processor(lognet, logmap, phynet, phymap, walk, write, parameters=parameters)
phymap.logicnetworks.dataset().add(lognet.create_weakreference())
write(phymap.getkey(), phymap)
write(lognet.getkey(), lognet)
write(logmap.getkey(), logmap)
try:
logicalnetworkset = walk(LogicalNetworkSet.default_key())
except KeyError:
pass
else:
logicalnetworkset.set.dataset().add(lognet.create_weakreference())
write(logicalnetworkset.getkey(), logicalnetworkset)
return walker | python | def createlogicalnetwork(create_processor = partial(default_processor, excluding=('id', 'physicalnetwork')),
reorder_dict = default_iterate_dict):
"""
:param create_processor: create_processor(logicalnetwork, logicalnetworkmap, physicalnetwork,
physicalnetworkmap, walk, write, \*, parameters)
"""
def walker(walk, write, timestamp, parameters_dict):
for key, parameters in reorder_dict(parameters_dict):
try:
value = walk(key)
except KeyError:
pass
else:
id_ = parameters['id']
lognet = create_new(LogicalNetwork, value, id_)
logmap = LogicalNetworkMap.create_instance(id_)
logmap.network = lognet.create_reference()
try:
phynet = walk(PhysicalNetwork.default_key(parameters['physicalnetwork']))
except KeyError:
pass
else:
lognet.physicalnetwork = phynet.create_reference()
try:
phymap = walk(PhysicalNetworkMap._network.leftkey(phynet))
except KeyError:
pass
else:
create_processor(lognet, logmap, phynet, phymap, walk, write, parameters=parameters)
phymap.logicnetworks.dataset().add(lognet.create_weakreference())
write(phymap.getkey(), phymap)
write(lognet.getkey(), lognet)
write(logmap.getkey(), logmap)
try:
logicalnetworkset = walk(LogicalNetworkSet.default_key())
except KeyError:
pass
else:
logicalnetworkset.set.dataset().add(lognet.create_weakreference())
write(logicalnetworkset.getkey(), logicalnetworkset)
return walker | [
"def",
"createlogicalnetwork",
"(",
"create_processor",
"=",
"partial",
"(",
"default_processor",
",",
"excluding",
"=",
"(",
"'id'",
",",
"'physicalnetwork'",
")",
")",
",",
"reorder_dict",
"=",
"default_iterate_dict",
")",
":",
"def",
"walker",
"(",
"walk",
",",
"write",
",",
"timestamp",
",",
"parameters_dict",
")",
":",
"for",
"key",
",",
"parameters",
"in",
"reorder_dict",
"(",
"parameters_dict",
")",
":",
"try",
":",
"value",
"=",
"walk",
"(",
"key",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"id_",
"=",
"parameters",
"[",
"'id'",
"]",
"lognet",
"=",
"create_new",
"(",
"LogicalNetwork",
",",
"value",
",",
"id_",
")",
"logmap",
"=",
"LogicalNetworkMap",
".",
"create_instance",
"(",
"id_",
")",
"logmap",
".",
"network",
"=",
"lognet",
".",
"create_reference",
"(",
")",
"try",
":",
"phynet",
"=",
"walk",
"(",
"PhysicalNetwork",
".",
"default_key",
"(",
"parameters",
"[",
"'physicalnetwork'",
"]",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"lognet",
".",
"physicalnetwork",
"=",
"phynet",
".",
"create_reference",
"(",
")",
"try",
":",
"phymap",
"=",
"walk",
"(",
"PhysicalNetworkMap",
".",
"_network",
".",
"leftkey",
"(",
"phynet",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"create_processor",
"(",
"lognet",
",",
"logmap",
",",
"phynet",
",",
"phymap",
",",
"walk",
",",
"write",
",",
"parameters",
"=",
"parameters",
")",
"phymap",
".",
"logicnetworks",
".",
"dataset",
"(",
")",
".",
"add",
"(",
"lognet",
".",
"create_weakreference",
"(",
")",
")",
"write",
"(",
"phymap",
".",
"getkey",
"(",
")",
",",
"phymap",
")",
"write",
"(",
"lognet",
".",
"getkey",
"(",
")",
",",
"lognet",
")",
"write",
"(",
"logmap",
".",
"getkey",
"(",
")",
",",
"logmap",
")",
"try",
":",
"logicalnetworkset",
"=",
"walk",
"(",
"LogicalNetworkSet",
".",
"default_key",
"(",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"logicalnetworkset",
".",
"set",
".",
"dataset",
"(",
")",
".",
"add",
"(",
"lognet",
".",
"create_weakreference",
"(",
")",
")",
"write",
"(",
"logicalnetworkset",
".",
"getkey",
"(",
")",
",",
"logicalnetworkset",
")",
"return",
"walker"
] | :param create_processor: create_processor(logicalnetwork, logicalnetworkmap, physicalnetwork,
physicalnetworkmap, walk, write, \*, parameters) | [
":",
"param",
"create_processor",
":",
"create_processor",
"(",
"logicalnetwork",
"logicalnetworkmap",
"physicalnetwork",
"physicalnetworkmap",
"walk",
"write",
"\\",
"*",
"parameters",
")"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/networkplugin.py#L247-L287 |
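The factory returns a walker driven by a dict of key to parameters; a sketch of building that dict for one new logical network, assuming `LogicalNetwork.default_key(id)` mirrors the `PhysicalNetwork.default_key` call used above (how the walker is finally submitted to ObjectDB is outside this row and left out)::

    walker = createlogicalnetwork()
    parameters_dict = {
        LogicalNetwork.default_key('lognet-a'): {
            'id': 'lognet-a',
            'physicalnetwork': 'phynet-1',
            # extra keys are copied onto the object by default_processor
            'vni': 10001,
        },
    }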
hubo1016/vlcp | vlcp/utils/networkplugin.py | deletelogicalnetwork | def deletelogicalnetwork(check_processor=default_logicalnetwork_delete_check,
reorder_dict = default_iterate_dict):
"""
:param check_processor: check_processor(logicalnetwork, logicalnetworkmap,
physicalnetwork, physicalnetworkmap,
walk, write, \*, parameters)
"""
def walker(walk, write, timestamp, parameters_dict):
for key, parameters in reorder_dict(parameters_dict):
try:
value = walk(key)
except KeyError:
pass
else:
try:
logmap = walk(LogicalNetworkMap._network.leftkey(key))
except KeyError:
pass
else:
try:
phynet = walk(value.physicalnetwork.getkey())
except KeyError:
pass
else:
try:
phymap = walk(PhysicalNetworkMap._network.leftkey(phynet))
except KeyError:
pass
else:
check_processor(value, logmap, phynet, phymap, walk, write, parameters=parameters)
phymap.logicnetworks.dataset().discard(value.create_weakreference())
write(phymap.getkey(), phymap)
write(key, None)
write(logmap.getkey(), None)
try:
logicalnetworkset = walk(LogicalNetworkSet.default_key())
except KeyError:
pass
else:
logicalnetworkset.set.dataset().discard(value.create_weakreference())
write(logicalnetworkset.getkey(), logicalnetworkset)
return walker | python | def deletelogicalnetwork(check_processor=default_logicalnetwork_delete_check,
reorder_dict = default_iterate_dict):
"""
:param check_processor: check_processor(logicalnetwork, logicalnetworkmap,
physicalnetwork, physicalnetworkmap,
walk, write, \*, parameters)
"""
def walker(walk, write, timestamp, parameters_dict):
for key, parameters in reorder_dict(parameters_dict):
try:
value = walk(key)
except KeyError:
pass
else:
try:
logmap = walk(LogicalNetworkMap._network.leftkey(key))
except KeyError:
pass
else:
try:
phynet = walk(value.physicalnetwork.getkey())
except KeyError:
pass
else:
try:
phymap = walk(PhysicalNetworkMap._network.leftkey(phynet))
except KeyError:
pass
else:
check_processor(value, logmap, phynet, phymap, walk, write, parameters=parameters)
phymap.logicnetworks.dataset().discard(value.create_weakreference())
write(phymap.getkey(), phymap)
write(key, None)
write(logmap.getkey(), None)
try:
logicalnetworkset = walk(LogicalNetworkSet.default_key())
except KeyError:
pass
else:
logicalnetworkset.set.dataset().discard(value.create_weakreference())
write(logicalnetworkset.getkey(), logicalnetworkset)
return walker | [
"def",
"deletelogicalnetwork",
"(",
"check_processor",
"=",
"default_logicalnetwork_delete_check",
",",
"reorder_dict",
"=",
"default_iterate_dict",
")",
":",
"def",
"walker",
"(",
"walk",
",",
"write",
",",
"timestamp",
",",
"parameters_dict",
")",
":",
"for",
"key",
",",
"parameters",
"in",
"reorder_dict",
"(",
"parameters_dict",
")",
":",
"try",
":",
"value",
"=",
"walk",
"(",
"key",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"try",
":",
"logmap",
"=",
"walk",
"(",
"LogicalNetworkMap",
".",
"_network",
".",
"leftkey",
"(",
"key",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"try",
":",
"phynet",
"=",
"walk",
"(",
"value",
".",
"physicalnetwork",
".",
"getkey",
"(",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"try",
":",
"phymap",
"=",
"walk",
"(",
"PhysicalNetworkMap",
".",
"_network",
".",
"leftkey",
"(",
"phynet",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"check_processor",
"(",
"value",
",",
"logmap",
",",
"phynet",
",",
"phymap",
",",
"walk",
",",
"write",
",",
"parameters",
"=",
"parameters",
")",
"phymap",
".",
"logicnetworks",
".",
"dataset",
"(",
")",
".",
"discard",
"(",
"value",
".",
"create_weakreference",
"(",
")",
")",
"write",
"(",
"phymap",
".",
"getkey",
"(",
")",
",",
"phymap",
")",
"write",
"(",
"key",
",",
"None",
")",
"write",
"(",
"logmap",
".",
"getkey",
"(",
")",
",",
"None",
")",
"try",
":",
"logicalnetworkset",
"=",
"walk",
"(",
"LogicalNetworkSet",
".",
"default_key",
"(",
")",
")",
"except",
"KeyError",
":",
"pass",
"else",
":",
"logicalnetworkset",
".",
"set",
".",
"dataset",
"(",
")",
".",
"discard",
"(",
"value",
".",
"create_weakreference",
"(",
")",
")",
"write",
"(",
"logicalnetworkset",
".",
"getkey",
"(",
")",
",",
"logicalnetworkset",
")",
"return",
"walker"
] | :param check_processor: check_processor(logicalnetwork, logicalnetworkmap,
physicalnetwork, physicalnetworkmap,
walk, write, \*, parameters) | [
":",
"param",
"check_processor",
":",
"check_processor",
"(",
"logicalnetwork",
"logicalnetworkmap",
"physicalnetwork",
"physicalnetworkmap",
"walk",
"write",
"\\",
"*",
"parameters",
")"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/networkplugin.py#L315-L356 |
hubo1016/vlcp | vlcp/utils/flowupdater.py | FlowUpdater.restart_walk | async def restart_walk(self):
"""
Force a re-walk
"""
if not self._restartwalk:
self._restartwalk = True
await self.wait_for_send(FlowUpdaterNotification(self, FlowUpdaterNotification.STARTWALK)) | python | async def restart_walk(self):
"""
Force a re-walk
"""
if not self._restartwalk:
self._restartwalk = True
await self.wait_for_send(FlowUpdaterNotification(self, FlowUpdaterNotification.STARTWALK)) | [
"async",
"def",
"restart_walk",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_restartwalk",
":",
"self",
".",
"_restartwalk",
"=",
"True",
"await",
"self",
".",
"wait_for_send",
"(",
"FlowUpdaterNotification",
"(",
"self",
",",
"FlowUpdaterNotification",
".",
"STARTWALK",
")",
")"
] | Force a re-walk | [
"Force",
"a",
"re",
"-",
"walk"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/flowupdater.py#L106-L112 |
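Because the `_restartwalk` flag collapses repeated requests into a single STARTWALK notification, `restart_walk` is safe to call from any routine; a sketch that forces a fresh walk whenever a configuration event fires (the matcher itself is hypothetical; awaiting a matcher directly is the same pattern used in `_flowupdater` below)::

    async def watch_config(self):
        while True:
            await config_changed_matcher    # hypothetical event matcher
            await self.restart_walk()       # repeated calls cost one re-walk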
hubo1016/vlcp | vlcp/utils/flowupdater.py | FlowUpdater._dataobject_update_detect | async def _dataobject_update_detect(self, _initialkeys, _savedresult):
"""
Coroutine that waits for retrieved value update notification
"""
def expr(newvalues, updatedvalues):
if any(v.getkey() in _initialkeys for v in updatedvalues if v is not None):
return True
else:
return self.shouldupdate(newvalues, updatedvalues)
while True:
updatedvalues, _ = await multiwaitif(_savedresult, self, expr, True)
if not self._updatedset:
self.scheduler.emergesend(FlowUpdaterNotification(self, FlowUpdaterNotification.DATAUPDATED))
self._updatedset.update(updatedvalues) | python | async def _dataobject_update_detect(self, _initialkeys, _savedresult):
"""
Coroutine that waits for retrieved value update notification
"""
def expr(newvalues, updatedvalues):
if any(v.getkey() in _initialkeys for v in updatedvalues if v is not None):
return True
else:
return self.shouldupdate(newvalues, updatedvalues)
while True:
updatedvalues, _ = await multiwaitif(_savedresult, self, expr, True)
if not self._updatedset:
self.scheduler.emergesend(FlowUpdaterNotification(self, FlowUpdaterNotification.DATAUPDATED))
self._updatedset.update(updatedvalues) | [
"async",
"def",
"_dataobject_update_detect",
"(",
"self",
",",
"_initialkeys",
",",
"_savedresult",
")",
":",
"def",
"expr",
"(",
"newvalues",
",",
"updatedvalues",
")",
":",
"if",
"any",
"(",
"v",
".",
"getkey",
"(",
")",
"in",
"_initialkeys",
"for",
"v",
"in",
"updatedvalues",
"if",
"v",
"is",
"not",
"None",
")",
":",
"return",
"True",
"else",
":",
"return",
"self",
".",
"shouldupdate",
"(",
"newvalues",
",",
"updatedvalues",
")",
"while",
"True",
":",
"updatedvalues",
",",
"_",
"=",
"await",
"multiwaitif",
"(",
"_savedresult",
",",
"self",
",",
"expr",
",",
"True",
")",
"if",
"not",
"self",
".",
"_updatedset",
":",
"self",
".",
"scheduler",
".",
"emergesend",
"(",
"FlowUpdaterNotification",
"(",
"self",
",",
"FlowUpdaterNotification",
".",
"DATAUPDATED",
")",
")",
"self",
".",
"_updatedset",
".",
"update",
"(",
"updatedvalues",
")"
] | Coroutine that waits for retrieved value update notification | [
"Coroutine",
"that",
"wait",
"for",
"retrieved",
"value",
"update",
"notification"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/flowupdater.py#L114-L127 |
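The detector above consults `shouldupdate` for every batch of updated values that does not touch an initial key, so a subclass can widen the re-walk trigger; a sketch, assuming `LogicalPort` is imported from the network models::

    class PortAwareUpdater(FlowUpdater):
        def shouldupdate(self, newvalues, updatedvalues):
            # Also react when any retrieved LogicalPort object changed
            return any(isinstance(v, LogicalPort)
                       for v in updatedvalues if v is not None)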
hubo1016/vlcp | vlcp/utils/flowupdater.py | FlowUpdater.updateobjects | def updateobjects(self, updatedvalues):
"""
Force an update notification on specified objects, even if they are not actually updated
in ObjectDB
"""
if not self._updatedset:
self.scheduler.emergesend(FlowUpdaterNotification(self, FlowUpdaterNotification.DATAUPDATED))
self._updatedset.update(set(updatedvalues).intersection(self._savedresult)) | python | def updateobjects(self, updatedvalues):
"""
Force an update notification on specified objects, even if they are not actually updated
in ObjectDB
"""
if not self._updatedset:
self.scheduler.emergesend(FlowUpdaterNotification(self, FlowUpdaterNotification.DATAUPDATED))
self._updatedset.update(set(updatedvalues).intersection(self._savedresult)) | [
"def",
"updateobjects",
"(",
"self",
",",
"updatedvalues",
")",
":",
"if",
"not",
"self",
".",
"_updatedset",
":",
"self",
".",
"scheduler",
".",
"emergesend",
"(",
"FlowUpdaterNotification",
"(",
"self",
",",
"FlowUpdaterNotification",
".",
"DATAUPDATED",
")",
")",
"self",
".",
"_updatedset",
".",
"update",
"(",
"set",
"(",
"updatedvalues",
")",
".",
"intersection",
"(",
"self",
".",
"_savedresult",
")",
")"
] | Force an update notification on specified objects, even if they are not actually updated
in ObjectDB | [
"Force",
"a",
"update",
"notification",
"on",
"specified",
"objects",
"even",
"if",
"they",
"are",
"not",
"actually",
"updated",
"in",
"ObjectDB"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/flowupdater.py#L129-L136 |
hubo1016/vlcp | vlcp/utils/flowupdater.py | FlowUpdater._flowupdater | async def _flowupdater(self):
"""
Coroutine calling `updateflow()`
"""
lastresult = set(v for v in self._savedresult if v is not None and not v.isdeleted())
flowupdate = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.FLOWUPDATE)
while True:
currentresult = [v for v in self._savedresult if v is not None and not v.isdeleted()]
# Calculating differences
additems = []
updateditems = []
updatedset2 = self._updatedset2
for v in currentresult:
if v not in lastresult:
additems.append(v)
else:
lastresult.remove(v)
if v in updatedset2:
# Updated
updateditems.append(v)
removeitems = lastresult
self._updatedset2.clear()
# Save current result for next difference
lastresult = set(currentresult)
if not additems and not removeitems and not updateditems:
await flowupdate
continue
await self.updateflow(self._connection, set(additems), removeitems, set(updateditems)) | python | async def _flowupdater(self):
"""
Coroutine calling `updateflow()`
"""
lastresult = set(v for v in self._savedresult if v is not None and not v.isdeleted())
flowupdate = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.FLOWUPDATE)
while True:
currentresult = [v for v in self._savedresult if v is not None and not v.isdeleted()]
# Calculating differences
additems = []
updateditems = []
updatedset2 = self._updatedset2
for v in currentresult:
if v not in lastresult:
additems.append(v)
else:
lastresult.remove(v)
if v in updatedset2:
# Updated
updateditems.append(v)
removeitems = lastresult
self._updatedset2.clear()
# Save current result for next difference
lastresult = set(currentresult)
if not additems and not removeitems and not updateditems:
await flowupdate
continue
await self.updateflow(self._connection, set(additems), removeitems, set(updateditems)) | [
"async",
"def",
"_flowupdater",
"(",
"self",
")",
":",
"lastresult",
"=",
"set",
"(",
"v",
"for",
"v",
"in",
"self",
".",
"_savedresult",
"if",
"v",
"is",
"not",
"None",
"and",
"not",
"v",
".",
"isdeleted",
"(",
")",
")",
"flowupdate",
"=",
"FlowUpdaterNotification",
".",
"createMatcher",
"(",
"self",
",",
"FlowUpdaterNotification",
".",
"FLOWUPDATE",
")",
"while",
"True",
":",
"currentresult",
"=",
"[",
"v",
"for",
"v",
"in",
"self",
".",
"_savedresult",
"if",
"v",
"is",
"not",
"None",
"and",
"not",
"v",
".",
"isdeleted",
"(",
")",
"]",
"# Calculating differences",
"additems",
"=",
"[",
"]",
"updateditems",
"=",
"[",
"]",
"updatedset2",
"=",
"self",
".",
"_updatedset2",
"for",
"v",
"in",
"currentresult",
":",
"if",
"v",
"not",
"in",
"lastresult",
":",
"additems",
".",
"append",
"(",
"v",
")",
"else",
":",
"lastresult",
".",
"remove",
"(",
"v",
")",
"if",
"v",
"in",
"updatedset2",
":",
"# Updated",
"updateditems",
".",
"append",
"(",
"v",
")",
"removeitems",
"=",
"lastresult",
"self",
".",
"_updatedset2",
".",
"clear",
"(",
")",
"# Save current result for next difference",
"lastresult",
"=",
"set",
"(",
"currentresult",
")",
"if",
"not",
"additems",
"and",
"not",
"removeitems",
"and",
"not",
"updateditems",
":",
"await",
"flowupdate",
"continue",
"await",
"self",
".",
"updateflow",
"(",
"self",
".",
"_connection",
",",
"set",
"(",
"additems",
")",
",",
"removeitems",
",",
"set",
"(",
"updateditems",
")",
")"
] | Coroutine calling `updateflow()` | [
"Coroutine",
"calling",
"updateflow",
"()"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/flowupdater.py#L138-L165 |
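`_flowupdater` reduces every notification to three sets and hands them to `updateflow`, whose signature can be read off the call above; a minimal subclass sketch with the real OpenFlow work replaced by logging::

    class MyFlowUpdater(FlowUpdater):
        async def updateflow(self, connection, addvalues, removevalues,
                             updatedvalues):
            for v in addvalues:
                self._logger.debug('install flows for %r on %r', v, connection)
            for v in removevalues:
                self._logger.debug('remove flows for %r', v)
            for v in updatedvalues:
                self._logger.debug('refresh flows for %r', v)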
hubo1016/vlcp | vlcp/utils/flowupdater.py | FlowUpdater.main | async def main(self):
"""
Main coroutine
"""
try:
lastkeys = set()
dataupdate = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.DATAUPDATED)
startwalk = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.STARTWALK)
self.subroutine(self._flowupdater(), False, '_flowupdateroutine')
# Cache updated objects
presave_update = set()
while True:
self._restartwalk = False
presave_update.update(self._updatedset)
self._updatedset.clear()
_initialkeys = set(self._initialkeys)
try:
walk_result = await call_api(self, 'objectdb', 'walk',
{'keys': self._initialkeys, 'walkerdict': self._walkerdict,
'requestid': (self._requstid, self._requestindex)})
except Exception:
self._logger.warning("Flow updater %r walk step failed, conn = %r", self, self._connection,
exc_info=True)
# Cleanup
await call_api(self, 'objectdb', 'unwatchall',
{'requestid': (self._requstid, self._requestindex)})
await self.wait_with_timeout(2)
self._requestindex += 1
if self._restartwalk:
continue
if self._updatedset:
if any(v.getkey() in _initialkeys for v in self._updatedset):
# During walk, there are other initial keys that are updated
# To make sure we get the latest result, restart the walk
continue
lastkeys = set(self._savedkeys)
_savedkeys, _savedresult = walk_result
removekeys = tuple(lastkeys.difference(_savedkeys))
self.reset_initialkeys(_savedkeys, _savedresult)
_initialkeys = set(self._initialkeys)
if self._dataupdateroutine:
self.terminate(self._dataupdateroutine)
# Start detecting updates
self.subroutine(self._dataobject_update_detect(_initialkeys, _savedresult), False, "_dataupdateroutine")
# Set the updates back (potentially merged with newly updated objects)
self._updatedset.update(v for v in presave_update)
presave_update.clear()
await self.walkcomplete(_savedkeys, _savedresult)
if removekeys:
await call_api(self, 'objectdb', 'munwatch', {'keys': removekeys,
'requestid': (self._requstid, self._requestindex)})
# Transfer updated objects to updatedset2 before a flow update notification
                # This helps to make `walkcomplete` execute before `updateflow`
                #
                # But notice that since there is only a single data object copy in the whole program,
                # it is impossible to hide the change completely during `updateflow`
self._updatedset2.update(self._updatedset)
self._updatedset.clear()
self._savedkeys = _savedkeys
self._savedresult = _savedresult
await self.wait_for_send(FlowUpdaterNotification(self, FlowUpdaterNotification.FLOWUPDATE))
while not self._restartwalk:
if self._updatedset:
if any(v.getkey() in _initialkeys for v in self._updatedset):
break
else:
self._updatedset2.update(self._updatedset)
self._updatedset.clear()
self.scheduler.emergesend(FlowUpdaterNotification(self, FlowUpdaterNotification.FLOWUPDATE))
await M_(dataupdate, startwalk)
except Exception:
self._logger.exception("Flow updater %r stops update by an exception, conn = %r", self, self._connection)
raise
finally:
self.subroutine(send_api(self, 'objectdb', 'unwatchall', {'requestid': (self._requstid, self._requestindex)}),
False)
if self._flowupdateroutine:
self.terminate(self._flowupdateroutine)
self._flowupdateroutine = None
if self._dataupdateroutine:
self.terminate(self._dataupdateroutine)
self._dataupdateroutine = None | python | async def main(self):
"""
Main coroutine
"""
try:
lastkeys = set()
dataupdate = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.DATAUPDATED)
startwalk = FlowUpdaterNotification.createMatcher(self, FlowUpdaterNotification.STARTWALK)
self.subroutine(self._flowupdater(), False, '_flowupdateroutine')
# Cache updated objects
presave_update = set()
while True:
self._restartwalk = False
presave_update.update(self._updatedset)
self._updatedset.clear()
_initialkeys = set(self._initialkeys)
try:
walk_result = await call_api(self, 'objectdb', 'walk',
{'keys': self._initialkeys, 'walkerdict': self._walkerdict,
'requestid': (self._requstid, self._requestindex)})
except Exception:
self._logger.warning("Flow updater %r walk step failed, conn = %r", self, self._connection,
exc_info=True)
# Cleanup
await call_api(self, 'objectdb', 'unwatchall',
{'requestid': (self._requstid, self._requestindex)})
await self.wait_with_timeout(2)
self._requestindex += 1
if self._restartwalk:
continue
if self._updatedset:
if any(v.getkey() in _initialkeys for v in self._updatedset):
# During walk, there are other initial keys that are updated
# To make sure we get the latest result, restart the walk
continue
lastkeys = set(self._savedkeys)
_savedkeys, _savedresult = walk_result
removekeys = tuple(lastkeys.difference(_savedkeys))
self.reset_initialkeys(_savedkeys, _savedresult)
_initialkeys = set(self._initialkeys)
if self._dataupdateroutine:
self.terminate(self._dataupdateroutine)
# Start detecting updates
self.subroutine(self._dataobject_update_detect(_initialkeys, _savedresult), False, "_dataupdateroutine")
# Set the updates back (potentially merged with newly updated objects)
self._updatedset.update(v for v in presave_update)
presave_update.clear()
await self.walkcomplete(_savedkeys, _savedresult)
if removekeys:
await call_api(self, 'objectdb', 'munwatch', {'keys': removekeys,
'requestid': (self._requstid, self._requestindex)})
# Transfer updated objects to updatedset2 before a flow update notification
                # This helps to make `walkcomplete` execute before `updateflow`
                #
                # But notice that since there is only a single data object copy in the whole program,
                # it is impossible to hide the change completely during `updateflow`
self._updatedset2.update(self._updatedset)
self._updatedset.clear()
self._savedkeys = _savedkeys
self._savedresult = _savedresult
await self.wait_for_send(FlowUpdaterNotification(self, FlowUpdaterNotification.FLOWUPDATE))
while not self._restartwalk:
if self._updatedset:
if any(v.getkey() in _initialkeys for v in self._updatedset):
break
else:
self._updatedset2.update(self._updatedset)
self._updatedset.clear()
self.scheduler.emergesend(FlowUpdaterNotification(self, FlowUpdaterNotification.FLOWUPDATE))
await M_(dataupdate, startwalk)
except Exception:
self._logger.exception("Flow updater %r stops update by an exception, conn = %r", self, self._connection)
raise
finally:
self.subroutine(send_api(self, 'objectdb', 'unwatchall', {'requestid': (self._requstid, self._requestindex)}),
False)
if self._flowupdateroutine:
self.terminate(self._flowupdateroutine)
self._flowupdateroutine = None
if self._dataupdateroutine:
self.terminate(self._dataupdateroutine)
self._dataupdateroutine = None | [
"async",
"def",
"main",
"(",
"self",
")",
":",
"try",
":",
"lastkeys",
"=",
"set",
"(",
")",
"dataupdate",
"=",
"FlowUpdaterNotification",
".",
"createMatcher",
"(",
"self",
",",
"FlowUpdaterNotification",
".",
"DATAUPDATED",
")",
"startwalk",
"=",
"FlowUpdaterNotification",
".",
"createMatcher",
"(",
"self",
",",
"FlowUpdaterNotification",
".",
"STARTWALK",
")",
"self",
".",
"subroutine",
"(",
"self",
".",
"_flowupdater",
"(",
")",
",",
"False",
",",
"'_flowupdateroutine'",
")",
"# Cache updated objects",
"presave_update",
"=",
"set",
"(",
")",
"while",
"True",
":",
"self",
".",
"_restartwalk",
"=",
"False",
"presave_update",
".",
"update",
"(",
"self",
".",
"_updatedset",
")",
"self",
".",
"_updatedset",
".",
"clear",
"(",
")",
"_initialkeys",
"=",
"set",
"(",
"self",
".",
"_initialkeys",
")",
"try",
":",
"walk_result",
"=",
"await",
"call_api",
"(",
"self",
",",
"'objectdb'",
",",
"'walk'",
",",
"{",
"'keys'",
":",
"self",
".",
"_initialkeys",
",",
"'walkerdict'",
":",
"self",
".",
"_walkerdict",
",",
"'requestid'",
":",
"(",
"self",
".",
"_requstid",
",",
"self",
".",
"_requestindex",
")",
"}",
")",
"except",
"Exception",
":",
"self",
".",
"_logger",
".",
"warning",
"(",
"\"Flow updater %r walk step failed, conn = %r\"",
",",
"self",
",",
"self",
".",
"_connection",
",",
"exc_info",
"=",
"True",
")",
"# Cleanup",
"await",
"call_api",
"(",
"self",
",",
"'objectdb'",
",",
"'unwatchall'",
",",
"{",
"'requestid'",
":",
"(",
"self",
".",
"_requstid",
",",
"self",
".",
"_requestindex",
")",
"}",
")",
"await",
"self",
".",
"wait_with_timeout",
"(",
"2",
")",
"self",
".",
"_requestindex",
"+=",
"1",
"if",
"self",
".",
"_restartwalk",
":",
"continue",
"if",
"self",
".",
"_updatedset",
":",
"if",
"any",
"(",
"v",
".",
"getkey",
"(",
")",
"in",
"_initialkeys",
"for",
"v",
"in",
"self",
".",
"_updatedset",
")",
":",
"# During walk, there are other initial keys that are updated",
"# To make sure we get the latest result, restart the walk",
"continue",
"lastkeys",
"=",
"set",
"(",
"self",
".",
"_savedkeys",
")",
"_savedkeys",
",",
"_savedresult",
"=",
"walk_result",
"removekeys",
"=",
"tuple",
"(",
"lastkeys",
".",
"difference",
"(",
"_savedkeys",
")",
")",
"self",
".",
"reset_initialkeys",
"(",
"_savedkeys",
",",
"_savedresult",
")",
"_initialkeys",
"=",
"set",
"(",
"self",
".",
"_initialkeys",
")",
"if",
"self",
".",
"_dataupdateroutine",
":",
"self",
".",
"terminate",
"(",
"self",
".",
"_dataupdateroutine",
")",
"# Start detecting updates",
"self",
".",
"subroutine",
"(",
"self",
".",
"_dataobject_update_detect",
"(",
"_initialkeys",
",",
"_savedresult",
")",
",",
"False",
",",
"\"_dataupdateroutine\"",
")",
"# Set the updates back (potentially merged with newly updated objects)",
"self",
".",
"_updatedset",
".",
"update",
"(",
"v",
"for",
"v",
"in",
"presave_update",
")",
"presave_update",
".",
"clear",
"(",
")",
"await",
"self",
".",
"walkcomplete",
"(",
"_savedkeys",
",",
"_savedresult",
")",
"if",
"removekeys",
":",
"await",
"call_api",
"(",
"self",
",",
"'objectdb'",
",",
"'munwatch'",
",",
"{",
"'keys'",
":",
"removekeys",
",",
"'requestid'",
":",
"(",
"self",
".",
"_requstid",
",",
"self",
".",
"_requestindex",
")",
"}",
")",
"# Transfer updated objects to updatedset2 before a flow update notification",
"# This helps to make `walkcomplete` executes before `updateflow`",
"#",
"# But notice that since there is only a single data object copy in all the program,",
"# it is impossible to hide the change completely during `updateflow`",
"self",
".",
"_updatedset2",
".",
"update",
"(",
"self",
".",
"_updatedset",
")",
"self",
".",
"_updatedset",
".",
"clear",
"(",
")",
"self",
".",
"_savedkeys",
"=",
"_savedkeys",
"self",
".",
"_savedresult",
"=",
"_savedresult",
"await",
"self",
".",
"wait_for_send",
"(",
"FlowUpdaterNotification",
"(",
"self",
",",
"FlowUpdaterNotification",
".",
"FLOWUPDATE",
")",
")",
"while",
"not",
"self",
".",
"_restartwalk",
":",
"if",
"self",
".",
"_updatedset",
":",
"if",
"any",
"(",
"v",
".",
"getkey",
"(",
")",
"in",
"_initialkeys",
"for",
"v",
"in",
"self",
".",
"_updatedset",
")",
":",
"break",
"else",
":",
"self",
".",
"_updatedset2",
".",
"update",
"(",
"self",
".",
"_updatedset",
")",
"self",
".",
"_updatedset",
".",
"clear",
"(",
")",
"self",
".",
"scheduler",
".",
"emergesend",
"(",
"FlowUpdaterNotification",
"(",
"self",
",",
"FlowUpdaterNotification",
".",
"FLOWUPDATE",
")",
")",
"await",
"M_",
"(",
"dataupdate",
",",
"startwalk",
")",
"except",
"Exception",
":",
"self",
".",
"_logger",
".",
"exception",
"(",
"\"Flow updater %r stops update by an exception, conn = %r\"",
",",
"self",
",",
"self",
".",
"_connection",
")",
"raise",
"finally",
":",
"self",
".",
"subroutine",
"(",
"send_api",
"(",
"self",
",",
"'objectdb'",
",",
"'unwatchall'",
",",
"{",
"'requestid'",
":",
"(",
"self",
".",
"_requstid",
",",
"self",
".",
"_requestindex",
")",
"}",
")",
",",
"False",
")",
"if",
"self",
".",
"_flowupdateroutine",
":",
"self",
".",
"terminate",
"(",
"self",
".",
"_flowupdateroutine",
")",
"self",
".",
"_flowupdateroutine",
"=",
"None",
"if",
"self",
".",
"_dataupdateroutine",
":",
"self",
".",
"terminate",
"(",
"self",
".",
"_dataupdateroutine",
")",
"self",
".",
"_dataupdateroutine",
"=",
"None"
] | Main coroutine | [
"Main",
"coroutine"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/flowupdater.py#L167-L248 |
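The `objectdb.walk` call above expects `_walkerdict` to map each initial key to a read-walker; the `walker(key, value, walk, save)` signature is the convention elsewhere in the codebase and is treated as an assumption here, as is the `network` reference attribute::

    def _port_walker(key, value, walk, save):
        save(key)                       # keep the port itself in the result
        if value is not None:
            try:
                lognet = walk(value.network.getkey())
            except KeyError:
                pass                    # ObjectDB re-walks once the key loads
            else:
                save(lognet.getkey())   # follow and keep the reference

    self._walkerdict = dict((k, _port_walker) for k in self._initialkeys)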
hubo1016/vlcp | vlcp/event/core.py | syscall_direct | def syscall_direct(*events):
'''
Directly process these events. This should never be used for normal events.
'''
def _syscall(scheduler, processor):
for e in events:
processor(e)
return _syscall | python | def syscall_direct(*events):
'''
Directly process these events. This should never be used for normal events.
'''
def _syscall(scheduler, processor):
for e in events:
processor(e)
return _syscall | [
"def",
"syscall_direct",
"(",
"*",
"events",
")",
":",
"def",
"_syscall",
"(",
"scheduler",
",",
"processor",
")",
":",
"for",
"e",
"in",
"events",
":",
"processor",
"(",
"e",
")",
"return",
"_syscall"
] | Directly process these events. This should never be used for normal events. | [
"Directly",
"process",
"these",
"events",
".",
"This",
"should",
"never",
"be",
"used",
"for",
"normal",
"events",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L520-L527 |
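Usage sketch from inside a routine: schedule the call through `Scheduler.syscall` (later in this table) and wait on the returned matcher so the core loop can run it; `M_` and the two events are assumed to be available in the caller's scope::

    m = self.scheduler.syscall(syscall_direct(ev1, ev2))
    if m is not None:
        await M_(m)    # wait for the SyscallReturnEvent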
hubo1016/vlcp | vlcp/event/core.py | syscall_generator | def syscall_generator(generator):
'''
Directly process events from a generator function. This should never be used for normal events.
'''
def _syscall(scheduler, processor):
for e in generator():
processor(e)
return _syscall | python | def syscall_generator(generator):
'''
Directly process events from a generator function. This should never be used for normal events.
'''
def _syscall(scheduler, processor):
for e in generator():
processor(e)
return _syscall | [
"def",
"syscall_generator",
"(",
"generator",
")",
":",
"def",
"_syscall",
"(",
"scheduler",
",",
"processor",
")",
":",
"for",
"e",
"in",
"generator",
"(",
")",
":",
"processor",
"(",
"e",
")",
"return",
"_syscall"
] | Directly process events from a generator function. This should never be used for normal events. | [
"Directly",
"process",
"events",
"from",
"a",
"generator",
"function",
".",
"This",
"should",
"never",
"be",
"used",
"for",
"normal",
"events",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L529-L536 |
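Same contract as `syscall_direct`, with the events produced lazily inside core context; a sketch where the prepared event list is hypothetical::

    def _pending_events():
        # Each yielded Event goes straight to the processor, skipping the queue
        for e in prepared_events:
            yield e

    m = self.scheduler.syscall(syscall_generator(_pending_events))
    if m is not None:
        await M_(m)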
hubo1016/vlcp | vlcp/event/core.py | syscall_clearqueue | def syscall_clearqueue(queue):
'''
Clear a queue
'''
def _syscall(scheduler, processor):
qes, qees = queue.clear()
events = scheduler.queue.unblockqueue(queue)
for e in events:
scheduler.eventtree.remove(e)
for e in qes:
processor(e)
for e in qees:
processor(e)
return _syscall | python | def syscall_clearqueue(queue):
'''
Clear a queue
'''
def _syscall(scheduler, processor):
qes, qees = queue.clear()
events = scheduler.queue.unblockqueue(queue)
for e in events:
scheduler.eventtree.remove(e)
for e in qes:
processor(e)
for e in qees:
processor(e)
return _syscall | [
"def",
"syscall_clearqueue",
"(",
"queue",
")",
":",
"def",
"_syscall",
"(",
"scheduler",
",",
"processor",
")",
":",
"qes",
",",
"qees",
"=",
"queue",
".",
"clear",
"(",
")",
"events",
"=",
"scheduler",
".",
"queue",
".",
"unblockqueue",
"(",
"queue",
")",
"for",
"e",
"in",
"events",
":",
"scheduler",
".",
"eventtree",
".",
"remove",
"(",
"e",
")",
"for",
"e",
"in",
"qes",
":",
"processor",
"(",
"e",
")",
"for",
"e",
"in",
"qees",
":",
"processor",
"(",
"e",
")",
"return",
"_syscall"
] | Clear a queue | [
"Clear",
"a",
"queue"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L538-L551 |
hubo1016/vlcp | vlcp/event/core.py | syscall_removequeue | def syscall_removequeue(queue, index):
'''
Remove subqueue `queue[index]` from queue.
'''
def _syscall(scheduler, processor):
events = scheduler.queue.unblockqueue(queue[index])
for e in events:
scheduler.eventtree.remove(e)
qes, qees = queue.removeSubQueue(index)
for e in qes:
processor(e)
for e in qees:
processor(e)
return _syscall | python | def syscall_removequeue(queue, index):
'''
Remove subqueue `queue[index]` from queue.
'''
def _syscall(scheduler, processor):
events = scheduler.queue.unblockqueue(queue[index])
for e in events:
scheduler.eventtree.remove(e)
qes, qees = queue.removeSubQueue(index)
for e in qes:
processor(e)
for e in qees:
processor(e)
return _syscall | [
"def",
"syscall_removequeue",
"(",
"queue",
",",
"index",
")",
":",
"def",
"_syscall",
"(",
"scheduler",
",",
"processor",
")",
":",
"events",
"=",
"scheduler",
".",
"queue",
".",
"unblockqueue",
"(",
"queue",
"[",
"index",
"]",
")",
"for",
"e",
"in",
"events",
":",
"scheduler",
".",
"eventtree",
".",
"remove",
"(",
"e",
")",
"qes",
",",
"qees",
"=",
"queue",
".",
"removeSubQueue",
"(",
"index",
")",
"for",
"e",
"in",
"qes",
":",
"processor",
"(",
"e",
")",
"for",
"e",
"in",
"qees",
":",
"processor",
"(",
"e",
")",
"return",
"_syscall"
] | Remove subqueue `queue[index]` from queue. | [
"Remove",
"subqueue",
"queue",
"[",
"index",
"]",
"from",
"queue",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L553-L566 |
hubo1016/vlcp | vlcp/event/core.py | syscall_clearremovequeue | def syscall_clearremovequeue(queue, index):
'''
Clear the subqueue `queue[index]` and remove it from queue.
'''
def _syscall(scheduler, processor):
qes, qees = queue[index].clear()
events = scheduler.queue.unblockqueue(queue[index])
for e in events:
scheduler.eventtree.remove(e)
qes2, qees2 = queue.removeSubQueue(index)
for e in qes:
processor(e)
for e in qes2:
processor(e)
for e in qees:
processor(e)
for e in qees2:
processor(e)
return _syscall | python | def syscall_clearremovequeue(queue, index):
'''
Clear the subqueue `queue[index]` and remove it from queue.
'''
def _syscall(scheduler, processor):
qes, qees = queue[index].clear()
events = scheduler.queue.unblockqueue(queue[index])
for e in events:
scheduler.eventtree.remove(e)
qes2, qees2 = queue.removeSubQueue(index)
for e in qes:
processor(e)
for e in qes2:
processor(e)
for e in qees:
processor(e)
for e in qees2:
processor(e)
return _syscall | [
"def",
"syscall_clearremovequeue",
"(",
"queue",
",",
"index",
")",
":",
"def",
"_syscall",
"(",
"scheduler",
",",
"processor",
")",
":",
"qes",
",",
"qees",
"=",
"queue",
"[",
"index",
"]",
".",
"clear",
"(",
")",
"events",
"=",
"scheduler",
".",
"queue",
".",
"unblockqueue",
"(",
"queue",
"[",
"index",
"]",
")",
"for",
"e",
"in",
"events",
":",
"scheduler",
".",
"eventtree",
".",
"remove",
"(",
"e",
")",
"qes2",
",",
"qees2",
"=",
"queue",
".",
"removeSubQueue",
"(",
"index",
")",
"for",
"e",
"in",
"qes",
":",
"processor",
"(",
"e",
")",
"for",
"e",
"in",
"qes2",
":",
"processor",
"(",
"e",
")",
"for",
"e",
"in",
"qees",
":",
"processor",
"(",
"e",
")",
"for",
"e",
"in",
"qees2",
":",
"processor",
"(",
"e",
")",
"return",
"_syscall"
] | Clear the subqueue `queue[index]` and remove it from queue. | [
"Clear",
"the",
"subqueue",
"queue",
"[",
"index",
"]",
"and",
"remove",
"it",
"from",
"queue",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L568-L586 |
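The three subqueue syscalls differ only in scope: clear in place, detach `queue[index]`, or both atomically; invoking any of them from a routine looks the same (`parent_queue` and `idx` are placeholders)::

    m = self.scheduler.syscall(syscall_clearremovequeue(parent_queue, idx))
    if m is not None:
        await M_(m)    # subqueue flushed and detached without racing the loop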
hubo1016/vlcp | vlcp/event/core.py | Scheduler.register | def register(self, matchers, runnable):
'''
Register an iterator(runnable) to scheduler and wait for events
:param matchers: sequence of EventMatchers
        :param runnable: an iterator that accepts the send method
'''
if getattr(self, 'syscallfunc', None) is not None and getattr(self, 'syscallrunnable', None) is None:
# Inject this register
self.syscallrunnable = runnable
else:
for m in matchers:
self.matchtree.insert(m, runnable)
events = self.eventtree.findAndRemove(m)
for e in events:
self.queue.unblock(e)
if m.indices[0] == PollEvent._classname0 and len(m.indices) >= 2:
self.polling.onmatch(m.indices[1], None if len(m.indices) <= 2 else m.indices[2], True)
self.registerIndex.setdefault(runnable, set()).update(matchers) | python | def register(self, matchers, runnable):
'''
Register an iterator(runnable) to scheduler and wait for events
:param matchers: sequence of EventMatchers
        :param runnable: an iterator that accepts the send method
'''
if getattr(self, 'syscallfunc', None) is not None and getattr(self, 'syscallrunnable', None) is None:
# Inject this register
self.syscallrunnable = runnable
else:
for m in matchers:
self.matchtree.insert(m, runnable)
events = self.eventtree.findAndRemove(m)
for e in events:
self.queue.unblock(e)
if m.indices[0] == PollEvent._classname0 and len(m.indices) >= 2:
self.polling.onmatch(m.indices[1], None if len(m.indices) <= 2 else m.indices[2], True)
self.registerIndex.setdefault(runnable, set()).update(matchers) | [
"def",
"register",
"(",
"self",
",",
"matchers",
",",
"runnable",
")",
":",
"if",
"getattr",
"(",
"self",
",",
"'syscallfunc'",
",",
"None",
")",
"is",
"not",
"None",
"and",
"getattr",
"(",
"self",
",",
"'syscallrunnable'",
",",
"None",
")",
"is",
"None",
":",
"# Inject this register",
"self",
".",
"syscallrunnable",
"=",
"runnable",
"else",
":",
"for",
"m",
"in",
"matchers",
":",
"self",
".",
"matchtree",
".",
"insert",
"(",
"m",
",",
"runnable",
")",
"events",
"=",
"self",
".",
"eventtree",
".",
"findAndRemove",
"(",
"m",
")",
"for",
"e",
"in",
"events",
":",
"self",
".",
"queue",
".",
"unblock",
"(",
"e",
")",
"if",
"m",
".",
"indices",
"[",
"0",
"]",
"==",
"PollEvent",
".",
"_classname0",
"and",
"len",
"(",
"m",
".",
"indices",
")",
">=",
"2",
":",
"self",
".",
"polling",
".",
"onmatch",
"(",
"m",
".",
"indices",
"[",
"1",
"]",
",",
"None",
"if",
"len",
"(",
"m",
".",
"indices",
")",
"<=",
"2",
"else",
"m",
".",
"indices",
"[",
"2",
"]",
",",
"True",
")",
"self",
".",
"registerIndex",
".",
"setdefault",
"(",
"runnable",
",",
"set",
"(",
")",
")",
".",
"update",
"(",
"matchers",
")"
] | Register an iterator(runnable) to scheduler and wait for events
:param matchers: sequence of EventMatchers
:param runnable: an iterator that accepts the send method | [
"Register",
"an",
"iterator",
"(",
"runnable",
")",
"to",
"scheduler",
"and",
"wait",
"for",
"events",
":",
"param",
"matchers",
":",
"sequence",
"of",
"EventMatchers",
":",
"param",
"runnable",
":",
"an",
"iterator",
"that",
"accept",
"send",
"method",
":",
"param",
"daemon",
":",
"if",
"True",
"the",
"runnable",
"will",
"be",
"registered",
"as",
"a",
"daemon",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L128-L149 |
hubo1016/vlcp | vlcp/event/core.py | Scheduler.unregister | def unregister(self, matchers, runnable):
'''
Unregister an iterator(runnable) and stop waiting for events
:param matchers: sequence of EventMatchers
        :param runnable: an iterator that accepts the send method
'''
for m in matchers:
self.matchtree.remove(m, runnable)
if m.indices[0] == PollEvent._classname0 and len(m.indices) >= 2:
self.polling.onmatch(m.indices[1], None if len(m.indices) <= 2 else m.indices[2], False)
self.registerIndex.setdefault(runnable, set()).difference_update(matchers) | python | def unregister(self, matchers, runnable):
'''
Unregister an iterator(runnable) and stop waiting for events
:param matchers: sequence of EventMatchers
        :param runnable: an iterator that accepts the send method
'''
for m in matchers:
self.matchtree.remove(m, runnable)
if m.indices[0] == PollEvent._classname0 and len(m.indices) >= 2:
self.polling.onmatch(m.indices[1], None if len(m.indices) <= 2 else m.indices[2], False)
self.registerIndex.setdefault(runnable, set()).difference_update(matchers) | [
"def",
"unregister",
"(",
"self",
",",
"matchers",
",",
"runnable",
")",
":",
"for",
"m",
"in",
"matchers",
":",
"self",
".",
"matchtree",
".",
"remove",
"(",
"m",
",",
"runnable",
")",
"if",
"m",
".",
"indices",
"[",
"0",
"]",
"==",
"PollEvent",
".",
"_classname0",
"and",
"len",
"(",
"m",
".",
"indices",
")",
">=",
"2",
":",
"self",
".",
"polling",
".",
"onmatch",
"(",
"m",
".",
"indices",
"[",
"1",
"]",
",",
"None",
"if",
"len",
"(",
"m",
".",
"indices",
")",
"<=",
"2",
"else",
"m",
".",
"indices",
"[",
"2",
"]",
",",
"False",
")",
"self",
".",
"registerIndex",
".",
"setdefault",
"(",
"runnable",
",",
"set",
"(",
")",
")",
".",
"difference_update",
"(",
"matchers",
")"
] | Unregister an iterator(runnable) and stop waiting for events
:param matchers: sequence of EventMatchers
:param runnable: an iterator that accepts the send method | [
"Unregister",
"an",
"iterator",
"(",
"runnable",
")",
"and",
"stop",
"waiting",
"for",
"events",
":",
"param",
"matchers",
":",
"sequence",
"of",
"EventMatchers",
":",
"param",
"runnable",
":",
"an",
"iterator",
"that",
"accept",
"send",
"method"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L150-L162 |
hubo1016/vlcp | vlcp/event/core.py | Scheduler.unregisterall | def unregisterall(self, runnable):
'''
Unregister all matches and detach the runnable. Automatically called when runnable returns StopIteration.
'''
if runnable in self.registerIndex:
for m in self.registerIndex[runnable]:
self.matchtree.remove(m, runnable)
if m.indices[0] == PollEvent._classname0 and len(m.indices) >= 2:
self.polling.onmatch(m.indices[1], None if len(m.indices) <= 2 else m.indices[2], False)
del self.registerIndex[runnable]
self.daemons.discard(runnable) | python | def unregisterall(self, runnable):
'''
Unregister all matches and detach the runnable. Automatically called when runnable returns StopIteration.
'''
if runnable in self.registerIndex:
for m in self.registerIndex[runnable]:
self.matchtree.remove(m, runnable)
if m.indices[0] == PollEvent._classname0 and len(m.indices) >= 2:
self.polling.onmatch(m.indices[1], None if len(m.indices) <= 2 else m.indices[2], False)
del self.registerIndex[runnable]
self.daemons.discard(runnable) | [
"def",
"unregisterall",
"(",
"self",
",",
"runnable",
")",
":",
"if",
"runnable",
"in",
"self",
".",
"registerIndex",
":",
"for",
"m",
"in",
"self",
".",
"registerIndex",
"[",
"runnable",
"]",
":",
"self",
".",
"matchtree",
".",
"remove",
"(",
"m",
",",
"runnable",
")",
"if",
"m",
".",
"indices",
"[",
"0",
"]",
"==",
"PollEvent",
".",
"_classname0",
"and",
"len",
"(",
"m",
".",
"indices",
")",
">=",
"2",
":",
"self",
".",
"polling",
".",
"onmatch",
"(",
"m",
".",
"indices",
"[",
"1",
"]",
",",
"None",
"if",
"len",
"(",
"m",
".",
"indices",
")",
"<=",
"2",
"else",
"m",
".",
"indices",
"[",
"2",
"]",
",",
"False",
")",
"del",
"self",
".",
"registerIndex",
"[",
"runnable",
"]",
"self",
".",
"daemons",
".",
"discard",
"(",
"runnable",
")"
] | Unregister all matches and detach the runnable. Automatically called when runnable returns StopIteration. | [
"Unregister",
"all",
"matches",
"and",
"detach",
"the",
"runnable",
".",
"Automatically",
"called",
"when",
"runnable",
"returns",
"StopIteration",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L163-L173 |
hubo1016/vlcp | vlcp/event/core.py | Scheduler.ignore | def ignore(self, matcher):
'''
Unblock and ignore the matched events, if any.
'''
events = self.eventtree.findAndRemove(matcher)
for e in events:
self.queue.unblock(e)
e.canignore = True | python | def ignore(self, matcher):
'''
Unblock and ignore the matched events, if any.
'''
events = self.eventtree.findAndRemove(matcher)
for e in events:
self.queue.unblock(e)
e.canignore = True | [
"def",
"ignore",
"(",
"self",
",",
"matcher",
")",
":",
"events",
"=",
"self",
".",
"eventtree",
".",
"findAndRemove",
"(",
"matcher",
")",
"for",
"e",
"in",
"events",
":",
"self",
".",
"queue",
".",
"unblock",
"(",
"e",
")",
"e",
".",
"canignore",
"=",
"True"
] | Unblock and ignore the matched events, if any. | [
"Unblock",
"and",
"ignore",
"the",
"matched",
"events",
"if",
"any",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L174-L181 |
hubo1016/vlcp | vlcp/event/core.py | Scheduler.quit | def quit(self, daemononly = False):
'''
        Send a quit event to stop the main loop
'''
if not self.quitting:
self.quitting = True
self.queue.append(SystemControlEvent(SystemControlEvent.QUIT, daemononly = daemononly), True) | python | def quit(self, daemononly = False):
'''
        Send a quit event to stop the main loop
'''
if not self.quitting:
self.quitting = True
self.queue.append(SystemControlEvent(SystemControlEvent.QUIT, daemononly = daemononly), True) | [
"def",
"quit",
"(",
"self",
",",
"daemononly",
"=",
"False",
")",
":",
"if",
"not",
"self",
".",
"quitting",
":",
"self",
".",
"quitting",
"=",
"True",
"self",
".",
"queue",
".",
"append",
"(",
"SystemControlEvent",
"(",
"SystemControlEvent",
".",
"QUIT",
",",
"daemononly",
"=",
"daemononly",
")",
",",
"True",
")"
] | Send a quit event to stop the main loop | [
"Send",
"quit",
"event",
"to",
"quit",
"the",
"main",
"loop"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L182-L188 |
hubo1016/vlcp | vlcp/event/core.py | Scheduler.setTimer | def setTimer(self, start, interval = None):
'''
        Generate a TimerEvent at the specified time
        :param start: offset for time from now (seconds), or datetime for a fixed time
        :param interval: if not None, the timer is regenerated every `interval` seconds.
:returns: a timer handle to wait or cancel the timer
'''
if isinstance(start, datetime):
timestamp = (start - datetime.fromtimestamp(0)).total_seconds()
else:
timestamp = self.current_time + start
if interval is not None:
if not (interval > 0):
raise ValueError('interval must be positive.')
th = self.TimerHandle(timestamp, interval)
self.timers.push(th, timestamp)
return th | python | def setTimer(self, start, interval = None):
'''
        Generate a TimerEvent at the specified time
        :param start: offset for time from now (seconds), or datetime for a fixed time
        :param interval: if not None, the timer is regenerated every `interval` seconds.
:returns: a timer handle to wait or cancel the timer
'''
if isinstance(start, datetime):
timestamp = (start - datetime.fromtimestamp(0)).total_seconds()
else:
timestamp = self.current_time + start
if interval is not None:
if not (interval > 0):
raise ValueError('interval must be positive.')
th = self.TimerHandle(timestamp, interval)
self.timers.push(th, timestamp)
return th | [
"def",
"setTimer",
"(",
"self",
",",
"start",
",",
"interval",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"start",
",",
"datetime",
")",
":",
"timestamp",
"=",
"(",
"start",
"-",
"datetime",
".",
"fromtimestamp",
"(",
"0",
")",
")",
".",
"total_seconds",
"(",
")",
"else",
":",
"timestamp",
"=",
"self",
".",
"current_time",
"+",
"start",
"if",
"interval",
"is",
"not",
"None",
":",
"if",
"not",
"(",
"interval",
">",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'interval must be positive.'",
")",
"th",
"=",
"self",
".",
"TimerHandle",
"(",
"timestamp",
",",
"interval",
")",
"self",
".",
"timers",
".",
"push",
"(",
"th",
",",
"timestamp",
")",
"return",
"th"
] | Generate a TimerEvent at the specified time
:param start: offset for time from now (seconds), or datetime for a fixed time
:param interval: if not None, the timer is regenerated every `interval` seconds.
:returns: a timer handle to wait or cancel the timer | [
"Generate",
"a",
"TimerEvent",
"on",
"specified",
"time",
":",
"param",
"start",
":",
"offset",
"for",
"time",
"from",
"now",
"(",
"seconds",
")",
"or",
"datetime",
"for",
"a",
"fixed",
"time",
":",
"param",
"interval",
":",
"if",
"not",
"None",
"the",
"timer",
"is",
"regenerated",
"by",
"interval",
"seconds",
".",
":",
"returns",
":",
"a",
"timer",
"handle",
"to",
"wait",
"or",
"cancel",
"the",
"timer"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L201-L220 |
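Both accepted forms of `start`, as a sketch; note the absolute form is converted with the naive local epoch `datetime.fromtimestamp(0)` above, and the cancel API is assumed from the ':returns:' note rather than shown in this row::

    th1 = scheduler.setTimer(5.0)                   # once, 5 seconds from now
    th2 = scheduler.setTimer(1.0, interval=30.0)    # then every 30 seconds
    th3 = scheduler.setTimer(datetime(2025, 1, 1))  # absolute wall-clock time
    scheduler.cancelTimer(th2)                      # assumed cancel API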
hubo1016/vlcp | vlcp/event/core.py | Scheduler.registerPolling | def registerPolling(self, fd, options = POLLING_IN|POLLING_OUT, daemon = False):
'''
        Register a polling file descriptor
:param fd: file descriptor or socket object
:param options: bit mask flags. Polling object should ignore the incompatible flag.
'''
self.polling.register(fd, options, daemon) | python | def registerPolling(self, fd, options = POLLING_IN|POLLING_OUT, daemon = False):
'''
        Register a polling file descriptor
:param fd: file descriptor or socket object
:param options: bit mask flags. Polling object should ignore the incompatible flag.
'''
self.polling.register(fd, options, daemon) | [
"def",
"registerPolling",
"(",
"self",
",",
"fd",
",",
"options",
"=",
"POLLING_IN",
"|",
"POLLING_OUT",
",",
"daemon",
"=",
"False",
")",
":",
"self",
".",
"polling",
".",
"register",
"(",
"fd",
",",
"options",
",",
"daemon",
")"
] | Register a polling file descriptor
:param fd: file descriptor or socket object
:param options: bit mask flags. Polling object should ignore the incompatible flag. | [
"register",
"a",
"polling",
"file",
"descriptor",
":",
"param",
"fd",
":",
"file",
"descriptor",
"or",
"socket",
"object",
":",
"param",
"options",
":",
"bit",
"mask",
"flags",
".",
"Polling",
"object",
"should",
"ignore",
"the",
"incompatible",
"flag",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L230-L238 |
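A registration round-trip, using only the flags visible in the default argument; once registered, readiness is delivered as `PollEvent` instances that matchers can select on (socket construction elided)::

    scheduler.registerPolling(sock, POLLING_IN)   # read-readiness only
    # ... runnables matching PollEvent for sock now get woken up ...
    scheduler.unregisterPolling(sock)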
hubo1016/vlcp | vlcp/event/core.py | Scheduler.unregisterPolling | def unregisterPolling(self, fd, daemon = False):
'''
Unregister a polling file descriptor
:param fd: file descriptor or socket object
'''
self.polling.unregister(fd, daemon) | python | def unregisterPolling(self, fd, daemon = False):
'''
Unregister a polling file descriptor
:param fd: file descriptor or socket object
'''
self.polling.unregister(fd, daemon) | [
"def",
"unregisterPolling",
"(",
"self",
",",
"fd",
",",
"daemon",
"=",
"False",
")",
":",
"self",
".",
"polling",
".",
"unregister",
"(",
"fd",
",",
"daemon",
")"
] | Unregister a polling file descriptor
:param fd: file descriptor or socket object | [
"Unregister",
"a",
"polling",
"file",
"descriptor",
":",
"param",
"fd",
":",
"file",
"descriptor",
"or",
"socket",
"object"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L246-L252 |
hubo1016/vlcp | vlcp/event/core.py | Scheduler.setDaemon | def setDaemon(self, runnable, isdaemon, noregister = False):
'''
        If a runnable is a daemon, it will not keep the main loop running. The main loop will end when all alive runnables are daemons.
'''
if not noregister and runnable not in self.registerIndex:
self.register((), runnable)
if isdaemon:
self.daemons.add(runnable)
else:
self.daemons.discard(runnable) | python | def setDaemon(self, runnable, isdaemon, noregister = False):
'''
        If a runnable is a daemon, it will not keep the main loop running. The main loop will end when all alive runnables are daemons.
'''
if not noregister and runnable not in self.registerIndex:
self.register((), runnable)
if isdaemon:
self.daemons.add(runnable)
else:
self.daemons.discard(runnable) | [
"def",
"setDaemon",
"(",
"self",
",",
"runnable",
",",
"isdaemon",
",",
"noregister",
"=",
"False",
")",
":",
"if",
"not",
"noregister",
"and",
"runnable",
"not",
"in",
"self",
".",
"registerIndex",
":",
"self",
".",
"register",
"(",
"(",
")",
",",
"runnable",
")",
"if",
"isdaemon",
":",
"self",
".",
"daemons",
".",
"add",
"(",
"runnable",
")",
"else",
":",
"self",
".",
"daemons",
".",
"discard",
"(",
"runnable",
")"
] | If a runnable is a daemon, it will not keep the main loop running. The main loop will end when all alive runnables are daemons. | [
"If",
"a",
"runnable",
"is",
"a",
"daemon",
"it",
"will",
"not",
"keep",
"the",
"main",
"loop",
"running",
".",
"The",
"main",
"loop",
"will",
"end",
"when",
"all",
"alived",
"runnables",
"are",
"daemons",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L259-L268 |
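Because `setDaemon` registers the runnable on demand, marking a background routine is a one-liner::

    scheduler.setDaemon(housekeeping_routine, True)
    # the main loop may now exit even while housekeeping_routine is alive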
hubo1016/vlcp | vlcp/event/core.py | Scheduler.syscall | def syscall(self, func):
'''
Call the func in core context (main loop).
        func should look like::
def syscall_sample(scheduler, processor):
something...
        where processor is a function which accepts an event. When calling processor, the scheduler directly processes this event without
        sending it to the queue.
An event matcher is returned to the caller, and the caller should wait for the event immediately to get the return value from the system call.
The SyscallReturnEvent will have 'retvalue' as the return value, or 'exception' as the exception thrown: (type, value, traceback)
:param func: syscall function
:returns: an event matcher to wait for the SyscallReturnEvent. If None is returned, a syscall is already scheduled;
        return to core context first.
'''
if getattr(self, 'syscallfunc', None) is not None:
return None
self.syscallfunc = func
self.syscallmatcher = SyscallReturnEvent.createMatcher()
return self.syscallmatcher | python | def syscall(self, func):
'''
Call the func in core context (main loop).
        func should look like::
def syscall_sample(scheduler, processor):
something...
        where processor is a function which accepts an event. When calling processor, the scheduler directly processes this event without
        sending it to the queue.
An event matcher is returned to the caller, and the caller should wait for the event immediately to get the return value from the system call.
The SyscallReturnEvent will have 'retvalue' as the return value, or 'exception' as the exception thrown: (type, value, traceback)
:param func: syscall function
:returns: an event matcher to wait for the SyscallReturnEvent. If None is returned, a syscall is already scheduled;
return to the core context first.
'''
if getattr(self, 'syscallfunc', None) is not None:
return None
self.syscallfunc = func
self.syscallmatcher = SyscallReturnEvent.createMatcher()
return self.syscallmatcher | [
"def",
"syscall",
"(",
"self",
",",
"func",
")",
":",
"if",
"getattr",
"(",
"self",
",",
"'syscallfunc'",
",",
"None",
")",
"is",
"not",
"None",
":",
"return",
"None",
"self",
".",
"syscallfunc",
"=",
"func",
"self",
".",
"syscallmatcher",
"=",
"SyscallReturnEvent",
".",
"createMatcher",
"(",
")",
"return",
"self",
".",
"syscallmatcher"
] | Call the func in core context (main loop).
func should look like::
def syscall_sample(scheduler, processor):
something...
where processor is a function which accepts an event. When calling processor, the scheduler directly processes this event without
sending it to the queue.
An event matcher is returned to the caller, and the caller should wait for the event immediately to get the return value from the system call.
The SyscallReturnEvent will have 'retvalue' as the return value, or 'exception' as the exception thrown: (type, value, traceback)
:param func: syscall function
:returns: an event matcher to wait for the SyscallReturnEvent. If None is returned, a syscall is already scheduled;
return to the core context first. | [
"Call",
"the",
"func",
"in",
"core",
"context",
"(",
"main",
"loop",
")",
".",
"func",
"should",
"like",
"::",
"def",
"syscall_sample",
"(",
"scheduler",
"processor",
")",
":",
"something",
"..."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L269-L294 |
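A sketch of the syscall convention described in the docstring above. The function itself is runnable as written; the commented call site is an assumption about how a routine consumes the SyscallReturnEvent.

def count_runnables(scheduler, processor):
    # Runs inside the core context (main loop), so reading scheduler state
    # here is safe; processor(event) could dispatch an event immediately.
    return len(scheduler.registerIndex)

# Hypothetical call site inside a routine:
#   matcher = scheduler.syscall(count_runnables)  # None if one is pending
#   ...wait on matcher; the SyscallReturnEvent then carries either
#   .retvalue or .exception == (type, value, traceback).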
hubo1016/vlcp | vlcp/event/core.py | Scheduler.main | def main(self, installsignal = True, sendinit = True):
'''
Start main loop
'''
if installsignal:
sigterm = signal(SIGTERM, self._quitsignal)
sigint = signal(SIGINT, self._quitsignal)
try:
from signal import SIGUSR1
sigusr1 = signal(SIGUSR1, self._tracesignal)
except Exception:
pass
try:
if sendinit:
self.queue.append(SystemControlEvent(SystemControlEvent.INIT), True)
def processSyscall():
while self.syscallfunc is not None:
r = getattr(self, 'syscallrunnable', None)
if r is None:
self.syscallfunc = None
break
try:
try:
retvalue = self.syscallfunc(self, processEvent)
except Exception:
(t, v, tr) = sys.exc_info()
self.syscallfunc = None
self.syscallrunnable = None
r.send((SyscallReturnEvent(exception=(t, v, tr)), self.syscallmatcher))
else:
self.syscallfunc = None
self.syscallrunnable = None
r.send((SyscallReturnEvent(retvalue=retvalue), self.syscallmatcher))
except StopIteration:
self.unregisterall(r)
except QuitException:
self.unregisterall(r)
except Exception:
self.logger.exception('processing syscall failed with exception')
self.unregisterall(r)
def processEvent(event, emptys = ()):
if self.debugging:
self.logger.debug('Processing event %s', repr(event))
runnables = self.matchtree.matchesWithMatchers(event)
for r, m in runnables:
try:
self.syscallfunc = None
self.syscallrunnable = None
if self.debugging:
self.logger.debug('Send event to %r, matched with %r', r, m)
r.send((event, m))
except StopIteration:
self.unregisterall(r)
except QuitException:
self.unregisterall(r)
except Exception:
self.logger.exception('processing event %s failed with exception', repr(event))
self.unregisterall(r)
processSyscall()
if not event.canignore and not event.canignorenow():
self.eventtree.insert(event)
self.queue.block(event, emptys)
else:
for e in emptys:
processEvent(e)
def processQueueEvent(event):
"""
Optimized with queue events
"""
if self.debugging:
self.logger.debug('Processing event %s', repr(event))
is_valid = event.is_valid()
if is_valid is None:
processEvent(event)
else:
while event.is_valid():
result = self.matchtree.matchfirstwithmatcher(event)
if result is None:
break
r, m = result
try:
self.syscallfunc = None
self.syscallrunnable = None
if self.debugging:
self.logger.debug('Send event to %r, matched with %r', r, m)
r.send((event, m))
except StopIteration:
self.unregisterall(r)
except QuitException:
self.unregisterall(r)
except Exception:
self.logger.exception('processing event %s failed with exception', repr(event))
self.unregisterall(r)
processSyscall()
def processYields():
while self._pending_runnables:
i = 0
while i < len(self._pending_runnables):
r = self._pending_runnables[i]
try:
next(r)
except StopIteration:
self.unregisterall(r)
except QuitException:
self.unregisterall(r)
except Exception:
self.logger.exception('Resuming %r failed with exception', r)
self.unregisterall(r)
processSyscall()
i += 1
del self._pending_runnables[:i]
canquit = False
self.logger.info('Main loop started')
current_time = self.current_time = time()
processYields()
quitMatcher = SystemControlEvent.createMatcher(type=SystemControlEvent.QUIT)
while len(self.registerIndex) > len(self.daemons):
if self.debugging:
self.logger.debug('Blocked events: %d', len(self.queue.blockEvents))
self.logger.debug('Blocked events list: %r', list(self.queue.blockEvents.keys()))
if self.quitting:
self.logger.debug('Routines still not quit: %r', list(self.registerIndex.keys()))
if self.quitsignal:
self.quit()
if canquit and not self.queue.canPop() and not self.timers:
if self.quitting:
break
else:
self.quit(True)
self.queue.append(SystemControlLowPriorityEvent(SystemControlLowPriorityEvent.LOOP), True)
processedEvents = 0
while self.queue.canPop() and (self.processevents is None or processedEvents < self.processevents):
e, qes, emptys = self.queue.pop()
# Queue events will not enqueue again
if not e.canignore and not e.canignorenow():
# The event might block, must process it first
processEvent(e, emptys)
for qe in qes:
processQueueEvent(qe)
else:
for qe in qes:
processQueueEvent(qe)
processEvent(e, emptys)
processYields()
if quitMatcher.isMatch(e):
if e.daemononly:
runnables = list(self.daemons)
else:
runnables = list(self.registerIndex.keys())
for r in runnables:
try:
r.throw(QuitException)
except StopIteration:
self.unregisterall(r)
except QuitException:
self.unregisterall(r)
except Exception:
self.logger.exception('Runnable quit failed with exception')
self.unregisterall(r)
processSyscall()
processYields()
if self.quitsignal:
self.quit()
processedEvents += 1
if len(self.registerIndex) <= len(self.daemons):
break
end_time = time()
if end_time - current_time > 1:
self.logger.warning("An iteration takes %r seconds to process", end_time - current_time)
if self.generatecontinue or self.queue.canPop():
wait = 0
elif not self.timers:
wait = None
else:
wait = self.timers.top().timestamp - end_time
if wait < 0:
wait = 0
events, canquit = self.polling.pollEvents(wait)
for e in events:
self.queue.append(e, True)
current_time = self.current_time = time()
now = current_time + 0.1
while self.timers and self.timers.topPriority() < now:
t = self.timers.top()
if t.interval is not None:
t.timestamp += t.interval
self.timers.setpriority(t, t.timestamp)
else:
self.timers.pop()
self.queue.append(TimerEvent(t), True)
if self.generatecontinue:
self.queue.append(SystemControlEvent(SystemControlEvent.CONTINUE), True)
self.generatecontinue = False
if self.registerIndex:
if len(self.registerIndex) > len(self.daemons):
self.logger.warning('Some runnables are not quit, doing cleanup')
self.logger.warning('Runnables list: %r', set(self.registerIndex.keys()).difference(self.daemons))
for r in list(self.registerIndex.keys()):
try:
r.close()
except Exception:
self.logger.exception('Runnable quit failed with exception')
finally:
self.unregisterall(r)
self.logger.info('Main loop quit normally')
finally:
if installsignal:
signal(SIGTERM, sigterm)
signal(SIGINT, sigint)
try:
signal(SIGUSR1, sigusr1)
except Exception:
pass | python | def main(self, installsignal = True, sendinit = True):
'''
Start main loop
'''
if installsignal:
sigterm = signal(SIGTERM, self._quitsignal)
sigint = signal(SIGINT, self._quitsignal)
try:
from signal import SIGUSR1
sigusr1 = signal(SIGUSR1, self._tracesignal)
except Exception:
pass
try:
if sendinit:
self.queue.append(SystemControlEvent(SystemControlEvent.INIT), True)
def processSyscall():
while self.syscallfunc is not None:
r = getattr(self, 'syscallrunnable', None)
if r is None:
self.syscallfunc = None
break
try:
try:
retvalue = self.syscallfunc(self, processEvent)
except Exception:
(t, v, tr) = sys.exc_info()
self.syscallfunc = None
self.syscallrunnable = None
r.send((SyscallReturnEvent(exception=(t, v, tr)), self.syscallmatcher))
else:
self.syscallfunc = None
self.syscallrunnable = None
r.send((SyscallReturnEvent(retvalue=retvalue), self.syscallmatcher))
except StopIteration:
self.unregisterall(r)
except QuitException:
self.unregisterall(r)
except Exception:
self.logger.exception('processing syscall failed with exception')
self.unregisterall(r)
def processEvent(event, emptys = ()):
if self.debugging:
self.logger.debug('Processing event %s', repr(event))
runnables = self.matchtree.matchesWithMatchers(event)
for r, m in runnables:
try:
self.syscallfunc = None
self.syscallrunnable = None
if self.debugging:
self.logger.debug('Send event to %r, matched with %r', r, m)
r.send((event, m))
except StopIteration:
self.unregisterall(r)
except QuitException:
self.unregisterall(r)
except Exception:
self.logger.exception('processing event %s failed with exception', repr(event))
self.unregisterall(r)
processSyscall()
if not event.canignore and not event.canignorenow():
self.eventtree.insert(event)
self.queue.block(event, emptys)
else:
for e in emptys:
processEvent(e)
def processQueueEvent(event):
"""
Optimized with queue events
"""
if self.debugging:
self.logger.debug('Processing event %s', repr(event))
is_valid = event.is_valid()
if is_valid is None:
processEvent(event)
else:
while event.is_valid():
result = self.matchtree.matchfirstwithmatcher(event)
if result is None:
break
r, m = result
try:
self.syscallfunc = None
self.syscallrunnable = None
if self.debugging:
self.logger.debug('Send event to %r, matched with %r', r, m)
r.send((event, m))
except StopIteration:
self.unregisterall(r)
except QuitException:
self.unregisterall(r)
except Exception:
self.logger.exception('processing event %s failed with exception', repr(event))
self.unregisterall(r)
processSyscall()
def processYields():
while self._pending_runnables:
i = 0
while i < len(self._pending_runnables):
r = self._pending_runnables[i]
try:
next(r)
except StopIteration:
self.unregisterall(r)
except QuitException:
self.unregisterall(r)
except Exception:
self.logger.exception('Resuming %r failed with exception', r)
self.unregisterall(r)
processSyscall()
i += 1
del self._pending_runnables[:i]
canquit = False
self.logger.info('Main loop started')
current_time = self.current_time = time()
processYields()
quitMatcher = SystemControlEvent.createMatcher(type=SystemControlEvent.QUIT)
while len(self.registerIndex) > len(self.daemons):
if self.debugging:
self.logger.debug('Blocked events: %d', len(self.queue.blockEvents))
self.logger.debug('Blocked events list: %r', list(self.queue.blockEvents.keys()))
if self.quitting:
self.logger.debug('Routines still not quit: %r', list(self.registerIndex.keys()))
if self.quitsignal:
self.quit()
if canquit and not self.queue.canPop() and not self.timers:
if self.quitting:
break
else:
self.quit(True)
self.queue.append(SystemControlLowPriorityEvent(SystemControlLowPriorityEvent.LOOP), True)
processedEvents = 0
while self.queue.canPop() and (self.processevents is None or processedEvents < self.processevents):
e, qes, emptys = self.queue.pop()
# Queue events will not enqueue again
if not e.canignore and not e.canignorenow():
# The event might block, must process it first
processEvent(e, emptys)
for qe in qes:
processQueueEvent(qe)
else:
for qe in qes:
processQueueEvent(qe)
processEvent(e, emptys)
processYields()
if quitMatcher.isMatch(e):
if e.daemononly:
runnables = list(self.daemons)
else:
runnables = list(self.registerIndex.keys())
for r in runnables:
try:
r.throw(QuitException)
except StopIteration:
self.unregisterall(r)
except QuitException:
self.unregisterall(r)
except Exception:
self.logger.exception('Runnable quit failed with exception')
self.unregisterall(r)
processSyscall()
processYields()
if self.quitsignal:
self.quit()
processedEvents += 1
if len(self.registerIndex) <= len(self.daemons):
break
end_time = time()
if end_time - current_time > 1:
self.logger.warning("An iteration takes %r seconds to process", end_time - current_time)
if self.generatecontinue or self.queue.canPop():
wait = 0
elif not self.timers:
wait = None
else:
wait = self.timers.top().timestamp - end_time
if wait < 0:
wait = 0
events, canquit = self.polling.pollEvents(wait)
for e in events:
self.queue.append(e, True)
current_time = self.current_time = time()
now = current_time + 0.1
while self.timers and self.timers.topPriority() < now:
t = self.timers.top()
if t.interval is not None:
t.timestamp += t.interval
self.timers.setpriority(t, t.timestamp)
else:
self.timers.pop()
self.queue.append(TimerEvent(t), True)
if self.generatecontinue:
self.queue.append(SystemControlEvent(SystemControlEvent.CONTINUE), True)
self.generatecontinue = False
if self.registerIndex:
if len(self.registerIndex) > len(self.daemons):
self.logger.warning('Some runnables are not quit, doing cleanup')
self.logger.warning('Runnables list: %r', set(self.registerIndex.keys()).difference(self.daemons))
for r in list(self.registerIndex.keys()):
try:
r.close()
except Exception:
self.logger.exception('Runnable quit failed with exception')
finally:
self.unregisterall(r)
self.logger.info('Main loop quit normally')
finally:
if installsignal:
signal(SIGTERM, sigterm)
signal(SIGINT, sigint)
try:
signal(SIGUSR1, sigusr1)
except Exception:
pass | [
"def",
"main",
"(",
"self",
",",
"installsignal",
"=",
"True",
",",
"sendinit",
"=",
"True",
")",
":",
"if",
"installsignal",
":",
"sigterm",
"=",
"signal",
"(",
"SIGTERM",
",",
"self",
".",
"_quitsignal",
")",
"sigint",
"=",
"signal",
"(",
"SIGINT",
",",
"self",
".",
"_quitsignal",
")",
"try",
":",
"from",
"signal",
"import",
"SIGUSR1",
"sigusr1",
"=",
"signal",
"(",
"SIGUSR1",
",",
"self",
".",
"_tracesignal",
")",
"except",
"Exception",
":",
"pass",
"try",
":",
"if",
"sendinit",
":",
"self",
".",
"queue",
".",
"append",
"(",
"SystemControlEvent",
"(",
"SystemControlEvent",
".",
"INIT",
")",
",",
"True",
")",
"def",
"processSyscall",
"(",
")",
":",
"while",
"self",
".",
"syscallfunc",
"is",
"not",
"None",
":",
"r",
"=",
"getattr",
"(",
"self",
",",
"'syscallrunnable'",
",",
"None",
")",
"if",
"r",
"is",
"None",
":",
"self",
".",
"syscallfunc",
"=",
"None",
"break",
"try",
":",
"try",
":",
"retvalue",
"=",
"self",
".",
"syscallfunc",
"(",
"self",
",",
"processEvent",
")",
"except",
"Exception",
":",
"(",
"t",
",",
"v",
",",
"tr",
")",
"=",
"sys",
".",
"exc_info",
"(",
")",
"self",
".",
"syscallfunc",
"=",
"None",
"self",
".",
"syscallrunnable",
"=",
"None",
"r",
".",
"send",
"(",
"(",
"SyscallReturnEvent",
"(",
"exception",
"=",
"(",
"t",
",",
"v",
",",
"tr",
")",
")",
",",
"self",
".",
"syscallmatcher",
")",
")",
"else",
":",
"self",
".",
"syscallfunc",
"=",
"None",
"self",
".",
"syscallrunnable",
"=",
"None",
"r",
".",
"send",
"(",
"(",
"SyscallReturnEvent",
"(",
"retvalue",
"=",
"retvalue",
")",
",",
"self",
".",
"syscallmatcher",
")",
")",
"except",
"StopIteration",
":",
"self",
".",
"unregisterall",
"(",
"r",
")",
"except",
"QuitException",
":",
"self",
".",
"unregisterall",
"(",
"r",
")",
"except",
"Exception",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"'processing syscall failed with exception'",
")",
"self",
".",
"unregisterall",
"(",
"r",
")",
"def",
"processEvent",
"(",
"event",
",",
"emptys",
"=",
"(",
")",
")",
":",
"if",
"self",
".",
"debugging",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Processing event %s'",
",",
"repr",
"(",
"event",
")",
")",
"runnables",
"=",
"self",
".",
"matchtree",
".",
"matchesWithMatchers",
"(",
"event",
")",
"for",
"r",
",",
"m",
"in",
"runnables",
":",
"try",
":",
"self",
".",
"syscallfunc",
"=",
"None",
"self",
".",
"syscallrunnable",
"=",
"None",
"if",
"self",
".",
"debugging",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Send event to %r, matched with %r'",
",",
"r",
",",
"m",
")",
"r",
".",
"send",
"(",
"(",
"event",
",",
"m",
")",
")",
"except",
"StopIteration",
":",
"self",
".",
"unregisterall",
"(",
"r",
")",
"except",
"QuitException",
":",
"self",
".",
"unregisterall",
"(",
"r",
")",
"except",
"Exception",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"'processing event %s failed with exception'",
",",
"repr",
"(",
"event",
")",
")",
"self",
".",
"unregisterall",
"(",
"r",
")",
"processSyscall",
"(",
")",
"if",
"not",
"event",
".",
"canignore",
"and",
"not",
"event",
".",
"canignorenow",
"(",
")",
":",
"self",
".",
"eventtree",
".",
"insert",
"(",
"event",
")",
"self",
".",
"queue",
".",
"block",
"(",
"event",
",",
"emptys",
")",
"else",
":",
"for",
"e",
"in",
"emptys",
":",
"processEvent",
"(",
"e",
")",
"def",
"processQueueEvent",
"(",
"event",
")",
":",
"\"\"\"\n Optimized with queue events\n \"\"\"",
"if",
"self",
".",
"debugging",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Processing event %s'",
",",
"repr",
"(",
"event",
")",
")",
"is_valid",
"=",
"event",
".",
"is_valid",
"(",
")",
"if",
"is_valid",
"is",
"None",
":",
"processEvent",
"(",
"event",
")",
"else",
":",
"while",
"event",
".",
"is_valid",
"(",
")",
":",
"result",
"=",
"self",
".",
"matchtree",
".",
"matchfirstwithmatcher",
"(",
"event",
")",
"if",
"result",
"is",
"None",
":",
"break",
"r",
",",
"m",
"=",
"result",
"try",
":",
"self",
".",
"syscallfunc",
"=",
"None",
"self",
".",
"syscallrunnable",
"=",
"None",
"if",
"self",
".",
"debugging",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Send event to %r, matched with %r'",
",",
"r",
",",
"m",
")",
"r",
".",
"send",
"(",
"(",
"event",
",",
"m",
")",
")",
"except",
"StopIteration",
":",
"self",
".",
"unregisterall",
"(",
"r",
")",
"except",
"QuitException",
":",
"self",
".",
"unregisterall",
"(",
"r",
")",
"except",
"Exception",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"'processing event %s failed with exception'",
",",
"repr",
"(",
"event",
")",
")",
"self",
".",
"unregisterall",
"(",
"r",
")",
"processSyscall",
"(",
")",
"def",
"processYields",
"(",
")",
":",
"while",
"self",
".",
"_pending_runnables",
":",
"i",
"=",
"0",
"while",
"i",
"<",
"len",
"(",
"self",
".",
"_pending_runnables",
")",
":",
"r",
"=",
"self",
".",
"_pending_runnables",
"[",
"i",
"]",
"try",
":",
"next",
"(",
"r",
")",
"except",
"StopIteration",
":",
"self",
".",
"unregisterall",
"(",
"r",
")",
"except",
"QuitException",
":",
"self",
".",
"unregisterall",
"(",
"r",
")",
"except",
"Exception",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"'Resuming %r failed with exception'",
",",
"r",
")",
"self",
".",
"unregisterall",
"(",
"r",
")",
"processSyscall",
"(",
")",
"i",
"+=",
"1",
"del",
"self",
".",
"_pending_runnables",
"[",
":",
"i",
"]",
"canquit",
"=",
"False",
"self",
".",
"logger",
".",
"info",
"(",
"'Main loop started'",
")",
"current_time",
"=",
"self",
".",
"current_time",
"=",
"time",
"(",
")",
"processYields",
"(",
")",
"quitMatcher",
"=",
"SystemControlEvent",
".",
"createMatcher",
"(",
"type",
"=",
"SystemControlEvent",
".",
"QUIT",
")",
"while",
"len",
"(",
"self",
".",
"registerIndex",
")",
">",
"len",
"(",
"self",
".",
"daemons",
")",
":",
"if",
"self",
".",
"debugging",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Blocked events: %d'",
",",
"len",
"(",
"self",
".",
"queue",
".",
"blockEvents",
")",
")",
"self",
".",
"logger",
".",
"debug",
"(",
"'Blocked events list: %r'",
",",
"list",
"(",
"self",
".",
"queue",
".",
"blockEvents",
".",
"keys",
"(",
")",
")",
")",
"if",
"self",
".",
"quitting",
":",
"self",
".",
"logger",
".",
"debug",
"(",
"'Routines still not quit: %r'",
",",
"list",
"(",
"self",
".",
"registerIndex",
".",
"keys",
"(",
")",
")",
")",
"if",
"self",
".",
"quitsignal",
":",
"self",
".",
"quit",
"(",
")",
"if",
"canquit",
"and",
"not",
"self",
".",
"queue",
".",
"canPop",
"(",
")",
"and",
"not",
"self",
".",
"timers",
":",
"if",
"self",
".",
"quitting",
":",
"break",
"else",
":",
"self",
".",
"quit",
"(",
"True",
")",
"self",
".",
"queue",
".",
"append",
"(",
"SystemControlLowPriorityEvent",
"(",
"SystemControlLowPriorityEvent",
".",
"LOOP",
")",
",",
"True",
")",
"processedEvents",
"=",
"0",
"while",
"self",
".",
"queue",
".",
"canPop",
"(",
")",
"and",
"(",
"self",
".",
"processevents",
"is",
"None",
"or",
"processedEvents",
"<",
"self",
".",
"processevents",
")",
":",
"e",
",",
"qes",
",",
"emptys",
"=",
"self",
".",
"queue",
".",
"pop",
"(",
")",
"# Queue events will not enqueue again",
"if",
"not",
"e",
".",
"canignore",
"and",
"not",
"e",
".",
"canignorenow",
"(",
")",
":",
"# The event might block, must process it first",
"processEvent",
"(",
"e",
",",
"emptys",
")",
"for",
"qe",
"in",
"qes",
":",
"processQueueEvent",
"(",
"qe",
")",
"else",
":",
"for",
"qe",
"in",
"qes",
":",
"processQueueEvent",
"(",
"qe",
")",
"processEvent",
"(",
"e",
",",
"emptys",
")",
"processYields",
"(",
")",
"if",
"quitMatcher",
".",
"isMatch",
"(",
"e",
")",
":",
"if",
"e",
".",
"daemononly",
":",
"runnables",
"=",
"list",
"(",
"self",
".",
"daemons",
")",
"else",
":",
"runnables",
"=",
"list",
"(",
"self",
".",
"registerIndex",
".",
"keys",
"(",
")",
")",
"for",
"r",
"in",
"runnables",
":",
"try",
":",
"r",
".",
"throw",
"(",
"QuitException",
")",
"except",
"StopIteration",
":",
"self",
".",
"unregisterall",
"(",
"r",
")",
"except",
"QuitException",
":",
"self",
".",
"unregisterall",
"(",
"r",
")",
"except",
"Exception",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"'Runnable quit failed with exception'",
")",
"self",
".",
"unregisterall",
"(",
"r",
")",
"processSyscall",
"(",
")",
"processYields",
"(",
")",
"if",
"self",
".",
"quitsignal",
":",
"self",
".",
"quit",
"(",
")",
"processedEvents",
"+=",
"1",
"if",
"len",
"(",
"self",
".",
"registerIndex",
")",
"<=",
"len",
"(",
"self",
".",
"daemons",
")",
":",
"break",
"end_time",
"=",
"time",
"(",
")",
"if",
"end_time",
"-",
"current_time",
">",
"1",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"\"An iteration takes %r seconds to process\"",
",",
"end_time",
"-",
"current_time",
")",
"if",
"self",
".",
"generatecontinue",
"or",
"self",
".",
"queue",
".",
"canPop",
"(",
")",
":",
"wait",
"=",
"0",
"elif",
"not",
"self",
".",
"timers",
":",
"wait",
"=",
"None",
"else",
":",
"wait",
"=",
"self",
".",
"timers",
".",
"top",
"(",
")",
".",
"timestamp",
"-",
"end_time",
"if",
"wait",
"<",
"0",
":",
"wait",
"=",
"0",
"events",
",",
"canquit",
"=",
"self",
".",
"polling",
".",
"pollEvents",
"(",
"wait",
")",
"for",
"e",
"in",
"events",
":",
"self",
".",
"queue",
".",
"append",
"(",
"e",
",",
"True",
")",
"current_time",
"=",
"self",
".",
"current_time",
"=",
"time",
"(",
")",
"now",
"=",
"current_time",
"+",
"0.1",
"while",
"self",
".",
"timers",
"and",
"self",
".",
"timers",
".",
"topPriority",
"(",
")",
"<",
"now",
":",
"t",
"=",
"self",
".",
"timers",
".",
"top",
"(",
")",
"if",
"t",
".",
"interval",
"is",
"not",
"None",
":",
"t",
".",
"timestamp",
"+=",
"t",
".",
"interval",
"self",
".",
"timers",
".",
"setpriority",
"(",
"t",
",",
"t",
".",
"timestamp",
")",
"else",
":",
"self",
".",
"timers",
".",
"pop",
"(",
")",
"self",
".",
"queue",
".",
"append",
"(",
"TimerEvent",
"(",
"t",
")",
",",
"True",
")",
"if",
"self",
".",
"generatecontinue",
":",
"self",
".",
"queue",
".",
"append",
"(",
"SystemControlEvent",
"(",
"SystemControlEvent",
".",
"CONTINUE",
")",
",",
"True",
")",
"self",
".",
"generatecontinue",
"=",
"False",
"if",
"self",
".",
"registerIndex",
":",
"if",
"len",
"(",
"self",
".",
"registerIndex",
")",
">",
"len",
"(",
"self",
".",
"daemons",
")",
":",
"self",
".",
"logger",
".",
"warning",
"(",
"'Some runnables are not quit, doing cleanup'",
")",
"self",
".",
"logger",
".",
"warning",
"(",
"'Runnables list: %r'",
",",
"set",
"(",
"self",
".",
"registerIndex",
".",
"keys",
"(",
")",
")",
".",
"difference",
"(",
"self",
".",
"daemons",
")",
")",
"for",
"r",
"in",
"list",
"(",
"self",
".",
"registerIndex",
".",
"keys",
"(",
")",
")",
":",
"try",
":",
"r",
".",
"close",
"(",
")",
"except",
"Exception",
":",
"self",
".",
"logger",
".",
"exception",
"(",
"'Runnable quit failed with exception'",
")",
"finally",
":",
"self",
".",
"unregisterall",
"(",
"r",
")",
"self",
".",
"logger",
".",
"info",
"(",
"'Main loop quit normally'",
")",
"finally",
":",
"if",
"installsignal",
":",
"signal",
"(",
"SIGTERM",
",",
"sigterm",
")",
"signal",
"(",
"SIGINT",
",",
"sigint",
")",
"try",
":",
"signal",
"(",
"SIGUSR1",
",",
"sigusr1",
")",
"except",
"Exception",
":",
"pass"
] | Start main loop | [
"Start",
"main",
"loop"
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/event/core.py#L303-L518 |
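A standalone illustration (not vlcp API) of how main() above chooses its poll timeout: zero when events are queued or a CONTINUE is pending, None (block indefinitely) when there are no timers, otherwise the time until the earliest timer, clamped at zero.

def poll_timeout(work_pending, timer_deadlines, now):
    # Mirrors the wait computation in main(): busy -> poll immediately,
    # idle with no timers -> block, else wake for the nearest deadline.
    if work_pending:
        return 0
    if not timer_deadlines:
        return None
    return max(0, min(timer_deadlines) - now)

assert poll_timeout(True, [5.0], 1.0) == 0
assert poll_timeout(False, [], 1.0) is None
assert poll_timeout(False, [5.0, 9.0], 1.0) == 4.0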
hubo1016/vlcp | vlcp/service/connection/tcpserver.py | TcpServerBase.getservers | def getservers(self, vhost = None):
'''
Return current servers
:param vhost: return only servers of vhost if specified. '' to return only default servers.
None for all servers.
'''
if vhost is not None:
return [s for s in self.connections if s.protocol.vhost == vhost]
else:
return list(self.connections) | python | def getservers(self, vhost = None):
'''
Return current servers
:param vhost: return only servers of vhost if specified. '' to return only default servers.
None for all servers.
'''
if vhost is not None:
return [s for s in self.connections if s.protocol.vhost == vhost]
else:
return list(self.connections) | [
"def",
"getservers",
"(",
"self",
",",
"vhost",
"=",
"None",
")",
":",
"if",
"vhost",
"is",
"not",
"None",
":",
"return",
"[",
"s",
"for",
"s",
"in",
"self",
".",
"connections",
"if",
"s",
".",
"protocol",
".",
"vhost",
"==",
"vhost",
"]",
"else",
":",
"return",
"list",
"(",
"self",
".",
"connections",
")"
] | Return current servers
:param vhost: return only servers of vhost if specified. '' to return only default servers.
None for all servers. | [
"Return",
"current",
"servers",
":",
"param",
"vhost",
":",
"return",
"only",
"servers",
"of",
"vhost",
"if",
"specified",
".",
"to",
"return",
"only",
"default",
"servers",
".",
"None",
"for",
"all",
"servers",
"."
] | train | https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/service/connection/tcpserver.py#L138-L148 |
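A hedged usage sketch for getservers; the server_module instance is an assumption:

def split_servers(server_module):
    # '' selects only servers on the default vhost; None returns them all.
    default_servers = server_module.getservers('')
    every_server = server_module.getservers(None)
    return default_servers, every_server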