Dataset columns and value ranges:
- body: string, lengths 26 to 98.2k
- body_hash: int64, -9,222,864,604,528,158,000 to 9,221,803,474B
- docstring: string, lengths 1 to 16.8k
- path: string, lengths 5 to 230
- name: string, lengths 1 to 96
- repository_name: string, lengths 7 to 89
- lang: string, 1 distinct value (python)
- body_without_docstring: string, lengths 20 to 98.2k
def blackbody(nu, ref_freq=353.0): '\n The ratio of the blackbody function for dust at frequency nu\n over the value for reference frequency ref_freq\n\n Arguments\n ---------\n nu : float\n Frequency in GHz.\n ref_freq : float\n Reference frequency in GHz.\n\n Returns\n -------\n blackbody_ratio : float\n B(nu, T_dust) / B(nu_ref, T_dust)\n ' k = 1.38064852e-23 h = 6.62607004e-34 T = 19.6 nu_ref = (ref_freq * 1000000000.0) nu *= 1000000000.0 x = (((h * nu) / k) / T) x_ref = (((h * nu_ref) / k) / T) return ((((x ** 3) / (x_ref ** 3)) * (np.exp(x_ref) - 1)) / (np.exp(x) - 1))
-3,061,004,010,963,920,400
The ratio of the blackbody function for dust at frequency nu over the value for reference frequency ref_freq Arguments --------- nu : float Frequency in GHz. ref_freq : float Reference frequency in GHz. Returns ------- blackbody_ratio : float B(nu, T_dust) / B(nu_ref, T_dust)
xfaster/spec_tools.py
blackbody
SPIDER-CMB/xfaster
python
def blackbody(nu, ref_freq=353.0): '\n The ratio of the blackbody function for dust at frequency nu\n over the value for reference frequency ref_freq\n\n Arguments\n ---------\n nu : float\n Frequency in GHz.\n ref_freq : float\n Reference frequency in GHz.\n\n Returns\n -------\n blackbody_ratio : float\n B(nu, T_dust) / B(nu_ref, T_dust)\n ' k = 1.38064852e-23 h = 6.62607004e-34 T = 19.6 nu_ref = (ref_freq * 1000000000.0) nu *= 1000000000.0 x = (((h * nu) / k) / T) x_ref = (((h * nu_ref) / k) / T) return ((((x ** 3) / (x_ref ** 3)) * (np.exp(x_ref) - 1)) / (np.exp(x) - 1))
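For readability, the same blackbody ratio is restated below as multi-line Python with an illustrative call. It assumes numpy is imported as np (as the flattened body above expects); the 150 GHz input is only an example value, not part of the source.

import numpy as np

def blackbody(nu, ref_freq=353.0):
    # Ratio B(nu, T_dust) / B(nu_ref, T_dust) for a fixed dust temperature of 19.6 K
    k = 1.38064852e-23    # Boltzmann constant [J/K]
    h = 6.62607004e-34    # Planck constant [J s]
    T = 19.6              # dust temperature [K]
    nu_ref = ref_freq * 1.0e9   # GHz -> Hz
    nu = nu * 1.0e9             # GHz -> Hz
    x = h * nu / (k * T)
    x_ref = h * nu_ref / (k * T)
    return (x ** 3 / x_ref ** 3) * (np.exp(x_ref) - 1) / (np.exp(x) - 1)

print(blackbody(150.0))   # dust emission at 150 GHz relative to the 353 GHz reference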
def rj2cmb(nu_in): '\n Conversion from Rayleigh-Jeans units to CMB temperature units\n\n Arguments\n ---------\n nu_in : float\n Frequency in GHz.\n\n Returns\n -------\n cal_fac : float\n Number by which to multiply a RJ temperature to get a CMB temp\n ' k = 1.38064852e-23 h = 6.62607004e-34 T = 2.72548 nu = (nu_in * 1000000000.0) x = (((h * nu) / k) / T) return (((np.exp(x) - 1.0) ** 2) / ((x ** 2) * np.exp(x)))
2,812,329,766,020,827,000
Conversion from Rayleigh-Jeans units to CMB temperature units Arguments --------- nu_in : float Frequency in GHz. Returns ------- cal_fac : float Number by which to multiply a RJ temperature to get a CMB temp
xfaster/spec_tools.py
rj2cmb
SPIDER-CMB/xfaster
python
def rj2cmb(nu_in): '\n Conversion from Rayleigh-Jeans units to CMB temperature units\n\n Arguments\n ---------\n nu_in : float\n Frequency in GHz.\n\n Returns\n -------\n cal_fac : float\n Number by which to multiply a RJ temperature to get a CMB temp\n ' k = 1.38064852e-23 h = 6.62607004e-34 T = 2.72548 nu = (nu_in * 1000000000.0) x = (((h * nu) / k) / T) return (((np.exp(x) - 1.0) ** 2) / ((x ** 2) * np.exp(x)))
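A minimal usage sketch for rj2cmb, assuming the function is importable from xfaster.spec_tools (inferred from the path column); the 150 GHz frequency and the 1.0 Rayleigh-Jeans temperature are illustrative.

from xfaster.spec_tools import rj2cmb   # assumed import path, inferred from the path column

t_rj = 1.0                   # Rayleigh-Jeans temperature, illustrative value
cal_fac = rj2cmb(150.0)      # conversion factor at 150 GHz
t_cmb = t_rj * cal_fac       # equivalent CMB thermodynamic temperature
print(cal_fac, t_cmb)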
def scale_dust(freq0, freq1, ref_freq, beta, delta_beta=None, deriv=False): '\n Get the factor by which you must multiply the cross spectrum from maps of\n frequencies freq0 and freq1 to match the dust power at ref_freq given\n spectra index beta.\n\n If deriv is True, return the frequency scaling at the reference beta,\n and the first derivative w.r.t. beta.\n\n Otherwise if delta_beta is given, return the scale factor adjusted\n for a linearized offset delta_beta from the reference beta.\n\n Arguments\n ---------\n freq0 : float\n Frequency of map0 in GHz.\n freq1 : float\n Frequency of map1 in GHz.\n ref_freq : float\n Reference frequency from which to compute relative scaling in GHz.\n beta : float\n Dust spectral index.\n delta_beta : float\n Difference from beta-- scaling computed as a first order Taylor\n expansion from original beta-scaling.\n deriv : bool\n If true, return the frequency scaling at the reference beta, along with\n the first derivative w.r.t. beta at the reference beta.\n\n Returns\n -------\n freq_scale : float\n The relative scaling factor for the dust cross spectrum-- multiply by\n this number to get the dust spectrum at the reference frequency\n -- or --\n freq_scale, deriv : floats\n The relative scaling factor and its derivative\n ' freq_scale = (((((rj2cmb(freq0) * rj2cmb(freq1)) / (rj2cmb(ref_freq) ** 2.0)) * blackbody(freq0, ref_freq=ref_freq)) * blackbody(freq1, ref_freq=ref_freq)) * (((freq0 * freq1) / (ref_freq ** 2)) ** (beta - 2.0))) if (deriv or (delta_beta is not None)): delta = np.log(((freq0 * freq1) / (ref_freq ** 2))) if deriv: return (freq_scale, (freq_scale * delta)) return (freq_scale * (1 + (delta * delta_beta))) return freq_scale
8,050,293,239,967,490,000
Get the factor by which you must multiply the cross spectrum from maps of frequencies freq0 and freq1 to match the dust power at ref_freq given spectra index beta. If deriv is True, return the frequency scaling at the reference beta, and the first derivative w.r.t. beta. Otherwise if delta_beta is given, return the scale factor adjusted for a linearized offset delta_beta from the reference beta. Arguments --------- freq0 : float Frequency of map0 in GHz. freq1 : float Frequency of map1 in GHz. ref_freq : float Reference frequency from which to compute relative scaling in GHz. beta : float Dust spectral index. delta_beta : float Difference from beta-- scaling computed as a first order Taylor expansion from original beta-scaling. deriv : bool If true, return the frequency scaling at the reference beta, along with the first derivative w.r.t. beta at the reference beta. Returns ------- freq_scale : float The relative scaling factor for the dust cross spectrum-- multiply by this number to get the dust spectrum at the reference frequency -- or -- freq_scale, deriv : floats The relative scaling factor and its derivative
xfaster/spec_tools.py
scale_dust
SPIDER-CMB/xfaster
python
def scale_dust(freq0, freq1, ref_freq, beta, delta_beta=None, deriv=False): '\n Get the factor by which you must multiply the cross spectrum from maps of\n frequencies freq0 and freq1 to match the dust power at ref_freq given\n spectra index beta.\n\n If deriv is True, return the frequency scaling at the reference beta,\n and the first derivative w.r.t. beta.\n\n Otherwise if delta_beta is given, return the scale factor adjusted\n for a linearized offset delta_beta from the reference beta.\n\n Arguments\n ---------\n freq0 : float\n Frequency of map0 in GHz.\n freq1 : float\n Frequency of map1 in GHz.\n ref_freq : float\n Reference frequency from which to compute relative scaling in GHz.\n beta : float\n Dust spectral index.\n delta_beta : float\n Difference from beta-- scaling computed as a first order Taylor\n expansion from original beta-scaling.\n deriv : bool\n If true, return the frequency scaling at the reference beta, along with\n the first derivative w.r.t. beta at the reference beta.\n\n Returns\n -------\n freq_scale : float\n The relative scaling factor for the dust cross spectrum-- multiply by\n this number to get the dust spectrum at the reference frequency\n -- or --\n freq_scale, deriv : floats\n The relative scaling factor and its derivative\n ' freq_scale = (((((rj2cmb(freq0) * rj2cmb(freq1)) / (rj2cmb(ref_freq) ** 2.0)) * blackbody(freq0, ref_freq=ref_freq)) * blackbody(freq1, ref_freq=ref_freq)) * (((freq0 * freq1) / (ref_freq ** 2)) ** (beta - 2.0))) if (deriv or (delta_beta is not None)): delta = np.log(((freq0 * freq1) / (ref_freq ** 2))) if deriv: return (freq_scale, (freq_scale * delta)) return (freq_scale * (1 + (delta * delta_beta))) return freq_scale
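A usage sketch for scale_dust, again assuming the xfaster.spec_tools import path; the 95 GHz x 150 GHz map pair, 353 GHz reference, and beta = 1.54 are illustrative values, not defaults from the source.

from xfaster.spec_tools import scale_dust   # assumed import path

# Scale a 95 GHz x 150 GHz dust cross spectrum to the 353 GHz reference
fs = scale_dust(95.0, 150.0, ref_freq=353.0, beta=1.54)

# The same scaling together with its derivative with respect to beta,
# useful for linearized fits of the spectral index
fs0, dfs_dbeta = scale_dust(95.0, 150.0, ref_freq=353.0, beta=1.54, deriv=True)
print(fs, fs0, dfs_dbeta)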
def wigner3j(l2, m2, l3, m3): '\n Wigner 3j symbols computed for all valid values of ``L``, as in:\n\n .. math::\n\n \\begin{pmatrix}\n \\ell_2 & \\ell_3 & L \\\\\n m_2 & m_3 & 0 \\\\\n \\end{pmatrix}\n\n Arguments\n ---------\n l2, m2, l3, m3 : int\n The ell and m values for which to compute the symbols.\n\n Returns\n -------\n fj : array_like\n Array of size ``l2 + l3 + 2``, indexed by ``L``\n lmin : int\n The minimum value of ``L`` for which ``fj`` is non-zero.\n lmax : int\n The maximum value of ``L`` for which ``fj`` is non-zero.\n ' import camb try: from camb.mathutils import threej except ImportError: from camb.bispectrum import threej arr = threej(l2, l3, m2, m3) lmin = np.max([np.abs((l2 - l3)), np.abs((m2 + m3))]) lmax = (l2 + l3) fj = np.zeros((lmax + 2), dtype=arr.dtype) fj[lmin:(lmax + 1)] = arr return (fj, lmin, lmax)
-2,767,139,856,052,830,000
Wigner 3j symbols computed for all valid values of ``L``, as in: .. math:: \begin{pmatrix} \ell_2 & \ell_3 & L \\ m_2 & m_3 & 0 \\ \end{pmatrix} Arguments --------- l2, m2, l3, m3 : int The ell and m values for which to compute the symbols. Returns ------- fj : array_like Array of size ``l2 + l3 + 2``, indexed by ``L`` lmin : int The minimum value of ``L`` for which ``fj`` is non-zero. lmax : int The maximum value of ``L`` for which ``fj`` is non-zero.
xfaster/spec_tools.py
wigner3j
SPIDER-CMB/xfaster
python
def wigner3j(l2, m2, l3, m3): '\n Wigner 3j symbols computed for all valid values of ``L``, as in:\n\n .. math::\n\n \\begin{pmatrix}\n \\ell_2 & \\ell_3 & L \\\\\n m_2 & m_3 & 0 \\\\\n \\end{pmatrix}\n\n Arguments\n ---------\n l2, m2, l3, m3 : int\n The ell and m values for which to compute the symbols.\n\n Returns\n -------\n fj : array_like\n Array of size ``l2 + l3 + 2``, indexed by ``L``\n lmin : int\n The minimum value of ``L`` for which ``fj`` is non-zero.\n lmax : int\n The maximum value of ``L`` for which ``fj`` is non-zero.\n ' import camb try: from camb.mathutils import threej except ImportError: from camb.bispectrum import threej arr = threej(l2, l3, m2, m3) lmin = np.max([np.abs((l2 - l3)), np.abs((m2 + m3))]) lmax = (l2 + l3) fj = np.zeros((lmax + 2), dtype=arr.dtype) fj[lmin:(lmax + 1)] = arr return (fj, lmin, lmax)
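A sketch of calling wigner3j; it requires camb to be installed, since the body imports threej from camb.mathutils (falling back to camb.bispectrum). The ell and m values below are arbitrary examples.

from xfaster.spec_tools import wigner3j   # assumed import path; needs camb installed

fj, lmin, lmax = wigner3j(10, 0, 12, 0)
# fj is indexed by L and is non-zero only for lmin <= L <= lmax,
# i.e. |l2 - l3| <= L <= l2 + l3 when m2 = m3 = 0
print(lmin, lmax, fj.shape)   # expect 2, 22, (24,)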
def get_camb_cl(r, lmax, nt=None, spec='total', lfac=True): "\n Compute camb spectrum with tensors and lensing.\n\n Parameter values are from arXiv:1807.06209 Table 1 Plik best fit\n\n Arguments\n ---------\n r : float\n Tensor-to-scalar ratio\n lmax : int\n Maximum ell for which to compute spectra\n nt : scalar, optional\n Tensor spectral index. If not supplied, assumes\n slow-roll consistency relation.\n spec : string, optional\n Spectrum component to return. Can be 'total', 'unlensed_total',\n 'unlensed_scalar', 'lensed_scalar', 'tensor', 'lens_potential'.\n lfac: bool, optional\n If True, multiply Cls by ell*(ell+1)/2/pi\n\n Returns\n -------\n cls : array_like\n Array of spectra of shape (lmax + 1, nspec).\n Diagonal ordering (TT, EE, BB, TE).\n " import camb pars = camb.CAMBparams() pars.set_cosmology(H0=67.32, ombh2=0.022383, omch2=0.12011, mnu=0.06, omk=0, tau=0.0543) ln1010As = 3.0448 pars.InitPower.set_params(As=(np.exp(ln1010As) / 10000000000.0), ns=0.96605, r=r, nt=nt) if (lmax < 2500): lmax0 = 2500 else: lmax0 = lmax pars.set_for_lmax(lmax0, lens_potential_accuracy=2) pars.WantTensors = True pars.do_lensing = True results = camb.get_results(pars) powers = results.get_cmb_power_spectra(pars, CMB_unit='muK', raw_cl=(not lfac)) totCL = powers[spec][:(lmax + 1), :4].T return totCL
6,037,776,520,534,010,000
Compute camb spectrum with tensors and lensing. Parameter values are from arXiv:1807.06209 Table 1 Plik best fit Arguments --------- r : float Tensor-to-scalar ratio lmax : int Maximum ell for which to compute spectra nt : scalar, optional Tensor spectral index. If not supplied, assumes slow-roll consistency relation. spec : string, optional Spectrum component to return. Can be 'total', 'unlensed_total', 'unlensed_scalar', 'lensed_scalar', 'tensor', 'lens_potential'. lfac: bool, optional If True, multiply Cls by ell*(ell+1)/2/pi Returns ------- cls : array_like Array of spectra of shape (lmax + 1, nspec). Diagonal ordering (TT, EE, BB, TE).
xfaster/spec_tools.py
get_camb_cl
SPIDER-CMB/xfaster
python
def get_camb_cl(r, lmax, nt=None, spec='total', lfac=True): "\n Compute camb spectrum with tensors and lensing.\n\n Parameter values are from arXiv:1807.06209 Table 1 Plik best fit\n\n Arguments\n ---------\n r : float\n Tensor-to-scalar ratio\n lmax : int\n Maximum ell for which to compute spectra\n nt : scalar, optional\n Tensor spectral index. If not supplied, assumes\n slow-roll consistency relation.\n spec : string, optional\n Spectrum component to return. Can be 'total', 'unlensed_total',\n 'unlensed_scalar', 'lensed_scalar', 'tensor', 'lens_potential'.\n lfac: bool, optional\n If True, multiply Cls by ell*(ell+1)/2/pi\n\n Returns\n -------\n cls : array_like\n Array of spectra of shape (lmax + 1, nspec).\n Diagonal ordering (TT, EE, BB, TE).\n " import camb pars = camb.CAMBparams() pars.set_cosmology(H0=67.32, ombh2=0.022383, omch2=0.12011, mnu=0.06, omk=0, tau=0.0543) ln1010As = 3.0448 pars.InitPower.set_params(As=(np.exp(ln1010As) / 10000000000.0), ns=0.96605, r=r, nt=nt) if (lmax < 2500): lmax0 = 2500 else: lmax0 = lmax pars.set_for_lmax(lmax0, lens_potential_accuracy=2) pars.WantTensors = True pars.do_lensing = True results = camb.get_results(pars) powers = results.get_cmb_power_spectra(pars, CMB_unit='muK', raw_cl=(not lfac)) totCL = powers[spec][:(lmax + 1), :4].T return totCL
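A sketch of calling get_camb_cl (camb must be installed; r = 0.01 and lmax = 500 are illustrative). Note that although the docstring says the result has shape (lmax + 1, nspec), the trailing .T in the body means the returned array is actually (4, lmax + 1).

from xfaster.spec_tools import get_camb_cl   # assumed import path; needs camb installed

cls = get_camb_cl(r=0.01, lmax=500)    # total lensed spectra in muK^2, D_ell convention (lfac=True)
print(cls.shape)                       # (4, 501), i.e. (nspec, lmax + 1) because of the final .T

tt, ee, bb, te = cls                   # unpack in the documented TT, EE, BB, TE ordering
raw = get_camb_cl(r=0.01, lmax=500, lfac=False)   # plain C_ell instead of ell(ell+1)C_ell/2pi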
def raise_does_not_exist(msg): 'Decorator to turn a function that gets an HTTP 404 response into a\n    :exc:`DoesNotExist` exception.' def decorator(func): @wraps(func) def wrapped(*args, **kwargs): try: return func(*args, **kwargs) except ClientHttpError as e: if (e.code == 404): raise DoesNotExist(msg) else: raise return wrapped return decorator
-7,812,388,451,997,881,000
Decorator to turn a function that gets an HTTP 404 response into a :exc:`DoesNotExist` exception.
seafileapi/utils.py
raise_does_not_exist
AdriCueGim/python-seafile
python
def raise_does_not_exist(msg): 'Decorator to turn a function that gets an HTTP 404 response into a\n    :exc:`DoesNotExist` exception.' def decorator(func): @wraps(func) def wrapped(*args, **kwargs): try: return func(*args, **kwargs) except ClientHttpError as e: if (e.code == 404): raise DoesNotExist(msg) else: raise return wrapped return decorator
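A hypothetical usage sketch for the decorator above. ClientHttpError and DoesNotExist are assumed to live in the same seafileapi package (the path column points at seafileapi/utils.py), and get_repo, its client argument, and the URL are invented for illustration.

from seafileapi.utils import raise_does_not_exist   # assumed import path

@raise_does_not_exist('The requested library does not exist')
def get_repo(client, repo_id):
    # Hypothetical helper: if the call below raises ClientHttpError with
    # code 404, the decorator re-raises it as DoesNotExist(msg).
    return client.get('/api2/repos/%s/' % repo_id)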
def __init__(self): '\n :param InstanceIds: 实例ID列表,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceIds: list of str\n :param ProjectId: 项目ID\n :type ProjectId: int\n ' self.InstanceIds = None self.ProjectId = None
-6,788,372,924,002,334,000
:param InstanceIds: 实例ID列表,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同 :type InstanceIds: list of str :param ProjectId: 项目ID :type ProjectId: int
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param InstanceIds: 实例ID列表,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceIds: list of str\n :param ProjectId: 项目ID\n :type ProjectId: int\n ' self.InstanceIds = None self.ProjectId = None
def __init__(self): '\n :param FlowIds: 返回的异步任务ID列表\n :type FlowIds: list of int non-negative\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.FlowIds = None self.RequestId = None
9,070,933,027,787,485,000
:param FlowIds: 返回的异步任务ID列表 :type FlowIds: list of int non-negative :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param FlowIds: 返回的异步任务ID列表\n :type FlowIds: list of int non-negative\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.FlowIds = None self.RequestId = None
def __init__(self): '\n :param IP: 连接的客户端IP\n :type IP: str\n :param Count: 对应客户端IP的连接数\n :type Count: int\n ' self.IP = None self.Count = None
8,443,674,525,877,007,000
:param IP: 连接的客户端IP :type IP: str :param Count: 对应客户端IP的连接数 :type Count: int
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param IP: 连接的客户端IP\n :type IP: str\n :param Count: 对应客户端IP的连接数\n :type Count: int\n ' self.IP = None self.Count = None
def __init__(self): '\n :param Memory: 实例内存大小,单位:GB\n :type Memory: int\n :param Volume: 实例硬盘大小,单位:GB\n :type Volume: int\n :param ReplicateSetNum: 副本集个数,1为单副本集实例,大于1为分片集群实例,最大不超过10\n :type ReplicateSetNum: int\n :param SecondaryNum: 每个副本集内从节点个数,目前只支持从节点数为2\n :type SecondaryNum: int\n :param EngineVersion: MongoDB引擎版本,值包括MONGO_3_WT 、MONGO_3_ROCKS和MONGO_36_WT\n :type EngineVersion: str\n :param Machine: 实例类型,GIO:高IO版;TGIO:高IO万兆\n :type Machine: str\n :param GoodsNum: 实例数量,默认值为1, 最小值1,最大值为10\n :type GoodsNum: int\n :param Zone: 可用区信息,格式如:ap-guangzhou-2\n :type Zone: str\n :param InstanceRole: 实例角色,支持值包括:MASTER-表示主实例,DR-表示灾备实例,RO-表示只读实例\n :type InstanceRole: str\n :param InstanceType: 实例类型,REPLSET-副本集,SHARD-分片集群\n :type InstanceType: str\n :param Encrypt: 数据是否加密,当且仅当引擎版本为MONGO_3_ROCKS,可以选择加密\n :type Encrypt: int\n :param VpcId: 私有网络ID,如果不传则默认选择基础网络\n :type VpcId: str\n :param SubnetId: 私有网络下的子网ID,如果设置了 VpcId,则 SubnetId必填\n :type SubnetId: str\n :param ProjectId: 项目ID,不填为默认项目\n :type ProjectId: int\n :param SecurityGroup: 安全组参数\n :type SecurityGroup: list of str\n ' self.Memory = None self.Volume = None self.ReplicateSetNum = None self.SecondaryNum = None self.EngineVersion = None self.Machine = None self.GoodsNum = None self.Zone = None self.InstanceRole = None self.InstanceType = None self.Encrypt = None self.VpcId = None self.SubnetId = None self.ProjectId = None self.SecurityGroup = None
9,074,244,070,652,575,000
:param Memory: 实例内存大小,单位:GB :type Memory: int :param Volume: 实例硬盘大小,单位:GB :type Volume: int :param ReplicateSetNum: 副本集个数,1为单副本集实例,大于1为分片集群实例,最大不超过10 :type ReplicateSetNum: int :param SecondaryNum: 每个副本集内从节点个数,目前只支持从节点数为2 :type SecondaryNum: int :param EngineVersion: MongoDB引擎版本,值包括MONGO_3_WT 、MONGO_3_ROCKS和MONGO_36_WT :type EngineVersion: str :param Machine: 实例类型,GIO:高IO版;TGIO:高IO万兆 :type Machine: str :param GoodsNum: 实例数量,默认值为1, 最小值1,最大值为10 :type GoodsNum: int :param Zone: 可用区信息,格式如:ap-guangzhou-2 :type Zone: str :param InstanceRole: 实例角色,支持值包括:MASTER-表示主实例,DR-表示灾备实例,RO-表示只读实例 :type InstanceRole: str :param InstanceType: 实例类型,REPLSET-副本集,SHARD-分片集群 :type InstanceType: str :param Encrypt: 数据是否加密,当且仅当引擎版本为MONGO_3_ROCKS,可以选择加密 :type Encrypt: int :param VpcId: 私有网络ID,如果不传则默认选择基础网络 :type VpcId: str :param SubnetId: 私有网络下的子网ID,如果设置了 VpcId,则 SubnetId必填 :type SubnetId: str :param ProjectId: 项目ID,不填为默认项目 :type ProjectId: int :param SecurityGroup: 安全组参数 :type SecurityGroup: list of str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param Memory: 实例内存大小,单位:GB\n :type Memory: int\n :param Volume: 实例硬盘大小,单位:GB\n :type Volume: int\n :param ReplicateSetNum: 副本集个数,1为单副本集实例,大于1为分片集群实例,最大不超过10\n :type ReplicateSetNum: int\n :param SecondaryNum: 每个副本集内从节点个数,目前只支持从节点数为2\n :type SecondaryNum: int\n :param EngineVersion: MongoDB引擎版本,值包括MONGO_3_WT 、MONGO_3_ROCKS和MONGO_36_WT\n :type EngineVersion: str\n :param Machine: 实例类型,GIO:高IO版;TGIO:高IO万兆\n :type Machine: str\n :param GoodsNum: 实例数量,默认值为1, 最小值1,最大值为10\n :type GoodsNum: int\n :param Zone: 可用区信息,格式如:ap-guangzhou-2\n :type Zone: str\n :param InstanceRole: 实例角色,支持值包括:MASTER-表示主实例,DR-表示灾备实例,RO-表示只读实例\n :type InstanceRole: str\n :param InstanceType: 实例类型,REPLSET-副本集,SHARD-分片集群\n :type InstanceType: str\n :param Encrypt: 数据是否加密,当且仅当引擎版本为MONGO_3_ROCKS,可以选择加密\n :type Encrypt: int\n :param VpcId: 私有网络ID,如果不传则默认选择基础网络\n :type VpcId: str\n :param SubnetId: 私有网络下的子网ID,如果设置了 VpcId,则 SubnetId必填\n :type SubnetId: str\n :param ProjectId: 项目ID,不填为默认项目\n :type ProjectId: int\n :param SecurityGroup: 安全组参数\n :type SecurityGroup: list of str\n ' self.Memory = None self.Volume = None self.ReplicateSetNum = None self.SecondaryNum = None self.EngineVersion = None self.Machine = None self.GoodsNum = None self.Zone = None self.InstanceRole = None self.InstanceType = None self.Encrypt = None self.VpcId = None self.SubnetId = None self.ProjectId = None self.SecurityGroup = None
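The parameter list above looks like the request model for creating a MongoDB instance billed by the hour; a hypothetical sketch of populating it follows. The class name CreateDBInstanceHourRequest is assumed (the row only shows __init__), and every field value is an example drawn from the value ranges in the docstring.

from tencentcloud.mongodb.v20180408 import models   # module path taken from the path column

req = models.CreateDBInstanceHourRequest()   # assumed class name
req.Memory = 4                  # GB
req.Volume = 100                # GB
req.ReplicateSetNum = 1         # 1 = single replica set (values > 1 mean a sharded cluster)
req.SecondaryNum = 2            # only 2 secondaries per replica set are currently supported
req.EngineVersion = 'MONGO_36_WT'
req.Machine = 'GIO'
req.GoodsNum = 1
req.Zone = 'ap-guangzhou-2'
req.InstanceRole = 'MASTER'
req.InstanceType = 'REPLSET'
# The populated request would then be passed to the matching client method of the SDK.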
def __init__(self): '\n :param DealId: 订单ID\n :type DealId: str\n :param InstanceIds: 创建的实例ID列表\n :type InstanceIds: list of str\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.DealId = None self.InstanceIds = None self.RequestId = None
1,054,841,195,029,218,000
:param DealId: 订单ID :type DealId: str :param InstanceIds: 创建的实例ID列表 :type InstanceIds: list of str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param DealId: 订单ID\n :type DealId: str\n :param InstanceIds: 创建的实例ID列表\n :type InstanceIds: list of str\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.DealId = None self.InstanceIds = None self.RequestId = None
def __init__(self): '\n :param SecondaryNum: 每个副本集内从节点个数\n :type SecondaryNum: int\n :param Memory: 实例内存大小,单位:GB\n :type Memory: int\n :param Volume: 实例硬盘大小,单位:GB\n :type Volume: int\n :param MongoVersion: 版本号,当前支持 MONGO_3_WT、MONGO_3_ROCKS、MONGO_36_WT\n :type MongoVersion: str\n :param MachineCode: 机器类型,GIO:高IO版;TGIO:高IO万兆\n :type MachineCode: str\n :param GoodsNum: 实例数量,默认值为1, 最小值1,最大值为10\n :type GoodsNum: int\n :param Zone: 实例所属区域名称,格式如:ap-guangzhou-2\n :type Zone: str\n :param TimeSpan: 时长,购买月数\n :type TimeSpan: int\n :param Password: 实例密码\n :type Password: str\n :param ProjectId: 项目ID,不填为默认项目\n :type ProjectId: int\n :param SecurityGroup: 安全组参数\n :type SecurityGroup: list of str\n :param UniqVpcId: 私有网络ID,如果不传则默认选择基础网络\n :type UniqVpcId: str\n :param UniqSubnetId: 私有网络下的子网ID,如果设置了 VpcId,则 SubnetId必填\n :type UniqSubnetId: str\n ' self.SecondaryNum = None self.Memory = None self.Volume = None self.MongoVersion = None self.MachineCode = None self.GoodsNum = None self.Zone = None self.TimeSpan = None self.Password = None self.ProjectId = None self.SecurityGroup = None self.UniqVpcId = None self.UniqSubnetId = None
7,096,470,038,123,483,000
:param SecondaryNum: 每个副本集内从节点个数 :type SecondaryNum: int :param Memory: 实例内存大小,单位:GB :type Memory: int :param Volume: 实例硬盘大小,单位:GB :type Volume: int :param MongoVersion: 版本号,当前支持 MONGO_3_WT、MONGO_3_ROCKS、MONGO_36_WT :type MongoVersion: str :param MachineCode: 机器类型,GIO:高IO版;TGIO:高IO万兆 :type MachineCode: str :param GoodsNum: 实例数量,默认值为1, 最小值1,最大值为10 :type GoodsNum: int :param Zone: 实例所属区域名称,格式如:ap-guangzhou-2 :type Zone: str :param TimeSpan: 时长,购买月数 :type TimeSpan: int :param Password: 实例密码 :type Password: str :param ProjectId: 项目ID,不填为默认项目 :type ProjectId: int :param SecurityGroup: 安全组参数 :type SecurityGroup: list of str :param UniqVpcId: 私有网络ID,如果不传则默认选择基础网络 :type UniqVpcId: str :param UniqSubnetId: 私有网络下的子网ID,如果设置了 VpcId,则 SubnetId必填 :type UniqSubnetId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param SecondaryNum: 每个副本集内从节点个数\n :type SecondaryNum: int\n :param Memory: 实例内存大小,单位:GB\n :type Memory: int\n :param Volume: 实例硬盘大小,单位:GB\n :type Volume: int\n :param MongoVersion: 版本号,当前支持 MONGO_3_WT、MONGO_3_ROCKS、MONGO_36_WT\n :type MongoVersion: str\n :param MachineCode: 机器类型,GIO:高IO版;TGIO:高IO万兆\n :type MachineCode: str\n :param GoodsNum: 实例数量,默认值为1, 最小值1,最大值为10\n :type GoodsNum: int\n :param Zone: 实例所属区域名称,格式如:ap-guangzhou-2\n :type Zone: str\n :param TimeSpan: 时长,购买月数\n :type TimeSpan: int\n :param Password: 实例密码\n :type Password: str\n :param ProjectId: 项目ID,不填为默认项目\n :type ProjectId: int\n :param SecurityGroup: 安全组参数\n :type SecurityGroup: list of str\n :param UniqVpcId: 私有网络ID,如果不传则默认选择基础网络\n :type UniqVpcId: str\n :param UniqSubnetId: 私有网络下的子网ID,如果设置了 VpcId,则 SubnetId必填\n :type UniqSubnetId: str\n ' self.SecondaryNum = None self.Memory = None self.Volume = None self.MongoVersion = None self.MachineCode = None self.GoodsNum = None self.Zone = None self.TimeSpan = None self.Password = None self.ProjectId = None self.SecurityGroup = None self.UniqVpcId = None self.UniqSubnetId = None
def __init__(self): '\n :param DealId: 订单ID\n :type DealId: str\n :param InstanceIds: 创建的实例ID列表\n :type InstanceIds: list of str\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.DealId = None self.InstanceIds = None self.RequestId = None
1,054,841,195,029,218,000
:param DealId: 订单ID :type DealId: str :param InstanceIds: 创建的实例ID列表 :type InstanceIds: list of str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param DealId: 订单ID\n :type DealId: str\n :param InstanceIds: 创建的实例ID列表\n :type InstanceIds: list of str\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.DealId = None self.InstanceIds = None self.RequestId = None
def __init__(self): '\n :param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceId: str\n ' self.InstanceId = None
7,466,592,164,755,291,000
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同 :type InstanceId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceId: str\n ' self.InstanceId = None
def __init__(self): '\n :param Clients: 客户端连接信息,包括客户端IP和对应IP的连接数量\n注意:此字段可能返回 null,表示取不到有效值。\n :type Clients: list of ClientConnection\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.Clients = None self.RequestId = None
4,556,269,938,670,427,600
:param Clients: 客户端连接信息,包括客户端IP和对应IP的连接数量 注意:此字段可能返回 null,表示取不到有效值。 :type Clients: list of ClientConnection :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param Clients: 客户端连接信息,包括客户端IP和对应IP的连接数量\n注意:此字段可能返回 null,表示取不到有效值。\n :type Clients: list of ClientConnection\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.Clients = None self.RequestId = None
def __init__(self): '\n :param InstanceIds: 实例ID列表,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceIds: list of str\n :param InstanceType: 实例类型,取值范围:0-所有实例,1-正式实例,2-临时实例, 3-只读实例,-1-正式实例+只读+灾备实例\n :type InstanceType: int\n :param ClusterType: 集群类型,取值范围:0-副本集实例,1-分片实例,-1-所有实例\n :type ClusterType: int\n :param Status: 实例状态,取值范围:0-待初始化,1-流程执行中,2-实例有效,-2-实例已过期\n :type Status: list of int\n :param VpcId: 私有网络的ID,基础网络则不传该参数\n :type VpcId: str\n :param SubnetId: 私有网络的子网ID,基础网络则不传该参数。入参设置该参数的同时,必须设置相应的VpcId\n :type SubnetId: str\n :param PayMode: 付费类型,取值范围:0-按量计费,1-包年包月,-1-按量计费+包年包月\n :type PayMode: int\n :param Limit: 单次请求返回的数量,最小值为1,最大值为100,默认值为20\n :type Limit: int\n :param Offset: 偏移量,默认值为0\n :type Offset: int\n :param OrderBy: 返回结果集排序的字段,目前支持:"ProjectId", "InstanceName", "CreateTime",默认为升序排序\n :type OrderBy: str\n :param OrderByType: 返回结果集排序方式,目前支持:"ASC"或者"DESC"\n :type OrderByType: str\n ' self.InstanceIds = None self.InstanceType = None self.ClusterType = None self.Status = None self.VpcId = None self.SubnetId = None self.PayMode = None self.Limit = None self.Offset = None self.OrderBy = None self.OrderByType = None
-7,437,992,389,170,799,000
:param InstanceIds: 实例ID列表,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同 :type InstanceIds: list of str :param InstanceType: 实例类型,取值范围:0-所有实例,1-正式实例,2-临时实例, 3-只读实例,-1-正式实例+只读+灾备实例 :type InstanceType: int :param ClusterType: 集群类型,取值范围:0-副本集实例,1-分片实例,-1-所有实例 :type ClusterType: int :param Status: 实例状态,取值范围:0-待初始化,1-流程执行中,2-实例有效,-2-实例已过期 :type Status: list of int :param VpcId: 私有网络的ID,基础网络则不传该参数 :type VpcId: str :param SubnetId: 私有网络的子网ID,基础网络则不传该参数。入参设置该参数的同时,必须设置相应的VpcId :type SubnetId: str :param PayMode: 付费类型,取值范围:0-按量计费,1-包年包月,-1-按量计费+包年包月 :type PayMode: int :param Limit: 单次请求返回的数量,最小值为1,最大值为100,默认值为20 :type Limit: int :param Offset: 偏移量,默认值为0 :type Offset: int :param OrderBy: 返回结果集排序的字段,目前支持:"ProjectId", "InstanceName", "CreateTime",默认为升序排序 :type OrderBy: str :param OrderByType: 返回结果集排序方式,目前支持:"ASC"或者"DESC" :type OrderByType: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param InstanceIds: 实例ID列表,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceIds: list of str\n :param InstanceType: 实例类型,取值范围:0-所有实例,1-正式实例,2-临时实例, 3-只读实例,-1-正式实例+只读+灾备实例\n :type InstanceType: int\n :param ClusterType: 集群类型,取值范围:0-副本集实例,1-分片实例,-1-所有实例\n :type ClusterType: int\n :param Status: 实例状态,取值范围:0-待初始化,1-流程执行中,2-实例有效,-2-实例已过期\n :type Status: list of int\n :param VpcId: 私有网络的ID,基础网络则不传该参数\n :type VpcId: str\n :param SubnetId: 私有网络的子网ID,基础网络则不传该参数。入参设置该参数的同时,必须设置相应的VpcId\n :type SubnetId: str\n :param PayMode: 付费类型,取值范围:0-按量计费,1-包年包月,-1-按量计费+包年包月\n :type PayMode: int\n :param Limit: 单次请求返回的数量,最小值为1,最大值为100,默认值为20\n :type Limit: int\n :param Offset: 偏移量,默认值为0\n :type Offset: int\n :param OrderBy: 返回结果集排序的字段,目前支持:"ProjectId", "InstanceName", "CreateTime",默认为升序排序\n :type OrderBy: str\n :param OrderByType: 返回结果集排序方式,目前支持:"ASC"或者"DESC"\n :type OrderByType: str\n ' self.InstanceIds = None self.InstanceType = None self.ClusterType = None self.Status = None self.VpcId = None self.SubnetId = None self.PayMode = None self.Limit = None self.Offset = None self.OrderBy = None self.OrderByType = None
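A hypothetical sketch of filling in the instance-listing request above; the class name DescribeDBInstancesRequest is assumed, and the filter values are examples taken from the value ranges documented in the docstring.

from tencentcloud.mongodb.v20180408 import models   # module path taken from the path column

req = models.DescribeDBInstancesRequest()   # assumed class name
req.InstanceType = 0          # 0 = all instances
req.ClusterType = -1          # -1 = both replica-set and sharded instances
req.Status = [2]              # 2 = instance is up and running
req.Limit = 20                # page size, 1-100 (default 20)
req.Offset = 0                # paging offset
req.OrderBy = 'CreateTime'
req.OrderByType = 'DESC'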
def __init__(self): '\n :param TotalCount: 符合查询条件的实例总数\n :type TotalCount: int\n :param InstanceDetails: 实例详细信息\n :type InstanceDetails: list of MongoDBInstanceDetail\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.TotalCount = None self.InstanceDetails = None self.RequestId = None
-5,344,254,023,169,226,000
:param TotalCount: 符合查询条件的实例总数 :type TotalCount: int :param InstanceDetails: 实例详细信息 :type InstanceDetails: list of MongoDBInstanceDetail :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param TotalCount: 符合查询条件的实例总数\n :type TotalCount: int\n :param InstanceDetails: 实例详细信息\n :type InstanceDetails: list of MongoDBInstanceDetail\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.TotalCount = None self.InstanceDetails = None self.RequestId = None
def __init__(self): '\n :param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceId: str\n :param StartTime: 慢日志起始时间,格式:yyyy-mm-dd hh:mm:ss,如:2019-06-01 10:00:00。查询起止时间间隔不能超过24小时,只允许查询最近7天内慢日志。\n :type StartTime: str\n :param EndTime: 慢日志终止时间,格式:yyyy-mm-dd hh:mm:ss,如:2019-06-02 12:00:00。查询起止时间间隔不能超过24小时,只允许查询最近7天内慢日志。\n :type EndTime: str\n :param SlowMS: 慢日志执行时间阈值,返回执行时间超过该阈值的慢日志,单位为毫秒(ms),最小为100毫秒。\n :type SlowMS: int\n :param Offset: 偏移量,最小值为0,最大值为10000,默认值为0。\n :type Offset: int\n :param Limit: 分页大小,最小值为1,最大值为100,默认值为20。\n :type Limit: int\n ' self.InstanceId = None self.StartTime = None self.EndTime = None self.SlowMS = None self.Offset = None self.Limit = None
3,319,906,674,030,196,700
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同 :type InstanceId: str :param StartTime: 慢日志起始时间,格式:yyyy-mm-dd hh:mm:ss,如:2019-06-01 10:00:00。查询起止时间间隔不能超过24小时,只允许查询最近7天内慢日志。 :type StartTime: str :param EndTime: 慢日志终止时间,格式:yyyy-mm-dd hh:mm:ss,如:2019-06-02 12:00:00。查询起止时间间隔不能超过24小时,只允许查询最近7天内慢日志。 :type EndTime: str :param SlowMS: 慢日志执行时间阈值,返回执行时间超过该阈值的慢日志,单位为毫秒(ms),最小为100毫秒。 :type SlowMS: int :param Offset: 偏移量,最小值为0,最大值为10000,默认值为0。 :type Offset: int :param Limit: 分页大小,最小值为1,最大值为100,默认值为20。 :type Limit: int
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceId: str\n :param StartTime: 慢日志起始时间,格式:yyyy-mm-dd hh:mm:ss,如:2019-06-01 10:00:00。查询起止时间间隔不能超过24小时,只允许查询最近7天内慢日志。\n :type StartTime: str\n :param EndTime: 慢日志终止时间,格式:yyyy-mm-dd hh:mm:ss,如:2019-06-02 12:00:00。查询起止时间间隔不能超过24小时,只允许查询最近7天内慢日志。\n :type EndTime: str\n :param SlowMS: 慢日志执行时间阈值,返回执行时间超过该阈值的慢日志,单位为毫秒(ms),最小为100毫秒。\n :type SlowMS: int\n :param Offset: 偏移量,最小值为0,最大值为10000,默认值为0。\n :type Offset: int\n :param Limit: 分页大小,最小值为1,最大值为100,默认值为20。\n :type Limit: int\n ' self.InstanceId = None self.StartTime = None self.EndTime = None self.SlowMS = None self.Offset = None self.Limit = None
def __init__(self): '\n :param TotalCount: 符合查询条件的慢查询日志总数。\n :type TotalCount: int\n :param SlowLogList: 符合查询条件的慢查询日志详情。\n :type SlowLogList: list of str\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.TotalCount = None self.SlowLogList = None self.RequestId = None
-7,829,219,154,739,202,000
:param TotalCount: 符合查询条件的慢查询日志总数。 :type TotalCount: int :param SlowLogList: 符合查询条件的慢查询日志详情。 :type SlowLogList: list of str :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param TotalCount: 符合查询条件的慢查询日志总数。\n :type TotalCount: int\n :param SlowLogList: 符合查询条件的慢查询日志详情。\n :type SlowLogList: list of str\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.TotalCount = None self.SlowLogList = None self.RequestId = None
def __init__(self): '\n :param Zone: 可用区\n :type Zone: str\n ' self.Zone = None
-5,430,548,088,093,569,000
:param Zone: 可用区 :type Zone: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param Zone: 可用区\n :type Zone: str\n ' self.Zone = None
def __init__(self): '\n :param SpecInfoList: 实例售卖规格信息列表\n :type SpecInfoList: list of SpecificationInfo\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.SpecInfoList = None self.RequestId = None
-308,205,219,353,436,860
:param SpecInfoList: 实例售卖规格信息列表 :type SpecInfoList: list of SpecificationInfo :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param SpecInfoList: 实例售卖规格信息列表\n :type SpecInfoList: list of SpecificationInfo\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.SpecInfoList = None self.RequestId = None
def __init__(self): '\n :param InstanceId: 实例ID\n :type InstanceId: str\n :param Region: 地域信息\n :type Region: str\n ' self.InstanceId = None self.Region = None
-8,440,805,245,017,172,000
:param InstanceId: 实例ID :type InstanceId: str :param Region: 地域信息 :type Region: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param InstanceId: 实例ID\n :type InstanceId: str\n :param Region: 地域信息\n :type Region: str\n ' self.InstanceId = None self.Region = None
def __init__(self): '\n :param InstanceId: 实例ID\n :type InstanceId: str\n :param InstanceName: 实例名称\n :type InstanceName: str\n :param PayMode: 付费类型,可能的返回值:1-包年包月;0-按量计费\n :type PayMode: int\n :param ProjectId: 项目ID\n :type ProjectId: int\n :param ClusterType: 集群类型,可能的返回值:0-副本集实例,1-分片实例,\n :type ClusterType: int\n :param Region: 地域信息\n :type Region: str\n :param Zone: 可用区信息\n :type Zone: str\n :param NetType: 网络类型,可能的返回值:0-基础网络,1-私有网络\n :type NetType: int\n :param VpcId: 私有网络的ID\n :type VpcId: str\n :param SubnetId: 私有网络的子网ID\n :type SubnetId: str\n :param Status: 实例状态,可能的返回值:0-待初始化,1-流程处理中,2-运行中,-2-实例已过期\n :type Status: int\n :param Vip: 实例IP\n :type Vip: str\n :param Vport: 端口号\n :type Vport: int\n :param CreateTime: 实例创建时间\n :type CreateTime: str\n :param DeadLine: 实例到期时间\n :type DeadLine: str\n :param MongoVersion: 实例版本信息\n :type MongoVersion: str\n :param Memory: 实例内存规格,单位为MB\n :type Memory: int\n :param Volume: 实例磁盘规格,单位为MB\n :type Volume: int\n :param CpuNum: 实例CPU核心数\n :type CpuNum: int\n :param MachineType: 实例机器类型\n :type MachineType: str\n :param SecondaryNum: 实例从节点数\n :type SecondaryNum: int\n :param ReplicationSetNum: 实例分片数\n :type ReplicationSetNum: int\n :param AutoRenewFlag: 实例自动续费标志,可能的返回值:0-手动续费,1-自动续费,2-确认不续费\n :type AutoRenewFlag: int\n :param UsedVolume: 已用容量,单位MB\n :type UsedVolume: int\n :param MaintenanceStart: 维护窗口起始时间\n :type MaintenanceStart: str\n :param MaintenanceEnd: 维护窗口结束时间\n :type MaintenanceEnd: str\n :param ReplicaSets: 分片信息\n :type ReplicaSets: list of MongodbShardInfo\n :param ReadonlyInstances: 只读实例信息\n注意:此字段可能返回 null,表示取不到有效值。\n :type ReadonlyInstances: list of MongoDBInstance\n :param StandbyInstances: 灾备实例信息\n注意:此字段可能返回 null,表示取不到有效值。\n :type StandbyInstances: list of MongoDBInstance\n :param CloneInstances: 临时实例信息\n注意:此字段可能返回 null,表示取不到有效值。\n :type CloneInstances: list of MongoDBInstance\n :param RelatedInstance: 关联实例信息,对于正式实例,该字段表示它的临时实例信息;对于临时实例,则表示它的正式实例信息;如果为只读/灾备实例,则表示他的主实例信息\n注意:此字段可能返回 null,表示取不到有效值。\n :type RelatedInstance: :class:`tencentcloud.mongodb.v20180408.models.MongoDBInstance`\n :param Tags: 实例标签信息集合\n注意:此字段可能返回 null,表示取不到有效值。\n :type Tags: list of TagInfo\n :param InstanceVer: 实例标记\n :type InstanceVer: int\n :param ClusterVer: 实例标记\n :type ClusterVer: int\n :param Protocol: 协议信息,可能的返回值:1-mongodb,2-dynamodb\n :type Protocol: int\n :param InstanceType: 实例类型,可能的返回值,1-正式实例,2-临时实例,3-只读实例,4-灾备实例\n :type InstanceType: int\n :param InstanceStatusDesc: 实例状态描述\n :type InstanceStatusDesc: str\n :param RealInstanceId: 实例对应的物理实例ID,回档并替换过的实例有不同的InstanceId和RealInstanceId,从barad获取监控数据等场景下需要用物理id获取\n :type RealInstanceId: str\n ' self.InstanceId = None self.InstanceName = None self.PayMode = None self.ProjectId = None self.ClusterType = None self.Region = None self.Zone = None self.NetType = None self.VpcId = None self.SubnetId = None self.Status = None self.Vip = None self.Vport = None self.CreateTime = None self.DeadLine = None self.MongoVersion = None self.Memory = None self.Volume = None self.CpuNum = None self.MachineType = None self.SecondaryNum = None self.ReplicationSetNum = None self.AutoRenewFlag = None self.UsedVolume = None self.MaintenanceStart = None self.MaintenanceEnd = None self.ReplicaSets = None self.ReadonlyInstances = None self.StandbyInstances = None self.CloneInstances = None self.RelatedInstance = None self.Tags = None self.InstanceVer = None self.ClusterVer = None self.Protocol = None self.InstanceType = None self.InstanceStatusDesc = None self.RealInstanceId = None
4,477,843,219,725,253,000
:param InstanceId: 实例ID :type InstanceId: str :param InstanceName: 实例名称 :type InstanceName: str :param PayMode: 付费类型,可能的返回值:1-包年包月;0-按量计费 :type PayMode: int :param ProjectId: 项目ID :type ProjectId: int :param ClusterType: 集群类型,可能的返回值:0-副本集实例,1-分片实例, :type ClusterType: int :param Region: 地域信息 :type Region: str :param Zone: 可用区信息 :type Zone: str :param NetType: 网络类型,可能的返回值:0-基础网络,1-私有网络 :type NetType: int :param VpcId: 私有网络的ID :type VpcId: str :param SubnetId: 私有网络的子网ID :type SubnetId: str :param Status: 实例状态,可能的返回值:0-待初始化,1-流程处理中,2-运行中,-2-实例已过期 :type Status: int :param Vip: 实例IP :type Vip: str :param Vport: 端口号 :type Vport: int :param CreateTime: 实例创建时间 :type CreateTime: str :param DeadLine: 实例到期时间 :type DeadLine: str :param MongoVersion: 实例版本信息 :type MongoVersion: str :param Memory: 实例内存规格,单位为MB :type Memory: int :param Volume: 实例磁盘规格,单位为MB :type Volume: int :param CpuNum: 实例CPU核心数 :type CpuNum: int :param MachineType: 实例机器类型 :type MachineType: str :param SecondaryNum: 实例从节点数 :type SecondaryNum: int :param ReplicationSetNum: 实例分片数 :type ReplicationSetNum: int :param AutoRenewFlag: 实例自动续费标志,可能的返回值:0-手动续费,1-自动续费,2-确认不续费 :type AutoRenewFlag: int :param UsedVolume: 已用容量,单位MB :type UsedVolume: int :param MaintenanceStart: 维护窗口起始时间 :type MaintenanceStart: str :param MaintenanceEnd: 维护窗口结束时间 :type MaintenanceEnd: str :param ReplicaSets: 分片信息 :type ReplicaSets: list of MongodbShardInfo :param ReadonlyInstances: 只读实例信息 注意:此字段可能返回 null,表示取不到有效值。 :type ReadonlyInstances: list of MongoDBInstance :param StandbyInstances: 灾备实例信息 注意:此字段可能返回 null,表示取不到有效值。 :type StandbyInstances: list of MongoDBInstance :param CloneInstances: 临时实例信息 注意:此字段可能返回 null,表示取不到有效值。 :type CloneInstances: list of MongoDBInstance :param RelatedInstance: 关联实例信息,对于正式实例,该字段表示它的临时实例信息;对于临时实例,则表示它的正式实例信息;如果为只读/灾备实例,则表示他的主实例信息 注意:此字段可能返回 null,表示取不到有效值。 :type RelatedInstance: :class:`tencentcloud.mongodb.v20180408.models.MongoDBInstance` :param Tags: 实例标签信息集合 注意:此字段可能返回 null,表示取不到有效值。 :type Tags: list of TagInfo :param InstanceVer: 实例标记 :type InstanceVer: int :param ClusterVer: 实例标记 :type ClusterVer: int :param Protocol: 协议信息,可能的返回值:1-mongodb,2-dynamodb :type Protocol: int :param InstanceType: 实例类型,可能的返回值,1-正式实例,2-临时实例,3-只读实例,4-灾备实例 :type InstanceType: int :param InstanceStatusDesc: 实例状态描述 :type InstanceStatusDesc: str :param RealInstanceId: 实例对应的物理实例ID,回档并替换过的实例有不同的InstanceId和RealInstanceId,从barad获取监控数据等场景下需要用物理id获取 :type RealInstanceId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param InstanceId: 实例ID\n :type InstanceId: str\n :param InstanceName: 实例名称\n :type InstanceName: str\n :param PayMode: 付费类型,可能的返回值:1-包年包月;0-按量计费\n :type PayMode: int\n :param ProjectId: 项目ID\n :type ProjectId: int\n :param ClusterType: 集群类型,可能的返回值:0-副本集实例,1-分片实例,\n :type ClusterType: int\n :param Region: 地域信息\n :type Region: str\n :param Zone: 可用区信息\n :type Zone: str\n :param NetType: 网络类型,可能的返回值:0-基础网络,1-私有网络\n :type NetType: int\n :param VpcId: 私有网络的ID\n :type VpcId: str\n :param SubnetId: 私有网络的子网ID\n :type SubnetId: str\n :param Status: 实例状态,可能的返回值:0-待初始化,1-流程处理中,2-运行中,-2-实例已过期\n :type Status: int\n :param Vip: 实例IP\n :type Vip: str\n :param Vport: 端口号\n :type Vport: int\n :param CreateTime: 实例创建时间\n :type CreateTime: str\n :param DeadLine: 实例到期时间\n :type DeadLine: str\n :param MongoVersion: 实例版本信息\n :type MongoVersion: str\n :param Memory: 实例内存规格,单位为MB\n :type Memory: int\n :param Volume: 实例磁盘规格,单位为MB\n :type Volume: int\n :param CpuNum: 实例CPU核心数\n :type CpuNum: int\n :param MachineType: 实例机器类型\n :type MachineType: str\n :param SecondaryNum: 实例从节点数\n :type SecondaryNum: int\n :param ReplicationSetNum: 实例分片数\n :type ReplicationSetNum: int\n :param AutoRenewFlag: 实例自动续费标志,可能的返回值:0-手动续费,1-自动续费,2-确认不续费\n :type AutoRenewFlag: int\n :param UsedVolume: 已用容量,单位MB\n :type UsedVolume: int\n :param MaintenanceStart: 维护窗口起始时间\n :type MaintenanceStart: str\n :param MaintenanceEnd: 维护窗口结束时间\n :type MaintenanceEnd: str\n :param ReplicaSets: 分片信息\n :type ReplicaSets: list of MongodbShardInfo\n :param ReadonlyInstances: 只读实例信息\n注意:此字段可能返回 null,表示取不到有效值。\n :type ReadonlyInstances: list of MongoDBInstance\n :param StandbyInstances: 灾备实例信息\n注意:此字段可能返回 null,表示取不到有效值。\n :type StandbyInstances: list of MongoDBInstance\n :param CloneInstances: 临时实例信息\n注意:此字段可能返回 null,表示取不到有效值。\n :type CloneInstances: list of MongoDBInstance\n :param RelatedInstance: 关联实例信息,对于正式实例,该字段表示它的临时实例信息;对于临时实例,则表示它的正式实例信息;如果为只读/灾备实例,则表示他的主实例信息\n注意:此字段可能返回 null,表示取不到有效值。\n :type RelatedInstance: :class:`tencentcloud.mongodb.v20180408.models.MongoDBInstance`\n :param Tags: 实例标签信息集合\n注意:此字段可能返回 null,表示取不到有效值。\n :type Tags: list of TagInfo\n :param InstanceVer: 实例标记\n :type InstanceVer: int\n :param ClusterVer: 实例标记\n :type ClusterVer: int\n :param Protocol: 协议信息,可能的返回值:1-mongodb,2-dynamodb\n :type Protocol: int\n :param InstanceType: 实例类型,可能的返回值,1-正式实例,2-临时实例,3-只读实例,4-灾备实例\n :type InstanceType: int\n :param InstanceStatusDesc: 实例状态描述\n :type InstanceStatusDesc: str\n :param RealInstanceId: 实例对应的物理实例ID,回档并替换过的实例有不同的InstanceId和RealInstanceId,从barad获取监控数据等场景下需要用物理id获取\n :type RealInstanceId: str\n ' self.InstanceId = None self.InstanceName = None self.PayMode = None self.ProjectId = None self.ClusterType = None self.Region = None self.Zone = None self.NetType = None self.VpcId = None self.SubnetId = None self.Status = None self.Vip = None self.Vport = None self.CreateTime = None self.DeadLine = None self.MongoVersion = None self.Memory = None self.Volume = None self.CpuNum = None self.MachineType = None self.SecondaryNum = None self.ReplicationSetNum = None self.AutoRenewFlag = None self.UsedVolume = None self.MaintenanceStart = None self.MaintenanceEnd = None self.ReplicaSets = None self.ReadonlyInstances = None self.StandbyInstances = None self.CloneInstances = None self.RelatedInstance = None self.Tags = None self.InstanceVer = None self.ClusterVer = None self.Protocol = None self.InstanceType = None self.InstanceStatusDesc = None self.RealInstanceId = None
def __init__(self): '\n :param UsedVolume: 分片已使用容量\n :type UsedVolume: float\n :param ReplicaSetId: 分片ID\n :type ReplicaSetId: str\n :param ReplicaSetName: 分片名\n :type ReplicaSetName: str\n :param Memory: 分片内存规格,单位为MB\n :type Memory: int\n :param Volume: 分片磁盘规格,单位为MB\n :type Volume: int\n :param OplogSize: 分片Oplog大小,单位为MB\n :type OplogSize: int\n :param SecondaryNum: 分片从节点数\n :type SecondaryNum: int\n :param RealReplicaSetId: 分片物理ID\n :type RealReplicaSetId: str\n ' self.UsedVolume = None self.ReplicaSetId = None self.ReplicaSetName = None self.Memory = None self.Volume = None self.OplogSize = None self.SecondaryNum = None self.RealReplicaSetId = None
-5,998,285,072,832,786,000
:param UsedVolume: 分片已使用容量 :type UsedVolume: float :param ReplicaSetId: 分片ID :type ReplicaSetId: str :param ReplicaSetName: 分片名 :type ReplicaSetName: str :param Memory: 分片内存规格,单位为MB :type Memory: int :param Volume: 分片磁盘规格,单位为MB :type Volume: int :param OplogSize: 分片Oplog大小,单位为MB :type OplogSize: int :param SecondaryNum: 分片从节点数 :type SecondaryNum: int :param RealReplicaSetId: 分片物理ID :type RealReplicaSetId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param UsedVolume: 分片已使用容量\n :type UsedVolume: float\n :param ReplicaSetId: 分片ID\n :type ReplicaSetId: str\n :param ReplicaSetName: 分片名\n :type ReplicaSetName: str\n :param Memory: 分片内存规格,单位为MB\n :type Memory: int\n :param Volume: 分片磁盘规格,单位为MB\n :type Volume: int\n :param OplogSize: 分片Oplog大小,单位为MB\n :type OplogSize: int\n :param SecondaryNum: 分片从节点数\n :type SecondaryNum: int\n :param RealReplicaSetId: 分片物理ID\n :type RealReplicaSetId: str\n ' self.UsedVolume = None self.ReplicaSetId = None self.ReplicaSetName = None self.Memory = None self.Volume = None self.OplogSize = None self.SecondaryNum = None self.RealReplicaSetId = None
def __init__(self): '\n :param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceId: str\n :param NewName: 实例名称\n :type NewName: str\n ' self.InstanceId = None self.NewName = None
-1,895,953,465,478,440,000
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同 :type InstanceId: str :param NewName: 实例名称 :type NewName: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceId: str\n :param NewName: 实例名称\n :type NewName: str\n ' self.InstanceId = None self.NewName = None
def __init__(self): '\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.RequestId = None
-5,957,967,262,820,529,000
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.RequestId = None
def __init__(self): '\n :param InstanceIds: 实例ID列表,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceIds: list of str\n :param AutoRenewFlag: 续费选项,取值范围:0-手动续费,1-自动续费,2-确认不续费\n :type AutoRenewFlag: int\n ' self.InstanceIds = None self.AutoRenewFlag = None
-1,801,866,595,669,366,500
:param InstanceIds: 实例ID列表,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同 :type InstanceIds: list of str :param AutoRenewFlag: 续费选项,取值范围:0-手动续费,1-自动续费,2-确认不续费 :type AutoRenewFlag: int
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param InstanceIds: 实例ID列表,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceIds: list of str\n :param AutoRenewFlag: 续费选项,取值范围:0-手动续费,1-自动续费,2-确认不续费\n :type AutoRenewFlag: int\n ' self.InstanceIds = None self.AutoRenewFlag = None
def __init__(self): '\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.RequestId = None
-5,957,967,262,820,529,000
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.RequestId = None
def __init__(self): '\n :param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceId: str\n :param UserName: 实例账户名称\n :type UserName: str\n :param Password: 实例新密码,至少包含字母、数字和字符(!@#%^*())中的两种,长度为8-16个字符\n :type Password: str\n ' self.InstanceId = None self.UserName = None self.Password = None
2,673,499,813,344,423,400
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同 :type InstanceId: str :param UserName: 实例账户名称 :type UserName: str :param Password: 实例新密码,至少包含字母、数字和字符(!@#%^*())中的两种,长度为8-16个字符 :type Password: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceId: str\n :param UserName: 实例账户名称\n :type UserName: str\n :param Password: 实例新密码,至少包含字母、数字和字符(!@#%^*())中的两种,长度为8-16个字符\n :type Password: str\n ' self.InstanceId = None self.UserName = None self.Password = None
def __init__(self): '\n :param FlowId: 返回的异步任务ID\n :type FlowId: int\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.FlowId = None self.RequestId = None
168,431,123,442,788,260
:param FlowId: 返回的异步任务ID :type FlowId: int :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。 :type RequestId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param FlowId: 返回的异步任务ID\n :type FlowId: int\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.FlowId = None self.RequestId = None
def __init__(self): '\n :param SpecCode: 规格信息标识\n :type SpecCode: str\n :param Status: 规格有效标志,取值:0-停止售卖,1-开放售卖\n :type Status: int\n :param MachineType: 机器类型,取值:0-HIO,4-HIO10G\n :type MachineType: str\n :param Cpu: cpu核心数\n :type Cpu: int\n :param Memory: 内存规格,单位为MB\n :type Memory: int\n :param DefaultStorage: 默认磁盘规格,单位MB\n :type DefaultStorage: int\n :param MaxStorage: 最大磁盘规格,单位MB\n :type MaxStorage: int\n :param MinStorage: 最小磁盘规格,单位MB\n :type MinStorage: int\n :param Qps: 可承载qps信息\n :type Qps: int\n :param Conns: 连接数限制\n :type Conns: int\n :param MongoVersionCode: 实例mongodb版本信息\n :type MongoVersionCode: str\n :param MongoVersionValue: 实例mongodb版本号\n :type MongoVersionValue: int\n :param Version: 实例mongodb版本号(短)\n :type Version: str\n :param EngineName: 存储引擎\n :type EngineName: str\n :param ClusterType: 集群类型,取值:1-分片集群,0-副本集集群\n :type ClusterType: int\n :param MinNodeNum: 最小副本集从节点数\n :type MinNodeNum: int\n :param MaxNodeNum: 最大副本集从节点数\n :type MaxNodeNum: int\n :param MinReplicateSetNum: 最小分片数\n :type MinReplicateSetNum: int\n :param MaxReplicateSetNum: 最大分片数\n :type MaxReplicateSetNum: int\n :param MinReplicateSetNodeNum: 最小分片从节点数\n :type MinReplicateSetNodeNum: int\n :param MaxReplicateSetNodeNum: 最大分片从节点数\n :type MaxReplicateSetNodeNum: int\n ' self.SpecCode = None self.Status = None self.MachineType = None self.Cpu = None self.Memory = None self.DefaultStorage = None self.MaxStorage = None self.MinStorage = None self.Qps = None self.Conns = None self.MongoVersionCode = None self.MongoVersionValue = None self.Version = None self.EngineName = None self.ClusterType = None self.MinNodeNum = None self.MaxNodeNum = None self.MinReplicateSetNum = None self.MaxReplicateSetNum = None self.MinReplicateSetNodeNum = None self.MaxReplicateSetNodeNum = None
342,077,246,768,402,050
:param SpecCode: 规格信息标识 :type SpecCode: str :param Status: 规格有效标志,取值:0-停止售卖,1-开放售卖 :type Status: int :param MachineType: 机器类型,取值:0-HIO,4-HIO10G :type MachineType: str :param Cpu: cpu核心数 :type Cpu: int :param Memory: 内存规格,单位为MB :type Memory: int :param DefaultStorage: 默认磁盘规格,单位MB :type DefaultStorage: int :param MaxStorage: 最大磁盘规格,单位MB :type MaxStorage: int :param MinStorage: 最小磁盘规格,单位MB :type MinStorage: int :param Qps: 可承载qps信息 :type Qps: int :param Conns: 连接数限制 :type Conns: int :param MongoVersionCode: 实例mongodb版本信息 :type MongoVersionCode: str :param MongoVersionValue: 实例mongodb版本号 :type MongoVersionValue: int :param Version: 实例mongodb版本号(短) :type Version: str :param EngineName: 存储引擎 :type EngineName: str :param ClusterType: 集群类型,取值:1-分片集群,0-副本集集群 :type ClusterType: int :param MinNodeNum: 最小副本集从节点数 :type MinNodeNum: int :param MaxNodeNum: 最大副本集从节点数 :type MaxNodeNum: int :param MinReplicateSetNum: 最小分片数 :type MinReplicateSetNum: int :param MaxReplicateSetNum: 最大分片数 :type MaxReplicateSetNum: int :param MinReplicateSetNodeNum: 最小分片从节点数 :type MinReplicateSetNodeNum: int :param MaxReplicateSetNodeNum: 最大分片从节点数 :type MaxReplicateSetNodeNum: int
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param SpecCode: 规格信息标识\n :type SpecCode: str\n :param Status: 规格有效标志,取值:0-停止售卖,1-开放售卖\n :type Status: int\n :param MachineType: 机器类型,取值:0-HIO,4-HIO10G\n :type MachineType: str\n :param Cpu: cpu核心数\n :type Cpu: int\n :param Memory: 内存规格,单位为MB\n :type Memory: int\n :param DefaultStorage: 默认磁盘规格,单位MB\n :type DefaultStorage: int\n :param MaxStorage: 最大磁盘规格,单位MB\n :type MaxStorage: int\n :param MinStorage: 最小磁盘规格,单位MB\n :type MinStorage: int\n :param Qps: 可承载qps信息\n :type Qps: int\n :param Conns: 连接数限制\n :type Conns: int\n :param MongoVersionCode: 实例mongodb版本信息\n :type MongoVersionCode: str\n :param MongoVersionValue: 实例mongodb版本号\n :type MongoVersionValue: int\n :param Version: 实例mongodb版本号(短)\n :type Version: str\n :param EngineName: 存储引擎\n :type EngineName: str\n :param ClusterType: 集群类型,取值:1-分片集群,0-副本集集群\n :type ClusterType: int\n :param MinNodeNum: 最小副本集从节点数\n :type MinNodeNum: int\n :param MaxNodeNum: 最大副本集从节点数\n :type MaxNodeNum: int\n :param MinReplicateSetNum: 最小分片数\n :type MinReplicateSetNum: int\n :param MaxReplicateSetNum: 最大分片数\n :type MaxReplicateSetNum: int\n :param MinReplicateSetNodeNum: 最小分片从节点数\n :type MinReplicateSetNodeNum: int\n :param MaxReplicateSetNodeNum: 最大分片从节点数\n :type MaxReplicateSetNodeNum: int\n ' self.SpecCode = None self.Status = None self.MachineType = None self.Cpu = None self.Memory = None self.DefaultStorage = None self.MaxStorage = None self.MinStorage = None self.Qps = None self.Conns = None self.MongoVersionCode = None self.MongoVersionValue = None self.Version = None self.EngineName = None self.ClusterType = None self.MinNodeNum = None self.MaxNodeNum = None self.MinReplicateSetNum = None self.MaxReplicateSetNum = None self.MinReplicateSetNodeNum = None self.MaxReplicateSetNodeNum = None
def __init__(self): '\n :param Region: 地域信息\n :type Region: str\n :param Zone: 可用区信息\n :type Zone: str\n :param SpecItems: 售卖规格信息\n :type SpecItems: list of SpecItem\n ' self.Region = None self.Zone = None self.SpecItems = None
2,896,008,479,591,454,700
:param Region: 地域信息 :type Region: str :param Zone: 可用区信息 :type Zone: str :param SpecItems: 售卖规格信息 :type SpecItems: list of SpecItem
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param Region: 地域信息\n :type Region: str\n :param Zone: 可用区信息\n :type Zone: str\n :param SpecItems: 售卖规格信息\n :type SpecItems: list of SpecItem\n ' self.Region = None self.Zone = None self.SpecItems = None
def __init__(self): '\n :param TagKey: 标签Key值\n :type TagKey: str\n :param TagValue: 标签值\n :type TagValue: str\n ' self.TagKey = None self.TagValue = None
2,818,798,211,660,525,600
:param TagKey: 标签Key值 :type TagKey: str :param TagValue: 标签值 :type TagValue: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param TagKey: 标签Key值\n :type TagKey: str\n :param TagValue: 标签值\n :type TagValue: str\n ' self.TagKey = None self.TagValue = None
def __init__(self): '\n :param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。\n :type InstanceId: str\n ' self.InstanceId = None
-4,814,981,435,062,103,000
:param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。 :type InstanceId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。\n :type InstanceId: str\n ' self.InstanceId = None
def __init__(self): '\n :param AsyncRequestId: 订单ID,表示注销实例成功\n :type AsyncRequestId: str\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.AsyncRequestId = None self.RequestId = None
1,323,675,753,685,545,500
:param AsyncRequestId: Order ID, indicating that the instance was terminated successfully :type AsyncRequestId: str :param RequestId: Unique request ID, returned with every request. Provide this request's RequestId when troubleshooting. :type RequestId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param AsyncRequestId: 订单ID,表示注销实例成功\n :type AsyncRequestId: str\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.AsyncRequestId = None self.RequestId = None
def __init__(self): '\n :param InstanceId: 实例ID,格式如:cmgo-p8vnipr5\n :type InstanceId: str\n :param Memory: 升级后的内存大小,单位:GB\n :type Memory: int\n :param Volume: 升级后的硬盘大小,单位:GB\n :type Volume: int\n :param OplogSize: 升级后oplog的大小,单位:GB,默认为磁盘空间的10%,允许设置的最小值为磁盘的10%,最大值为磁盘的90%\n :type OplogSize: int\n ' self.InstanceId = None self.Memory = None self.Volume = None self.OplogSize = None
-4,952,864,791,188,179,000
:param InstanceId: Instance ID, in a format such as cmgo-p8vnipr5 :type InstanceId: str :param Memory: Memory size after the upgrade, in GB :type Memory: int :param Volume: Disk size after the upgrade, in GB :type Volume: int :param OplogSize: Oplog size after the upgrade, in GB; defaults to 10% of the disk space, and may be set between 10% and 90% of the disk :type OplogSize: int
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param InstanceId: 实例ID,格式如:cmgo-p8vnipr5\n :type InstanceId: str\n :param Memory: 升级后的内存大小,单位:GB\n :type Memory: int\n :param Volume: 升级后的硬盘大小,单位:GB\n :type Volume: int\n :param OplogSize: 升级后oplog的大小,单位:GB,默认为磁盘空间的10%,允许设置的最小值为磁盘的10%,最大值为磁盘的90%\n :type OplogSize: int\n ' self.InstanceId = None self.Memory = None self.Volume = None self.OplogSize = None
def __init__(self): '\n :param DealId: 订单ID\n :type DealId: str\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.DealId = None self.RequestId = None
4,609,315,430,297,468,400
:param DealId: Order ID :type DealId: str :param RequestId: Unique request ID, returned with every request. Provide this request's RequestId when troubleshooting. :type RequestId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param DealId: 订单ID\n :type DealId: str\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.DealId = None self.RequestId = None
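A similar hedged sketch for the upgrade request/response pair above, reusing the assumed client setup; the values are placeholders and must respect the documented oplog bounds:

```python
from tencentcloud.common import credential
from tencentcloud.mongodb.v20180408 import mongodb_client, models

cred = credential.Credential("SECRET_ID", "SECRET_KEY")
client = mongodb_client.MongodbClient(cred, "ap-guangzhou")

req = models.UpgradeDBInstanceRequest()
req.InstanceId = "cmgo-xxxxxxxx"  # placeholder
req.Memory = 8       # GB after the upgrade
req.Volume = 200     # GB after the upgrade
req.OplogSize = 20   # GB; must stay between 10% and 90% of the disk
resp = client.UpgradeDBInstance(req)
print(resp.DealId)   # order ID from the response model above
```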
def __init__(self): '\n :param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceId: str\n :param Memory: 升级后的内存大小,单位:GB\n :type Memory: int\n :param Volume: 升级后的硬盘大小,单位:GB\n :type Volume: int\n :param OplogSize: 升级后oplog的大小,单位:GB,默认为磁盘空间的10%,允许设置的最小值为磁盘的10%,最大值为磁盘的90%\n :type OplogSize: int\n ' self.InstanceId = None self.Memory = None self.Volume = None self.OplogSize = None
3,858,098,735,489,418,000
:param InstanceId: Instance ID, in a format such as cmgo-p8vnipr5. Identical to the instance ID shown on the TencentDB console page :type InstanceId: str :param Memory: Memory size after the upgrade, in GB :type Memory: int :param Volume: Disk size after the upgrade, in GB :type Volume: int :param OplogSize: Oplog size after the upgrade, in GB; defaults to 10% of the disk space, and may be set between 10% and 90% of the disk :type OplogSize: int
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param InstanceId: 实例ID,格式如:cmgo-p8vnipr5。与云数据库控制台页面中显示的实例ID相同\n :type InstanceId: str\n :param Memory: 升级后的内存大小,单位:GB\n :type Memory: int\n :param Volume: 升级后的硬盘大小,单位:GB\n :type Volume: int\n :param OplogSize: 升级后oplog的大小,单位:GB,默认为磁盘空间的10%,允许设置的最小值为磁盘的10%,最大值为磁盘的90%\n :type OplogSize: int\n ' self.InstanceId = None self.Memory = None self.Volume = None self.OplogSize = None
def __init__(self): '\n :param DealId: 订单ID\n :type DealId: str\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.DealId = None self.RequestId = None
4,609,315,430,297,468,400
:param DealId: Order ID :type DealId: str :param RequestId: Unique request ID, returned with every request. Provide this request's RequestId when troubleshooting. :type RequestId: str
tencentcloud/mongodb/v20180408/models.py
__init__
qin5506/tencentcloud-sdk-python
python
def __init__(self): '\n :param DealId: 订单ID\n :type DealId: str\n :param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。\n :type RequestId: str\n ' self.DealId = None self.RequestId = None
def __init__(self, row, to_account): 'Build a Skype Event from a single row.\n\n Args:\n row: A row object (instance of sqlite3.Row) that contains the\n extracted data from a single row in the database.\n to_account: A string containing the accounts (excluding the\n author) of the conversation.\n ' super(SkypeChatEvent, self).__init__(row['timestamp'], u'Chat from Skype', self.DATA_TYPE) self.title = row['title'] self.text = row['body_xml'] self.from_account = u'{0:s} <{1:s}>'.format(row['from_displayname'], row['author']) self.to_account = to_account
-2,209,459,207,477,835,500
Build a Skype Event from a single row. Args: row: A row object (instance of sqlite3.Row) that contains the extracted data from a single row in the database. to_account: A string containing the accounts (excluding the author) of the conversation.
plaso/parsers/sqlite_plugins/skype.py
__init__
Defense-Cyber-Crime-Center/plaso
python
def __init__(self, row, to_account): 'Build a Skype Event from a single row.\n\n Args:\n row: A row object (instance of sqlite3.Row) that contains the\n extracted data from a single row in the database.\n to_account: A string containing the accounts (excluding the\n author) of the conversation.\n ' super(SkypeChatEvent, self).__init__(row['timestamp'], u'Chat from Skype', self.DATA_TYPE) self.title = row['title'] self.text = row['body_xml'] self.from_account = u'{0:s} <{1:s}>'.format(row['from_displayname'], row['author']) self.to_account = to_account
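Illustrative only: a plain dict standing in for the sqlite3.Row consumed by SkypeChatEvent, showing which columns feed which attributes (the values are made-up placeholders):

```python
row = {
    'timestamp': 1438183618,          # placeholder POSIX timestamp
    'title': 'Project chat',
    'body_xml': 'Hello there',
    'from_displayname': 'Alice Example',
    'author': 'alice.example',
}
# The event's from_account is composed exactly as in __init__ above:
from_account = u'{0:s} <{1:s}>'.format(row['from_displayname'], row['author'])
# -> 'Alice Example <alice.example>'; to_account is derived from the
# participants column by ParseChat further below.
```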
def __init__(self, timestamp, usage, identifier, full_name, display_name, email, country): 'Initialize the event.\n\n Args:\n timestamp: The POSIX timestamp value.\n usage: A string containing the description string of the timestamp.\n identifier: The row identifier.\n full_name: A string containing the full name of the Skype account holder.\n display_name: A string containing the chosen display name of the account\n holder.\n email: A string containing the registered email address of the account\n holder.\n country: A string containing the chosen home country of the account\n holder.\n ' super(SkypeAccountEvent, self).__init__(timestamp, usage) self.offset = identifier self.username = u'{0:s} <{1:s}>'.format(full_name, display_name) self.display_name = display_name self.email = email self.country = country self.data_type = self.DATA_TYPE
-1,848,867,815,453,986,800
Initialize the event. Args: timestamp: The POSIX timestamp value. usage: A string containing the description string of the timestamp. identifier: The row identifier. full_name: A string containing the full name of the Skype account holder. display_name: A string containing the chosen display name of the account holder. email: A string containing the registered email address of the account holder. country: A string containing the chosen home country of the account holder.
plaso/parsers/sqlite_plugins/skype.py
__init__
Defense-Cyber-Crime-Center/plaso
python
def __init__(self, timestamp, usage, identifier, full_name, display_name, email, country): 'Initialize the event.\n\n Args:\n timestamp: The POSIX timestamp value.\n usage: A string containing the description string of the timestamp.\n identifier: The row identifier.\n full_name: A string containing the full name of the Skype account holder.\n display_name: A string containing the chosen display name of the account\n holder.\n email: A string containing the registered email address of the account\n holder.\n country: A string containing the chosen home country of the account\n holder.\n ' super(SkypeAccountEvent, self).__init__(timestamp, usage) self.offset = identifier self.username = u'{0:s} <{1:s}>'.format(full_name, display_name) self.display_name = display_name self.email = email self.country = country self.data_type = self.DATA_TYPE
def __init__(self, row, dst_number): "Read the information related with the SMS.\n\n Args:\n row: row form the sql query.\n row['time_sms']: timestamp when the sms was send.\n row['dstnum_sms']: number which receives the sms.\n row['msg_sms']: text send to this sms.\n dst_number: phone number where the user send the sms.\n " super(SkypeSMSEvent, self).__init__(row['time_sms'], u'SMS from Skype', self.DATA_TYPE) self.number = dst_number self.text = row['msg_sms']
7,238,326,909,042,559,000
Read the information related to the SMS. Args: row: row from the SQL query. row['time_sms']: timestamp when the SMS was sent. row['dstnum_sms']: number which receives the SMS. row['msg_sms']: text sent in this SMS. dst_number: phone number to which the user sent the SMS.
plaso/parsers/sqlite_plugins/skype.py
__init__
Defense-Cyber-Crime-Center/plaso
python
def __init__(self, row, dst_number): "Read the information related with the SMS.\n\n Args:\n row: row form the sql query.\n row['time_sms']: timestamp when the sms was send.\n row['dstnum_sms']: number which receives the sms.\n row['msg_sms']: text send to this sms.\n dst_number: phone number where the user send the sms.\n " super(SkypeSMSEvent, self).__init__(row['time_sms'], u'SMS from Skype', self.DATA_TYPE) self.number = dst_number self.text = row['msg_sms']
def __init__(self, timestamp, call_type, user_start_call, source, destination, video_conference): 'Contains information if the call was cancelled, accepted or finished.\n\n Args:\n timestamp: the timestamp of the event.\n call_type: WAITING, STARTED, FINISHED.\n user_start_call: boolean, true indicates that the owner\n account started the call.\n source: the account which started the call.\n destination: the account which gets the call.\n video_conference: boolean, if is true it was a videoconference.\n ' super(SkypeCallEvent, self).__init__(timestamp, u'Call from Skype', self.DATA_TYPE) self.call_type = call_type self.user_start_call = user_start_call self.src_call = source self.dst_call = destination self.video_conference = video_conference
3,729,223,191,627,515,400
Contains information on whether the call was cancelled, accepted or finished. Args: timestamp: the timestamp of the event. call_type: WAITING, STARTED, FINISHED. user_start_call: boolean, true indicates that the owner account started the call. source: the account which started the call. destination: the account which receives the call. video_conference: boolean, true if it was a video conference.
plaso/parsers/sqlite_plugins/skype.py
__init__
Defense-Cyber-Crime-Center/plaso
python
def __init__(self, timestamp, call_type, user_start_call, source, destination, video_conference): 'Contains information if the call was cancelled, accepted or finished.\n\n Args:\n timestamp: the timestamp of the event.\n call_type: WAITING, STARTED, FINISHED.\n user_start_call: boolean, true indicates that the owner\n account started the call.\n source: the account which started the call.\n destination: the account which gets the call.\n video_conference: boolean, if is true it was a videoconference.\n ' super(SkypeCallEvent, self).__init__(timestamp, u'Call from Skype', self.DATA_TYPE) self.call_type = call_type self.user_start_call = user_start_call self.src_call = source self.dst_call = destination self.video_conference = video_conference
def __init__(self, row, timestamp, action_type, source, destination): 'Actions related with sending files.\n\n Args:\n row:\n filepath: path from the file.\n filename: name of the file.\n filesize: size of the file.\n timestamp: when the action happens.\n action_type: GETSOLICITUDE, SENDSOLICITUDE, ACCEPTED, FINISHED.\n source: The account that sent the file.\n destination: The account that received the file.\n ' super(SkypeTransferFileEvent, self).__init__(timestamp, u'File transfer from Skype', self.DATA_TYPE) self.offset = row['id'] self.action_type = action_type self.source = source self.destination = destination self.transferred_filepath = row['filepath'] self.transferred_filename = row['filename'] try: self.transferred_filesize = int(row['filesize']) except ValueError: logging.debug(u'Unknown filesize {0:s}'.format(self.transferred_filename)) self.transferred_filesize = 0
-365,589,398,346,313,150
Actions related to sending files. Args: row: filepath: path of the file. filename: name of the file. filesize: size of the file. timestamp: when the action happens. action_type: GETSOLICITUDE, SENDSOLICITUDE, ACCEPTED, FINISHED. source: The account that sent the file. destination: The account that received the file.
plaso/parsers/sqlite_plugins/skype.py
__init__
Defense-Cyber-Crime-Center/plaso
python
def __init__(self, row, timestamp, action_type, source, destination): 'Actions related with sending files.\n\n Args:\n row:\n filepath: path from the file.\n filename: name of the file.\n filesize: size of the file.\n timestamp: when the action happens.\n action_type: GETSOLICITUDE, SENDSOLICITUDE, ACCEPTED, FINISHED.\n source: The account that sent the file.\n destination: The account that received the file.\n ' super(SkypeTransferFileEvent, self).__init__(timestamp, u'File transfer from Skype', self.DATA_TYPE) self.offset = row['id'] self.action_type = action_type self.source = source self.destination = destination self.transferred_filepath = row['filepath'] self.transferred_filename = row['filename'] try: self.transferred_filesize = int(row['filesize']) except ValueError: logging.debug(u'Unknown filesize {0:s}'.format(self.transferred_filename)) self.transferred_filesize = 0
def ParseAccountInformation(self, parser_mediator, row, query=None, **unused_kwargs): 'Parses the Accounts database.\n\n Args:\n parser_mediator: A parser mediator object (instance of ParserMediator).\n row: The row resulting from the query.\n query: Optional query string. The default is None.\n ' if row['profile_timestamp']: event_object = SkypeAccountEvent(row['profile_timestamp'], u'Profile Changed', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['authreq_timestamp']: event_object = SkypeAccountEvent(row['authreq_timestamp'], u'Authenticate Request', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['lastonline_timestamp']: event_object = SkypeAccountEvent(row['lastonline_timestamp'], u'Last Online', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['mood_timestamp']: event_object = SkypeAccountEvent(row['mood_timestamp'], u'Mood Event', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['sent_authrequest_time']: event_object = SkypeAccountEvent(row['sent_authrequest_time'], u'Auth Request Sent', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['lastused_timestamp']: event_object = SkypeAccountEvent(row['lastused_timestamp'], u'Last Used', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query)
1,106,927,174,727,200,300
Parses the Accounts database. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None.
plaso/parsers/sqlite_plugins/skype.py
ParseAccountInformation
Defense-Cyber-Crime-Center/plaso
python
def ParseAccountInformation(self, parser_mediator, row, query=None, **unused_kwargs): 'Parses the Accounts database.\n\n Args:\n parser_mediator: A parser mediator object (instance of ParserMediator).\n row: The row resulting from the query.\n query: Optional query string. The default is None.\n ' if row['profile_timestamp']: event_object = SkypeAccountEvent(row['profile_timestamp'], u'Profile Changed', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['authreq_timestamp']: event_object = SkypeAccountEvent(row['authreq_timestamp'], u'Authenticate Request', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['lastonline_timestamp']: event_object = SkypeAccountEvent(row['lastonline_timestamp'], u'Last Online', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['mood_timestamp']: event_object = SkypeAccountEvent(row['mood_timestamp'], u'Mood Event', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['sent_authrequest_time']: event_object = SkypeAccountEvent(row['sent_authrequest_time'], u'Auth Request Sent', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query) if row['lastused_timestamp']: event_object = SkypeAccountEvent(row['lastused_timestamp'], u'Last Used', row['id'], row['fullname'], row['given_displayname'], row['emails'], row['country']) parser_mediator.ProduceEvent(event_object, query=query)
def ParseChat(self, parser_mediator, row, query=None, **unused_kwargs): 'Parses a chat message row.\n\n Args:\n parser_mediator: A parser mediator object (instance of ParserMediator).\n row: The row resulting from the query.\n query: Optional query string. The default is None.\n ' to_account = u'' accounts = [] participants = row['participants'].split(' ') for participant in participants: if (participant != row['author']): accounts.append(participant) to_account = u', '.join(accounts) if (not to_account): if row['dialog_partner']: to_account = row['dialog_partner'] else: to_account = u'Unknown User' event_object = SkypeChatEvent(row, to_account) parser_mediator.ProduceEvent(event_object, query=query)
4,747,559,496,911,788,000
Parses a chat message row. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None.
plaso/parsers/sqlite_plugins/skype.py
ParseChat
Defense-Cyber-Crime-Center/plaso
python
def ParseChat(self, parser_mediator, row, query=None, **unused_kwargs): 'Parses a chat message row.\n\n Args:\n parser_mediator: A parser mediator object (instance of ParserMediator).\n row: The row resulting from the query.\n query: Optional query string. The default is None.\n ' to_account = u'' accounts = [] participants = row['participants'].split(' ') for participant in participants: if (participant != row['author']): accounts.append(participant) to_account = u', '.join(accounts) if (not to_account): if row['dialog_partner']: to_account = row['dialog_partner'] else: to_account = u'Unknown User' event_object = SkypeChatEvent(row, to_account) parser_mediator.ProduceEvent(event_object, query=query)
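The to_account derivation in ParseChat can be restated as a small standalone helper (a sketch, assuming participants is a space-separated list of Skype handles):

```python
def build_to_account(participants, author, dialog_partner=None):
    # Everyone in the conversation except the author.
    accounts = [p for p in participants.split(' ') if p != author]
    if accounts:
        return u', '.join(accounts)
    # Fall back to the dialog partner, then to a generic label.
    return dialog_partner or u'Unknown User'

# build_to_account('alice bob carol', 'alice') -> 'bob, carol'
```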
def ParseSMS(self, parser_mediator, row, query=None, **unused_kwargs): 'Parse SMS.\n\n Args:\n parser_mediator: A parser mediator object (instance of ParserMediator).\n row: The row resulting from the query.\n query: Optional query string. The default is None.\n ' dst_number = row['dstnum_sms'].replace(u' ', u'') event_object = SkypeSMSEvent(row, dst_number) parser_mediator.ProduceEvent(event_object, query=query)
8,635,166,024,142,017,000
Parse SMS. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None.
plaso/parsers/sqlite_plugins/skype.py
ParseSMS
Defense-Cyber-Crime-Center/plaso
python
def ParseSMS(self, parser_mediator, row, query=None, **unused_kwargs): 'Parse SMS.\n\n Args:\n parser_mediator: A parser mediator object (instance of ParserMediator).\n row: The row resulting from the query.\n query: Optional query string. The default is None.\n ' dst_number = row['dstnum_sms'].replace(u' ', u'') event_object = SkypeSMSEvent(row, dst_number) parser_mediator.ProduceEvent(event_object, query=query)
def ParseCall(self, parser_mediator, row, query=None, **unused_kwargs): 'Parse the calls taking into accounts some rows.\n\n Args:\n parser_mediator: A parser mediator object (instance of ParserMediator).\n row: The row resulting from the query.\n query: Optional query string. The default is None.\n ' try: aux = row['guid'] if aux: aux_list = aux.split(u'-') src_aux = aux_list[0] dst_aux = aux_list[1] else: src_aux = u'Unknown [no GUID]' dst_aux = u'Unknown [no GUID]' except IndexError: src_aux = u'Unknown [{0:s}]'.format(row['guid']) dst_aux = u'Unknown [{0:s}]'.format(row['guid']) if (row['is_incoming'] == u'0'): user_start_call = True source = src_aux if row['ip_address']: destination = u'{0:s} <{1:s}>'.format(dst_aux, row['ip_address']) else: destination = dst_aux else: user_start_call = False source = src_aux destination = dst_aux if (row['videostatus'] == u'3'): video_conference = True else: video_conference = False event_object = SkypeCallEvent(row['try_call'], u'WAITING', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) if row['accept_call']: event_object = SkypeCallEvent(row['accept_call'], u'ACCEPTED', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) if row['call_duration']: try: timestamp = (int(row['accept_call']) + int(row['call_duration'])) event_object = SkypeCallEvent(timestamp, u'FINISHED', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) except ValueError: logging.debug(u'[{0:s}] Unable to determine when the call {1:s} was finished.'.format(self.NAME, row['id']))
3,052,790,095,354,990,600
Parse the calls, taking the relevant rows into account. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: The row resulting from the query. query: Optional query string. The default is None.
plaso/parsers/sqlite_plugins/skype.py
ParseCall
Defense-Cyber-Crime-Center/plaso
python
def ParseCall(self, parser_mediator, row, query=None, **unused_kwargs): 'Parse the calls taking into accounts some rows.\n\n Args:\n parser_mediator: A parser mediator object (instance of ParserMediator).\n row: The row resulting from the query.\n query: Optional query string. The default is None.\n ' try: aux = row['guid'] if aux: aux_list = aux.split(u'-') src_aux = aux_list[0] dst_aux = aux_list[1] else: src_aux = u'Unknown [no GUID]' dst_aux = u'Unknown [no GUID]' except IndexError: src_aux = u'Unknown [{0:s}]'.format(row['guid']) dst_aux = u'Unknown [{0:s}]'.format(row['guid']) if (row['is_incoming'] == u'0'): user_start_call = True source = src_aux if row['ip_address']: destination = u'{0:s} <{1:s}>'.format(dst_aux, row['ip_address']) else: destination = dst_aux else: user_start_call = False source = src_aux destination = dst_aux if (row['videostatus'] == u'3'): video_conference = True else: video_conference = False event_object = SkypeCallEvent(row['try_call'], u'WAITING', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) if row['accept_call']: event_object = SkypeCallEvent(row['accept_call'], u'ACCEPTED', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) if row['call_duration']: try: timestamp = (int(row['accept_call']) + int(row['call_duration'])) event_object = SkypeCallEvent(timestamp, u'FINISHED', user_start_call, source, destination, video_conference) parser_mediator.ProduceEvent(event_object, query=query) except ValueError: logging.debug(u'[{0:s}] Unable to determine when the call {1:s} was finished.'.format(self.NAME, row['id']))
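The GUID handling at the top of ParseCall reduces to the following sketch: a well-formed guid of the form 'caller-callee-...' yields the two endpoints, anything else falls back to an 'Unknown' label:

```python
def split_call_guid(guid):
    if guid:
        parts = guid.split(u'-')
        if len(parts) >= 2:
            return parts[0], parts[1]
        # Malformed guid: keep it visible in the label, as ParseCall does.
        return (u'Unknown [{0:s}]'.format(guid),) * 2
    return u'Unknown [no GUID]', u'Unknown [no GUID]'
```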
def ParseFileTransfer(self, parser_mediator, row, cache=None, database=None, query=None, **unused_kwargs): 'Parse the transfer files.\n\n There is no direct relationship between who sends the file and\n who accepts the file.\n\n Args:\n parser_mediator: A parser mediator object (instance of ParserMediator).\n row: the row with all information related with the file transfers.\n query: Optional query string. The default is None.\n cache: a cache object (instance of SQLiteCache).\n database: A database object (instance of SQLiteDatabase).\n ' source_dict = cache.GetResults(u'source') if (not source_dict): cursor = database.cursor results = cursor.execute(self.QUERY_SOURCE_FROM_TRANSFER) cache.CacheQueryResults(results, 'source', 'pk_id', ('skypeid', 'skypename')) source_dict = cache.GetResults(u'source') dest_dict = cache.GetResults(u'destination') if (not dest_dict): cursor = database.cursor results = cursor.execute(self.QUERY_DEST_FROM_TRANSFER) cache.CacheQueryResults(results, 'destination', 'parent_id', ('skypeid', 'skypename')) dest_dict = cache.GetResults(u'destination') source = u'Unknown' destination = u'Unknown' if row['parent_id']: destination = u'{0:s} <{1:s}>'.format(row['partner_handle'], row['partner_dispname']) (skype_id, skype_name) = source_dict.get(row['parent_id'], [None, None]) if skype_name: source = u'{0:s} <{1:s}>'.format(skype_id, skype_name) else: source = u'{0:s} <{1:s}>'.format(row['partner_handle'], row['partner_dispname']) if row['pk_id']: (skype_id, skype_name) = dest_dict.get(row['pk_id'], [None, None]) if skype_name: destination = u'{0:s} <{1:s}>'.format(skype_id, skype_name) if (row['status'] == 8): if row['starttime']: event_object = SkypeTransferFileEvent(row, row['starttime'], u'GETSOLICITUDE', source, destination) parser_mediator.ProduceEvent(event_object, query=query) if row['accepttime']: event_object = SkypeTransferFileEvent(row, row['accepttime'], u'ACCEPTED', source, destination) parser_mediator.ProduceEvent(event_object, query=query) if row['finishtime']: event_object = SkypeTransferFileEvent(row, row['finishtime'], u'FINISHED', source, destination) parser_mediator.ProduceEvent(event_object, query=query) elif ((row['status'] == 2) and row['starttime']): event_object = SkypeTransferFileEvent(row, row['starttime'], u'SENDSOLICITUDE', source, destination) parser_mediator.ProduceEvent(event_object, query=query)
-4,675,259,592,888,954,000
Parse the file transfers. There is no direct relationship between who sends the file and who accepts the file. Args: parser_mediator: A parser mediator object (instance of ParserMediator). row: the row with all information related to the file transfers. query: Optional query string. The default is None. cache: a cache object (instance of SQLiteCache). database: A database object (instance of SQLiteDatabase).
plaso/parsers/sqlite_plugins/skype.py
ParseFileTransfer
Defense-Cyber-Crime-Center/plaso
python
def ParseFileTransfer(self, parser_mediator, row, cache=None, database=None, query=None, **unused_kwargs): 'Parse the transfer files.\n\n There is no direct relationship between who sends the file and\n who accepts the file.\n\n Args:\n parser_mediator: A parser mediator object (instance of ParserMediator).\n row: the row with all information related with the file transfers.\n query: Optional query string. The default is None.\n cache: a cache object (instance of SQLiteCache).\n database: A database object (instance of SQLiteDatabase).\n ' source_dict = cache.GetResults(u'source') if (not source_dict): cursor = database.cursor results = cursor.execute(self.QUERY_SOURCE_FROM_TRANSFER) cache.CacheQueryResults(results, 'source', 'pk_id', ('skypeid', 'skypename')) source_dict = cache.GetResults(u'source') dest_dict = cache.GetResults(u'destination') if (not dest_dict): cursor = database.cursor results = cursor.execute(self.QUERY_DEST_FROM_TRANSFER) cache.CacheQueryResults(results, 'destination', 'parent_id', ('skypeid', 'skypename')) dest_dict = cache.GetResults(u'destination') source = u'Unknown' destination = u'Unknown' if row['parent_id']: destination = u'{0:s} <{1:s}>'.format(row['partner_handle'], row['partner_dispname']) (skype_id, skype_name) = source_dict.get(row['parent_id'], [None, None]) if skype_name: source = u'{0:s} <{1:s}>'.format(skype_id, skype_name) else: source = u'{0:s} <{1:s}>'.format(row['partner_handle'], row['partner_dispname']) if row['pk_id']: (skype_id, skype_name) = dest_dict.get(row['pk_id'], [None, None]) if skype_name: destination = u'{0:s} <{1:s}>'.format(skype_id, skype_name) if (row['status'] == 8): if row['starttime']: event_object = SkypeTransferFileEvent(row, row['starttime'], u'GETSOLICITUDE', source, destination) parser_mediator.ProduceEvent(event_object, query=query) if row['accepttime']: event_object = SkypeTransferFileEvent(row, row['accepttime'], u'ACCEPTED', source, destination) parser_mediator.ProduceEvent(event_object, query=query) if row['finishtime']: event_object = SkypeTransferFileEvent(row, row['finishtime'], u'FINISHED', source, destination) parser_mediator.ProduceEvent(event_object, query=query) elif ((row['status'] == 2) and row['starttime']): event_object = SkypeTransferFileEvent(row, row['starttime'], u'SENDSOLICITUDE', source, destination) parser_mediator.ProduceEvent(event_object, query=query)
def searchable(self): 'Enable search line edit visible.' self._search_line_edit.setVisible(True) return self
-8,225,402,888,302,331,000
Make the search line edit visible.
dayu_widgets/item_view_set.py
searchable
kanbang/dayu_widgets
python
def searchable(self): self._search_line_edit.setVisible(True) return self
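Usage sketch for the chainable setter above (the class name is assumed from the module path, and a Qt application object must already exist before constructing widgets):

```python
from dayu_widgets.item_view_set import MItemViewSet  # assumed host class

view_set = MItemViewSet().searchable()  # searchable() returns self, so it chains
```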
def setUp(self): '\n Set up method to run before each test case\n ' self.new_user = credentials('Paul', '123')
4,424,100,375,332,029,000
Set up method to run before each test case
credentials_test.py
setUp
paulmunyao/Password-Locker
python
def setUp(self): '\n \n ' self.new_user = credentials('Paul', '123')
def test__init__(self): '\n test__init__ test case to test if the object is initialized properly\n ' self.assertEqual(self.new_user.user_name, 'Paul') self.assertEqual(self.new_user.password, '123')
734,204,525,461,232,000
test__init__ test case to test if the object is initialized properly
credentials_test.py
test__init__
paulmunyao/Password-Locker
python
def test__init__(self): '\n \n ' self.assertEqual(self.new_user.user_name, 'Paul') self.assertEqual(self.new_user.password, '123')
def test__save_user(self): '\n test to see if the user is saved\n ' self.new_credentials.save_credentials() self.assertEqual(len(credentials.user_list), 1)
-8,293,255,632,897,053,000
test to see if the user is saved
credentials_test.py
test__save_user
paulmunyao/Password-Locker
python
def test__save_user(self): '\n \n ' self.new_credentials.save_credentials() self.assertEqual(len(credentials.user_list), 1)
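The three test methods above follow the standard unittest pattern; a compact, self-contained restatement (assuming a `credentials` class importable from the repository's credentials module with the attributes used above):

```python
import unittest
from credentials import credentials  # assumed module/class name

class TestCredentials(unittest.TestCase):
    def setUp(self):
        # Fresh fixture before every test, as in the setUp above.
        self.new_user = credentials('Paul', '123')

    def test_init(self):
        self.assertEqual(self.new_user.user_name, 'Paul')
        self.assertEqual(self.new_user.password, '123')

if __name__ == '__main__':
    unittest.main()
```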
@property def verbosity(self) -> Verbosity: "\n Verbosity level (default `warning`)\n\n Level 0: only show 'error' messages.\n Level 1: also show 'warning' messages.\n Level 2: also show 'info' messages.\n Level 3: also show 'hint' messages.\n Level 4: also show very detailed progress for 'debug'ging.\n " return self._verbosity
6,424,720,869,344,444,000
Verbosity level (default `warning`) Level 0: only show 'error' messages. Level 1: also show 'warning' messages. Level 2: also show 'info' messages. Level 3: also show 'hint' messages. Level 4: also show very detailed progress for 'debug'ging.
scanpy/_settings.py
verbosity
gamazeps/scanpy
python
@property def verbosity(self) -> Verbosity: "\n Verbosity level (default `warning`)\n\n Level 0: only show 'error' messages.\n Level 1: also show 'warning' messages.\n Level 2: also show 'info' messages.\n Level 3: also show 'hint' messages.\n Level 4: also show very detailed progress for 'debug'ging.\n " return self._verbosity
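In practice the property is set directly on the global settings object; for example:

```python
import scanpy as sc

sc.settings.verbosity = 3  # errors, warnings, info and hints
```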
@property def plot_suffix(self) -> str: 'Global suffix that is appended to figure filenames.\n ' return self._plot_suffix
3,859,983,572,578,482,000
Global suffix that is appended to figure filenames.
scanpy/_settings.py
plot_suffix
gamazeps/scanpy
python
@property def plot_suffix(self) -> str: '\n ' return self._plot_suffix
@property def file_format_data(self) -> str: "File format for saving AnnData objects.\n\n Allowed are 'txt', 'csv' (comma separated value file) for exporting and 'h5ad'\n (hdf5) for lossless saving.\n " return self._file_format_data
-3,070,800,732,118,796,300
File format for saving AnnData objects. Allowed are 'txt', 'csv' (comma separated value file) for exporting and 'h5ad' (hdf5) for lossless saving.
scanpy/_settings.py
file_format_data
gamazeps/scanpy
python
@property def file_format_data(self) -> str: "File format for saving AnnData objects.\n\n Allowed are 'txt', 'csv' (comma separated value file) for exporting and 'h5ad'\n (hdf5) for lossless saving.\n " return self._file_format_data
@property def file_format_figs(self) -> str: "File format for saving figures.\n\n For example 'png', 'pdf' or 'svg'. Many other formats work as well (see\n `matplotlib.pyplot.savefig`).\n " return self._file_format_figs
-1,319,306,077,014,287,400
File format for saving figures. For example 'png', 'pdf' or 'svg'. Many other formats work as well (see `matplotlib.pyplot.savefig`).
scanpy/_settings.py
file_format_figs
gamazeps/scanpy
python
@property def file_format_figs(self) -> str: "File format for saving figures.\n\n For example 'png', 'pdf' or 'svg'. Many other formats work as well (see\n `matplotlib.pyplot.savefig`).\n " return self._file_format_figs
@property def autosave(self) -> bool: ' Automatically save figures in :attr:`~scanpy._settings.ScanpyConfig.figdir` (default `False`).\n\n Do not show plots/figures interactively.\n ' return self._autosave
3,660,266,334,677,051,400
Automatically save figures in :attr:`~scanpy._settings.ScanpyConfig.figdir` (default `False`). Do not show plots/figures interactively.
scanpy/_settings.py
autosave
gamazeps/scanpy
python
@property def autosave(self) -> bool: ' Automatically save figures in :attr:`~scanpy._settings.ScanpyConfig.figdir` (default `False`).\n\n Do not show plots/figures interactively.\n ' return self._autosave
@property def autoshow(self) -> bool: ' Automatically show figures if `autosave == False` (default `True`).\n\n There is no need to call the matplotlib pl.show() in this case.\n ' return self._autoshow
-6,447,307,756,049,594,000
Automatically show figures if `autosave == False` (default `True`). There is no need to call the matplotlib pl.show() in this case.
scanpy/_settings.py
autoshow
gamazeps/scanpy
python
@property def autoshow(self) -> bool: ' Automatically show figures if `autosave == False` (default `True`).\n\n There is no need to call the matplotlib pl.show() in this case.\n ' return self._autoshow
@property def writedir(self) -> Path: ' Directory where the function scanpy.write writes to by default.\n ' return self._writedir
-5,245,418,655,141,521,000
Directory where the function scanpy.write writes to by default.
scanpy/_settings.py
writedir
gamazeps/scanpy
python
@property def writedir(self) -> Path: ' \n ' return self._writedir
@property def cachedir(self) -> Path: " Directory for cache files (default `'./cache/'`).\n " return self._cachedir
-1,021,373,160,847,789,800
Directory for cache files (default `'./cache/'`).
scanpy/_settings.py
cachedir
gamazeps/scanpy
python
@property def cachedir(self) -> Path: " \n " return self._cachedir
@property def datasetdir(self) -> Path: " Directory for example :mod:`~scanpy.datasets` (default `'./data/'`).\n " return self._datasetdir
6,038,991,707,708,268,000
Directory for example :mod:`~scanpy.datasets` (default `'./data/'`).
scanpy/_settings.py
datasetdir
gamazeps/scanpy
python
@property def datasetdir(self) -> Path: " \n " return self._datasetdir
@property def figdir(self) -> Path: " Directory for saving figures (default `'./figures/'`).\n " return self._figdir
3,064,606,170,553,432,600
Directory for saving figures (default `'./figures/'`).
scanpy/_settings.py
figdir
gamazeps/scanpy
python
@property def figdir(self) -> Path: " \n " return self._figdir
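These directory and autosave settings are usually combined to write figures non-interactively; for example:

```python
import scanpy as sc

sc.settings.figdir = './figures/run_01/'
sc.settings.autosave = True    # save figures instead of showing them
sc.settings.autoshow = False
```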
@property def max_memory(self) -> Union[(int, float)]: ' Maximal memory usage in Gigabyte.\n\n Is currently not well respected....\n ' return self._max_memory
-7,489,614,085,946,220,000
Maximal memory usage in Gigabyte. Is currently not well respected....
scanpy/_settings.py
max_memory
gamazeps/scanpy
python
@property def max_memory(self) -> Union[(int, float)]: ' Maximal memory usage in Gigabyte.\n\n Is currently not well respected....\n ' return self._max_memory
@property def n_jobs(self) -> int: ' Default number of jobs/ CPUs to use for parallel computing.\n ' return self._n_jobs
1,803,948,937,692,783,000
Default number of jobs/ CPUs to use for parallel computing.
scanpy/_settings.py
n_jobs
gamazeps/scanpy
python
@property def n_jobs(self) -> int: ' \n ' return self._n_jobs
@property def logpath(self) -> Optional[Path]: ' The file path `logfile` was set to.\n ' return self._logpath
-2,058,415,471,124,060,200
The file path `logfile` was set to.
scanpy/_settings.py
logpath
gamazeps/scanpy
python
@property def logpath(self) -> Optional[Path]: ' \n ' return self._logpath
@property def logfile(self) -> TextIO: " The open file to write logs to.\n\n Set it to a :class:`~pathlib.Path` or :class:`str` to open a new one.\n The default `None` corresponds to :obj:`sys.stdout` in jupyter notebooks\n and to :obj:`sys.stderr` otherwise.\n\n For backwards compatibility, setting it to `''` behaves like setting it to `None`.\n " return self._logfile
629,563,856,985,492,100
The open file to write logs to. Set it to a :class:`~pathlib.Path` or :class:`str` to open a new one. The default `None` corresponds to :obj:`sys.stdout` in jupyter notebooks and to :obj:`sys.stderr` otherwise. For backwards compatibility, setting it to `''` behaves like setting it to `None`.
scanpy/_settings.py
logfile
gamazeps/scanpy
python
@property def logfile(self) -> TextIO: " The open file to write logs to.\n\n Set it to a :class:`~pathlib.Path` or :class:`str` to open a new one.\n The default `None` corresponds to :obj:`sys.stdout` in jupyter notebooks\n and to :obj:`sys.stderr` otherwise.\n\n For backwards compatibility, setting it to `` behaves like setting it to `None`.\n " return self._logfile
@property def categories_to_ignore(self) -> List[str]: ' Categories that are omitted in plotting etc.\n ' return self._categories_to_ignore
-7,675,846,271,189,168,000
Categories that are omitted in plotting etc.
scanpy/_settings.py
categories_to_ignore
gamazeps/scanpy
python
@property def categories_to_ignore(self) -> List[str]: ' \n ' return self._categories_to_ignore
def set_figure_params(self, scanpy: bool=True, dpi: int=80, dpi_save: int=150, frameon: bool=True, vector_friendly: bool=True, fontsize: int=14, color_map: Optional[str]=None, format: Union[(str, Iterable[str])]='pdf', transparent: bool=False, ipython_format: str='png2x'): " Set resolution/size, styling and format of figures.\n\n Parameters\n ----------\n scanpy\n Init default values for :obj:`matplotlib.rcParams` suited for Scanpy.\n dpi\n Resolution of rendered figures - this influences the size of figures in notebooks.\n dpi_save\n Resolution of saved figures. This should typically be higher to achieve\n publication quality.\n frameon\n Add frames and axes labels to scatter plots.\n vector_friendly\n Plot scatter plots using `png` backend even when exporting as `pdf` or `svg`.\n fontsize\n Set the fontsize for several `rcParams` entries. Ignored if `scanpy=False`.\n color_map\n Convenience method for setting the default color map. Ignored if `scanpy=False`.\n format: {`'png'`, `'pdf'`, `'svg'`, etc.}, optional (default: `'pdf'`)\n This sets the default format for saving figures: `file_format_figs`.\n transparent\n Save figures with transparent back ground. Sets\n `rcParams['savefig.transparent']`.\n ipython_format\n Only concerns the notebook/IPython environment; see\n :func:`~IPython.display.set_matplotlib_formats` for details.\n " try: import IPython if isinstance(ipython_format, str): ipython_format = [ipython_format] IPython.display.set_matplotlib_formats(*ipython_format) except Exception: pass from matplotlib import rcParams self._vector_friendly = vector_friendly self.file_format_figs = format if (dpi is not None): rcParams['figure.dpi'] = dpi if (dpi_save is not None): rcParams['savefig.dpi'] = dpi_save if (transparent is not None): rcParams['savefig.transparent'] = transparent if scanpy: from .plotting._rcmod import set_rcParams_scanpy set_rcParams_scanpy(fontsize=fontsize, color_map=color_map) self._frameon = frameon
7,115,684,889,461,385,000
Set resolution/size, styling and format of figures. Parameters ---------- scanpy Init default values for :obj:`matplotlib.rcParams` suited for Scanpy. dpi Resolution of rendered figures - this influences the size of figures in notebooks. dpi_save Resolution of saved figures. This should typically be higher to achieve publication quality. frameon Add frames and axes labels to scatter plots. vector_friendly Plot scatter plots using `png` backend even when exporting as `pdf` or `svg`. fontsize Set the fontsize for several `rcParams` entries. Ignored if `scanpy=False`. color_map Convenience method for setting the default color map. Ignored if `scanpy=False`. format: {`'png'`, `'pdf'`, `'svg'`, etc.}, optional (default: `'pdf'`) This sets the default format for saving figures: `file_format_figs`. transparent Save figures with transparent back ground. Sets `rcParams['savefig.transparent']`. ipython_format Only concerns the notebook/IPython environment; see :func:`~IPython.display.set_matplotlib_formats` for details.
scanpy/_settings.py
set_figure_params
gamazeps/scanpy
python
def set_figure_params(self, scanpy: bool=True, dpi: int=80, dpi_save: int=150, frameon: bool=True, vector_friendly: bool=True, fontsize: int=14, color_map: Optional[str]=None, format: Union[(str, Iterable[str])]='pdf', transparent: bool=False, ipython_format: str='png2x'): " Set resolution/size, styling and format of figures.\n\n Parameters\n ----------\n scanpy\n Init default values for :obj:`matplotlib.rcParams` suited for Scanpy.\n dpi\n Resolution of rendered figures - this influences the size of figures in notebooks.\n dpi_save\n Resolution of saved figures. This should typically be higher to achieve\n publication quality.\n frameon\n Add frames and axes labels to scatter plots.\n vector_friendly\n Plot scatter plots using `png` backend even when exporting as `pdf` or `svg`.\n fontsize\n Set the fontsize for several `rcParams` entries. Ignored if `scanpy=False`.\n color_map\n Convenience method for setting the default color map. Ignored if `scanpy=False`.\n format: {`'png'`, `'pdf'`, `'svg'`, etc.}, optional (default: `'pdf'`)\n This sets the default format for saving figures: `file_format_figs`.\n transparent\n Save figures with transparent back ground. Sets\n `rcParams['savefig.transparent']`.\n ipython_format\n Only concerns the notebook/IPython environment; see\n :func:`~IPython.display.set_matplotlib_formats` for details.\n " try: import IPython if isinstance(ipython_format, str): ipython_format = [ipython_format] IPython.display.set_matplotlib_formats(*ipython_format) except Exception: pass from matplotlib import rcParams self._vector_friendly = vector_friendly self.file_format_figs = format if (dpi is not None): rcParams['figure.dpi'] = dpi if (dpi_save is not None): rcParams['savefig.dpi'] = dpi_save if (transparent is not None): rcParams['savefig.transparent'] = transparent if scanpy: from .plotting._rcmod import set_rcParams_scanpy set_rcParams_scanpy(fontsize=fontsize, color_map=color_map) self._frameon = frameon
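A typical call, with arbitrary but in-range values for the documented parameters:

```python
import scanpy as sc

sc.settings.set_figure_params(dpi=100, dpi_save=300, frameon=False,
                              fontsize=12, format='svg', transparent=True)
```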
@staticmethod def _is_run_from_ipython(): 'Determines whether run from Ipython.\n\n Only affects progress bars.\n ' try: __IPYTHON__ return True except NameError: return False
-3,831,951,316,772,162,000
Determines whether run from Ipython. Only affects progress bars.
scanpy/_settings.py
_is_run_from_ipython
gamazeps/scanpy
python
@staticmethod def _is_run_from_ipython(): 'Determines whether run from Ipython.\n\n Only affects progress bars.\n ' try: __IPYTHON__ return True except NameError: return False
def fps(src: torch.Tensor, batch=None, ratio=None, random_start=True): '"A sampling algorithm from the `"PointNet++: Deep Hierarchical Feature\n Learning on Point Sets in a Metric Space"\n <https://arxiv.org/abs/1706.02413>`_ paper, which iteratively samples the\n most distant point with regard to the rest points.\n\n Args:\n src (Tensor): Point feature matrix\n :math:`\\mathbf{X} \\in \\mathbb{R}^{N \\times F}`.\n batch (LongTensor, optional): Batch vector\n :math:`\\mathbf{b} \\in {\\{ 0, \\ldots, B-1\\}}^N`, which assigns each\n node to a specific example. (default: :obj:`None`)\n ratio (float or Tensor, optional): Sampling ratio.\n (default: :obj:`0.5`)\n random_start (bool, optional): If set to :obj:`False`, use the first\n node in :math:`\\mathbf{X}` as starting node. (default: obj:`True`)\n\n :rtype: :class:`LongTensor`\n\n\n .. code-block:: python\n\n import torch\n from torch_cluster import fps\n\n src = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])\n batch = torch.tensor([0, 0, 0, 0])\n index = fps(src, batch, ratio=0.5)\n ' r: Optional[Tensor] = None if (ratio is None): r = torch.tensor(0.5, dtype=src.dtype, device=src.device) elif isinstance(ratio, float): r = torch.tensor(ratio, dtype=src.dtype, device=src.device) else: r = ratio assert (r is not None) if (batch is not None): assert (src.size(0) == batch.numel()) batch_size = (int(batch.max()) + 1) deg = src.new_zeros(batch_size, dtype=torch.long) deg.scatter_add_(0, batch, torch.ones_like(batch)) ptr = deg.new_zeros((batch_size + 1)) torch.cumsum(deg, 0, out=ptr[1:]) else: ptr = torch.tensor([0, src.size(0)], device=src.device) return torch.ops.torch_cluster.fps(src, ptr, r, random_start)
-3,016,408,835,132,523,000
"A sampling algorithm from the `"PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space" <https://arxiv.org/abs/1706.02413>`_ paper, which iteratively samples the most distant point with regard to the rest points. Args: src (Tensor): Point feature matrix :math:`\mathbf{X} \in \mathbb{R}^{N \times F}`. batch (LongTensor, optional): Batch vector :math:`\mathbf{b} \in {\{ 0, \ldots, B-1\}}^N`, which assigns each node to a specific example. (default: :obj:`None`) ratio (float or Tensor, optional): Sampling ratio. (default: :obj:`0.5`) random_start (bool, optional): If set to :obj:`False`, use the first node in :math:`\mathbf{X}` as starting node. (default: obj:`True`) :rtype: :class:`LongTensor` .. code-block:: python import torch from torch_cluster import fps src = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]]) batch = torch.tensor([0, 0, 0, 0]) index = fps(src, batch, ratio=0.5)
torch_cluster/fps.py
fps
Hacky-DH/pytorch_cluster
python
def fps(src: torch.Tensor, batch=None, ratio=None, random_start=True): '"A sampling algorithm from the `"PointNet++: Deep Hierarchical Feature\n Learning on Point Sets in a Metric Space"\n <https://arxiv.org/abs/1706.02413>`_ paper, which iteratively samples the\n most distant point with regard to the rest points.\n\n Args:\n src (Tensor): Point feature matrix\n :math:`\\mathbf{X} \\in \\mathbb{R}^{N \\times F}`.\n batch (LongTensor, optional): Batch vector\n :math:`\\mathbf{b} \\in {\\{ 0, \\ldots, B-1\\}}^N`, which assigns each\n node to a specific example. (default: :obj:`None`)\n ratio (float or Tensor, optional): Sampling ratio.\n (default: :obj:`0.5`)\n random_start (bool, optional): If set to :obj:`False`, use the first\n node in :math:`\\mathbf{X}` as starting node. (default: obj:`True`)\n\n :rtype: :class:`LongTensor`\n\n\n .. code-block:: python\n\n import torch\n from torch_cluster import fps\n\n src = torch.Tensor([[-1, -1], [-1, 1], [1, -1], [1, 1]])\n batch = torch.tensor([0, 0, 0, 0])\n index = fps(src, batch, ratio=0.5)\n ' r: Optional[Tensor] = None if (ratio is None): r = torch.tensor(0.5, dtype=src.dtype, device=src.device) elif isinstance(ratio, float): r = torch.tensor(ratio, dtype=src.dtype, device=src.device) else: r = ratio assert (r is not None) if (batch is not None): assert (src.size(0) == batch.numel()) batch_size = (int(batch.max()) + 1) deg = src.new_zeros(batch_size, dtype=torch.long) deg.scatter_add_(0, batch, torch.ones_like(batch)) ptr = deg.new_zeros((batch_size + 1)) torch.cumsum(deg, 0, out=ptr[1:]) else: ptr = torch.tensor([0, src.size(0)], device=src.device) return torch.ops.torch_cluster.fps(src, ptr, r, random_start)
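A batched variant of the docstring's example: two point clouds concatenated in src and told apart by the batch vector, with half of each cloud sampled:

```python
import torch
from torch_cluster import fps

src = torch.tensor([[-1., -1.], [-1., 1.], [1., -1.], [1., 1.],
                    [0., 0.], [2., 2.], [4., 4.], [6., 6.]])
batch = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1])
index = fps(src, batch, ratio=0.5, random_start=False)  # 2 points per cloud
```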
def request_endpoint(audio, speech_config, output_directory, lexical): 'Request the speech service endpoint\n Args:\n audio: Input data frame\n speech_config: Choice between scoring and \n output_folder: LUIS app ID\n case: LUIS subscription key\n lexical: Minimum confidence score for LUIS result, between 0.00 and 1.00\n Returns:\n df: Scoring data frame with predicted intents and scores\n Raises:\n ConnectionError: If file is not found\n ' audio_config = speechsdk.audio.AudioConfig(filename=audio) speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config) result = speech_recognizer.recognize_once() filename = audio[(audio.rindex('\\') + 1):] text = process_recognition(result, filename, output_directory, lexical) return (text, filename)
6,316,814,903,347,039,000
Request the speech service endpoint Args: audio: Path to the input audio file speech_config: Speech SDK configuration object output_directory: Output directory for the transcription file lexical: Boolean to enable the extended lexical version of the STT result Returns: (text, filename): Recognized text and the input file name Raises: ConnectionError: If file is not found
src/stt.py
request_endpoint
microsoft/SpeechServices
python
def request_endpoint(audio, speech_config, output_directory, lexical): 'Request the speech service endpoint\n Args:\n audio: Input data frame\n speech_config: Choice between scoring and \n output_folder: LUIS app ID\n case: LUIS subscription key\n lexical: Minimum confidence score for LUIS result, between 0.00 and 1.00\n Returns:\n df: Scoring data frame with predicted intents and scores\n Raises:\n ConnectionError: If file is not found\n ' audio_config = speechsdk.audio.AudioConfig(filename=audio) speech_recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config, audio_config=audio_config) result = speech_recognizer.recognize_once() filename = audio[(audio.rindex('\\') + 1):] text = process_recognition(result, filename, output_directory, lexical) return (text, filename)
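A minimal standalone sketch of the Speech SDK calls wrapped by request_endpoint; the subscription key, region and file path are placeholders:

```python
import azure.cognitiveservices.speech as speechsdk

speech_config = speechsdk.SpeechConfig(subscription="KEY", region="westeurope")
audio_config = speechsdk.audio.AudioConfig(filename="sample.wav")
recognizer = speechsdk.SpeechRecognizer(speech_config=speech_config,
                                        audio_config=audio_config)
result = recognizer.recognize_once()
if result.reason == speechsdk.ResultReason.RecognizedSpeech:
    print(result.text)
```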
def process_recognition(result, filename, output_directory, lexical): 'Process recognition received from the speech service\n Args:\n result: Result object returned by STT-service\n filename: Filename for output file\n output_directory: Output directory for the file\n lexical: Boolean to enable extended lexical version of STT-result\n Returns:\n text: Processed recognition as string\n ' if (result.reason == speechsdk.ResultReason.RecognizedSpeech): if lexical: text = f"{format(result.text)} {json.loads(result.json)['NBest'][0]['Lexical']}" else: text = f'{format(result.text)}' logging.info(f'[INFO] - Recognition successful: {filename} -> {result.text}') elif (result.reason == speechsdk.ResultReason.NoMatch): logging.warning(((filename + '\t') + f'No speech could be recognized: {result.no_match_details}')) text = '' elif (result.reason == speechsdk.ResultReason.Canceled): cancellation_details = result.cancellation_details logging.error(((filename + '\t') + f'Speech Recognition canceled: {cancellation_details.reason}')) if (cancellation_details.reason == speechsdk.CancellationReason.Error): logging.error(f'Error details: {cancellation_details.error_details}') text = '' return text
7,902,317,877,991,636,000
Process recognition received from the speech service Args: result: Result object returned by STT-service filename: Filename for output file output_directory: Output directory for the file lexical: Boolean to enable extended lexical version of STT-result Returns: text: Processed recognition as string
src/stt.py
process_recognition
microsoft/SpeechServices
python
def process_recognition(result, filename, output_directory, lexical): 'Process recognition received from the speech service\n Args:\n result: Result object returned by STT-service\n filename: Filename for output file\n output_directory: Output directory for the file\n lexical: Boolean to enable extended lexical version of STT-result\n Returns:\n text: Processed recognition as string\n ' if (result.reason == speechsdk.ResultReason.RecognizedSpeech): if lexical: text = f"{format(result.text)} {json.loads(result.json)['NBest'][0]['Lexical']}" else: text = f'{format(result.text)}' logging.info(f'[INFO] - Recognition successful: {filename} -> {result.text}') elif (result.reason == speechsdk.ResultReason.NoMatch): logging.warning(((filename + '\t') + f'No speech could be recognized: {result.no_match_details}')) text = '' elif (result.reason == speechsdk.ResultReason.Canceled): cancellation_details = result.cancellation_details logging.error(((filename + '\t') + f'Speech Recognition canceled: {cancellation_details.reason}')) if (cancellation_details.reason == speechsdk.CancellationReason.Error): logging.error(f'Error details: {cancellation_details.error_details}') text = '' return text
def write_transcription(output_directory, text): 'Write transcription to file\n Args:\n text: Processed recognition as string\n output_directory: Output directory for the file\n Returns:\n Writes output to file\n ' if (not os.path.exists(f'{output_directory}/transcriptions.txt')): transfile = codecs.open(f'{output_directory}/transcriptions.txt', 'w', encoding='utf-8-sig') transfile.close() logging.warning(f'[INFO] - Created transcript file with utf-8 bom encoding.') with open(f'{output_directory}/transcriptions.txt', 'a', encoding='utf-8-sig') as transfile: transfile.write(f'''{text} ''') transfile.close()
5,400,919,661,310,008,000
Write transcription to file Args: text: Processed recognition as string output_directory: Output directory for the file Returns: Writes output to file
src/stt.py
write_transcription
microsoft/SpeechServices
python
def write_transcription(output_directory, text): 'Write transcription to file\n Args:\n text: Processed recognition as string\n output_directory: Output directory for the file\n Returns:\n Writes output to file\n ' if (not os.path.exists(f'{output_directory}/transcriptions.txt')): transfile = codecs.open(f'{output_directory}/transcriptions.txt', 'w', encoding='utf-8-sig') transfile.close() logging.warning(f'[INFO] - Created transcript file with utf-8 bom encoding.') with open(f'{output_directory}/transcriptions.txt', 'a', encoding='utf-8-sig') as transfile: transfile.write(f'{text} ') transfile.close()
def main(speech_files, output_directory, lexical=False, enable_proxy=False, *argv): 'Main function for STT-functionality\n Args:\n speech_files: Directory of audio files to be transcribed\n output_directory: Output directory for the file\n lexical: Boolean to enable extended lexical version of STT-result\n enable_proxy: Boolean to enable proxy function in case you need it\n *argv: Proxy information if enable_proxy is True -> hostname: str, port: str, username: str, password: str\n Returns:\n zip(filenames, results): Zipped lists of filenames and STT-results as string\n ' try: speech_config = speechsdk.SpeechConfig(subscription=pa.config_data['stt_key'], region=pa.config_data['stt_region']) except RuntimeError: logging.error('[ERROR] - Could not retrieve speech config') if enable_proxy: speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3]) speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter) if (pa.config_data['stt_endpoint'] != ''): speech_config.endpoint_id = pa.config_data['stt_endpoint'] logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} audio files') results = [] filenames = [] for audio in glob.iglob(f'{speech_files}*av'): (result, filename) = request_endpoint(audio, speech_config, output_directory, lexical) results.append(result) filenames.append(filename) return zip(filenames, results)
4,704,541,752,918,082,000
Main function for STT-functionality Args: speech_files: Directory of audio files to be transcribed output_directory: Output directory for the file lexical: Boolean to enable extended lexical version of STT-result enable_proxy: Boolean to enable proxy function in case you need it *argv: Proxy information if enable_proxy is True -> hostname: str, port: str, username: str, password: str Returns: zip(filenames, results): Zipped lists of filenames and STT-results as string
src/stt.py
main
microsoft/SpeechServices
python
def main(speech_files, output_directory, lexical=False, enable_proxy=False, *argv): 'Main function for STT-functionality\n Args:\n speech_files: Directory of audio files to be transcribed\n output_directory: Output directory for the file\n lexical: Boolean to enable extended lexical version of STT-result\n enable_proxy: Boolean to enable proxy function in case you need it\n *argv: Proxy information if enable_proxy is True -> hostname: str, port: str, username: str, password: str\n Returns:\n zip(filenames, results): Zipped lists of filenames and STT-results as string\n ' try: speech_config = speechsdk.SpeechConfig(subscription=pa.config_data['stt_key'], region=pa.config_data['stt_region']) except RuntimeError: logging.error('[ERROR] - Could not retrieve speech config') if enable_proxy: speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3]) speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter) if (pa.config_data['stt_endpoint'] != ''): speech_config.endpoint_id = pa.config_data['stt_endpoint'] logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} audio files') results = [] filenames = [] for audio in glob.iglob(f'{speech_files}*av'): (result, filename) = request_endpoint(audio, speech_config, output_directory, lexical) results.append(result) filenames.append(filename) return zip(filenames, results)
def get_params() -> AttributeDict: 'Return a dict containing training parameters.\n\n All training related parameters that are not passed from the commandline\n is saved in the variable `params`.\n\n Commandline options are merged into `params` after they are parsed, so\n you can also access them via `params`.\n\n Explanation of options saved in `params`:\n\n - exp_dir: It specifies the directory where all training related\n files, e.g., checkpoints, log, etc, are saved\n\n - lang_dir: It contains language related input files such as\n "lexicon.txt"\n\n - lr: It specifies the initial learning rate\n\n - feature_dim: The model input dim. It has to match the one used\n in computing features.\n\n - weight_decay: The weight_decay for the optimizer.\n\n - subsampling_factor: The subsampling factor for the model.\n\n - best_train_loss: Best training loss so far. It is used to select\n the model that has the lowest training loss. It is\n updated during the training.\n\n - best_valid_loss: Best validation loss so far. It is used to select\n the model that has the lowest validation loss. It is\n updated during the training.\n\n - best_train_epoch: It is the epoch that has the best training loss.\n\n - best_valid_epoch: It is the epoch that has the best validation loss.\n\n - batch_idx_train: Used to writing statistics to tensorboard. It\n contains number of batches trained so far across\n epochs.\n\n - log_interval: Print training loss if batch_idx % log_interval` is 0\n\n - reset_interval: Reset statistics if batch_idx % reset_interval is 0\n\n - valid_interval: Run validation if batch_idx % valid_interval` is 0\n\n - beam_size: It is used in k2.ctc_loss\n\n - reduction: It is used in k2.ctc_loss\n\n - use_double_scores: It is used in k2.ctc_loss\n ' params = AttributeDict({'exp_dir': Path('tdnn_lstm_ctc/exp'), 'lang_dir': Path('data/lang_phone'), 'lr': 0.001, 'feature_dim': 80, 'weight_decay': 0.0005, 'subsampling_factor': 3, 'best_train_loss': float('inf'), 'best_valid_loss': float('inf'), 'best_train_epoch': (- 1), 'best_valid_epoch': (- 1), 'batch_idx_train': 0, 'log_interval': 10, 'reset_interval': 200, 'valid_interval': 1000, 'beam_size': 10, 'reduction': 'sum', 'use_double_scores': True, 'env_info': get_env_info()}) return params
-8,064,291,771,705,554,000
Return a dict containing training parameters. All training related parameters that are not passed from the commandline are saved in the variable `params`. Commandline options are merged into `params` after they are parsed, so you can also access them via `params`. Explanation of options saved in `params`: - exp_dir: It specifies the directory where all training related files, e.g., checkpoints, log, etc., are saved - lang_dir: It contains language related input files such as "lexicon.txt" - lr: It specifies the initial learning rate - feature_dim: The model input dim. It has to match the one used in computing features. - weight_decay: The weight_decay for the optimizer. - subsampling_factor: The subsampling factor for the model. - best_train_loss: Best training loss so far. It is used to select the model that has the lowest training loss. It is updated during the training. - best_valid_loss: Best validation loss so far. It is used to select the model that has the lowest validation loss. It is updated during the training. - best_train_epoch: It is the epoch that has the best training loss. - best_valid_epoch: It is the epoch that has the best validation loss. - batch_idx_train: Used for writing statistics to tensorboard. It contains the number of batches trained so far across epochs. - log_interval: Print training loss if `batch_idx % log_interval` is 0 - reset_interval: Reset statistics if `batch_idx % reset_interval` is 0 - valid_interval: Run validation if `batch_idx % valid_interval` is 0 - beam_size: It is used in k2.ctc_loss - reduction: It is used in k2.ctc_loss - use_double_scores: It is used in k2.ctc_loss
egs/librispeech/ASR/tdnn_lstm_ctc/train.py
get_params
aarora8/icefall
python
def get_params() -> AttributeDict: 'Return a dict containing training parameters.\n\n All training related parameters that are not passed from the commandline\n is saved in the variable `params`.\n\n Commandline options are merged into `params` after they are parsed, so\n you can also access them via `params`.\n\n Explanation of options saved in `params`:\n\n - exp_dir: It specifies the directory where all training related\n files, e.g., checkpoints, log, etc, are saved\n\n - lang_dir: It contains language related input files such as\n "lexicon.txt"\n\n - lr: It specifies the initial learning rate\n\n - feature_dim: The model input dim. It has to match the one used\n in computing features.\n\n - weight_decay: The weight_decay for the optimizer.\n\n - subsampling_factor: The subsampling factor for the model.\n\n - best_train_loss: Best training loss so far. It is used to select\n the model that has the lowest training loss. It is\n updated during the training.\n\n - best_valid_loss: Best validation loss so far. It is used to select\n the model that has the lowest validation loss. It is\n updated during the training.\n\n - best_train_epoch: It is the epoch that has the best training loss.\n\n - best_valid_epoch: It is the epoch that has the best validation loss.\n\n - batch_idx_train: Used to writing statistics to tensorboard. It\n contains number of batches trained so far across\n epochs.\n\n - log_interval: Print training loss if batch_idx % log_interval` is 0\n\n - reset_interval: Reset statistics if batch_idx % reset_interval is 0\n\n - valid_interval: Run validation if batch_idx % valid_interval` is 0\n\n - beam_size: It is used in k2.ctc_loss\n\n - reduction: It is used in k2.ctc_loss\n\n - use_double_scores: It is used in k2.ctc_loss\n ' params = AttributeDict({'exp_dir': Path('tdnn_lstm_ctc/exp'), 'lang_dir': Path('data/lang_phone'), 'lr': 0.001, 'feature_dim': 80, 'weight_decay': 0.0005, 'subsampling_factor': 3, 'best_train_loss': float('inf'), 'best_valid_loss': float('inf'), 'best_train_epoch': (- 1), 'best_valid_epoch': (- 1), 'batch_idx_train': 0, 'log_interval': 10, 'reset_interval': 200, 'valid_interval': 1000, 'beam_size': 10, 'reduction': 'sum', 'use_double_scores': True, 'env_info': get_env_info()}) return params
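A short sketch of how get_params() is meant to be combined with parsed command-line arguments, mirroring what run() below does (args is assumed to come from this training script's own argument parser):

params = get_params()
params.update(vars(args))  # command-line options are merged into params

# AttributeDict supports both attribute-style and key-style access:
print(params.lr, params['feature_dim'])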
def load_checkpoint_if_available(params: AttributeDict, model: nn.Module, optimizer: Optional[torch.optim.Optimizer]=None, scheduler: Optional[torch.optim.lr_scheduler._LRScheduler]=None) -> None: 'Load checkpoint from file.\n\n If params.start_epoch is positive, it will load the checkpoint from\n `params.start_epoch - 1`. Otherwise, this function does nothing.\n\n Apart from loading state dict for `model`, `optimizer` and `scheduler`,\n it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`,\n and `best_valid_loss` in `params`.\n\n Args:\n params:\n The return value of :func:`get_params`.\n model:\n The training model.\n optimizer:\n The optimizer that we are using.\n scheduler:\n The learning rate scheduler we are using.\n Returns:\n Return None.\n ' if (params.start_epoch <= 0): return filename = (params.exp_dir / f'epoch-{(params.start_epoch - 1)}.pt') saved_params = load_checkpoint(filename, model=model, optimizer=optimizer, scheduler=scheduler) keys = ['best_train_epoch', 'best_valid_epoch', 'batch_idx_train', 'best_train_loss', 'best_valid_loss'] for k in keys: params[k] = saved_params[k] return saved_params
-8,657,515,020,424,165,000
Load checkpoint from file. If params.start_epoch is positive, it will load the checkpoint from `params.start_epoch - 1`. Otherwise, this function does nothing. Apart from loading state dict for `model`, `optimizer` and `scheduler`, it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`, and `best_valid_loss` in `params`. Args: params: The return value of :func:`get_params`. model: The training model. optimizer: The optimizer that we are using. scheduler: The learning rate scheduler we are using. Returns: Return None.
egs/librispeech/ASR/tdnn_lstm_ctc/train.py
load_checkpoint_if_available
aarora8/icefall
python
def load_checkpoint_if_available(params: AttributeDict, model: nn.Module, optimizer: Optional[torch.optim.Optimizer]=None, scheduler: Optional[torch.optim.lr_scheduler._LRScheduler]=None) -> None: 'Load checkpoint from file.\n\n If params.start_epoch is positive, it will load the checkpoint from\n `params.start_epoch - 1`. Otherwise, this function does nothing.\n\n Apart from loading state dict for `model`, `optimizer` and `scheduler`,\n it also updates `best_train_epoch`, `best_train_loss`, `best_valid_epoch`,\n and `best_valid_loss` in `params`.\n\n Args:\n params:\n The return value of :func:`get_params`.\n model:\n The training model.\n optimizer:\n The optimizer that we are using.\n scheduler:\n The learning rate scheduler we are using.\n Returns:\n Return None.\n ' if (params.start_epoch <= 0): return filename = (params.exp_dir / f'epoch-{(params.start_epoch - 1)}.pt') saved_params = load_checkpoint(filename, model=model, optimizer=optimizer, scheduler=scheduler) keys = ['best_train_epoch', 'best_valid_epoch', 'batch_idx_train', 'best_train_loss', 'best_valid_loss'] for k in keys: params[k] = saved_params[k] return saved_params
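A resume sketch under the assumptions stated in the docstring: with start_epoch > 0 the helper loads epoch-{start_epoch - 1}.pt, and run() below feeds the returned state dicts to the optimizer and scheduler:

params.start_epoch = 3  # hypothetical: resume from epoch-2.pt
checkpoints = load_checkpoint_if_available(params=params, model=model)
if checkpoints:
    optimizer.load_state_dict(checkpoints['optimizer'])
    scheduler.load_state_dict(checkpoints['scheduler'])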
def save_checkpoint(params: AttributeDict, model: nn.Module, optimizer: torch.optim.Optimizer, scheduler: torch.optim.lr_scheduler._LRScheduler, rank: int=0) -> None: 'Save model, optimizer, scheduler and training stats to file.\n\n Args:\n params:\n It is returned by :func:`get_params`.\n model:\n The training model.\n ' if (rank != 0): return filename = (params.exp_dir / f'epoch-{params.cur_epoch}.pt') save_checkpoint_impl(filename=filename, model=model, params=params, optimizer=optimizer, scheduler=scheduler, rank=rank) if (params.best_train_epoch == params.cur_epoch): best_train_filename = (params.exp_dir / 'best-train-loss.pt') copyfile(src=filename, dst=best_train_filename) if (params.best_valid_epoch == params.cur_epoch): best_valid_filename = (params.exp_dir / 'best-valid-loss.pt') copyfile(src=filename, dst=best_valid_filename)
4,040,369,752,138,356,000
Save model, optimizer, scheduler and training stats to file. Args: params: It is returned by :func:`get_params`. model: The training model.
egs/librispeech/ASR/tdnn_lstm_ctc/train.py
save_checkpoint
aarora8/icefall
python
def save_checkpoint(params: AttributeDict, model: nn.Module, optimizer: torch.optim.Optimizer, scheduler: torch.optim.lr_scheduler._LRScheduler, rank: int=0) -> None: 'Save model, optimizer, scheduler and training stats to file.\n\n Args:\n params:\n It is returned by :func:`get_params`.\n model:\n The training model.\n ' if (rank != 0): return filename = (params.exp_dir / f'epoch-{params.cur_epoch}.pt') save_checkpoint_impl(filename=filename, model=model, params=params, optimizer=optimizer, scheduler=scheduler, rank=rank) if (params.best_train_epoch == params.cur_epoch): best_train_filename = (params.exp_dir / 'best-train-loss.pt') copyfile(src=filename, dst=best_train_filename) if (params.best_valid_epoch == params.cur_epoch): best_valid_filename = (params.exp_dir / 'best-valid-loss.pt') copyfile(src=filename, dst=best_valid_filename)
def compute_loss(params: AttributeDict, model: nn.Module, batch: dict, graph_compiler: CtcTrainingGraphCompiler, is_training: bool) -> Tuple[(Tensor, MetricsTracker)]: '\n Compute CTC loss given the model and its inputs.\n\n Args:\n params:\n Parameters for training. See :func:`get_params`.\n model:\n The model for training. It is an instance of TdnnLstm in our case.\n batch:\n A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()`\n for the content in it.\n graph_compiler:\n It is used to build a decoding graph from a ctc topo and training\n transcript. The training transcript is contained in the given `batch`,\n while the ctc topo is built when this compiler is instantiated.\n is_training:\n True for training. False for validation. When it is True, this\n function enables autograd during computation; when it is False, it\n disables autograd.\n ' device = graph_compiler.device feature = batch['inputs'] feature = feature.permute(0, 2, 1) assert (feature.ndim == 3) feature = feature.to(device) with torch.set_grad_enabled(is_training): nnet_output = model(feature) supervisions = batch['supervisions'] (supervision_segments, texts) = encode_supervisions(supervisions, subsampling_factor=params.subsampling_factor) decoding_graph = graph_compiler.compile(texts) dense_fsa_vec = k2.DenseFsaVec(nnet_output, supervision_segments, allow_truncate=(params.subsampling_factor - 1)) loss = k2.ctc_loss(decoding_graph=decoding_graph, dense_fsa_vec=dense_fsa_vec, output_beam=params.beam_size, reduction=params.reduction, use_double_scores=params.use_double_scores) assert (loss.requires_grad == is_training) info = MetricsTracker() info['frames'] = supervision_segments[:, 2].sum().item() info['loss'] = loss.detach().cpu().item() return (loss, info)
-85,694,476,020,040,110
Compute CTC loss given the model and its inputs. Args: params: Parameters for training. See :func:`get_params`. model: The model for training. It is an instance of TdnnLstm in our case. batch: A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()` for the content in it. graph_compiler: It is used to build a decoding graph from a ctc topo and training transcript. The training transcript is contained in the given `batch`, while the ctc topo is built when this compiler is instantiated. is_training: True for training. False for validation. When it is True, this function enables autograd during computation; when it is False, it disables autograd.
egs/librispeech/ASR/tdnn_lstm_ctc/train.py
compute_loss
aarora8/icefall
python
def compute_loss(params: AttributeDict, model: nn.Module, batch: dict, graph_compiler: CtcTrainingGraphCompiler, is_training: bool) -> Tuple[(Tensor, MetricsTracker)]: '\n Compute CTC loss given the model and its inputs.\n\n Args:\n params:\n Parameters for training. See :func:`get_params`.\n model:\n The model for training. It is an instance of TdnnLstm in our case.\n batch:\n A batch of data. See `lhotse.dataset.K2SpeechRecognitionDataset()`\n for the content in it.\n graph_compiler:\n It is used to build a decoding graph from a ctc topo and training\n transcript. The training transcript is contained in the given `batch`,\n while the ctc topo is built when this compiler is instantiated.\n is_training:\n True for training. False for validation. When it is True, this\n function enables autograd during computation; when it is False, it\n disables autograd.\n ' device = graph_compiler.device feature = batch['inputs'] feature = feature.permute(0, 2, 1) assert (feature.ndim == 3) feature = feature.to(device) with torch.set_grad_enabled(is_training): nnet_output = model(feature) supervisions = batch['supervisions'] (supervision_segments, texts) = encode_supervisions(supervisions, subsampling_factor=params.subsampling_factor) decoding_graph = graph_compiler.compile(texts) dense_fsa_vec = k2.DenseFsaVec(nnet_output, supervision_segments, allow_truncate=(params.subsampling_factor - 1)) loss = k2.ctc_loss(decoding_graph=decoding_graph, dense_fsa_vec=dense_fsa_vec, output_beam=params.beam_size, reduction=params.reduction, use_double_scores=params.use_double_scores) assert (loss.requires_grad == is_training) info = MetricsTracker() info['frames'] = supervision_segments[:, 2].sum().item() info['loss'] = loss.detach().cpu().item() return (loss, info)
def compute_validation_loss(params: AttributeDict, model: nn.Module, graph_compiler: CtcTrainingGraphCompiler, valid_dl: torch.utils.data.DataLoader, world_size: int=1) -> MetricsTracker: 'Run the validation process. The validation loss\n is saved in `params.valid_loss`.\n ' model.eval() tot_loss = MetricsTracker() for (batch_idx, batch) in enumerate(valid_dl): (loss, loss_info) = compute_loss(params=params, model=model, batch=batch, graph_compiler=graph_compiler, is_training=False) assert (loss.requires_grad is False) tot_loss = (tot_loss + loss_info) if (world_size > 1): tot_loss.reduce(loss.device) loss_value = (tot_loss['loss'] / tot_loss['frames']) if (loss_value < params.best_valid_loss): params.best_valid_epoch = params.cur_epoch params.best_valid_loss = loss_value return tot_loss
5,921,257,761,175,695,000
Run the validation process. The validation loss is saved in `params.valid_loss`.
egs/librispeech/ASR/tdnn_lstm_ctc/train.py
compute_validation_loss
aarora8/icefall
python
def compute_validation_loss(params: AttributeDict, model: nn.Module, graph_compiler: CtcTrainingGraphCompiler, valid_dl: torch.utils.data.DataLoader, world_size: int=1) -> MetricsTracker: 'Run the validation process. The validation loss\n is saved in `params.valid_loss`.\n ' model.eval() tot_loss = MetricsTracker() for (batch_idx, batch) in enumerate(valid_dl): (loss, loss_info) = compute_loss(params=params, model=model, batch=batch, graph_compiler=graph_compiler, is_training=False) assert (loss.requires_grad is False) tot_loss = (tot_loss + loss_info) if (world_size > 1): tot_loss.reduce(loss.device) loss_value = (tot_loss['loss'] / tot_loss['frames']) if (loss_value < params.best_valid_loss): params.best_valid_epoch = params.cur_epoch params.best_valid_loss = loss_value return tot_loss
def train_one_epoch(params: AttributeDict, model: nn.Module, optimizer: torch.optim.Optimizer, graph_compiler: CtcTrainingGraphCompiler, train_dl: torch.utils.data.DataLoader, valid_dl: torch.utils.data.DataLoader, tb_writer: Optional[SummaryWriter]=None, world_size: int=1) -> None: 'Train the model for one epoch.\n\n The training loss from the mean of all frames is saved in\n `params.train_loss`. It runs the validation process every\n `params.valid_interval` batches.\n\n Args:\n params:\n It is returned by :func:`get_params`.\n model:\n The model for training.\n optimizer:\n The optimizer we are using.\n graph_compiler:\n It is used to convert transcripts to FSAs.\n train_dl:\n Dataloader for the training dataset.\n valid_dl:\n Dataloader for the validation dataset.\n tb_writer:\n Writer to write log messages to tensorboard.\n world_size:\n Number of nodes in DDP training. If it is 1, DDP is disabled.\n ' model.train() tot_loss = MetricsTracker() for (batch_idx, batch) in enumerate(train_dl): params.batch_idx_train += 1 batch_size = len(batch['supervisions']['text']) (loss, loss_info) = compute_loss(params=params, model=model, batch=batch, graph_compiler=graph_compiler, is_training=True) tot_loss = ((tot_loss * (1 - (1 / params.reset_interval))) + loss_info) optimizer.zero_grad() loss.backward() clip_grad_norm_(model.parameters(), 5.0, 2.0) optimizer.step() if ((batch_idx % params.log_interval) == 0): logging.info(f'Epoch {params.cur_epoch}, batch {batch_idx}, loss[{loss_info}], tot_loss[{tot_loss}], batch size: {batch_size}') if ((batch_idx % params.log_interval) == 0): if (tb_writer is not None): loss_info.write_summary(tb_writer, 'train/current_', params.batch_idx_train) tot_loss.write_summary(tb_writer, 'train/tot_', params.batch_idx_train) if ((batch_idx > 0) and ((batch_idx % params.valid_interval) == 0)): valid_info = compute_validation_loss(params=params, model=model, graph_compiler=graph_compiler, valid_dl=valid_dl, world_size=world_size) model.train() logging.info(f'Epoch {params.cur_epoch}, validation {valid_info}') if (tb_writer is not None): valid_info.write_summary(tb_writer, 'train/valid_', params.batch_idx_train) loss_value = (tot_loss['loss'] / tot_loss['frames']) params.train_loss = loss_value if (params.train_loss < params.best_train_loss): params.best_train_epoch = params.cur_epoch params.best_train_loss = params.train_loss
6,525,583,933,407,344,000
Train the model for one epoch. The training loss from the mean of all frames is saved in `params.train_loss`. It runs the validation process every `params.valid_interval` batches. Args: params: It is returned by :func:`get_params`. model: The model for training. optimizer: The optimizer we are using. graph_compiler: It is used to convert transcripts to FSAs. train_dl: Dataloader for the training dataset. valid_dl: Dataloader for the validation dataset. tb_writer: Writer to write log messages to tensorboard. world_size: Number of nodes in DDP training. If it is 1, DDP is disabled.
egs/librispeech/ASR/tdnn_lstm_ctc/train.py
train_one_epoch
aarora8/icefall
python
def train_one_epoch(params: AttributeDict, model: nn.Module, optimizer: torch.optim.Optimizer, graph_compiler: CtcTrainingGraphCompiler, train_dl: torch.utils.data.DataLoader, valid_dl: torch.utils.data.DataLoader, tb_writer: Optional[SummaryWriter]=None, world_size: int=1) -> None: 'Train the model for one epoch.\n\n The training loss from the mean of all frames is saved in\n `params.train_loss`. It runs the validation process every\n `params.valid_interval` batches.\n\n Args:\n params:\n It is returned by :func:`get_params`.\n model:\n The model for training.\n optimizer:\n The optimizer we are using.\n graph_compiler:\n It is used to convert transcripts to FSAs.\n train_dl:\n Dataloader for the training dataset.\n valid_dl:\n Dataloader for the validation dataset.\n tb_writer:\n Writer to write log messages to tensorboard.\n world_size:\n Number of nodes in DDP training. If it is 1, DDP is disabled.\n ' model.train() tot_loss = MetricsTracker() for (batch_idx, batch) in enumerate(train_dl): params.batch_idx_train += 1 batch_size = len(batch['supervisions']['text']) (loss, loss_info) = compute_loss(params=params, model=model, batch=batch, graph_compiler=graph_compiler, is_training=True) tot_loss = ((tot_loss * (1 - (1 / params.reset_interval))) + loss_info) optimizer.zero_grad() loss.backward() clip_grad_norm_(model.parameters(), 5.0, 2.0) optimizer.step() if ((batch_idx % params.log_interval) == 0): logging.info(f'Epoch {params.cur_epoch}, batch {batch_idx}, loss[{loss_info}], tot_loss[{tot_loss}], batch size: {batch_size}') if ((batch_idx % params.log_interval) == 0): if (tb_writer is not None): loss_info.write_summary(tb_writer, 'train/current_', params.batch_idx_train) tot_loss.write_summary(tb_writer, 'train/tot_', params.batch_idx_train) if ((batch_idx > 0) and ((batch_idx % params.valid_interval) == 0)): valid_info = compute_validation_loss(params=params, model=model, graph_compiler=graph_compiler, valid_dl=valid_dl, world_size=world_size) model.train() logging.info(f'Epoch {params.cur_epoch}, validation {valid_info}') if (tb_writer is not None): valid_info.write_summary(tb_writer, 'train/valid_', params.batch_idx_train) loss_value = (tot_loss['loss'] / tot_loss['frames']) params.train_loss = loss_value if (params.train_loss < params.best_train_loss): params.best_train_epoch = params.cur_epoch params.best_train_loss = params.train_loss
def run(rank, world_size, args): '\n Args:\n rank:\n It is a value between 0 and `world_size-1`, which is\n passed automatically by `mp.spawn()` in :func:`main`.\n The node with rank 0 is responsible for saving checkpoint.\n world_size:\n Number of GPUs for DDP training.\n args:\n The return value of get_parser().parse_args()\n ' params = get_params() params.update(vars(args)) fix_random_seed(42) if (world_size > 1): setup_dist(rank, world_size, params.master_port) setup_logger(f'{params.exp_dir}/log/log-train') logging.info('Training started') logging.info(params) if (args.tensorboard and (rank == 0)): tb_writer = SummaryWriter(log_dir=f'{params.exp_dir}/tensorboard') else: tb_writer = None lexicon = Lexicon(params.lang_dir) max_phone_id = max(lexicon.tokens) device = torch.device('cpu') if torch.cuda.is_available(): device = torch.device('cuda', rank) graph_compiler = CtcTrainingGraphCompiler(lexicon=lexicon, device=device) model = TdnnLstm(num_features=params.feature_dim, num_classes=(max_phone_id + 1), subsampling_factor=params.subsampling_factor) checkpoints = load_checkpoint_if_available(params=params, model=model) model.to(device) if (world_size > 1): model = DDP(model, device_ids=[rank]) optimizer = optim.AdamW(model.parameters(), lr=params.lr, weight_decay=params.weight_decay) scheduler = StepLR(optimizer, step_size=8, gamma=0.1) if checkpoints: optimizer.load_state_dict(checkpoints['optimizer']) scheduler.load_state_dict(checkpoints['scheduler']) librispeech = LibriSpeechAsrDataModule(args) train_dl = librispeech.train_dataloaders() valid_dl = librispeech.valid_dataloaders() for epoch in range(params.start_epoch, params.num_epochs): train_dl.sampler.set_epoch(epoch) if (epoch > params.start_epoch): logging.info(f'epoch {epoch}, lr: {scheduler.get_last_lr()[0]}') if (tb_writer is not None): tb_writer.add_scalar('train/lr', scheduler.get_last_lr()[0], params.batch_idx_train) tb_writer.add_scalar('train/epoch', epoch, params.batch_idx_train) params.cur_epoch = epoch train_one_epoch(params=params, model=model, optimizer=optimizer, graph_compiler=graph_compiler, train_dl=train_dl, valid_dl=valid_dl, tb_writer=tb_writer, world_size=world_size) scheduler.step() save_checkpoint(params=params, model=model, optimizer=optimizer, scheduler=scheduler, rank=rank) logging.info('Done!') if (world_size > 1): torch.distributed.barrier() cleanup_dist()
1,424,617,520,821,463,800
Args: rank: It is a value between 0 and `world_size-1`, which is passed automatically by `mp.spawn()` in :func:`main`. The node with rank 0 is responsible for saving checkpoint. world_size: Number of GPUs for DDP training. args: The return value of get_parser().parse_args()
egs/librispeech/ASR/tdnn_lstm_ctc/train.py
run
aarora8/icefall
python
def run(rank, world_size, args): '\n Args:\n rank:\n It is a value between 0 and `world_size-1`, which is\n passed automatically by `mp.spawn()` in :func:`main`.\n The node with rank 0 is responsible for saving checkpoint.\n world_size:\n Number of GPUs for DDP training.\n args:\n The return value of get_parser().parse_args()\n ' params = get_params() params.update(vars(args)) fix_random_seed(42) if (world_size > 1): setup_dist(rank, world_size, params.master_port) setup_logger(f'{params.exp_dir}/log/log-train') logging.info('Training started') logging.info(params) if (args.tensorboard and (rank == 0)): tb_writer = SummaryWriter(log_dir=f'{params.exp_dir}/tensorboard') else: tb_writer = None lexicon = Lexicon(params.lang_dir) max_phone_id = max(lexicon.tokens) device = torch.device('cpu') if torch.cuda.is_available(): device = torch.device('cuda', rank) graph_compiler = CtcTrainingGraphCompiler(lexicon=lexicon, device=device) model = TdnnLstm(num_features=params.feature_dim, num_classes=(max_phone_id + 1), subsampling_factor=params.subsampling_factor) checkpoints = load_checkpoint_if_available(params=params, model=model) model.to(device) if (world_size > 1): model = DDP(model, device_ids=[rank]) optimizer = optim.AdamW(model.parameters(), lr=params.lr, weight_decay=params.weight_decay) scheduler = StepLR(optimizer, step_size=8, gamma=0.1) if checkpoints: optimizer.load_state_dict(checkpoints['optimizer']) scheduler.load_state_dict(checkpoints['scheduler']) librispeech = LibriSpeechAsrDataModule(args) train_dl = librispeech.train_dataloaders() valid_dl = librispeech.valid_dataloaders() for epoch in range(params.start_epoch, params.num_epochs): train_dl.sampler.set_epoch(epoch) if (epoch > params.start_epoch): logging.info(f'epoch {epoch}, lr: {scheduler.get_last_lr()[0]}') if (tb_writer is not None): tb_writer.add_scalar('train/lr', scheduler.get_last_lr()[0], params.batch_idx_train) tb_writer.add_scalar('train/epoch', epoch, params.batch_idx_train) params.cur_epoch = epoch train_one_epoch(params=params, model=model, optimizer=optimizer, graph_compiler=graph_compiler, train_dl=train_dl, valid_dl=valid_dl, tb_writer=tb_writer, world_size=world_size) scheduler.step() save_checkpoint(params=params, model=model, optimizer=optimizer, scheduler=scheduler, rank=rank) logging.info('Done!') if (world_size > 1): torch.distributed.barrier() cleanup_dist()
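A hedged sketch of the launcher implied by the run() docstring: the rank argument is supplied by torch.multiprocessing.spawn, which calls run(rank, world_size, args) once per GPU:

import torch.multiprocessing as mp

world_size = 2  # hypothetical number of GPUs
if world_size > 1:
    mp.spawn(run, args=(world_size, args), nprocs=world_size, join=True)
else:
    run(rank=0, world_size=1, args=args)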
def __init__(self): '\n The schema generator generates a GraphQL schema.\n The purpose is to provide a schema to which resolvers are then\n attached, which is then given to Ariadne, and for resolvers to\n have information about expected types.\n\n For RPSL queries and types, this is dynamically generated based on\n the RPSL objects from irrd.rpsl. Other parts are fixed.\n This means that the schema is always the same for a given IRRd\n codebase - there are no runtime or user configurable parts.\n\n Along with generating the schema, some metadata is saved, e.g.\n self.graphql_types which allows resolvers to learn the GraphQL\n type for a certain field.\n\n This generator also creates Ariadne object types on self, which\n are used to attach resolvers to them.\n ' self._set_rpsl_query_fields() self._set_rpsl_object_interface_schema() self._set_rpsl_contact_schema() self._set_rpsl_object_schemas() self._set_enums() schema = self.enums schema += (('\n scalar ASN\n scalar IP\n\n schema {\n query: Query\n }\n\n type Query {\n rpslObjects(' + self.rpsl_query_fields) + '): [RPSLObject!]\n databaseStatus(sources: [String!]): [DatabaseStatus]\n asnPrefixes(asns: [ASN!]!, ipVersion: Int, sources: [String!]): [ASNPrefixes!]\n asSetPrefixes(setNames: [String!]!, ipVersion: Int, sources: [String!], excludeSets: [String!], sqlTrace: Boolean): [AsSetPrefixes!]\n recursiveSetMembers(setNames: [String!]!, depth: Int, sources: [String!], excludeSets: [String!], sqlTrace: Boolean): [SetMembers!]\n }\n\n type DatabaseStatus {\n source: String!\n authoritative: Boolean!\n objectClassFilter: [String!]\n rpkiRovFilter: Boolean!\n scopefilterEnabled: Boolean!\n localJournalKept: Boolean!\n serialOldestJournal: Int\n serialNewestJournal: Int\n serialLastExport: Int\n serialNewestMirror: Int\n lastUpdate: String\n synchronisedSerials: Boolean!\n }\n\n type RPSLJournalEntry {\n rpslPk: String!\n source: String!\n serialNrtm: Int!\n operation: String!\n origin: String\n objectClass: String!\n objectText: String!\n timestamp: String!\n }\n\n type ASNPrefixes {\n asn: ASN!\n prefixes: [IP!]\n }\n\n type AsSetPrefixes {\n rpslPk: String!\n prefixes: [IP!]\n }\n\n type SetMembers {\n rpslPk: String!\n members: [String!]\n }\n ') schema += self.rpsl_object_interface_schema schema += self.rpsl_contact_schema schema += ''.join(self.rpsl_object_schemas.values()) schema += 'union RPSLContactUnion = RPSLPerson | RPSLRole' self.type_defs = ariadne.gql(schema) self.query_type = ariadne.QueryType() self.rpsl_object_type = ariadne.InterfaceType('RPSLObject') self.rpsl_contact_union_type = ariadne.UnionType('RPSLContactUnion') self.asn_scalar_type = ariadne.ScalarType('ASN') self.ip_scalar_type = ariadne.ScalarType('IP') self.object_types = [self.query_type, self.rpsl_object_type, self.rpsl_contact_union_type, self.asn_scalar_type, self.ip_scalar_type] for name in self.rpsl_object_schemas.keys(): self.object_types.append(ariadne.ObjectType(name)) self.object_types.append(ariadne.ObjectType('ASNPrefixes')) self.object_types.append(ariadne.ObjectType('AsSetPrefixes')) self.object_types.append(ariadne.ObjectType('SetMembers')) self.object_types.append(ariadne.EnumType('RPKIStatus', RPKIStatus)) self.object_types.append(ariadne.EnumType('ScopeFilterStatus', ScopeFilterStatus))
-6,691,687,725,778,370,000
The schema generator generates a GraphQL schema. The purpose is to provide a schema to which resolvers are then attached, which is then given to Ariadne, and for resolvers to have information about expected types. For RPSL queries and types, this is dynamically generated based on the RPSL objects from irrd.rpsl. Other parts are fixed. This means that the schema is always the same for a given IRRd codebase - there are no runtime or user configurable parts. Along with generating the schema, some metadata is saved, e.g. self.graphql_types which allows resolvers to learn the GraphQL type for a certain field. This generator also creates Ariadne object types on self, which are used to attach resolvers to them.
irrd/server/graphql/schema_generator.py
__init__
morrowc/irrd
python
def __init__(self): '\n The schema generator generates a GraphQL schema.\n The purpose is to provide a schema to which resolvers are then\n attached, which is then given to Ariadne, and for resolvers to\n have information about expected types.\n\n For RPSL queries and types, this is dynamically generated based on\n the RPSL objects from irrd.rpsl. Other parts are fixed.\n This means that the schema is always the same for a given IRRd\n codebase - there are no runtime or user configurable parts.\n\n Along with generating the schema, some metadata is saved, e.g.\n self.graphql_types which allows resolvers to learn the GraphQL\n type for a certain field.\n\n This generator also creates Ariadne object types on self, which\n are used to attach resolvers to them.\n ' self._set_rpsl_query_fields() self._set_rpsl_object_interface_schema() self._set_rpsl_contact_schema() self._set_rpsl_object_schemas() self._set_enums() schema = self.enums schema += (('\n scalar ASN\n scalar IP\n\n schema {\n query: Query\n }\n\n type Query {\n rpslObjects(' + self.rpsl_query_fields) + '): [RPSLObject!]\n databaseStatus(sources: [String!]): [DatabaseStatus]\n asnPrefixes(asns: [ASN!]!, ipVersion: Int, sources: [String!]): [ASNPrefixes!]\n asSetPrefixes(setNames: [String!]!, ipVersion: Int, sources: [String!], excludeSets: [String!], sqlTrace: Boolean): [AsSetPrefixes!]\n recursiveSetMembers(setNames: [String!]!, depth: Int, sources: [String!], excludeSets: [String!], sqlTrace: Boolean): [SetMembers!]\n }\n\n type DatabaseStatus {\n source: String!\n authoritative: Boolean!\n objectClassFilter: [String!]\n rpkiRovFilter: Boolean!\n scopefilterEnabled: Boolean!\n localJournalKept: Boolean!\n serialOldestJournal: Int\n serialNewestJournal: Int\n serialLastExport: Int\n serialNewestMirror: Int\n lastUpdate: String\n synchronisedSerials: Boolean!\n }\n\n type RPSLJournalEntry {\n rpslPk: String!\n source: String!\n serialNrtm: Int!\n operation: String!\n origin: String\n objectClass: String!\n objectText: String!\n timestamp: String!\n }\n\n type ASNPrefixes {\n asn: ASN!\n prefixes: [IP!]\n }\n\n type AsSetPrefixes {\n rpslPk: String!\n prefixes: [IP!]\n }\n\n type SetMembers {\n rpslPk: String!\n members: [String!]\n }\n ') schema += self.rpsl_object_interface_schema schema += self.rpsl_contact_schema schema += ''.join(self.rpsl_object_schemas.values()) schema += 'union RPSLContactUnion = RPSLPerson | RPSLRole' self.type_defs = ariadne.gql(schema) self.query_type = ariadne.QueryType() self.rpsl_object_type = ariadne.InterfaceType('RPSLObject') self.rpsl_contact_union_type = ariadne.UnionType('RPSLContactUnion') self.asn_scalar_type = ariadne.ScalarType('ASN') self.ip_scalar_type = ariadne.ScalarType('IP') self.object_types = [self.query_type, self.rpsl_object_type, self.rpsl_contact_union_type, self.asn_scalar_type, self.ip_scalar_type] for name in self.rpsl_object_schemas.keys(): self.object_types.append(ariadne.ObjectType(name)) self.object_types.append(ariadne.ObjectType('ASNPrefixes')) self.object_types.append(ariadne.ObjectType('AsSetPrefixes')) self.object_types.append(ariadne.ObjectType('SetMembers')) self.object_types.append(ariadne.EnumType('RPKIStatus', RPKIStatus)) self.object_types.append(ariadne.EnumType('ScopeFilterStatus', ScopeFilterStatus))
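A hedged sketch of how the pieces built in __init__ feed Ariadne. The enclosing class name (SchemaGenerator) is an assumption here; in practice resolvers would be attached to generator.query_type, generator.rpsl_object_type, etc. before the executable schema is built:

import ariadne

generator = SchemaGenerator()  # class name assumed
schema = ariadne.make_executable_schema(generator.type_defs, *generator.object_types)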
def _set_rpsl_query_fields(self): '\n Create a sub-schema for the fields that can be queried for RPSL objects.\n This includes all fields from all objects, along with a few\n special fields.\n ' string_list_fields = {'rpsl_pk', 'sources', 'object_class'}.union(lookup_field_names()) params = [(snake_to_camel_case(p) + ': [String!]') for p in sorted(string_list_fields)] params += ['ipExact: IP', 'ipLessSpecific: IP', 'ipLessSpecificOneLevel: IP', 'ipMoreSpecific: IP', 'ipAny: IP', 'asn: [ASN!]', 'rpkiStatus: [RPKIStatus!]', 'scopeFilterStatus: [ScopeFilterStatus!]', 'textSearch: String', 'recordLimit: Int', 'sqlTrace: Boolean'] self.rpsl_query_fields = ', '.join(params)
-5,529,400,313,608,977,000
Create a sub-schema for the fields that can be queried for RPSL objects. This includes all fields from all objects, along with a few special fields.
irrd/server/graphql/schema_generator.py
_set_rpsl_query_fields
morrowc/irrd
python
def _set_rpsl_query_fields(self): '\n Create a sub-schema for the fields that can be queried for RPSL objects.\n This includes all fields from all objects, along with a few\n special fields.\n ' string_list_fields = {'rpsl_pk', 'sources', 'object_class'}.union(lookup_field_names()) params = [(snake_to_camel_case(p) + ': [String!]') for p in sorted(string_list_fields)] params += ['ipExact: IP', 'ipLessSpecific: IP', 'ipLessSpecificOneLevel: IP', 'ipMoreSpecific: IP', 'ipAny: IP', 'asn: [ASN!]', 'rpkiStatus: [RPKIStatus!]', 'scopeFilterStatus: [ScopeFilterStatus!]', 'textSearch: String', 'recordLimit: Int', 'sqlTrace: Boolean'] self.rpsl_query_fields = ', '.join(params)
def _set_enums(self): '\n Create the schema for enums, current RPKI and scope filter status.\n ' self.enums = '' for enum in [RPKIStatus, ScopeFilterStatus]: self.enums += f'''enum {enum.__name__} {{ ''' for value in enum: self.enums += f''' {value.name} ''' self.enums += '}\n\n'
3,618,900,208,178,960,000
Create the schema for enums, current RPKI and scope filter status.
irrd/server/graphql/schema_generator.py
_set_enums
morrowc/irrd
python
def _set_enums(self): '\n \n ' self.enums = '' for enum in [RPKIStatus, ScopeFilterStatus]: self.enums += f'enum {enum.__name__} {{ ' for value in enum: self.enums += f' {value.name} ' self.enums += '}\n\n'
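For illustration, the SDL fragment that _set_enums() accumulates has the shape below, shown with a stand-in enum so no real RPKIStatus/ScopeFilterStatus member names are asserted (exact indentation follows the original triple-quoted strings):

import enum

class DemoStatus(enum.Enum):  # hypothetical stand-in enum
    ok = 1
    bad = 2

sdl = ''
for e in [DemoStatus]:
    sdl += f'enum {e.__name__} {{\n'
    for value in e:
        sdl += f'    {value.name}\n'
    sdl += '}\n\n'

print(sdl)
# enum DemoStatus {
#     ok
#     bad
# }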
def _set_rpsl_object_interface_schema(self): '\n Create the schema for RPSLObject, which contains only fields that\n are common to every known RPSL object, along with meta\n ' common_fields = None for rpsl_object_class in OBJECT_CLASS_MAPPING.values(): if (common_fields is None): common_fields = set(rpsl_object_class.fields.keys()) else: common_fields = common_fields.intersection(set(rpsl_object_class.fields.keys())) common_fields = list(common_fields) common_fields = (['rpslPk', 'objectClass', 'objectText', 'updated'] + common_fields) common_field_dict = self._dict_for_common_fields(common_fields) common_field_dict['journal'] = '[RPSLJournalEntry]' schema = self._generate_schema_str('RPSLObject', 'interface', common_field_dict) self.rpsl_object_interface_schema = schema
-4,808,421,517,788,839,000
Create the schema for RPSLObject, which contains only fields that are common to every known RPSL object, along with meta
irrd/server/graphql/schema_generator.py
_set_rpsl_object_interface_schema
morrowc/irrd
python
def _set_rpsl_object_interface_schema(self): '\n Create the schema for RPSLObject, which contains only fields that\n are common to every known RPSL object, along with meta\n ' common_fields = None for rpsl_object_class in OBJECT_CLASS_MAPPING.values(): if (common_fields is None): common_fields = set(rpsl_object_class.fields.keys()) else: common_fields = common_fields.intersection(set(rpsl_object_class.fields.keys())) common_fields = list(common_fields) common_fields = (['rpslPk', 'objectClass', 'objectText', 'updated'] + common_fields) common_field_dict = self._dict_for_common_fields(common_fields) common_field_dict['journal'] = '[RPSLJournalEntry]' schema = self._generate_schema_str('RPSLObject', 'interface', common_field_dict) self.rpsl_object_interface_schema = schema
def _set_rpsl_contact_schema(self): '\n Create the schema for RPSLContact. This contains shared fields between\n RPSLPerson and RPSLRole, as they are so similar.\n ' common_fields = set(RPSLPerson.fields.keys()).intersection(set(RPSLRole.fields.keys())) common_fields = common_fields.union({'rpslPk', 'objectClass', 'objectText', 'updated'}) common_field_dict = self._dict_for_common_fields(list(common_fields)) schema = self._generate_schema_str('RPSLContact', 'interface', common_field_dict) self.rpsl_contact_schema = schema
6,149,839,233,699,117,000
Create the schema for RPSLContact. This contains shared fields between RPSLPerson and RPSLRole, as they are so similar.
irrd/server/graphql/schema_generator.py
_set_rpsl_contact_schema
morrowc/irrd
python
def _set_rpsl_contact_schema(self): '\n Create the schema for RPSLContact. This contains shared fields between\n RPSLPerson and RPSLRole, as they are so similar.\n ' common_fields = set(RPSLPerson.fields.keys()).intersection(set(RPSLRole.fields.keys())) common_fields = common_fields.union({'rpslPk', 'objectClass', 'objectText', 'updated'}) common_field_dict = self._dict_for_common_fields(list(common_fields)) schema = self._generate_schema_str('RPSLContact', 'interface', common_field_dict) self.rpsl_contact_schema = schema
def _set_rpsl_object_schemas(self): '\n Create the schemas for each specific RPSL object class.\n Each of these implements RPSLObject, and RPSLPerson/RPSLRole\n implement RPSLContact as well.\n ' self.graphql_types = defaultdict(dict) schemas = OrderedDict() for (object_class, klass) in OBJECT_CLASS_MAPPING.items(): object_name = klass.__name__ graphql_fields = OrderedDict() graphql_fields['rpslPk'] = 'String' graphql_fields['objectClass'] = 'String' graphql_fields['objectText'] = 'String' graphql_fields['updated'] = 'String' graphql_fields['journal'] = '[RPSLJournalEntry]' for (field_name, field) in klass.fields.items(): graphql_type = self._graphql_type_for_rpsl_field(field) graphql_fields[snake_to_camel_case(field_name)] = graphql_type self.graphql_types[snake_to_camel_case(object_name)][field_name] = graphql_type (reference_name, reference_type) = self._grapql_type_for_reference_field(field_name, field) if (reference_name and reference_type): graphql_fields[reference_name] = reference_type self.graphql_types[object_name][reference_name] = reference_type for field_name in klass.field_extracts: if field_name.startswith('asn'): graphql_type = 'ASN' elif (field_name == 'prefix'): graphql_type = 'IP' elif (field_name == 'prefix_length'): graphql_type = 'Int' else: graphql_type = 'String' graphql_fields[snake_to_camel_case(field_name)] = graphql_type if klass.rpki_relevant: graphql_fields['rpkiStatus'] = 'RPKIStatus' graphql_fields['rpkiMaxLength'] = 'Int' self.graphql_types[object_name]['rpki_max_length'] = 'Int' implements = ('RPSLContact & RPSLObject' if (klass in [RPSLPerson, RPSLRole]) else 'RPSLObject') schema = self._generate_schema_str(object_name, 'type', graphql_fields, implements) schemas[object_name] = schema self.rpsl_object_schemas = schemas
-2,214,039,650,622,902,500
Create the schemas for each specific RPSL object class. Each of these implements RPSLObject, and RPSLPerson/RPSLRole implement RPSLContact as well.
irrd/server/graphql/schema_generator.py
_set_rpsl_object_schemas
morrowc/irrd
python
def _set_rpsl_object_schemas(self): '\n Create the schemas for each specific RPSL object class.\n Each of these implements RPSLObject, and RPSLPerson/RPSLRole\n implement RPSLContact as well.\n ' self.graphql_types = defaultdict(dict) schemas = OrderedDict() for (object_class, klass) in OBJECT_CLASS_MAPPING.items(): object_name = klass.__name__ graphql_fields = OrderedDict() graphql_fields['rpslPk'] = 'String' graphql_fields['objectClass'] = 'String' graphql_fields['objectText'] = 'String' graphql_fields['updated'] = 'String' graphql_fields['journal'] = '[RPSLJournalEntry]' for (field_name, field) in klass.fields.items(): graphql_type = self._graphql_type_for_rpsl_field(field) graphql_fields[snake_to_camel_case(field_name)] = graphql_type self.graphql_types[snake_to_camel_case(object_name)][field_name] = graphql_type (reference_name, reference_type) = self._grapql_type_for_reference_field(field_name, field) if (reference_name and reference_type): graphql_fields[reference_name] = reference_type self.graphql_types[object_name][reference_name] = reference_type for field_name in klass.field_extracts: if field_name.startswith('asn'): graphql_type = 'ASN' elif (field_name == 'prefix'): graphql_type = 'IP' elif (field_name == 'prefix_length'): graphql_type = 'Int' else: graphql_type = 'String' graphql_fields[snake_to_camel_case(field_name)] = graphql_type if klass.rpki_relevant: graphql_fields['rpkiStatus'] = 'RPKIStatus' graphql_fields['rpkiMaxLength'] = 'Int' self.graphql_types[object_name]['rpki_max_length'] = 'Int' implements = ('RPSLContact & RPSLObject' if (klass in [RPSLPerson, RPSLRole]) else 'RPSLObject') schema = self._generate_schema_str(object_name, 'type', graphql_fields, implements) schemas[object_name] = schema self.rpsl_object_schemas = schemas
def _graphql_type_for_rpsl_field(self, field: RPSLTextField) -> str: '\n Return the GraphQL type for a regular RPSL field.\n This is always a list of strings if the field is a list and/or\n can occur multiple times.\n ' if ((RPSLFieldListMixin in field.__class__.__bases__) or field.multiple): return '[String!]' return 'String'
-4,626,389,360,198,936,000
Return the GraphQL type for a regular RPSL field. This is always a list of strings if the field is a list and/or can occur multiple times.
irrd/server/graphql/schema_generator.py
_graphql_type_for_rpsl_field
morrowc/irrd
python
def _graphql_type_for_rpsl_field(self, field: RPSLTextField) -> str: '\n Return the GraphQL type for a regular RPSL field.\n This is always a list of strings if the field is a list and/or\n can occur multiple times.\n ' if ((RPSLFieldListMixin in field.__class__.__bases__) or field.multiple): return '[String!]' return 'String'
def _grapql_type_for_reference_field(self, field_name: str, rpsl_field: RPSLTextField) -> Tuple[(Optional[str], Optional[str])]: '\n Return the GraphQL name and type for a reference field.\n For example, for a field "admin-c" that refers to person/role,\n returns (\'adminC\', \'[RPSLContactUnion!]\').\n Some fields are excluded because they are syntactical references,\n not real references.\n ' if (isinstance(rpsl_field, RPSLReferenceField) and getattr(rpsl_field, 'referring', None)): rpsl_field.resolve_references() graphql_name = (snake_to_camel_case(field_name) + 'Objs') grapql_referring = set(rpsl_field.referring_object_classes) if (RPSLAutNum in grapql_referring): grapql_referring.remove(RPSLAutNum) if (RPSLInetRtr in grapql_referring): grapql_referring.remove(RPSLInetRtr) if (grapql_referring == {RPSLPerson, RPSLRole}): graphql_type = '[RPSLContactUnion!]' else: graphql_type = (('[' + grapql_referring.pop().__name__) + '!]') return (graphql_name, graphql_type) return (None, None)
-6,929,608,980,858,813,000
Return the GraphQL name and type for a reference field. For example, for a field "admin-c" that refers to person/role, returns ('adminC', '[RPSLContactUnion!]'). Some fields are excluded because they are syntactical references, not real references.
irrd/server/graphql/schema_generator.py
_grapql_type_for_reference_field
morrowc/irrd
python
def _grapql_type_for_reference_field(self, field_name: str, rpsl_field: RPSLTextField) -> Tuple[(Optional[str], Optional[str])]: '\n Return the GraphQL name and type for a reference field.\n For example, for a field "admin-c" that refers to person/role,\n returns (\'adminC\', \'[RPSLContactUnion!]\').\n Some fields are excluded because they are syntactical references,\n not real references.\n ' if (isinstance(rpsl_field, RPSLReferenceField) and getattr(rpsl_field, 'referring', None)): rpsl_field.resolve_references() graphql_name = (snake_to_camel_case(field_name) + 'Objs') grapql_referring = set(rpsl_field.referring_object_classes) if (RPSLAutNum in grapql_referring): grapql_referring.remove(RPSLAutNum) if (RPSLInetRtr in grapql_referring): grapql_referring.remove(RPSLInetRtr) if (grapql_referring == {RPSLPerson, RPSLRole}): graphql_type = '[RPSLContactUnion!]' else: graphql_type = (('[' + grapql_referring.pop().__name__) + '!]') return (graphql_name, graphql_type) return (None, None)
def _generate_schema_str(self, name: str, graphql_type: str, fields: Dict[(str, str)], implements: Optional[str]=None) -> str: '\n Generate a schema string for a given name, object type and dict of fields.\n ' schema = f'{graphql_type} {name} ' if implements: schema += f'implements {implements} ' schema += '{\n' for (field, field_type) in fields.items(): schema += f''' {field}: {field_type} ''' schema += '}\n\n' return schema
-3,460,663,556,156,464,000
Generate a schema string for a given name, object type and dict of fields.
irrd/server/graphql/schema_generator.py
_generate_schema_str
morrowc/irrd
python
def _generate_schema_str(self, name: str, graphql_type: str, fields: Dict[(str, str)], implements: Optional[str]=None) -> str: '\n \n ' schema = f'{graphql_type} {name} ' if implements: schema += f'implements {implements} ' schema += '{\n' for (field, field_type) in fields.items(): schema += f' {field}: {field_type} ' schema += '}\n\n' return schema
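A worked example of the SDL block _generate_schema_str() returns, using hypothetical inputs (the class name SchemaGenerator is assumed as above, and exact indentation depends on the original triple-quoted strings):

generator = SchemaGenerator()  # class name assumed
print(generator._generate_schema_str(
    'RPSLPerson', 'type',
    {'rpslPk': 'String', 'address': '[String!]'},
    implements='RPSLContact & RPSLObject'))
# Expected shape of the output:
# type RPSLPerson implements RPSLContact & RPSLObject {
#     rpslPk: String
#     address: [String!]
# }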
@cached_property def additional_properties_type(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n ' lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type)
1,702,168,743,392,494,600
This must be a method because a model may have properties that are of type self; this must run after the class is loaded
sdks/python/client/argo_workflows/model/lifecycle_handler.py
additional_properties_type
AnuragThePathak/argo-workflows
python
@cached_property def additional_properties_type(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n ' lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type)
@cached_property def openapi_types(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' lazy_import() return {'_exec': (ExecAction,), 'http_get': (HTTPGetAction,), 'tcp_socket': (TCPSocketAction,)}
-7,129,412,387,319,964,000
This must be a method because a model may have properties that are of type self; this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type.
sdks/python/client/argo_workflows/model/lifecycle_handler.py
openapi_types
AnuragThePathak/argo-workflows
python
@cached_property def openapi_types(): '\n This must be a method because a model may have properties that are\n of type self, this must run after the class is loaded\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n ' lazy_import() return {'_exec': (ExecAction,), 'http_get': (HTTPGetAction,), 'tcp_socket': (TCPSocketAction,)}
@classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): 'LifecycleHandler - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n _exec (ExecAction): [optional] # noqa: E501\n http_get (HTTPGetAction): [optional] # noqa: E501\n tcp_socket (TCPSocketAction): [optional] # noqa: E501\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) return self
7,650,468,161,058,138,000
LifecycleHandler - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) _exec (ExecAction): [optional] # noqa: E501 http_get (HTTPGetAction): [optional] # noqa: E501 tcp_socket (TCPSocketAction): [optional] # noqa: E501
sdks/python/client/argo_workflows/model/lifecycle_handler.py
_from_openapi_data
AnuragThePathak/argo-workflows
python
@classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): 'LifecycleHandler - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n _exec (ExecAction): [optional] # noqa: E501\n http_get (HTTPGetAction): [optional] # noqa: E501\n tcp_socket (TCPSocketAction): [optional] # noqa: E501\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) return self
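A hedged sketch of instantiating the generated model above. The ExecAction(command=[...]) constructor is an assumption about the companion generated class; the keyword-argument pattern and the _from_openapi_data / read-only distinction come from the code itself:

handler = LifecycleHandler(_exec=ExecAction(command=['/bin/sh', '-c', 'echo bye']))

# When rebuilding an instance from data returned by the API (which may set
# read-only attributes), the classmethod is used instead of __init__:
handler = LifecycleHandler._from_openapi_data(
    _exec=ExecAction(command=['/bin/sh', '-c', 'echo bye']))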
@convert_js_args_to_python_args def __init__(self, *args, **kwargs): 'LifecycleHandler - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n _exec (ExecAction): [optional] # noqa: E501\n http_get (HTTPGetAction): [optional] # noqa: E501\n tcp_socket (TCPSocketAction): [optional] # noqa: E501\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) if (var_name in self.read_only_vars): raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
-4,069,975,532,339,985,000
LifecycleHandler - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) _exec (ExecAction): [optional] # noqa: E501 http_get (HTTPGetAction): [optional] # noqa: E501 tcp_socket (TCPSocketAction): [optional] # noqa: E501
sdks/python/client/argo_workflows/model/lifecycle_handler.py
__init__
AnuragThePathak/argo-workflows
python
@convert_js_args_to_python_args def __init__(self, *args, **kwargs): 'LifecycleHandler - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _spec_property_naming (bool): True if the variable names in the input data\n are serialized names, as specified in the OpenAPI document.\n False if the variable names in the input data\n are pythonic names, e.g. snake case (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n _visited_composed_classes (tuple): This stores a tuple of\n classes that we have traveled through so that\n if we see that class again we will not use its\n discriminator again.\n When traveling through a discriminator, the\n composed schema that is\n is traveled through is added to this set.\n For example if Animal has a discriminator\n petType and we pass in "Dog", and the class Dog\n allOf includes Animal, we move through Animal\n once using the discriminator, and pick Dog.\n Then in Dog, we will make an instance of the\n Animal class but this time we won\'t travel\n through its discriminator because we passed in\n _visited_composed_classes = (Animal,)\n _exec (ExecAction): [optional] # noqa: E501\n http_get (HTTPGetAction): [optional] # noqa: E501\n tcp_socket (TCPSocketAction): [optional] # noqa: E501\n ' _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,)) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = (_visited_composed_classes + (self.__class__,)) for (var_name, var_value) in kwargs.items(): if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)): continue setattr(self, var_name, var_value) if (var_name in self.read_only_vars): raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
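For direct construction through this __init__, a hedged sketch follows; it assumes the generated ExecAction model lives at the conventional module path and that its command field takes a list of strings (the command value is illustrative only):

from argo_workflows.model.exec_action import ExecAction
from argo_workflows.model.lifecycle_handler import LifecycleHandler

# A hook that runs a shell command when triggered; the command is illustrative.
handler = LifecycleHandler(_exec=ExecAction(command=['/bin/sh', '-c', 'echo bye']))
print(handler.to_dict())

Note that the exec field is exposed under the pythonic name _exec because exec is a reserved word in Python.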
def addTableEntry(self, tableEntry=None): '\n Add a table entry to the switch\n ' response = self.stub.AddEntry(tableEntry) if (response.code == 0): Log.error('Error for entry:', tableEntry, 'on switch', self.name)
-3,067,755,745,714,994,000
Add a table entry to the switch
Controller-Implementation/libs/core/SwitchConnection.py
addTableEntry
qcz994/p4-bier
python
def addTableEntry(self, tableEntry=None): '\n \n ' response = self.stub.AddEntry(tableEntry) if (response.code == 0): Log.error('Error for entry:', tableEntry, 'on switch', self.name)
def removeTableEntry(self, tableEntry=None): '\n Remove a table entry from the switch\n ' response = self.stub.RemoveEntry(tableEntry) if (response.code == 0): Log.error('Error while removing entry:', tableEntry, 'on switch', self.name)
4,002,122,230,061,831,000
Remove a table entry from the switch
Controller-Implementation/libs/core/SwitchConnection.py
removeTableEntry
qcz994/p4-bier
python
def removeTableEntry(self, tableEntry=None): '\n \n ' response = self.stub.RemoveEntry(tableEntry) if (response.code == 0): Log.error('Error while removing entry:', tableEntry, 'on switch', self.name)
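One detail shared by addTableEntry and removeTableEntry above is the response convention: a code of 0 from the gRPC stub is treated as failure, not success. A self-contained sketch of that check, using stand-in classes that are not part of the repository:

class _Response:
    # Stand-in for the gRPC response object returned by AddEntry/RemoveEntry.
    def __init__(self, code):
        self.code = code

def entry_accepted(response):
    # Mirrors the checks above: zero means the switch rejected the entry.
    return response.code != 0

assert entry_accepted(_Response(code=1))
assert not entry_accepted(_Response(code=0))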
@read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, session=None): "\n List the bad file replicas summary. Method used by the rucio-ui.\n :param rse_expression: The RSE expression.\n :param from_date: The start date.\n :param to_date: The end date.\n :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True}\n :param session: The database session in use.\n " result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append((models.BadReplicas.rse_id == rse['id'])) elif filter: for rse in list_rses(filters=filter, session=session): rse_clause.append((models.BadReplicas.rse_id == rse['id'])) if (session.bind.dialect.name == 'oracle'): to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif (session.bind.dialect.name == 'mysql'): to_days = func.date(models.BadReplicas.created_at) elif (session.bind.dialect.name == 'postgresql'): to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) if (rse_clause != []): query = query.filter(or_(*rse_clause)) if from_date: query = query.filter((models.BadReplicas.created_at > from_date)) if to_date: query = query.filter((models.BadReplicas.created_at < to_date)) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if ((row[2], row[1], row[4]) not in incidents): incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result
6,065,724,123,909,250,000
List the bad file replicas summary. Method used by the rucio-ui. :param rse_expression: The RSE expression. :param from_date: The start date. :param to_date: The end date. :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True} :param session: The database session in use.
lib/rucio/core/replica.py
get_bad_replicas_summary
bari12/rucio
python
@read_session def get_bad_replicas_summary(rse_expression=None, from_date=None, to_date=None, filter=None, session=None): "\n List the bad file replicas summary. Method used by the rucio-ui.\n :param rse_expression: The RSE expression.\n :param from_date: The start date.\n :param to_date: The end date.\n :param filter: Dictionary of attributes by which the RSE results should be filtered. e.g.: {'availability_write': True}\n :param session: The database session in use.\n " result = [] incidents = {} rse_clause = [] if rse_expression: for rse in parse_expression(expression=rse_expression, filter=filter, session=session): rse_clause.append((models.BadReplicas.rse_id == rse['id'])) elif filter: for rse in list_rses(filters=filter, session=session): rse_clause.append((models.BadReplicas.rse_id == rse['id'])) if (session.bind.dialect.name == 'oracle'): to_days = func.trunc(models.BadReplicas.created_at, str('DD')) elif (session.bind.dialect.name == 'mysql'): to_days = func.date(models.BadReplicas.created_at) elif (session.bind.dialect.name == 'postgresql'): to_days = func.date_trunc('day', models.BadReplicas.created_at) else: to_days = func.strftime(models.BadReplicas.created_at, '%Y-%m-%d') query = session.query(func.count(), to_days, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.reason) if (rse_clause != []): query = query.filter(or_(*rse_clause)) if from_date: query = query.filter((models.BadReplicas.created_at > from_date)) if to_date: query = query.filter((models.BadReplicas.created_at < to_date)) summary = query.group_by(to_days, models.BadReplicas.rse_id, models.BadReplicas.reason, models.BadReplicas.state).all() for row in summary: if ((row[2], row[1], row[4]) not in incidents): incidents[(row[2], row[1], row[4])] = {} incidents[(row[2], row[1], row[4])][str(row[3].name)] = row[0] for incident in incidents: res = incidents[incident] res['rse_id'] = incident[0] res['rse'] = get_rse_name(rse_id=incident[0], session=session) res['created_at'] = incident[1] res['reason'] = incident[2] result.append(res) return result
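A hedged usage sketch for the summary helper above, assuming a configured Rucio database so the read_session decorator can supply the session; the RSE expression and date window are illustrative:

from datetime import datetime, timedelta
from rucio.core.replica import get_bad_replicas_summary

# Bad-replica summary for the last week on tape RSEs (expression is illustrative).
summary = get_bad_replicas_summary(
    rse_expression='type=TAPE',
    from_date=datetime.utcnow() - timedelta(days=7),
)
for row in summary:
    # Each row carries the RSE, day, reason, and per-state counts keyed by state name.
    print(row['rse'], row['created_at'], row['reason'], row.get('BAD', 0))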
@read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): '\n Internal method to check if a replica exists at a given site.\n :param rse_id: The RSE id.\n :param scope: The scope of the file.\n :param name: The name of the file.\n :param path: The path of the replica.\n :param session: The database session in use.\n ' already_declared = False if path: path_clause = [(models.RSEFileAssociation.path == path)] if path.startswith('/'): path_clause.append((models.RSEFileAssociation.path == path[1:])) else: path_clause.append((models.RSEFileAssociation.path == ('/%s' % path))) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).with_hint(models.RSEFileAssociation, '+ index(replicas REPLICAS_PATH_IDX', 'oracle').filter((models.RSEFileAssociation.rse_id == rse_id)).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() (path, scope, name, rse_id, size) = result query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return (True, scope, name, already_declared, size) else: return (False, None, None, already_declared, None)
2,500,845,477,038,364,000
Internal method to check if a replica exists at a given site. :param rse_id: The RSE id. :param scope: The scope of the file. :param name: The name of the file. :param path: The path of the replica. :param session: The database session in use.
lib/rucio/core/replica.py
__exists_replicas
bari12/rucio
python
@read_session def __exists_replicas(rse_id, scope=None, name=None, path=None, session=None): '\n Internal method to check if a replica exists at a given site.\n :param rse_id: The RSE id.\n :param scope: The scope of the file.\n :param name: The name of the file.\n :param path: The path of the replica.\n :param session: The database session in use.\n ' already_declared = False if path: path_clause = [(models.RSEFileAssociation.path == path)] if path.startswith('/'): path_clause.append((models.RSEFileAssociation.path == path[1:])) else: path_clause.append((models.RSEFileAssociation.path == ('/%s' % path))) query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).with_hint(models.RSEFileAssociation, '+ index(replicas REPLICAS_PATH_IDX', 'oracle').filter((models.RSEFileAssociation.rse_id == rse_id)).filter(or_(*path_clause)) else: query = session.query(models.RSEFileAssociation.path, models.RSEFileAssociation.scope, models.RSEFileAssociation.name, models.RSEFileAssociation.rse_id, models.RSEFileAssociation.bytes).filter_by(rse_id=rse_id, scope=scope, name=name) if query.count(): result = query.first() (path, scope, name, rse_id, size) = result query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state).filter_by(rse_id=rse_id, scope=scope, name=name, state=BadFilesStatus.BAD) if query.count(): already_declared = True return (True, scope, name, already_declared, size) else: return (False, None, None, already_declared, None)
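The path handling in the private helper above tolerates a leading slash by matching both variants of the replica path. A stand-alone illustration of that normalisation, independent of the database layer:

def path_variants(path):
    # Mirrors the path_clause construction above: match the path both with and
    # without its leading slash.
    if path.startswith('/'):
        return [path, path[1:]]
    return [path, '/%s' % path]

assert path_variants('/store/file.root') == ['/store/file.root', 'store/file.root']
assert path_variants('store/file.root') == ['store/file.root', '/store/file.root']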
@read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): '\n List the bad file replicas history states. Method used by the rucio-ui.\n :param state: The state of the file (SUSPICIOUS or BAD).\n :param rse_id: The RSE id.\n :param younger_than: datetime object to select bad replicas younger than this date.\n :param older_than: datetime object to select bad replicas older than this date.\n :param limit: The maximum number of replicas returned.\n :param vo: The VO to find replicas from.\n :param session: The database session in use.\n ' result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter((models.BadReplicas.state == state)) if rse_id: query = query.filter((models.BadReplicas.rse_id == rse_id)) if younger_than: query = query.filter((models.BadReplicas.created_at >= younger_than)) if older_than: query = query.filter((models.BadReplicas.created_at <= older_than)) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if (badfile.scope.vo == vo): if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if ((rse_id in rep['rses']) and rep['rses'][rse_id]): pfn = rep['rses'][rse_id][0] if (pfn and (pfn not in reps)): reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result
-5,288,423,726,230,488,000
List the bad file replicas history states. Method used by the rucio-ui. :param state: The state of the file (SUSPICIOUS or BAD). :param rse_id: The RSE id. :param younger_than: datetime object to select bad replicas younger than this date. :param older_than: datetime object to select bad replicas older than this date. :param limit: The maximum number of replicas returned. :param vo: The VO to find replicas from. :param session: The database session in use.
lib/rucio/core/replica.py
list_bad_replicas_status
bari12/rucio
python
@read_session def list_bad_replicas_status(state=BadFilesStatus.BAD, rse_id=None, younger_than=None, older_than=None, limit=None, list_pfns=False, vo='def', session=None): '\n List the bad file replicas history states. Method used by the rucio-ui.\n :param state: The state of the file (SUSPICIOUS or BAD).\n :param rse_id: The RSE id.\n :param younger_than: datetime object to select bad replicas younger than this date.\n :param older_than: datetime object to select bad replicas older than this date.\n :param limit: The maximum number of replicas returned.\n :param vo: The VO to find replicas from.\n :param session: The database session in use.\n ' result = [] query = session.query(models.BadReplicas.scope, models.BadReplicas.name, models.BadReplicas.rse_id, models.BadReplicas.state, models.BadReplicas.created_at, models.BadReplicas.updated_at) if state: query = query.filter((models.BadReplicas.state == state)) if rse_id: query = query.filter((models.BadReplicas.rse_id == rse_id)) if younger_than: query = query.filter((models.BadReplicas.created_at >= younger_than)) if older_than: query = query.filter((models.BadReplicas.created_at <= older_than)) if limit: query = query.limit(limit) for badfile in query.yield_per(1000): if (badfile.scope.vo == vo): if list_pfns: result.append({'scope': badfile.scope, 'name': badfile.name, 'type': DIDType.FILE}) else: result.append({'scope': badfile.scope, 'name': badfile.name, 'rse': get_rse_name(rse_id=badfile.rse_id, session=session), 'rse_id': badfile.rse_id, 'state': badfile.state, 'created_at': badfile.created_at, 'updated_at': badfile.updated_at}) if list_pfns: reps = [] for rep in list_replicas(result, schemes=None, unavailable=False, request_id=None, ignore_availability=True, all_states=True, session=session): pfn = None if ((rse_id in rep['rses']) and rep['rses'][rse_id]): pfn = rep['rses'][rse_id][0] if (pfn and (pfn not in reps)): reps.append(pfn) else: reps.extend([item for row in rep['rses'].values() for item in row]) list(set(reps)) result = reps return result
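Finally, a hedged usage sketch for list_bad_replicas_status, again assuming a configured Rucio database; the BadFilesStatus import path and the one-day window are assumptions, and the session is injected by the decorator:

from datetime import datetime, timedelta
from rucio.core.replica import list_bad_replicas_status
from rucio.db.sqla.constants import BadFilesStatus

# Suspicious replicas declared within the last day, capped at 100 rows.
for bad in list_bad_replicas_status(
    state=BadFilesStatus.SUSPICIOUS,
    younger_than=datetime.utcnow() - timedelta(days=1),
    limit=100,
):
    print(bad['rse'], bad['scope'], bad['name'], bad['state'])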