body (string, 26-98.2k chars) | body_hash (int64) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (1 class: python) | body_without_docstring (string, 20-98.2k chars) |
---|---|---|---|---|---|---|---|
def get_nm_node_yaml(nm_host, node_name, ssl_verify=False, verbose=False):
'\n Get the raw ENC YAML for a given node\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param node_name: name of the node to get YAML for\n :type node_name: string\n :param ssl_verify: whether or not to verify SSL certificate, default False\n :type ssl_verify: boolean\n :rtype: string\n :returns: raw YAML string, or None\n '
nm_url = ('http://%s/enc/puppet/%s' % (nm_host, node_name))
r = requests.get(nm_url, headers={'Accept': 'text/yaml'}, verify=ssl_verify)
if (r.status_code == 200):
return r.content
else:
logger.error('got status code {s} for {u}'.format(s=r.status_code, u=nm_url))
return None | 6,246,137,961,526,569,000 | Get the raw ENC YAML for a given node
:param nm_host: NodeMeister hostname or IP
:type nm_host: string
:param node_name: name of the node to get YAML for
:type node_name: string
:param ssl_verify: whether or not to verify SSL certificate, default False
:type ssl_verify: boolean
:rtype: string
:returns: raw YAML string, or None | contrib/cli_scripts/nodemeisterlib.py | get_nm_node_yaml | coxmediagroup/nodemeister | python | def get_nm_node_yaml(nm_host, node_name, ssl_verify=False, verbose=False):
'\n Get the raw ENC YAML for a given node\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param node_name: name of the node to get YAML for\n :type node_name: string\n :param ssl_verify: whether or not to verify SSL certificate, default False\n :type ssl_verify: boolean\n :rtype: string\n :returns: raw YAML string, or None\n '
nm_url = ('http://%s/enc/puppet/%s' % (nm_host, node_name))
r = requests.get(nm_url, headers={'Accept': 'text/yaml'}, verify=ssl_verify)
if (r.status_code == 200):
return r.content
else:
logger.error('got status code {s} for {u}'.format(s=r.status_code, u=nm_url))
return None |
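A minimal usage sketch for the function above. It assumes this file is importable as nodemeisterlib and that the NodeMeister host and node name are placeholders, not real endpoints:

from nodemeisterlib import get_nm_node_yaml

# Fetch the raw ENC YAML for one node; None means the GET did not return 200.
yaml_text = get_nm_node_yaml('nodemeister.example.com', 'web01.example.com')
if yaml_text is not None:
    print(yaml_text)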
def get_dashboard_node_yaml(url, ssl_verify=False, verbose=False):
'\n Given the full URL to a Puppet Dashboard node YAML file,\n return the content of the YAML file as a string.\n\n :param url: full URL to Dashboard node yaml\n :type url: string\n :param ssl_verify: whether or not to verify SSL certificate, default False\n :type ssl_verify: boolean\n :rtype: string\n :returns: raw YAML string, or None\n '
r = requests.get(url, headers={'Accept': 'text/yaml'}, verify=ssl_verify)
if (r.status_code == 200):
return r.content
else:
logger.error('got status code {s} for {u}'.format(s=r.status_code, u=url))
return None | 1,135,490,431,877,605,600 | Given the full URL to a Puppet Dashboard node YAML file,
return the content of the YAML file as a string.
:param url: full URL to Dashboard node yaml
:type url: string
:param ssl_verify: whether or not to verify SSL certificate, default False
:type ssl_verify: boolean
:rtype: string
:returns: raw YAML string, or None | contrib/cli_scripts/nodemeisterlib.py | get_dashboard_node_yaml | coxmediagroup/nodemeister | python | def get_dashboard_node_yaml(url, ssl_verify=False, verbose=False):
'\n Given the full URL to a Puppet Dashboard node YAML file,\n return the content of the YAML file as a string.\n\n :param url: full URL to Dashboard node yaml\n :type url: string\n :param ssl_verify: whether or not to verify SSL certificate, default False\n :type ssl_verify: boolean\n :rtype: string\n :returns: raw YAML string, or None\n '
r = requests.get(url, headers={'Accept': 'text/yaml'}, verify=ssl_verify)
if (r.status_code == 200):
return r.content
else:
logger.error('got status code {s} for {u}'.format(s=r.status_code, u=url))
return None |
def get_json(url):
"\n uses requests to GET and return deserialized json\n\n uses anyjson if the Response object doesn't have .json()\n\n :param url: the URL to get\n :type url: string\n :rtype: dict/mixed or None\n :returns: unserialized JSON, or None\n "
r = requests.get(url)
if ('json' in dir(r)):
return r.json()
try:
j = anyjson.deserialize(r.content)
return j
except:
logger.error('could not deserialize JSON for {u} (got status code {s})'.format(s=r.status_code, u=url))
return None | -8,498,332,083,392,072,000 | uses requests to GET and return deserialized json
uses anyjson if the Response object doesn't have .json()
:param url: the URL to get
:type url: string
:rtype: dict/mixed or None
:returns: unserialized JSON, or None | contrib/cli_scripts/nodemeisterlib.py | get_json | coxmediagroup/nodemeister | python | def get_json(url):
"\n uses requests to GET and return deserialized json\n\n uses anyjson if the Response object doesn't have .json()\n\n :param url: the URL to get\n :type url: string\n :rtype: dict/mixed or None\n :returns: unserialized JSON, or None\n "
r = requests.get(url)
if ('json' in dir(r)):
return r.json()
try:
j = anyjson.deserialize(r.content)
return j
except:
logger.error('could not deserialize JSON for {u} (got status code {s})'.format(s=r.status_code, u=url))
return None |
def get_group_names(nm_host):
'\n Return a dict of groups in the NM instance,\n id => name\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :rtype: dict\n :returns: NM groups, dict of the form {id<int>: name<string>}\n '
j = get_json(('http://%s/enc/groups/' % nm_host))
names = {}
for n in j:
names[n['id']] = n['name']
return names | 2,884,852,824,760,734,000 | Return a dict of groups in the NM instance,
id => name
:param nm_host: NodeMeister hostname/IP
:type nm_host: string
:rtype: dict
:returns: NM groups, dict of the form {id<int>: name<string>} | contrib/cli_scripts/nodemeisterlib.py | get_group_names | coxmediagroup/nodemeister | python | def get_group_names(nm_host):
'\n Return a dict of groups in the NM instance,\n id => name\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :rtype: dict\n :returns: NM groups, dict of the form {id<int>: name<string>}\n '
j = get_json(('http://%s/enc/groups/' % nm_host))
names = {}
for n in j:
names[n['id']] = n['name']
return names |
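An illustrative call for get_group_names, under the same assumptions (hypothetical import path and host):

from nodemeisterlib import get_group_names

# Map of group ID -> group name; also useful as a cache for later lookups.
groups = get_group_names('nodemeister.example.com')
print(groups)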
def get_nm_group_classes(nm_host):
"\n Return a dict of all group classes in NM,\n with their id as the dict key.\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :rtype: dict\n :returns: NM group classes, dict of the form:\n {id<int>: {'classname': <string>, 'classparams': <string or None>, 'group': <int>, 'id': <int>}\n "
r = {}
j = get_json(('http://%s/enc/classes/groups/' % nm_host))
for o in j:
r[o['id']] = o
return r | 1,286,843,342,070,683,100 | Return a dict of all group classes in NM,
with their id as the dict key.
:param nm_host: NodeMeister hostname/IP
:type nm_host: string
:rtype: dict
:returns: NM group classes, dict of the form:
{id<int>: {'classname': <string>, 'classparams': <string or None>, 'group': <int>, 'id': <int>} | contrib/cli_scripts/nodemeisterlib.py | get_nm_group_classes | coxmediagroup/nodemeister | python | def get_nm_group_classes(nm_host):
"\n Return a dict of all group classes in NM,\n with their id as the dict key.\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :rtype: dict\n :returns: NM group classes, dict of the form:\n {id<int>: {'classname': <string>, 'classparams': <string or None>, 'group': <int>, 'id': <int>}\n "
r = {}
j = get_json(('http://%s/enc/classes/groups/' % nm_host))
for o in j:
r[o['id']] = o
return r |
def get_nm_group_params(nm_host):
"\n Return a dict of all group params in NM,\n with their id as the dict key.\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :rtype: dict\n :returns: NM group params, dict of the form:\n {id<int>: {'paramkey': <string>, 'paramvalue': <string or None>, 'group': <int>, 'id': <int>}\n "
r = {}
j = get_json(('http://%s/enc/parameters/groups/' % nm_host))
for o in j:
if (o['paramvalue'] is not None):
o['paramvalue'] = clean_value(o['paramvalue'])
r[o['id']] = o
return r | -6,756,621,771,376,389,000 | Return a dict of all group params in NM,
with their id as the dict key.
:param nm_host: NodeMeister hostname/IP
:type nm_host: string
:rtype: dict
:returns: NM group params, dict of the form:
{id<int>: {'paramkey': <string>, 'paramvalue': <string or None>, 'group': <int>, 'id': <int>} | contrib/cli_scripts/nodemeisterlib.py | get_nm_group_params | coxmediagroup/nodemeister | python | def get_nm_group_params(nm_host):
"\n Return a dict of all group params in NM,\n with their id as the dict key.\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :rtype: dict\n :returns: NM group params, dict of the form:\n {id<int>: {'paramkey': <string>, 'paramvalue': <string or None>, 'group': <int>, 'id': <int>}\n "
r = {}
j = get_json(('http://%s/enc/parameters/groups/' % nm_host))
for o in j:
if (o['paramvalue'] is not None):
o['paramvalue'] = clean_value(o['paramvalue'])
r[o['id']] = o
return r |
def get_nm_group(nm_host, gname=None, gid=None, groupnames=None):
"\n Return a dict of information about a group\n in NM, by either name or ID. If gname is specified,\n it will be resolved to the id.\n\n groupnames, if specified, is the output dict from get_group_names();\n if it is not specified, get_group_names() will be called internally.\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :param gname: name of group to get\n :type gname: string\n :param gid: ID of group to get, overrides gname\n :type gid: int\n :param groupnames: output of get_group_names(), to prevent calling it again if we already have it\n :type groupnames: dict\n :rtype: dict\n :returns: unserialized JSON dict representing the specified group, of the form:\n {'name': <string>, 'parameters': [<param IDs>], 'classes': [<class IDs>], 'parents': [<group IDs>], 'groups': [<group IDs>], 'id': <int>, 'description': <string>}\n "
if ((gid is None) and (gname is None)):
raise ValueError('get_nm_group called without gname or gid')
if (gid is None):
if (groupnames is None):
groupnames = get_group_names(nm_host)
for n in groupnames:
if (groupnames[n] == gname):
gid = n
if (gid is None):
return {}
j = get_json(('http://%s/enc/groups/%d/' % (nm_host, gid)))
return j | -2,901,903,478,772,705,000 | Return a dict of information about a group
in NM, by either name or ID. If gname is specified,
it will be resolved to the id.
groupnames, if specified, is the output dict from get_group_names();
if it is not specified, get_group_names() will be called internally.
:param nm_host: NodeMeister hostname/IP
:type nm_host: string
:param gname: name of group to get
:type gname: string
:param gid: ID of group to get, overrides gname
:type gid: int
:param groupnames: output of get_group_names(), to prevent calling it again if we already have it
:type groupnames: dict
:rtype: dict
:returns: unserialized JSON dict representing the specified group, of the form:
{'name': <string>, 'parameters': [<param IDs>], 'classes': [<class IDs>], 'parents': [<group IDs>], 'groups': [<group IDs>], 'id': <int>, 'description': <string>} | contrib/cli_scripts/nodemeisterlib.py | get_nm_group | coxmediagroup/nodemeister | python | def get_nm_group(nm_host, gname=None, gid=None, groupnames=None):
"\n Return a dict of information about a group\n in NM, by either name or ID. If gname is specified,\n it will be resolved to the id.\n\n groupnames, if specified, is the output dict from get_group_names();\n if it is not specified, get_group_names() will be called internally.\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :param gname: name of group to get\n :type gname: string\n :param gid: ID of group to get, overrides gname\n :type gid: int\n :param groupnames: output of get_group_names(), to prevent calling it again if we already have it\n :type groupnames: dict\n :rtype: dict\n :returns: unserialized JSON dict representing the specified group, of the form:\n {'name': <string>, 'parameters': [<param IDs>], 'classes': [<class IDs>], 'parents': [<group IDs>], 'groups': [<group IDs>], 'id': <int>, 'description': <string>}\n "
if ((gid is None) and (gname is None)):
raise ValueError('get_nm_group called without gname or gid')
if (gid is None):
if (groupnames is None):
groupnames = get_group_names(nm_host)
for n in groupnames:
if (groupnames[n] == gname):
gid = n
if (gid is None):
return {}
j = get_json(('http://%s/enc/groups/%d/' % (nm_host, gid)))
return j |
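A hedged sketch of fetching a group by name while reusing a cached name map; the host and group name are placeholders:

from nodemeisterlib import get_group_names, get_nm_group

host = 'nodemeister.example.com'
names = get_group_names(host)
# An empty dict means the group name could not be resolved to an ID.
group = get_nm_group(host, gname='webservers', groupnames=names)
if group:
    print(group['classes'])  # still raw class IDs at this point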
def interpolate_group(group, classes, params, group_names):
'\n In the dict returned by get_nm_group, replace class\n and parameter IDs, and other group IDs, with their\n appropriate string or dict representations.\n\n :param group: the Group dict returned by get_nm_group()\n :type group: dict\n :param classes: the dict of classes returned by get_nm_group_classes()\n :type classes: dict\n :param params: the dict of parameters returned by get_nm_group_params()\n :type params: dict\n :param group_names: the dict of group IDs to names returned by get_group_names()\n :type group_names: dict\n :returns: group dict, with classes and params interpolated\n :rtype: dict\n '
g_params = group.get('parameters', {})
params_text = {}
for p in g_params:
foo = params[p]
params_text[foo['paramkey']] = foo['paramvalue']
group['parameters'] = params_text
g_classes = group.get('classes', {})
classes_text = {}
for c in g_classes:
foo = classes[c]
classes_text[foo['classname']] = foo['classparams']
group['classes'] = classes_text
g_parents = group.get('parents', {})
parents_text = []
for p in g_parents:
parents_text.append(group_names[p])
group['parents'] = parents_text
g_groups = group.get('groups', {})
groups_text = []
for g in g_groups:
groups_text.append(group_names[g])
group['groups'] = groups_text
return group | -3,105,283,020,348,467,700 | In the dict returned by get_nm_group, replace class
and parameter IDs, and other group IDs, with their
appropriate string or dict representations.
:param group: the Group dict returned by get_nm_group()
:type group: dict
:param classes: the dict of classes returned by get_nm_group_classes()
:type classes: dict
:param params: the dict of parameters returned by get_nm_group_params()
:type params: dict
:param group_names: the dict of group IDs to names returned by get_group_names()
:type group_names: dict
:returns: group dict, with classes and params interpolated
:rtype: dict | contrib/cli_scripts/nodemeisterlib.py | interpolate_group | coxmediagroup/nodemeister | python | def interpolate_group(group, classes, params, group_names):
'\n In the dict returned by get_nm_group, replace class\n and parameter IDs, and other group IDs, with their\n appropriate string or dict representations.\n\n :param group: the Group dict returned by get_nm_group()\n :type group: dict\n :param classes: the dict of classes returned by get_nm_group_classes()\n :type classes: dict\n :param params: the dict of parameters returned by get_nm_group_params()\n :type params: dict\n :param group_names: the dict of group IDs to names returned by get_group_names()\n :type group_names: dict\n :returns: group dict, with classes and params interpolated\n :rtype: dict\n '
g_params = group.get('parameters', {})
params_text = {}
for p in g_params:
foo = params[p]
params_text[foo['paramkey']] = foo['paramvalue']
group['parameters'] = params_text
g_classes = group.get('classes', {})
classes_text = {}
for c in g_classes:
foo = classes[c]
classes_text[foo['classname']] = foo['classparams']
group['classes'] = classes_text
g_parents = group.get('parents', {})
parents_text = []
for p in g_parents:
parents_text.append(group_names[p])
group['parents'] = parents_text
g_groups = group.get('groups', {})
groups_text = []
for g in g_groups:
groups_text.append(group_names[g])
group['groups'] = groups_text
return group |
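The sketch below (placeholder host and group name, assumed import path) shows how interpolate_group combines the lookups above to turn the raw ID lists into readable names and values:

from nodemeisterlib import (get_group_names, get_nm_group, get_nm_group_classes,
    get_nm_group_params, interpolate_group)

host = 'nodemeister.example.com'
names = get_group_names(host)
group = get_nm_group(host, gname='webservers', groupnames=names)
classes = get_nm_group_classes(host)
params = get_nm_group_params(host)
full = interpolate_group(group, classes, params, names)
print(full['classes'])     # {classname: classparams, ...}
print(full['parameters'])  # {paramkey: paramvalue, ...}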
def add_group(nm_host, name, description, parents=None, groups=None, dry_run=False):
'\n add a group to NodeMeister\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param name: name of the new group\n :type name: string\n :param description: description of the new group\n :type description: string\n :param parents: parents of this group\n :type parents: list of int IDs\n :param groups: child groups of this group\n :type groups: list of int IDs\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: int ID of the new group on success or False on failure\n :rtype: int or False\n '
payload = {'name': name, 'description': description}
if (parents is not None):
payload['parents'] = parents
if (groups is not None):
payload['groups'] = groups
url = ('http://%s/enc/groups/' % nm_host)
status_code = do_post(url, payload, dry_run=dry_run)
if (status_code == 201):
return get_nm_group_id(nm_host, name, dry_run=dry_run)
logger.error(('ERROR: add_group got status code %d' % status_code))
return False | 6,958,710,027,952,785,000 | add a group to NodeMeister
:param nm_host: NodeMeister hostname or IP
:type nm_host: string
:param name: name of the new group
:type name: string
:param description: description of the new group
:type description: string
:param parents: parents of this group
:type parents: list of int IDs
:param groups: child groups of this group
:type groups: list of int IDs
:param dry_run: if True, only print what would be done, do not make any changes
:type dry_run: boolean
:returns: int ID of the new group on success or False on failure
:rtype: int or False | contrib/cli_scripts/nodemeisterlib.py | add_group | coxmediagroup/nodemeister | python | def add_group(nm_host, name, description, parents=None, groups=None, dry_run=False):
'\n add a group to NodeMeister\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param name: name of the new group\n :type name: string\n :param description: description of the new group\n :type description: string\n :param parents: parents of this group\n :type parents: list of int IDs\n :param groups: child groups of this group\n :type groups: list of int IDs\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: int ID of the new group on success or False on failure\n :rtype: int or False\n '
payload = {'name': name, 'description': description}
if (parents is not None):
payload['parents'] = parents
if (groups is not None):
payload['groups'] = groups
url = ('http://%s/enc/groups/' % nm_host)
status_code = do_post(url, payload, dry_run=dry_run)
if (status_code == 201):
return get_nm_group_id(nm_host, name, dry_run=dry_run)
logger.error(('ERROR: add_group got status code %d' % status_code))
return False |
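A small creation sketch; the group name and description are placeholders, and dry_run=True only logs the POST that would be made:

from nodemeisterlib import add_group

gid = add_group('nodemeister.example.com', 'webservers', 'web tier group', dry_run=True)
print(gid)  # numeric ID on success, False on failure (0 in dry-run mode)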
def get_nm_group_id(nm_host, name, groups=None, dry_run=False):
'\n Get the group ID of a group specified by name\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param name: name of the new group\n :type name: string\n :param groups: dict of groups as returned by get_group_names()\n :type groups: dict\n :returns: int ID of the group or False on failure\n :rtype: int or False\n '
if dry_run:
return 0
if (groups is None):
groups = get_group_names(nm_host)
for n in groups:
if (groups[n] == name):
return n
return False | 6,712,355,395,058,232,000 | Get the group ID of a group specified by name
:param nm_host: NodeMeister hostname or IP
:type nm_host: string
:param name: name of the new group
:type name: string
:param groups: dict of groups as returned by get_group_names()
:type groups: dict
:returns: int ID of the group or False on failure
:rtype: int or False | contrib/cli_scripts/nodemeisterlib.py | get_nm_group_id | coxmediagroup/nodemeister | python | def get_nm_group_id(nm_host, name, groups=None, dry_run=False):
'\n Get the group ID of a group specified by name\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param name: name of the new group\n :type name: string\n :param groups: dict of groups as returned by get_group_names()\n :type groups: dict\n :returns: int ID of the group or False on failure\n :rtype: int or False\n '
if dry_run:
return 0
if (groups is None):
groups = get_group_names(nm_host)
for n in groups:
if (groups[n] == name):
return n
return False |
def add_param_to_group(nm_host, gid, pname, pval, dry_run=False):
'\n add a parameter to a group in NodeMeister\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param gid: numeric ID of the group to add param to\n :type gid: int\n :param pname: parameter name\n :type pname: string\n :param pval: parameter value\n :type pval: string\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: True on success or False on failure\n :rtype: boolean\n '
if (isinstance(pval, basestring) and ((pval.strip() == '') or (pval == '') or (pval == "''"))):
pval = None
payload = {'group': gid, 'paramkey': pname, 'paramvalue': pval}
url = ('http://%s/enc/parameters/groups/' % nm_host)
status_code = do_post(url, payload, dry_run=dry_run)
if (status_code == 201):
return True
logger.error(('ERROR: add_param_to_group got status code %d' % status_code))
return False | 7,117,024,628,070,776,000 | add a parameter to a group in NodeMeister
:param nm_host: NodeMeister hostname or IP
:type nm_host: string
:param gid: numeric ID of the group to add param to
:type gid: int
:param pname: parameter name
:type pname: string
:param pval: parameter value
:type pval: string
:param dry_run: if True, only print what would be done, do not make any changes
:type dry_run: boolean
:returns: True on success or False on failure
:rtype: boolean | contrib/cli_scripts/nodemeisterlib.py | add_param_to_group | coxmediagroup/nodemeister | python | def add_param_to_group(nm_host, gid, pname, pval, dry_run=False):
'\n add a parameter to a group in NodeMeister\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param gid: numeric ID of the group to add param to\n :type gid: int\n :param pname: parameter name\n :type pname: string\n :param pval: parameter value\n :type pval: string\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: True on success or False on failure\n :rtype: boolean\n '
if (isinstance(pval, basestring) and ((pval.strip() == '') or (pval == '') or (pval == "''"))):
pval = None
payload = {'group': gid, 'paramkey': pname, 'paramvalue': pval}
url = ('http://%s/enc/parameters/groups/' % nm_host)
status_code = do_post(url, payload, dry_run=dry_run)
if (status_code == 201):
return True
logger.error(('ERROR: add_param_to_group got status code %d' % status_code))
return False |
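An example of attaching a parameter to an existing group; the group name and parameter values are illustrative only:

from nodemeisterlib import get_nm_group_id, add_param_to_group

host = 'nodemeister.example.com'
gid = get_nm_group_id(host, 'webservers')
if gid is not False:
    # add_param_to_group normalises empty string values to None before POSTing.
    add_param_to_group(host, gid, 'ntp_server', 'ntp1.example.com')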
def add_class_to_group(nm_host, gid, classname, classparams=None, dry_run=False):
'\n add a class to a group in NodeMeister\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param gid: numeric ID of the group to add param to\n :type gid: int\n :param classname: class name\n :type classname: string\n :param classparams: class parameters, default None\n :type classparams: string or None\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: True on success or False on failure\n :rtype: boolean\n '
payload = {'group': gid, 'classname': classname, 'classparams': classparams}
url = ('http://%s/enc/classes/groups/' % nm_host)
status_code = do_post(url, payload, dry_run=dry_run)
if (status_code == 201):
return True
logger.error(('ERROR: add_class_to_group got status code %d' % status_code))
return False | -6,649,117,288,331,533,000 | add a class to a group in NodeMeister
:param nm_host: NodeMeister hostname or IP
:type nm_host: string
:param gid: numeric ID of the group to add param to
:type gid: int
:param classname: class name
:type classname: string
:param classparams: class parameters, default None
:type classparams: string or None
:param dry_run: if True, only print what would be done, do not make any changes
:type dry_run: boolean
:returns: True on success or False on failure
:rtype: boolean | contrib/cli_scripts/nodemeisterlib.py | add_class_to_group | coxmediagroup/nodemeister | python | def add_class_to_group(nm_host, gid, classname, classparams=None, dry_run=False):
'\n add a class to a group in NodeMeister\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param gid: numeric ID of the group to add param to\n :type gid: int\n :param classname: class name\n :type classname: string\n :param classparams: class parameters, default None\n :type classparams: string or None\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: True on success or False on failure\n :rtype: boolean\n '
payload = {'group': gid, 'classname': classname, 'classparams': classparams}
url = ('http://%s/enc/classes/groups/' % nm_host)
status_code = do_post(url, payload, dry_run=dry_run)
if (status_code == 201):
return True
logger.error(('ERROR: add_class_to_group got status code %d' % status_code))
return False |
def get_node_names(nm_host):
'\n Return a dict of nodes in the NM instance,\n id => hostname\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :rtype: dict\n :returns: NM nodes, dict of the form {id<int>: hostname<string>}\n '
j = get_json(('http://%s/enc/nodes/' % nm_host))
names = {}
for n in j:
names[n['id']] = n['hostname']
return names | -3,141,816,096,082,172,400 | Return a dict of nodes in the NM instance,
id => hostname
:param nm_host: NodeMeister hostname/IP
:type nm_host: string
:rtype: dict
:returns: NM nodes, dict of the form {id<int>: hostname<string>} | contrib/cli_scripts/nodemeisterlib.py | get_node_names | coxmediagroup/nodemeister | python | def get_node_names(nm_host):
'\n Return a dict of nodes in the NM instance,\n id => hostname\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :rtype: dict\n :returns: NM nodes, dict of the form {id<int>: hostname<string>}\n '
j = get_json(('http://%s/enc/nodes/' % nm_host))
names = {}
for n in j:
names[n['id']] = n['hostname']
return names |
def get_nm_node_id(nm_host, hostname, nodenames=None, dry_run=False):
'\n Get the node ID of a node specified by hostname\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param hostname: hostname of the node\n :type hostname: string\n :param nodenames: dict of nodes as returned by get_node_names()\n :type nodenames: dict\n :returns: int ID of the group or False on failure\n :rtype: int or False\n '
if dry_run:
return 0
if (nodenames is None):
nodenames = get_node_names(nm_host)
for n in nodenames:
if (nodenames[n] == hostname):
return n
logger.error('could not find node ID for {h}'.format(h=hostname))
return False | -3,084,336,057,350,448,000 | Get the node ID of a node specified by hostname
:param nm_host: NodeMeister hostname or IP
:type nm_host: string
:param hostname: hostname of the node
:type hostname: string
:param nodenames: dict of nodes as returned by get_node_names()
:type nodenames: dict
:returns: int ID of the group or False on failure
:rtype: int or False | contrib/cli_scripts/nodemeisterlib.py | get_nm_node_id | coxmediagroup/nodemeister | python | def get_nm_node_id(nm_host, hostname, nodenames=None, dry_run=False):
'\n Get the node ID of a node specified by hostname\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param hostname: hostname of the node\n :type hostname: string\n :param nodenames: dict of nodes as returned by get_node_names()\n :type nodenames: dict\n :returns: int ID of the group or False on failure\n :rtype: int or False\n '
if dry_run:
return 0
if (nodenames is None):
nodenames = get_node_names(nm_host)
for n in nodenames:
if (nodenames[n] == hostname):
return n
logger.error('could not find node ID for {h}'.format(h=hostname))
return False |
def get_nm_node(nm_host, hostname=None, node_id=None, nodenames=None):
"\n Return a dict of information about a node\n in NM, by either name or ID. If nodename is specified,\n it will be resolved to the id.\n\n nodenames, if specified, is the output dict from get_node_names();\n if it is not specified, get_node_names() will be called internally.\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :param hostname: name of node to get\n :type hostname: string\n :param node_id: ID of node to get, overrides hostname\n :type node_id: int\n :param nodenames: output of get_node_names(), to prevent calling it again if we already have it\n :type nodenames: dict\n :rtype: dict\n :returns: unserialized JSON dict representing the specified group, of the form:\n {'hostname': <string>, 'parameters': [<param IDs>], 'classes': [<class IDs>], 'parents': [<group IDs>],\n 'groups': [<group IDs>], 'id': <int>, 'description': <string>}\n "
if ((node_id is None) and (hostname is None)):
raise ValueError('get_nm_node called without hostname or node_id')
if (node_id is None):
if (nodenames is None):
nodenames = get_node_names(nm_host)
for n in nodenames:
if (nodenames[n] == hostname):
node_id = n
if (node_id is None):
logger.error('could not find node with hostname {h}'.format(h=hostname))
return {}
j = get_json(('http://%s/enc/nodes/%d/' % (nm_host, node_id)))
return j | -935,066,461,939,325,300 | Return a dict of information about a node
in NM, by either name or ID. If nodename is specified,
it will be resolved to the id.
nodenames, if specified, is the output dict from get_node_names();
if it is not specified, get_node_names() will be called internally.
:param nm_host: NodeMeister hostname/IP
:type nm_host: string
:param hostname: name of node to get
:type hostname: string
:param node_id: ID of node to get, overrides hostname
:type node_id: int
:param nodenames: output of get_node_names(), to prevent calling it again if we already have it
:type nodenames: dict
:rtype: dict
:returns: unserialized JSON dict representing the specified group, of the form:
{'hostname': <string>, 'parameters': [<param IDs>], 'classes': [<class IDs>], 'parents': [<group IDs>],
'groups': [<group IDs>], 'id': <int>, 'description': <string>} | contrib/cli_scripts/nodemeisterlib.py | get_nm_node | coxmediagroup/nodemeister | python | def get_nm_node(nm_host, hostname=None, node_id=None, nodenames=None):
"\n Return a dict of information about a node\n in NM, by either name or ID. If nodename is specified,\n it will be resolved to the id.\n\n nodenames, if specified, is the output dict from get_node_names();\n if it is not specified, get_node_names() will be called internally.\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :param hostname: name of node to get\n :type hostname: string\n :param node_id: ID of node to get, overrides hostname\n :type node_id: int\n :param nodenames: output of get_node_names(), to prevent calling it again if we already have it\n :type nodenames: dict\n :rtype: dict\n :returns: unserialized JSON dict representing the specified group, of the form:\n {'hostname': <string>, 'parameters': [<param IDs>], 'classes': [<class IDs>], 'parents': [<group IDs>],\n 'groups': [<group IDs>], 'id': <int>, 'description': <string>}\n "
if ((node_id is None) and (hostname is None)):
raise ValueError('get_nm_node called without hostname or node_id')
if (node_id is None):
if (nodenames is None):
nodenames = get_node_names(nm_host)
for n in nodenames:
if (nodenames[n] == hostname):
node_id = n
if (node_id is None):
logger.error('could not find node with hostname {h}'.format(h=hostname))
return {}
j = get_json(('http://%s/enc/nodes/%d/' % (nm_host, node_id)))
return j |
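A node-lookup sketch, again with placeholder hostnames and an assumed import path:

from nodemeisterlib import get_nm_node

node = get_nm_node('nodemeister.example.com', hostname='web01.example.com')
# Returns {} when the hostname cannot be resolved to a node ID.
print(node.get('groups', []))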
def get_nm_node_classes(nm_host):
"\n Return a dict of all node classes in NM,\n with their id as the dict key.\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :rtype: dict\n :returns: NM node classes, dict of the form:\n {id<int>: {'classname': <string>, 'classparams': <string or None>, 'node': <int>, 'id': <int>}\n "
r = {}
j = get_json(('http://%s/enc/classes/nodes/' % nm_host))
for o in j:
r[o['id']] = o
return r | 2,523,380,446,720,163,000 | Return a dict of all node classes in NM,
with their id as the dict key.
:param nm_host: NodeMeister hostname/IP
:type nm_host: string
:rtype: dict
:returns: NM node classes, dict of the form:
{id<int>: {'classname': <string>, 'classparams': <string or None>, 'node': <int>, 'id': <int>} | contrib/cli_scripts/nodemeisterlib.py | get_nm_node_classes | coxmediagroup/nodemeister | python | def get_nm_node_classes(nm_host):
"\n Return a dict of all node classes in NM,\n with their id as the dict key.\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :rtype: dict\n :returns: NM node classes, dict of the form:\n {id<int>: {'classname': <string>, 'classparams': <string or None>, 'node': <int>, 'id': <int>}\n "
r = {}
j = get_json(('http://%s/enc/classes/nodes/' % nm_host))
for o in j:
r[o['id']] = o
return r |
def get_nm_node_params(nm_host):
"\n Return a dict of all node params in NM,\n with their id as the dict key.\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :rtype: dict\n :returns: NM node params, dict of the form:\n {id<int>: {'paramkey': <string>, 'paramvalue': <string or None>, 'node': <int>, 'id': <int>}\n "
r = {}
j = get_json(('http://%s/enc/parameters/nodes/' % nm_host))
for o in j:
r[o['id']] = o
return r | 5,518,445,424,977,798,000 | Return a dict of all node params in NM,
with their id as the dict key.
:param nm_host: NodeMeister hostname/IP
:type nm_host: string
:rtype: dict
:returns: NM node params, dict of the form:
{id<int>: {'paramkey': <string>, 'paramvalue': <string or None>, 'node': <int>, 'id': <int>} | contrib/cli_scripts/nodemeisterlib.py | get_nm_node_params | coxmediagroup/nodemeister | python | def get_nm_node_params(nm_host):
"\n Return a dict of all node params in NM,\n with their id as the dict key.\n\n :param nm_host: NodeMeister hostname/IP\n :type nm_host: string\n :rtype: dict\n :returns: NM node params, dict of the form:\n {id<int>: {'paramkey': <string>, 'paramvalue': <string or None>, 'node': <int>, 'id': <int>}\n "
r = {}
j = get_json(('http://%s/enc/parameters/nodes/' % nm_host))
for o in j:
r[o['id']] = o
return r |
def add_node(nm_host, hostname, description, groups=None, dry_run=False):
'\n add a node to NodeMeister\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param hostname: hostname of the new node\n :type hostname: string\n :param description: description of the new node\n :type description: string\n :param groups: groups that this node is in\n :type groups: list of int IDs\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: int ID of the new node on success or False on failure\n :rtype: int or False\n '
payload = {'hostname': hostname, 'description': description}
if (groups is not None):
payload['groups'] = groups
url = ('http://%s/enc/nodes/' % nm_host)
logger.debug('adding node {h}'.format(h=hostname))
status_code = do_post(url, payload, dry_run=dry_run)
if (status_code == 201):
return get_nm_node_id(nm_host, hostname, dry_run=dry_run)
logger.error(('ERROR: add_node got status code %d' % status_code))
return False | 5,612,093,654,777,876,000 | add a node to NodeMeister
:param nm_host: NodeMeister hostname or IP
:type nm_host: string
:param hostname: hostname of the new node
:type hostname: string
:param description: description of the new node
:type description: string
:param groups: groups that this node is in
:type groups: list of int IDs
:param dry_run: if True, only print what would be done, do not make any changes
:type dry_run: boolean
:returns: int ID of the new node on success or False on failure
:rtype: int or False | contrib/cli_scripts/nodemeisterlib.py | add_node | coxmediagroup/nodemeister | python | def add_node(nm_host, hostname, description, groups=None, dry_run=False):
'\n add a node to NodeMeister\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param hostname: hostname of the new node\n :type hostname: string\n :param description: description of the new node\n :type description: string\n :param groups: groups that this node is in\n :type groups: list of int IDs\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: int ID of the new node on success or False on failure\n :rtype: int or False\n '
payload = {'hostname': hostname, 'description': description}
if (groups is not None):
payload['groups'] = groups
url = ('http://%s/enc/nodes/' % nm_host)
logger.debug('adding node {h}'.format(h=hostname))
status_code = do_post(url, payload, dry_run=dry_run)
if (status_code == 201):
return get_nm_node_id(nm_host, hostname, dry_run=dry_run)
logger.error(('ERROR: add_node got status code %d' % status_code))
return False |
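A sketch that registers a node and then decorates it with a parameter and a class, using hypothetical group IDs, class and parameter names (the helpers used here are defined in the rows that follow):

from nodemeisterlib import add_node, add_param_to_node, add_class_to_node

host = 'nodemeister.example.com'
node_id = add_node(host, 'web01.example.com', 'added by hand', groups=[3, 7])
if node_id is not False:
    add_param_to_node(host, node_id, 'datacenter', 'us-east-1')
    add_class_to_node(host, node_id, 'apache')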
def add_param_to_node(nm_host, node_id, pname, pval, dry_run=False):
'\n add a parameter to a node in NodeMeister\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param node_id: numeric ID of the node to add param to\n :type node_id: int\n :param pname: parameter name\n :type pname: string\n :param pval: parameter value\n :type pval: string\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: True on success or False on failure\n :rtype: boolean\n '
if ((pval.strip() == '') or (pval == '') or (pval == "''")):
pval = None
payload = {'node': node_id, 'paramkey': pname, 'paramvalue': pval}
url = ('http://%s/enc/parameters/nodes/' % nm_host)
logger.debug("adding param '{pname}' to node {n} with val: {pval}".format(n=node_id, pname=pname, pval=pval))
status_code = do_post(url, payload, dry_run=dry_run)
if (status_code == 201):
return True
logger.error(('ERROR: add_param_to_node got status code %d' % status_code))
return False | 8,472,072,113,677,377,000 | add a parameter to a node in NodeMeister
:param nm_host: NodeMeister hostname or IP
:type nm_host: string
:param node_id: numeric ID of the node to add param to
:type node_id: int
:param pname: parameter name
:type pname: string
:param pval: parameter value
:type pval: string
:param dry_run: if True, only print what would be done, do not make any changes
:type dry_run: boolean
:returns: True on success or False on failure
:rtype: boolean | contrib/cli_scripts/nodemeisterlib.py | add_param_to_node | coxmediagroup/nodemeister | python | def add_param_to_node(nm_host, node_id, pname, pval, dry_run=False):
'\n add a parameter to a node in NodeMeister\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param node_id: numeric ID of the node to add param to\n :type node_id: int\n :param pname: parameter name\n :type pname: string\n :param pval: parameter value\n :type pval: string\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: True on success or False on failure\n :rtype: boolean\n '
if ((pval.strip() == '') or (pval == '') or (pval == "''")):
pval = None
payload = {'node': node_id, 'paramkey': pname, 'paramvalue': pval}
url = ('http://%s/enc/parameters/nodes/' % nm_host)
logger.debug("adding param '{pname}' to node {n} with val: {pval}".format(n=node_id, pname=pname, pval=pval))
status_code = do_post(url, payload, dry_run=dry_run)
if (status_code == 201):
return True
logger.error(('ERROR: add_param_to_node got status code %d' % status_code))
return False |
def add_class_to_node(nm_host, node_id, classname, classparams=None, dry_run=False):
'\n add a class to a node in NodeMeister\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param node_id: numeric ID of the node to add param to\n :type node_id: int\n :param classname: class name\n :type classname: string\n :param classparams: class parameters, default None\n :type classparams: string or None\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: True on success or False on failure\n :rtype: boolean\n '
payload = {'node': node_id, 'classname': classname, 'classparams': classparams}
url = ('http://%s/enc/classes/nodes/' % nm_host)
logger.debug("adding class '{cn}' to node {n} with params: {cp}".format(n=node_id, cn=classname, cp=classparams))
status_code = do_post(url, payload, dry_run=dry_run)
if (status_code == 201):
return True
logger.error(('ERROR: add_class_to_node got status code %d' % status_code))
return False | -8,682,323,673,473,580,000 | add a class to a node in NodeMeister
:param nm_host: NodeMeister hostname or IP
:type nm_host: string
:param node_id: numeric ID of the node to add param to
:type node_id: int
:param classname: class name
:type classname: string
:param classparams: class parameters, default None
:type classparams: string or None
:param dry_run: if True, only print what would be done, do not make any changes
:type dry_run: boolean
:returns: True on success or False on failure
:rtype: boolean | contrib/cli_scripts/nodemeisterlib.py | add_class_to_node | coxmediagroup/nodemeister | python | def add_class_to_node(nm_host, node_id, classname, classparams=None, dry_run=False):
'\n add a class to a node in NodeMeister\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param node_id: numeric ID of the node to add param to\n :type node_id: int\n :param classname: class name\n :type classname: string\n :param classparams: class parameters, default None\n :type classparams: string or None\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: True on success or False on failure\n :rtype: boolean\n '
payload = {'node': node_id, 'classname': classname, 'classparams': classparams}
url = ('http://%s/enc/classes/nodes/' % nm_host)
logger.debug("adding class '{cn}' to node {n} with params: {cp}".format(n=node_id, cn=classname, cp=classparams))
status_code = do_post(url, payload, dry_run=dry_run)
if (status_code == 201):
return True
logger.error(('ERROR: add_class_to_node got status code %d' % status_code))
return False |
def get_name_for_class_exclusion(nm_host, class_exclusion_id, verbose):
'\n Get the excluded class name for a given ClassExclusion ID.\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param class_exclusion_id: numeric ID of the class exclusion\n :type class_exclusion_id: int\n :returns: string name of class, or False on failure\n :rtype: string or False\n '
r = {}
j = get_json(('http://%s/enc/exclusions/classes/' % nm_host))
if (j is None):
return False
for o in j:
if (o['id'] == class_exclusion_id):
return o['exclusion']
return False | 5,429,085,462,293,692,000 | Get the excluded class name for a given ClassExclusion ID.
:param nm_host: NodeMeister hostname or IP
:type nm_host: string
:param class_exclusion_id: numeric ID of the class exclusion
:type class_exclusion_id: int
:returns: string name of class, or False on failure
:rtype: string or False | contrib/cli_scripts/nodemeisterlib.py | get_name_for_class_exclusion | coxmediagroup/nodemeister | python | def get_name_for_class_exclusion(nm_host, class_exclusion_id, verbose):
'\n Get the excluded class name for a given ClassExclusion ID.\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param class_exclusion_id: numeric ID of the class exclusion\n :type class_exclusion_id: int\n :returns: string name of class, or False on faliure\n :rtype: string or False\n '
r = {}
j = get_json(('http://%s/enc/exclusions/classes/' % nm_host))
if (j is None):
return False
for o in j:
if (o['id'] == class_exclusion_id):
return o['exclusion']
return False |
def add_node_class_exclusion(nm_host, node_id, classname, dry_run=False, verbose=False):
'\n add a class exclusion to a node in NodeMeister\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param node_id: numeric ID of the node to add param to\n :type node_id: int\n :param classname: class name to exclude\n :type classname: string\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: True on success or False on failure\n :rtype: boolean\n '
payload = {'node': node_id, 'exclusion': classname}
url = ('http://%s/enc/exclusions/classes/' % nm_host)
logger.debug("adding class exclusion for '{cn}' to node {n}".format(n=node_id, cn=classname))
status_code = do_post(url, payload, dry_run=dry_run)
if (status_code == 201):
return True
logger.error(('ERROR: add_node_class_exclusion got status code %d' % status_code))
return False | 9,084,398,723,056,467,000 | add a class exclusion to a node in NodeMeister
:param nm_host: NodeMeister hostname or IP
:type nm_host: string
:param node_id: numeric ID of the node to add param to
:type node_id: int
:param classname: class name to exclude
:type classname: string
:param dry_run: if True, only print what would be done, do not make any changes
:type dry_run: boolean
:returns: True on success or False on failure
:rtype: boolean | contrib/cli_scripts/nodemeisterlib.py | add_node_class_exclusion | coxmediagroup/nodemeister | python | def add_node_class_exclusion(nm_host, node_id, classname, dry_run=False, verbose=False):
'\n add a class exclusion to a node in NodeMeister\n\n :param nm_host: NodeMeister hostname or IP\n :type nm_host: string\n :param node_id: numeric ID of the node to add param to\n :type node_id: int\n :param classname: class name to exclude\n :type classname: string\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: True on success or False on failure\n :rtype: boolean\n '
payload = {'node': node_id, 'exclusion': classname}
url = ('http://%s/enc/exclusions/classes/' % nm_host)
logger.debug("adding class exclusion for '{cn}' to node {n}".format(n=node_id, cn=classname))
status_code = do_post(url, payload, dry_run=dry_run)
if (status_code == 201):
return True
logger.error(('ERROR: add_node_class_exclusion got status code %d' % status_code))
return False |
def clean_value(v, debug=False):
'\n Strip bad characters off of values\n '
if debug:
print(("clean_value '%s'" % v))
if ((type(v) == type('')) or (type(v) == type(u''))):
v = v.strip('"\\')
return v | -7,613,022,941,749,971,000 | Strip bad characters off of values | contrib/cli_scripts/nodemeisterlib.py | clean_value | coxmediagroup/nodemeister | python | def clean_value(v, debug=False):
'\n \n '
if debug:
print(("clean_value '%s'" % v))
if ((type(v) == type('')) or (type(v) == type(u''))):
v = v.strip('"\\')
return v |
def do_post(url, payload, dry_run=False):
'\n Do a POST request with Requests, return the status code.\n\n :param url: URL to POST to\n :type nm_host: string\n :param payload: the payload data, to be JSON encoded\n :type name: dict\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: HTTP status code from the request\n :rtype: int\n '
headers = {'content-type': 'application/json'}
if dry_run:
logger.warning(('DRY RUN: do_post to url %s - payload:\n\t%s\n' % (url, payload)))
return 201
r = requests.post(url, data=anyjson.serialize(payload), headers=headers)
return r.status_code | 7,076,742,732,408,014,000 | Do a POST request with Requests, return the status code.
:param url: URL to POST to
:type nm_host: string
:param payload: the payload data, to be JSON encoded
:type name: dict
:param dry_run: if True, only print what would be done, do not make any changes
:type dry_run: boolean
:returns: HTTP status code from the request
:rtype: int | contrib/cli_scripts/nodemeisterlib.py | do_post | coxmediagroup/nodemeister | python | def do_post(url, payload, dry_run=False):
'\n Do a POST request with Requests, return the status code.\n\n :param url: URL to POST to\n :type nm_host: string\n :param payload: the payload data, to be JSON encoded\n :type name: dict\n :param dry_run: if True, only print what would be done, do not make any changes\n :type dry_run: boolean\n :returns: HTTP status code from the request\n :rtype: int\n '
headers = {'content-type': 'application/json'}
if dry_run:
logger.warning(('DRY RUN: do_post to url %s - payload:\n\t%s\n' % (url, payload)))
return 201
r = requests.post(url, data=anyjson.serialize(payload), headers=headers)
return r.status_code |
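do_post is the shared write helper: it JSON-encodes the payload, returns only the HTTP status code, and in dry_run mode short-circuits with 201 so callers treat the call as a success. A small illustration, with a placeholder URL and payload:

from nodemeisterlib import do_post

status = do_post('http://nodemeister.example.com/enc/groups/',
                 {'name': 'demo', 'description': 'example group'}, dry_run=True)
print(status)  # 201 here; a real POST returns whatever the server sends back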
def clone_nodemeister_node(nm_host, dst_name, src_name, munge_re, group_replace=None, noop=False, verbose=False):
'\n Clone a node in nodemeister, munging all parameters and class params through munge_re,\n a list of lists, each having 2 elements, a regex and a string to replace matches with.\n\n group_replace is a hash of old_group_id => new_group_id to replace when creating the new node\n '
nodes = get_node_names(nm_host)
dst_node_id = get_nm_node_id(nm_host, dst_name, nodenames=nodes)
if (dst_node_id is not False):
logger.error(('ERROR: node %s already exists in NodeMeister with id %d.' % (dst_name, dst_node_id)))
return False
src_node = get_nm_node(nm_host, hostname=src_name, nodenames=nodes)
if (len(src_node) == 0):
logger.error(('ERROR: could not find source node %s' % src_name))
return False
if verbose:
logger.debug('Got source node id: {n}\n{src}'.format(n=src_node['id'], src=src_node))
classes = get_nm_node_classes(nm_host)
params = get_nm_node_params(nm_host)
groups = []
for g in src_node['groups']:
if (group_replace is not None):
if (g in group_replace):
if verbose:
logger.debug((' changing group %d to %d (group_replace)' % (g, group_replace[g])))
g = group_replace[g]
groups.append(g)
node_id = add_node(nm_host, dst_name, ('imported by %s' % __file__), groups=groups, dry_run=noop)
if (node_id is False):
logger.error('ERROR adding node in Nodemeister.')
return False
else:
logger.info(('Node added to NodeMeister with id %d' % node_id))
ok = True
for c in src_node['excluded_classes']:
c_name = get_name_for_class_exclusion(nm_host, c, verbose=verbose)
if verbose:
logger.debug(('excluded class %s (%d)' % (c_name, c)))
res = add_node_class_exclusion(nm_host, node_id, c_name, dry_run=noop, verbose=verbose)
if (not res):
logger.error(("ERROR adding class exclusion of '%s' to node %d" % (c_name, node_id)))
ok = False
if verbose:
logger.info(("added class_exclusion of '%s' to node %d" % (c_name, node_id)))
for p in src_node['parameters']:
for (ptn, repl) in munge_re:
foo = re.sub(ptn, repl, src_node['parameters'][p])
if ((foo != src_node['parameters'][p]) and verbose):
logger.debug(("Munged value of '%s' from '%s' to '%s'" % (p, src_node['parameters'][p], foo)))
src_node['parameters'][p] = foo
res = add_param_to_node(nm_host, node_id, p, src_node['parameters'][p], dry_run=noop)
if (not res):
logger.error(("ERROR adding param %s with value '%s' to node %d" % (p, src_node['parameters'][p], node_id)))
ok = False
if verbose:
logger.info(("\tadded param %s with value '%s' to node %d" % (p, src_node['parameters'][p], node_id)))
if (len(src_node['classes']) > 0):
logger.critical('ERROR: script does not yet migrate classes for nodes.')
ok = False
if (ok is False):
return False
return node_id | -3,694,976,738,779,596,000 | Clone a node in nodemeister, munging all parameters and class params through munge_re,
a list of lists, each having 2 elements, a regex and a string to replace matches with.
group_replace is a hash of old_group_id => new_group_id to replace when creating the new node | contrib/cli_scripts/nodemeisterlib.py | clone_nodemeister_node | coxmediagroup/nodemeister | python | def clone_nodemeister_node(nm_host, dst_name, src_name, munge_res, group_replace=None, noop=False, verbose=False):
'\n Clone a node in nodemeister, munging all parameters and class params through munge_re,\n a list of lists, each having 2 elements, a regex and a string to replace matches with.\n\n group_replace is a hash of old_group_id => new_group_id to replace when creating the new node\n '
nodes = get_node_names(nm_host)
dst_node_id = get_nm_node_id(nm_host, dst_name, nodenames=nodes)
if (dst_node_id is not False):
logger.error(('ERROR: node %s already exists in NodeMeister with id %d.' % (dst_name, dst_node_id)))
return False
src_node = get_nm_node(nm_host, hostname=src_name, nodenames=nodes)
if (len(src_node) == 0):
logger.error(('ERROR: could not find source node %s' % src_name))
return False
if verbose:
logger.debug('Got source node id: {n}\n{src}'.format(n=src_node['id'], src=src_node))
classes = get_nm_node_classes(nm_host)
params = get_nm_node_params(nm_host)
groups = []
for g in src_node['groups']:
if (group_replace is not None):
if (g in group_replace):
if verbose:
logger.debug((' changing group %d to %d (group_replace)' % (g, group_replace[g])))
g = group_replace[g]
groups.append(g)
node_id = add_node(nm_host, dst_name, ('imported by %s' % __file__), groups=groups, dry_run=noop)
if (node_id is False):
logger.error('ERROR adding node in Nodemeister.')
return False
else:
logger.info(('Node added to NodeMeister with id %d' % node_id))
ok = True
for c in src_node['excluded_classes']:
c_name = get_name_for_class_exclusion(nm_host, c, verbose=verbose)
if verbose:
logger.debug(('excluded class %s (%d)' % (c_name, c)))
res = add_node_class_exclusion(nm_host, node_id, c_name, dry_run=noop, verbose=verbose)
if (not res):
logger.error(("ERROR adding class exclusion of '%s' to node %d" % (c_name, node_id)))
ok = False
if verbose:
logger.info(("added class_exclusion of '%s' to node %d" % (c_name, node_id)))
for p in src_node['parameters']:
for (ptn, repl) in munge_re:
foo = re.sub(ptn, repl, src_node['parameters'][p])
if ((foo != src_node['parameters'][p]) and verbose):
logger.debug(("Munged value of '%s' from '%s' to '%s'" % (p, src_node['parameters'][p], foo)))
src_node['parameters'][p] = foo
res = add_param_to_node(nm_host, node_id, p, src_node['parameters'][p], dry_run=noop)
if (not res):
logger.error(("ERROR adding param %s with value '%s' to node %d" % (p, src_node['parameters'][p], node_id)))
ok = False
if verbose:
logger.info(("\tadded param %s with value '%s' to node %d" % (p, src_node['parameters'][p], node_id)))
if (len(src_node['classes']) > 0):
logger.critical('ERROR: script does not yet migrate classes for nodes.')
ok = False
if (ok is False):
return False
return node_id |
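A clone-node sketch; the regex/replacement pair, hostnames and group-ID mapping are purely illustrative, and noop=True only logs what would be POSTed:

from nodemeisterlib import clone_nodemeister_node

munge = [[r'dc1\.example\.com', 'dc2.example.com']]  # [[pattern, replacement], ...]
new_id = clone_nodemeister_node('nodemeister.example.com',
                                'web01-dc2.example.com',  # destination (must not exist yet)
                                'web01-dc1.example.com',  # source node to copy
                                munge, group_replace={12: 34}, noop=True)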
def clone_nodemeister_group(nm_host, dst_gname, src_gname, munge_re=None, noop=False, verbose=False):
'\n Clone a group in nodemeister, munging all parameters and class params through munge_re,\n a list of lists, each having 2 elements, a regex and a string to replace matches with.\n '
group_names = get_group_names(nm_host)
dst_gid = get_nm_group_id(nm_host, dst_gname, groups=group_names)
if (dst_gid is not False):
logger.error(('ERROR: group %s already exists in NodeMeister with id %d.' % (dst_gname, dst_gid)))
return False
src_group = get_nm_group(nm_host, gname=src_gname, groupnames=group_names)
if (len(src_group) == 0):
logger.error(('ERROR: could not find source group %s' % src_gname))
return False
if verbose:
logger.debug('Got source group id: {n}\n{src}'.format(n=src_group['id'], src=src_group))
classes = get_nm_group_classes(nm_host)
params = get_nm_group_params(nm_host)
interp_src_group = interpolate_group(src_group, classes, params, group_names)
groups = []
for foo in src_group['groups']:
bar = get_nm_group_id(nm_host, foo, groups=group_names)
if bar:
groups.append(bar)
gid = add_group(nm_host, dst_gname, ('imported by %s' % __file__), groups=groups, dry_run=noop)
if (gid is False):
logger.error('ERROR adding group in Nodemeister.')
return False
else:
logger.info(('Group added to NodeMeister with id %d' % gid))
ok = True
for p in src_group['parameters']:
for (ptn, repl) in munge_re:
foo = re.sub(ptn, repl, src_group['parameters'][p])
if ((foo != src_group['parameters'][p]) and verbose):
logger.debug(("Munged value of '%s' from '%s' to '%s'" % (p, src_group['parameters'][p], foo)))
src_group['parameters'][p] = foo
res = add_param_to_group(nm_host, gid, p, src_group['parameters'][p], dry_run=noop)
if (not res):
logger.error(("ERROR adding param %s with value '%s' to group %d" % (p, src_group['parameters'][p], gid)))
ok = False
if verbose:
logger.info(("added param %s with value '%s' to group %d" % (p, src_group['parameters'][p], gid)))
for c in src_group['classes']:
for (ptn, repl) in munge_re:
foo = re.sub(ptn, repl, src_group['classes'][c])
if ((foo != src_group['classes'][c]) and verbose):
logger.debug(("Munged value of '%s' from '%s' to '%s'" % (c, src_group['classes'][c], foo)))
src_group['classes'][c] = foo
res = add_class_to_group(nm_host, gid, c, src_group['classes'][c], dry_run=noop)
if (not res):
logger.error(("ERROR adding class %s with value '%s' to group %d" % (c, src_group['classes'][c], gid)))
ok = False
if verbose:
logger.info(("added class %s with value '%s' to group %d" % (c, src_group['classes'][c], gid)))
if (ok is False):
logger.critical('cloning group failed.')
return False
return gid | 321,454,846,756,746,200 | Clone a group in nodemeister, munging all parameters and class params through munge_re,
a list of lists, each having 2 elements, a regex and a string to replace matches with. | contrib/cli_scripts/nodemeisterlib.py | clone_nodemeister_group | coxmediagroup/nodemeister | python | def clone_nodemeister_group(nm_host, dst_gname, src_gname, munge_re=None, noop=False, verbose=False):
'\n Clone a group in nodemeister, munging all parameters and class params through munge_re,\n a list of lists, each having 2 elements, a regex and a string to replace matches with.\n '
group_names = get_group_names(nm_host)
dst_gid = get_nm_group_id(nm_host, dst_gname, groups=group_names)
if (dst_gid is not False):
logger.error(('ERROR: group %s already exists in NodeMeister with id %d.' % (dst_gname, dst_gid)))
return False
src_group = get_nm_group(nm_host, gname=src_gname, groupnames=group_names)
if (len(src_group) == 0):
logger.error(('ERROR: could not find source group %s' % src_gname))
return False
if verbose:
logger.debug('Got source group id: {n}\n{src}'.format(n=src_group['id'], src=src_group))
classes = get_nm_group_classes(nm_host)
params = get_nm_group_params(nm_host)
interp_src_group = interpolate_group(src_group, classes, params, group_names)
groups = []
for foo in src_group['groups']:
bar = get_nm_group_id(nm_host, foo, groups=group_names)
if bar:
groups.append(bar)
gid = add_group(nm_host, dst_gname, ('imported by %s' % __file__), groups=groups, dry_run=noop)
if (gid is False):
logger.error('ERROR adding group in Nodemeister.')
return False
else:
logger.info(('Group added to NodeMeister with id %d' % gid))
ok = True
for p in src_group['parameters']:
for (ptn, repl) in munge_re:
foo = re.sub(ptn, repl, src_group['parameters'][p])
if ((foo != src_group['parameters'][p]) and verbose):
logger.debug(("Munged value of '%s' from '%s' to '%s'" % (p, src_group['parameters'][p], foo)))
src_group['parameters'][p] = foo
res = add_param_to_group(nm_host, gid, p, src_group['parameters'][p], dry_run=noop)
if (not res):
logger.error(("ERROR adding param %s with value '%s' to group %d" % (p, src_group['parameters'][p], gid)))
ok = False
if verbose:
logger.info(("added param %s with value '%s' to group %d" % (p, src_group['parameters'][p], gid)))
for c in src_group['classes']:
for (ptn, repl) in munge_re:
foo = re.sub(ptn, repl, src_group['classes'][c])
if ((foo != src_group['classes'][c]) and verbose):
logger.debug(("Munged value of '%s' from '%s' to '%s'" % (c, src_group['classes'][c], foo)))
src_group['classes'][c] = foo
res = add_class_to_group(nm_host, gid, c, src_group['classes'][c], dry_run=noop)
if (not res):
logger.error(("ERROR adding class %s with value '%s' to group %d" % (c, src_group['classes'][c], gid)))
ok = False
if verbose:
logger.info(("added class %s with value '%s' to group %d" % (c, src_group['classes'][c], gid)))
if (ok is False):
logger.critical('cloning group failed.')
return False
return gid |
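A minimal usage sketch for clone_nodemeister_group as defined above; the NodeMeister host, group names, and substitution patterns are hypothetical placeholders, it assumes nodemeisterlib.py is importable, and munge_re must be supplied because the function iterates over it unconditionally.
# Hypothetical dry run: clone 'web-prod' to 'web-stage', rewriting 'prod' substrings in every
# parameter and class parameter value before they are written to the new group.
from nodemeisterlib import clone_nodemeister_group

munge_re = [
    [r'prod', 'stage'],                          # each entry is [pattern, replacement]
    [r'dc1\.example\.com', 'dc2.example.com'],
]
new_gid = clone_nodemeister_group('nodemeister.example.com', 'web-stage', 'web-prod',
                                  munge_re=munge_re, noop=True, verbose=True)
print('clone result:', new_gid)                  # group id on success, False on failure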
def DetectGae():
"Determine whether or not we're running on GAE.\n\n This is based on:\n https://developers.google.com/appengine/docs/python/#The_Environment\n\n Returns:\n True iff we're running on GAE.\n "
server_software = os.environ.get('SERVER_SOFTWARE', '')
return (server_software.startswith('Development/') or server_software.startswith('Google App Engine/')) | 6,583,939,300,005,637,000 | Determine whether or not we're running on GAE.
This is based on:
https://developers.google.com/appengine/docs/python/#The_Environment
Returns:
True iff we're running on GAE. | .install/.backup/lib/apitools/base/py/util.py | DetectGae | Technology-Hatchery/google-cloud-sdk | python | def DetectGae():
"Determine whether or not we're running on GAE.\n\n This is based on:\n https://developers.google.com/appengine/docs/python/#The_Environment\n\n Returns:\n True iff we're running on GAE.\n "
server_software = os.environ.get('SERVER_SOFTWARE', '')
return (server_software.startswith('Development/') or server_software.startswith('Google App Engine/')) |
def DetectGce():
"Determine whether or not we're running on GCE.\n\n This is based on:\n https://developers.google.com/compute/docs/instances#dmi\n\n Returns:\n True iff we're running on a GCE instance.\n "
try:
o = urllib2.urlopen('http://metadata.google.internal')
except urllib2.URLError:
return False
return (o.getcode() == httplib.OK) | -1,671,743,839,594,448,400 | Determine whether or not we're running on GCE.
This is based on:
https://developers.google.com/compute/docs/instances#dmi
Returns:
True iff we're running on a GCE instance. | .install/.backup/lib/apitools/base/py/util.py | DetectGce | Technology-Hatchery/google-cloud-sdk | python | def DetectGce():
"Determine whether or not we're running on GCE.\n\n This is based on:\n https://developers.google.com/compute/docs/instances#dmi\n\n Returns:\n True iff we're running on a GCE instance.\n "
try:
o = urllib2.urlopen('http://metadata.google.internal')
except urllib2.URLError:
return False
return (o.getcode() == httplib.OK) |
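As a sketch, the two detection helpers above are typically used together to branch on the runtime environment; the import path follows the util module shown in the path column and is an assumption about how the package is installed.
from apitools.base.py import util

if util.DetectGae():
    print('Running on App Engine')
elif util.DetectGce():
    print('Running on a GCE instance')
else:
    print('Running outside Google Cloud; fall back to stored user credentials')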
def NormalizeScopes(scope_spec):
'Normalize scope_spec to a set of strings.'
if isinstance(scope_spec, types.StringTypes):
return set(scope_spec.split(' '))
elif isinstance(scope_spec, collections.Iterable):
return set(scope_spec)
raise exceptions.TypecheckError(('NormalizeScopes expected string or iterable, found %s' % (type(scope_spec),))) | 7,627,925,049,917,214,000 | Normalize scope_spec to a set of strings. | .install/.backup/lib/apitools/base/py/util.py | NormalizeScopes | Technology-Hatchery/google-cloud-sdk | python | def NormalizeScopes(scope_spec):
if isinstance(scope_spec, types.StringTypes):
return set(scope_spec.split(' '))
elif isinstance(scope_spec, collections.Iterable):
return set(scope_spec)
raise exceptions.TypecheckError(('NormalizeScopes expected string or iterable, found %s' % (type(scope_spec),))) |
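NormalizeScopes accepts either a space-separated string or any iterable and always returns a set, so the two calls in this short sketch are equivalent (import path assumed as in the record above).
from apitools.base.py.util import NormalizeScopes

scopes_from_string = NormalizeScopes('email profile')
scopes_from_list = NormalizeScopes(['email', 'profile'])
assert scopes_from_string == scopes_from_list == {'email', 'profile'}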
def __init__(self, model: Model, sampler: Optional[MCSampler]=None, objective: Optional[MCAcquisitionObjective]=None, X_pending: Optional[Tensor]=None) -> None:
'Constructor for the MCAcquisitionFunction base class.\n\n Args:\n model: A fitted model.\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. Defaults to `IdentityMCObjective()`.\n X_pending: A `m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation\n but have not yet been evaluated.\n '
super().__init__(model=model)
if (sampler is None):
sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
self.add_module('sampler', sampler)
if (objective is None):
objective = IdentityMCObjective()
elif (not isinstance(objective, MCAcquisitionObjective)):
raise UnsupportedError('Only objectives of type MCAcquisitionObjective are supported for MC acquisition functions.')
self.add_module('objective', objective)
self.set_X_pending(X_pending) | -5,483,613,012,783,740,000 | Constructor for the MCAcquisitionFunction base class.
Args:
model: A fitted model.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation
but have not yet been evaluated. | botorch/acquisition/monte_carlo.py | __init__ | BradyBromley/botorch | python | def __init__(self, model: Model, sampler: Optional[MCSampler]=None, objective: Optional[MCAcquisitionObjective]=None, X_pending: Optional[Tensor]=None) -> None:
'Constructor for the MCAcquisitionFunction base class.\n\n Args:\n model: A fitted model.\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)`.\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. Defaults to `IdentityMCObjective()`.\n X_pending: A `m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation\n but have not yet been evaluated.\n '
super().__init__(model=model)
if (sampler is None):
sampler = SobolQMCNormalSampler(num_samples=512, collapse_batch_dims=True)
self.add_module('sampler', sampler)
if (objective is None):
objective = IdentityMCObjective()
elif (not isinstance(objective, MCAcquisitionObjective)):
raise UnsupportedError('Only objectives of type MCAcquisitionObjective are supported for MC acquisition functions.')
self.add_module('objective', objective)
self.set_X_pending(X_pending) |
@abstractmethod
def forward(self, X: Tensor) -> Tensor:
'Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim\n design points each, and returns a one-dimensional Tensor with\n `(b)` elements. Should utilize the result of set_X_pending as needed\n to account for pending function evaluations.\n '
pass | 216,779,565,676,812,380 | Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim
design points each, and returns a one-dimensional Tensor with
`(b)` elements. Should utilize the result of set_X_pending as needed
to account for pending function evaluations. | botorch/acquisition/monte_carlo.py | forward | BradyBromley/botorch | python | @abstractmethod
def forward(self, X: Tensor) -> Tensor:
'Takes in a `(b) x q x d` X Tensor of `(b)` t-batches with `q` `d`-dim\n design points each, and returns a one-dimensional Tensor with\n `(b)` elements. Should utilize the result of set_X_pending as needed\n to account for pending function evaluations.\n '
pass |
def __init__(self, model: Model, best_f: Union[(float, Tensor)], sampler: Optional[MCSampler]=None, objective: Optional[MCAcquisitionObjective]=None, X_pending: Optional[Tensor]=None) -> None:
'q-Expected Improvement.\n\n Args:\n model: A fitted model.\n best_f: The best objective value observed so far (assumed noiseless).\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. Defaults to `IdentityMCObjective()`.\n X_pending: A `m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation\n but have not yet been evaluated. Concatenated into X upon\n forward call. Copied and set to have no gradient.\n '
super().__init__(model=model, sampler=sampler, objective=objective, X_pending=X_pending)
if (not torch.is_tensor(best_f)):
best_f = torch.tensor(float(best_f))
self.register_buffer('best_f', best_f) | 821,717,853,403,361,700 | q-Expected Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient. | botorch/acquisition/monte_carlo.py | __init__ | BradyBromley/botorch | python | def __init__(self, model: Model, best_f: Union[(float, Tensor)], sampler: Optional[MCSampler]=None, objective: Optional[MCAcquisitionObjective]=None, X_pending: Optional[Tensor]=None) -> None:
'q-Expected Improvement.\n\n Args:\n model: A fitted model.\n best_f: The best objective value observed so far (assumed noiseless).\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. Defaults to `IdentityMCObjective()`.\n X_pending: A `m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation\n but have not yet been evaluated. Concatenated into X upon\n forward call. Copied and set to have no gradient.\n '
super().__init__(model=model, sampler=sampler, objective=objective, X_pending=X_pending)
if (not torch.is_tensor(best_f)):
best_f = torch.tensor(float(best_f))
self.register_buffer('best_f', best_f) |
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
'Evaluate qExpectedImprovement on the candidate set `X`.\n\n Args:\n X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim\n design points each.\n\n Returns:\n A `(b)`-dim Tensor of Expected Improvement values at the given\n design points `X`.\n '
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
obj = (obj - self.best_f).clamp_min(0)
q_ei = obj.max(dim=(- 1))[0].mean(dim=0)
return q_ei | 1,334,818,452,204,513,800 | Evaluate qExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Expected Improvement values at the given
design points `X`. | botorch/acquisition/monte_carlo.py | forward | BradyBromley/botorch | python | @concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
'Evaluate qExpectedImprovement on the candidate set `X`.\n\n Args:\n X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim\n design points each.\n\n Returns:\n A `(b)`-dim Tensor of Expected Improvement values at the given\n design points `X`.\n '
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
obj = (obj - self.best_f).clamp_min(0)
q_ei = obj.max(dim=(- 1))[0].mean(dim=0)
return q_ei |
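A minimal end-to-end sketch of constructing and evaluating qExpectedImprovement; it assumes the standard BoTorch SingleTaskGP and GPyTorch marginal log likelihood fitted on toy data, and the candidate tensor follows the `(b) x q x d` convention documented above.
import torch
from botorch.models import SingleTaskGP
from botorch.fit import fit_gpytorch_model
from gpytorch.mlls import ExactMarginalLogLikelihood
from botorch.acquisition.monte_carlo import qExpectedImprovement

train_X = torch.rand(10, 2, dtype=torch.double)               # 10 observed 2-d points
train_Y = train_X.sum(dim=-1, keepdim=True)                   # toy objective values
model = SingleTaskGP(train_X, train_Y)
fit_gpytorch_model(ExactMarginalLogLikelihood(model.likelihood, model))

qEI = qExpectedImprovement(model=model, best_f=train_Y.max())
X_cand = torch.rand(5, 3, 2, dtype=torch.double)              # b=5 t-batches of q=3 candidates
values = qEI(X_cand)                                          # one value per t-batch -> shape (5,)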
def __init__(self, model: Model, X_baseline: Tensor, sampler: Optional[MCSampler]=None, objective: Optional[MCAcquisitionObjective]=None, X_pending: Optional[Tensor]=None, prune_baseline: bool=False) -> None:
'q-Noisy Expected Improvement.\n\n Args:\n model: A fitted model.\n X_baseline: A `r x d`-dim Tensor of `r` design points that have\n already been observed. These points are considered as the\n potential best design point.\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`.\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. Defaults to `IdentityMCObjective()`.\n X_pending: A `m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation\n but have not yet been evaluated. Concatenated into X upon\n forward call. Copied and set to have no gradient.\n prune_baseline: If True, remove points in `X_baseline` that are\n highly unlikely to be the best point. This can significantly\n improve performance and is generally recommended. In order to\n customize pruning parameters, instead manually call\n `botorch.acquisition.utils.prune_inferior_points` on `X_baseline`\n before instantiating the acquisition function.\n '
super().__init__(model=model, sampler=sampler, objective=objective, X_pending=X_pending)
if prune_baseline:
X_baseline = prune_inferior_points(model=model, X=X_baseline, objective=objective)
self.register_buffer('X_baseline', X_baseline) | 7,793,565,535,815,692,000 | q-Noisy Expected Improvement.
Args:
model: A fitted model.
X_baseline: A `r x d`-dim Tensor of `r` design points that have
already been observed. These points are considered as the
potential best design point.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`.
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
prune_baseline: If True, remove points in `X_baseline` that are
highly unlikely to be the best point. This can significantly
improve performance and is generally recommended. In order to
customize pruning parameters, instead manually call
`botorch.acquisition.utils.prune_inferior_points` on `X_baseline`
before instantiating the acquisition function. | botorch/acquisition/monte_carlo.py | __init__ | BradyBromley/botorch | python | def __init__(self, model: Model, X_baseline: Tensor, sampler: Optional[MCSampler]=None, objective: Optional[MCAcquisitionObjective]=None, X_pending: Optional[Tensor]=None, prune_baseline: bool=False) -> None:
'q-Noisy Expected Improvement.\n\n Args:\n model: A fitted model.\n X_baseline: A `r x d`-dim Tensor of `r` design points that have\n already been observed. These points are considered as the\n potential best design point.\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`.\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. Defaults to `IdentityMCObjective()`.\n X_pending: A `m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation\n but have not yet been evaluated. Concatenated into X upon\n forward call. Copied and set to have no gradient.\n prune_baseline: If True, remove points in `X_baseline` that are\n highly unlikely to be the best point. This can significantly\n improve performance and is generally recommended. In order to\n customize pruning parameters, instead manually call\n `botorch.acquisition.utils.prune_inferior_points` on `X_baseline`\n before instantiating the acquisition function.\n '
super().__init__(model=model, sampler=sampler, objective=objective, X_pending=X_pending)
if prune_baseline:
X_baseline = prune_inferior_points(model=model, X=X_baseline, objective=objective)
self.register_buffer('X_baseline', X_baseline) |
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
'Evaluate qNoisyExpectedImprovement on the candidate set `X`.\n\n Args:\n X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim\n design points each.\n\n Returns:\n A `(b)`-dim Tensor of Noisy Expected Improvement values at the given\n design points `X`.\n '
q = X.shape[(- 2)]
X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=(- 2))
posterior = self.model.posterior(X_full)
samples = self.sampler(posterior)
obj = self.objective(samples)
diffs = (obj[:, :, :q].max(dim=(- 1))[0] - obj[:, :, q:].max(dim=(- 1))[0])
return diffs.clamp_min(0).mean(dim=0) | 2,343,125,599,921,369,600 | Evaluate qNoisyExpectedImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Noisy Expected Improvement values at the given
design points `X`. | botorch/acquisition/monte_carlo.py | forward | BradyBromley/botorch | python | @concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
'Evaluate qNoisyExpectedImprovement on the candidate set `X`.\n\n Args:\n X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim\n design points each.\n\n Returns:\n A `(b)`-dim Tensor of Noisy Expected Improvement values at the given\n design points `X`.\n '
q = X.shape[(- 2)]
X_full = torch.cat([X, match_batch_shape(self.X_baseline, X)], dim=(- 2))
posterior = self.model.posterior(X_full)
samples = self.sampler(posterior)
obj = self.objective(samples)
diffs = (obj[:, :, :q].max(dim=(- 1))[0] - obj[:, :, q:].max(dim=(- 1))[0])
return diffs.clamp_min(0).mean(dim=0) |
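By contrast with qExpectedImprovement, qNoisyExpectedImprovement compares candidates against the observed baseline points rather than a fixed best_f; a small self-contained sketch with toy data standing in for real observations and hyperparameters left unfitted for brevity:
import torch
from botorch.models import SingleTaskGP
from botorch.acquisition.monte_carlo import qNoisyExpectedImprovement

train_X = torch.rand(20, 3, dtype=torch.double)
train_Y = train_X.sum(dim=-1, keepdim=True)
model = SingleTaskGP(train_X, train_Y)

qNEI = qNoisyExpectedImprovement(model=model, X_baseline=train_X, prune_baseline=True)
values = qNEI(torch.rand(4, 2, 3, dtype=torch.double))        # -> shape (4,)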
def __init__(self, model: Model, best_f: Union[(float, Tensor)], sampler: Optional[MCSampler]=None, objective: Optional[MCAcquisitionObjective]=None, X_pending: Optional[Tensor]=None, tau: float=0.001) -> None:
'q-Probability of Improvement.\n\n Args:\n model: A fitted model.\n best_f: The best objective value observed so far (assumed noiseless).\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. Defaults to `IdentityMCObjective()`.\n X_pending: A `m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation\n but have not yet been evaluated. Concatenated into X upon\n forward call. Copied and set to have no gradient.\n tau: The temperature parameter used in the sigmoid approximation\n of the step function. Smaller values yield more accurate\n approximations of the function, but result in gradients\n estimates with higher variance.\n '
super().__init__(model=model, sampler=sampler, objective=objective, X_pending=X_pending)
if (not torch.is_tensor(best_f)):
best_f = torch.tensor(float(best_f))
self.register_buffer('best_f', best_f)
if (not torch.is_tensor(tau)):
tau = torch.tensor(float(tau))
self.register_buffer('tau', tau) | -4,439,551,676,147,822,000 | q-Probability of Improvement.
Args:
model: A fitted model.
best_f: The best objective value observed so far (assumed noiseless).
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient.
tau: The temperature parameter used in the sigmoid approximation
of the step function. Smaller values yield more accurate
approximations of the function, but result in gradient
estimates with higher variance. | botorch/acquisition/monte_carlo.py | __init__ | BradyBromley/botorch | python | def __init__(self, model: Model, best_f: Union[(float, Tensor)], sampler: Optional[MCSampler]=None, objective: Optional[MCAcquisitionObjective]=None, X_pending: Optional[Tensor]=None, tau: float=0.001) -> None:
'q-Probability of Improvement.\n\n Args:\n model: A fitted model.\n best_f: The best objective value observed so far (assumed noiseless).\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. Defaults to `IdentityMCObjective()`.\n X_pending: A `m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation\n but have not yet been evaluated. Concatenated into X upon\n forward call. Copied and set to have no gradient.\n tau: The temperature parameter used in the sigmoid approximation\n of the step function. Smaller values yield more accurate\n approximations of the function, but result in gradients\n estimates with higher variance.\n '
super().__init__(model=model, sampler=sampler, objective=objective, X_pending=X_pending)
if (not torch.is_tensor(best_f)):
best_f = torch.tensor(float(best_f))
self.register_buffer('best_f', best_f)
if (not torch.is_tensor(tau)):
tau = torch.tensor(float(tau))
self.register_buffer('tau', tau) |
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
'Evaluate qProbabilityOfImprovement on the candidate set `X`.\n\n Args:\n X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim\n design points each.\n\n Returns:\n A `(b)`-dim Tensor of Probability of Improvement values at the given\n design points `X`.\n '
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
max_obj = obj.max(dim=(- 1))[0]
val = torch.sigmoid(((max_obj - self.best_f) / self.tau)).mean(dim=0)
return val | -2,381,835,318,596,340,700 | Evaluate qProbabilityOfImprovement on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Probability of Improvement values at the given
design points `X`. | botorch/acquisition/monte_carlo.py | forward | BradyBromley/botorch | python | @concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
'Evaluate qProbabilityOfImprovement on the candidate set `X`.\n\n Args:\n X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim\n design points each.\n\n Returns:\n A `(b)`-dim Tensor of Probability of Improvement values at the given\n design points `X`.\n '
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
max_obj = obj.max(dim=(- 1))[0]
val = torch.sigmoid(((max_obj - self.best_f) / self.tau)).mean(dim=0)
return val |
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
'Evaluate qSimpleRegret on the candidate set `X`.\n\n Args:\n X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim\n design points each.\n\n Returns:\n A `(b)`-dim Tensor of Simple Regret values at the given design\n points `X`.\n '
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
val = obj.max(dim=(- 1))[0].mean(dim=0)
return val | -2,640,521,809,605,749,000 | Evaluate qSimpleRegret on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Simple Regret values at the given design
points `X`. | botorch/acquisition/monte_carlo.py | forward | BradyBromley/botorch | python | @concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
'Evaluate qSimpleRegret on the candidate set `X`.\n\n Args:\n X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim\n design points each.\n\n Returns:\n A `(b)`-dim Tensor of Simple Regret values at the given design\n points `X`.\n '
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
val = obj.max(dim=(- 1))[0].mean(dim=0)
return val |
def __init__(self, model: Model, beta: float, sampler: Optional[MCSampler]=None, objective: Optional[MCAcquisitionObjective]=None, X_pending: Optional[Tensor]=None) -> None:
'q-Upper Confidence Bound.\n\n Args:\n model: A fitted model.\n beta: Controls tradeoff between mean and standard deviation in UCB.\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. Defaults to `IdentityMCObjective()`.\n X_pending: A `m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation\n but have not yet been evaluated. Concatenated into X upon\n forward call. Copied and set to have no gradient.\n '
super().__init__(model=model, sampler=sampler, objective=objective, X_pending=X_pending)
self.beta_prime = math.sqrt(((beta * math.pi) / 2)) | -9,073,965,729,121,521,000 | q-Upper Confidence Bound.
Args:
model: A fitted model.
beta: Controls tradeoff between mean and standard deviation in UCB.
sampler: The sampler used to draw base samples. Defaults to
`SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`
objective: The MCAcquisitionObjective under which the samples are
evaluated. Defaults to `IdentityMCObjective()`.
X_pending: A `m x d`-dim Tensor of `m` design points that have been
submitted for function evaluation
but have not yet been evaluated. Concatenated into X upon
forward call. Copied and set to have no gradient. | botorch/acquisition/monte_carlo.py | __init__ | BradyBromley/botorch | python | def __init__(self, model: Model, beta: float, sampler: Optional[MCSampler]=None, objective: Optional[MCAcquisitionObjective]=None, X_pending: Optional[Tensor]=None) -> None:
'q-Upper Confidence Bound.\n\n Args:\n model: A fitted model.\n beta: Controls tradeoff between mean and standard deviation in UCB.\n sampler: The sampler used to draw base samples. Defaults to\n `SobolQMCNormalSampler(num_samples=500, collapse_batch_dims=True)`\n objective: The MCAcquisitionObjective under which the samples are\n evaluated. Defaults to `IdentityMCObjective()`.\n X_pending: A `m x d`-dim Tensor of `m` design points that have\n points that have been submitted for function evaluation\n but have not yet been evaluated. Concatenated into X upon\n forward call. Copied and set to have no gradient.\n '
super().__init__(model=model, sampler=sampler, objective=objective, X_pending=X_pending)
self.beta_prime = math.sqrt(((beta * math.pi) / 2)) |
@concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
'Evaluate qUpperConfidenceBound on the candidate set `X`.\n\n Args:\n X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim\n design points each.\n\n Returns:\n A `(b)`-dim Tensor of Upper Confidence Bound values at the given\n design points `X`.\n '
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
mean = obj.mean(dim=0)
ucb_samples = (mean + (self.beta_prime * (obj - mean).abs()))
return ucb_samples.max(dim=(- 1))[0].mean(dim=0) | 4,111,730,714,202,724,000 | Evaluate qUpperConfidenceBound on the candidate set `X`.
Args:
X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim
design points each.
Returns:
A `(b)`-dim Tensor of Upper Confidence Bound values at the given
design points `X`. | botorch/acquisition/monte_carlo.py | forward | BradyBromley/botorch | python | @concatenate_pending_points
@t_batch_mode_transform()
def forward(self, X: Tensor) -> Tensor:
'Evaluate qUpperConfidenceBound on the candidate set `X`.\n\n Args:\n X: A `(b) x q x d`-dim Tensor of `(b)` t-batches with `q` `d`-dim\n design points each.\n\n Returns:\n A `(b)`-dim Tensor of Upper Confidence Bound values at the given\n design points `X`.\n '
posterior = self.model.posterior(X)
samples = self.sampler(posterior)
obj = self.objective(samples)
mean = obj.mean(dim=0)
ucb_samples = (mean + (self.beta_prime * (obj - mean).abs()))
return ucb_samples.max(dim=(- 1))[0].mean(dim=0) |
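A short numeric check on the beta_prime reparameterization used above: for a Gaussian objective E|f - mean| = sigma * sqrt(2 / pi), so beta_prime = sqrt(beta * pi / 2) makes the sample-based term recover the classic sqrt(beta) * sigma bonus.
import math

beta, sigma = 0.2, 1.7
beta_prime = math.sqrt(beta * math.pi / 2)
# beta_prime * E|f - mean| == beta_prime * sigma * sqrt(2 / pi) == sqrt(beta) * sigma
assert abs(beta_prime * sigma * math.sqrt(2 / math.pi) - math.sqrt(beta) * sigma) < 1e-12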
def resize_img(img, input_size=600):
'\n resize img and limit the longest side of the image to input_size\n '
img = np.array(img)
im_shape = img.shape
im_size_max = np.max(im_shape[0:2])
im_scale = (float(input_size) / float(im_size_max))
img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale)
return img | 2,730,486,028,993,369,000 | resize img and limit the longest side of the image to input_size | tools/infer/utility.py | resize_img | OcrOrg/PaddleOCR | python | def resize_img(img, input_size=600):
'\n \n '
img = np.array(img)
im_shape = img.shape
im_size_max = np.max(im_shape[0:2])
im_scale = (float(input_size) / float(im_size_max))
img = cv2.resize(img, None, None, fx=im_scale, fy=im_scale)
return img |
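A hypothetical call to resize_img; the image path is a placeholder, the import assumes the PaddleOCR repository root is on the path, and the returned array has its longest side scaled to input_size.
import cv2
from tools.infer.utility import resize_img   # assumes the PaddleOCR repo root is on sys.path

img = cv2.imread('doc/imgs/sample.jpg')       # placeholder path
small = resize_img(img, input_size=600)
print(small.shape)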
def draw_ocr(image, boxes, txts=None, scores=None, drop_score=0.5, font_path='./doc/simfang.ttf'):
'\n Visualize the results of OCR detection and recognition\n args:\n image(Image|array): RGB image\n boxes(list): boxes with shape(N, 4, 2)\n txts(list): the texts\n scores(list): txxs corresponding scores\n drop_score(float): only scores greater than drop_threshold will be visualized\n font_path: the path of font which is used to draw text\n return(array):\n the visualized img\n '
if (scores is None):
scores = ([1] * len(boxes))
box_num = len(boxes)
for i in range(box_num):
if ((scores is not None) and ((scores[i] < drop_score) or math.isnan(scores[i]))):
continue
box = np.reshape(np.array(boxes[i]), [(- 1), 1, 2]).astype(np.int64)
image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
if (txts is not None):
img = np.array(resize_img(image, input_size=600))
txt_img = text_visual(txts, scores, img_h=img.shape[0], img_w=600, threshold=drop_score, font_path=font_path)
img = np.concatenate([np.array(img), np.array(txt_img)], axis=1)
return img
return image | 5,244,719,996,499,496,000 | Visualize the results of OCR detection and recognition
args:
image(Image|array): RGB image
boxes(list): boxes with shape(N, 4, 2)
txts(list): the texts
scores(list): the scores corresponding to txts
drop_score(float): only scores greater than drop_score will be visualized
font_path: the path of font which is used to draw text
return(array):
the visualized img | tools/infer/utility.py | draw_ocr | OcrOrg/PaddleOCR | python | def draw_ocr(image, boxes, txts=None, scores=None, drop_score=0.5, font_path='./doc/simfang.ttf'):
'\n Visualize the results of OCR detection and recognition\n args:\n image(Image|array): RGB image\n boxes(list): boxes with shape(N, 4, 2)\n txts(list): the texts\n scores(list): txxs corresponding scores\n drop_score(float): only scores greater than drop_threshold will be visualized\n font_path: the path of font which is used to draw text\n return(array):\n the visualized img\n '
if (scores is None):
scores = ([1] * len(boxes))
box_num = len(boxes)
for i in range(box_num):
if ((scores is not None) and ((scores[i] < drop_score) or math.isnan(scores[i]))):
continue
box = np.reshape(np.array(boxes[i]), [(- 1), 1, 2]).astype(np.int64)
image = cv2.polylines(np.array(image), [box], True, (255, 0, 0), 2)
if (txts is not None):
img = np.array(resize_img(image, input_size=600))
txt_img = text_visual(txts, scores, img_h=img.shape[0], img_w=600, threshold=drop_score, font_path=font_path)
img = np.concatenate([np.array(img), np.array(txt_img)], axis=1)
return img
return image |
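A minimal sketch of draw_ocr on made-up detection output; the box, text, and score values are placeholders, and the default font path assumes the call is made from the PaddleOCR repository root.
from PIL import Image
from tools.infer.utility import draw_ocr      # assumes the PaddleOCR repo root is on sys.path

image = Image.new('RGB', (400, 200), (255, 255, 255))
boxes = [[[10, 10], [150, 10], [150, 40], [10, 40]]]          # one quadrilateral, shape (1, 4, 2)
txts = ['hello']
scores = [0.93]
vis = draw_ocr(image, boxes, txts=txts, scores=scores,
               drop_score=0.5, font_path='./doc/simfang.ttf')
# vis: the resized image with the box drawn, concatenated with the rendered text panel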
def str_count(s):
'\n Count the number of Chinese characters,\n a single English character and a single number\n equal to half the length of Chinese characters.\n args:\n s(string): the input of string\n return(int):\n the number of Chinese characters\n '
import string
count_zh = count_pu = 0
s_len = len(s)
en_dg_count = 0
for c in s:
if ((c in string.ascii_letters) or c.isdigit() or c.isspace()):
en_dg_count += 1
elif c.isalpha():
count_zh += 1
else:
count_pu += 1
return (s_len - math.ceil((en_dg_count / 2))) | -4,828,038,653,253,307,000 | Count the number of Chinese characters,
where a single English character or a single digit
counts as half of a Chinese character.
args:
s(string): the input string
return(int):
the number of Chinese characters | tools/infer/utility.py | str_count | OcrOrg/PaddleOCR | python | def str_count(s):
'\n Count the number of Chinese characters,\n a single English character and a single number\n equal to half the length of Chinese characters.\n args:\n s(string): the input of string\n return(int):\n the number of Chinese characters\n '
import string
count_zh = count_pu = 0
s_len = len(s)
en_dg_count = 0
for c in s:
if ((c in string.ascii_letters) or c.isdigit() or c.isspace()):
en_dg_count += 1
elif c.isalpha():
count_zh += 1
else:
count_pu += 1
return (s_len - math.ceil((en_dg_count / 2))) |
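Two worked examples of str_count: in 'abc你好' the length is 5 and the three ASCII letters give en_dg_count = 3, so the result is 5 - ceil(3 / 2) = 3; a string of four digits gives 4 - ceil(4 / 2) = 2 (import path assumed as above).
from tools.infer.utility import str_count     # assumes the PaddleOCR repo root is on sys.path

assert str_count('abc你好') == 3
assert str_count('2021') == 2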
def text_visual(texts, scores, img_h=400, img_w=600, threshold=0.0, font_path='./doc/simfang.ttf'):
'\n create new blank img and draw txt on it\n args:\n texts(list): the text will be draw\n scores(list|None): corresponding score of each txt\n img_h(int): the height of blank img\n img_w(int): the width of blank img\n font_path: the path of font which is used to draw text\n return(array):\n '
if (scores is not None):
assert (len(texts) == len(scores)), 'The number of txts and corresponding scores must match'
def create_blank_img():
blank_img = (np.ones(shape=[img_h, img_w], dtype=np.int8) * 255)
blank_img[:, (img_w - 1):] = 0
blank_img = Image.fromarray(blank_img).convert('RGB')
draw_txt = ImageDraw.Draw(blank_img)
return (blank_img, draw_txt)
(blank_img, draw_txt) = create_blank_img()
font_size = 20
txt_color = (0, 0, 0)
font = ImageFont.truetype(font_path, font_size, encoding='utf-8')
gap = (font_size + 5)
txt_img_list = []
(count, index) = (1, 0)
for (idx, txt) in enumerate(texts):
index += 1
if ((scores[idx] < threshold) or math.isnan(scores[idx])):
index -= 1
continue
first_line = True
while (str_count(txt) >= ((img_w // font_size) - 4)):
tmp = txt
txt = tmp[:((img_w // font_size) - 4)]
if first_line:
new_txt = ((str(index) + ': ') + txt)
first_line = False
else:
new_txt = (' ' + txt)
draw_txt.text((0, (gap * count)), new_txt, txt_color, font=font)
txt = tmp[((img_w // font_size) - 4):]
if (count >= ((img_h // gap) - 1)):
txt_img_list.append(np.array(blank_img))
(blank_img, draw_txt) = create_blank_img()
count = 0
count += 1
if first_line:
new_txt = ((((str(index) + ': ') + txt) + ' ') + ('%.3f' % scores[idx]))
else:
new_txt = (((' ' + txt) + ' ') + ('%.3f' % scores[idx]))
draw_txt.text((0, (gap * count)), new_txt, txt_color, font=font)
if ((count >= ((img_h // gap) - 1)) and ((idx + 1) < len(texts))):
txt_img_list.append(np.array(blank_img))
(blank_img, draw_txt) = create_blank_img()
count = 0
count += 1
txt_img_list.append(np.array(blank_img))
if (len(txt_img_list) == 1):
blank_img = np.array(txt_img_list[0])
else:
blank_img = np.concatenate(txt_img_list, axis=1)
return np.array(blank_img) | -803,037,385,994,058,100 | create new blank img and draw txt on it
args:
texts(list): the texts to be drawn
scores(list|None): corresponding score of each txt
img_h(int): the height of blank img
img_w(int): the width of blank img
font_path: the path of font which is used to draw text
return(array): | tools/infer/utility.py | text_visual | OcrOrg/PaddleOCR | python | def text_visual(texts, scores, img_h=400, img_w=600, threshold=0.0, font_path='./doc/simfang.ttf'):
'\n create new blank img and draw txt on it\n args:\n texts(list): the text will be draw\n scores(list|None): corresponding score of each txt\n img_h(int): the height of blank img\n img_w(int): the width of blank img\n font_path: the path of font which is used to draw text\n return(array):\n '
if (scores is not None):
assert (len(texts) == len(scores)), 'The number of txts and corresponding scores must match'
def create_blank_img():
blank_img = (np.ones(shape=[img_h, img_w], dtype=np.int8) * 255)
blank_img[:, (img_w - 1):] = 0
blank_img = Image.fromarray(blank_img).convert('RGB')
draw_txt = ImageDraw.Draw(blank_img)
return (blank_img, draw_txt)
(blank_img, draw_txt) = create_blank_img()
font_size = 20
txt_color = (0, 0, 0)
font = ImageFont.truetype(font_path, font_size, encoding='utf-8')
gap = (font_size + 5)
txt_img_list = []
(count, index) = (1, 0)
for (idx, txt) in enumerate(texts):
index += 1
if ((scores[idx] < threshold) or math.isnan(scores[idx])):
index -= 1
continue
first_line = True
while (str_count(txt) >= ((img_w // font_size) - 4)):
tmp = txt
txt = tmp[:((img_w // font_size) - 4)]
if first_line:
new_txt = ((str(index) + ': ') + txt)
first_line = False
else:
new_txt = (' ' + txt)
draw_txt.text((0, (gap * count)), new_txt, txt_color, font=font)
txt = tmp[((img_w // font_size) - 4):]
if (count >= ((img_h // gap) - 1)):
txt_img_list.append(np.array(blank_img))
(blank_img, draw_txt) = create_blank_img()
count = 0
count += 1
if first_line:
new_txt = ((((str(index) + ': ') + txt) + ' ') + ('%.3f' % scores[idx]))
else:
new_txt = (((' ' + txt) + ' ') + ('%.3f' % scores[idx]))
draw_txt.text((0, (gap * count)), new_txt, txt_color, font=font)
if ((count >= ((img_h // gap) - 1)) and ((idx + 1) < len(texts))):
txt_img_list.append(np.array(blank_img))
(blank_img, draw_txt) = create_blank_img()
count = 0
count += 1
txt_img_list.append(np.array(blank_img))
if (len(txt_img_list) == 1):
blank_img = np.array(txt_img_list[0])
else:
blank_img = np.concatenate(txt_img_list, axis=1)
return np.array(blank_img) |
def __init__(self, report, metrics, destination_uuid, destination):
'Initialise the Notification with the required info.'
self.report_title = report['title']
self.url = report.get('url')
self.metrics: list[MetricNotificationData] = metrics
self.destination_uuid = destination_uuid
self.destination = destination | -5,459,359,732,503,704,000 | Initialise the Notification with the required info. | components/notifier/src/models/notification.py | __init__ | m-zakeri/quality-time | python | def __init__(self, report, metrics, destination_uuid, destination):
self.report_title = report['title']
self.url = report.get('url')
self.metrics: list[MetricNotificationData] = metrics
self.destination_uuid = destination_uuid
self.destination = destination |
def __eq__(self, other):
'Check if the notification itself is the same, regardless of its metric content.'
return ((self.report_title == other.report_title) and (self.destination_uuid == other.destination_uuid) and (self.destination == other.destination)) | -6,105,355,902,732,706,000 | Check if the notification itself is the same, regardless of its metric content. | components/notifier/src/models/notification.py | __eq__ | m-zakeri/quality-time | python | def __eq__(self, other):
return ((self.report_title == other.report_title) and (self.destination_uuid == other.destination_uuid) and (self.destination == other.destination)) |
def merge_notification(self, new_metrics):
'Merge new metrics into this notification.'
self.metrics.extend(new_metrics) | -4,852,404,083,510,270,000 | Merge new metrics into this notification. | components/notifier/src/models/notification.py | merge_notification | m-zakeri/quality-time | python | def merge_notification(self, new_metrics):
self.metrics.extend(new_metrics) |
def get_express_route_gateway(express_route_gateway_name: Optional[str]=None, resource_group_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetExpressRouteGatewayResult:
'\n ExpressRoute gateway resource.\n API Version: 2020-08-01.\n\n\n :param str express_route_gateway_name: The name of the ExpressRoute gateway.\n :param str resource_group_name: The name of the resource group.\n '
__args__ = dict()
__args__['expressRouteGatewayName'] = express_route_gateway_name
__args__['resourceGroupName'] = resource_group_name
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network:getExpressRouteGateway', __args__, opts=opts, typ=GetExpressRouteGatewayResult).value
return AwaitableGetExpressRouteGatewayResult(auto_scale_configuration=__ret__.auto_scale_configuration, etag=__ret__.etag, express_route_connections=__ret__.express_route_connections, id=__ret__.id, location=__ret__.location, name=__ret__.name, provisioning_state=__ret__.provisioning_state, tags=__ret__.tags, type=__ret__.type, virtual_hub=__ret__.virtual_hub) | -1,198,269,896,106,264,000 | ExpressRoute gateway resource.
API Version: 2020-08-01.
:param str express_route_gateway_name: The name of the ExpressRoute gateway.
:param str resource_group_name: The name of the resource group. | sdk/python/pulumi_azure_nextgen/network/get_express_route_gateway.py | get_express_route_gateway | pulumi/pulumi-azure-nextgen | python | def get_express_route_gateway(express_route_gateway_name: Optional[str]=None, resource_group_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableGetExpressRouteGatewayResult:
'\n ExpressRoute gateway resource.\n API Version: 2020-08-01.\n\n\n :param str express_route_gateway_name: The name of the ExpressRoute gateway.\n :param str resource_group_name: The name of the resource group.\n '
__args__ = dict()
__args__['expressRouteGatewayName'] = express_route_gateway_name
__args__['resourceGroupName'] = resource_group_name
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network:getExpressRouteGateway', __args__, opts=opts, typ=GetExpressRouteGatewayResult).value
return AwaitableGetExpressRouteGatewayResult(auto_scale_configuration=__ret__.auto_scale_configuration, etag=__ret__.etag, express_route_connections=__ret__.express_route_connections, id=__ret__.id, location=__ret__.location, name=__ret__.name, provisioning_state=__ret__.provisioning_state, tags=__ret__.tags, type=__ret__.type, virtual_hub=__ret__.virtual_hub) |
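A hypothetical lookup using the function above from inside a Pulumi program; the gateway and resource group names are placeholders for resources that already exist in the target subscription.
import pulumi_azure_nextgen.network as network

gw = network.get_express_route_gateway(
    express_route_gateway_name='my-er-gateway',
    resource_group_name='my-resource-group')
print(gw.provisioning_state, gw.name)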
@property
@pulumi.getter(name='autoScaleConfiguration')
def auto_scale_configuration(self) -> Optional['outputs.ExpressRouteGatewayPropertiesResponseAutoScaleConfiguration']:
'\n Configuration for auto scaling.\n '
return pulumi.get(self, 'auto_scale_configuration') | -8,462,896,628,956,177,000 | Configuration for auto scaling. | sdk/python/pulumi_azure_nextgen/network/get_express_route_gateway.py | auto_scale_configuration | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter(name='autoScaleConfiguration')
def auto_scale_configuration(self) -> Optional['outputs.ExpressRouteGatewayPropertiesResponseAutoScaleConfiguration']:
'\n \n '
return pulumi.get(self, 'auto_scale_configuration') |
@property
@pulumi.getter
def etag(self) -> str:
'\n A unique read-only string that changes whenever the resource is updated.\n '
return pulumi.get(self, 'etag') | -4,757,010,955,465,940,000 | A unique read-only string that changes whenever the resource is updated. | sdk/python/pulumi_azure_nextgen/network/get_express_route_gateway.py | etag | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter
def etag(self) -> str:
'\n \n '
return pulumi.get(self, 'etag') |
@property
@pulumi.getter(name='expressRouteConnections')
def express_route_connections(self) -> Sequence['outputs.ExpressRouteConnectionResponse']:
'\n List of ExpressRoute connections to the ExpressRoute gateway.\n '
return pulumi.get(self, 'express_route_connections') | 7,243,677,662,968,671,000 | List of ExpressRoute connections to the ExpressRoute gateway. | sdk/python/pulumi_azure_nextgen/network/get_express_route_gateway.py | express_route_connections | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter(name='expressRouteConnections')
def express_route_connections(self) -> Sequence['outputs.ExpressRouteConnectionResponse']:
'\n \n '
return pulumi.get(self, 'express_route_connections') |
@property
@pulumi.getter
def id(self) -> Optional[str]:
'\n Resource ID.\n '
return pulumi.get(self, 'id') | 6,887,155,523,158,811,000 | Resource ID. | sdk/python/pulumi_azure_nextgen/network/get_express_route_gateway.py | id | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter
def id(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'id') |
@property
@pulumi.getter
def location(self) -> Optional[str]:
'\n Resource location.\n '
return pulumi.get(self, 'location') | 8,841,543,228,718,414,000 | Resource location. | sdk/python/pulumi_azure_nextgen/network/get_express_route_gateway.py | location | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter
def location(self) -> Optional[str]:
'\n \n '
return pulumi.get(self, 'location') |
@property
@pulumi.getter
def name(self) -> str:
'\n Resource name.\n '
return pulumi.get(self, 'name') | -2,625,941,459,458,898,000 | Resource name. | sdk/python/pulumi_azure_nextgen/network/get_express_route_gateway.py | name | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter
def name(self) -> str:
'\n \n '
return pulumi.get(self, 'name') |
@property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> str:
'\n The provisioning state of the express route gateway resource.\n '
return pulumi.get(self, 'provisioning_state') | -3,724,907,156,352,075,000 | The provisioning state of the express route gateway resource. | sdk/python/pulumi_azure_nextgen/network/get_express_route_gateway.py | provisioning_state | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter(name='provisioningState')
def provisioning_state(self) -> str:
'\n \n '
return pulumi.get(self, 'provisioning_state') |
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[(str, str)]]:
'\n Resource tags.\n '
return pulumi.get(self, 'tags') | 562,229,697,900,116,900 | Resource tags. | sdk/python/pulumi_azure_nextgen/network/get_express_route_gateway.py | tags | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter
def tags(self) -> Optional[Mapping[(str, str)]]:
'\n \n '
return pulumi.get(self, 'tags') |
@property
@pulumi.getter
def type(self) -> str:
'\n Resource type.\n '
return pulumi.get(self, 'type') | -5,079,398,349,541,291,000 | Resource type. | sdk/python/pulumi_azure_nextgen/network/get_express_route_gateway.py | type | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter
def type(self) -> str:
'\n \n '
return pulumi.get(self, 'type') |
@property
@pulumi.getter(name='virtualHub')
def virtual_hub(self) -> 'outputs.VirtualHubIdResponse':
'\n The Virtual Hub where the ExpressRoute gateway is or will be deployed.\n '
return pulumi.get(self, 'virtual_hub') | -8,851,470,528,751,838,000 | The Virtual Hub where the ExpressRoute gateway is or will be deployed. | sdk/python/pulumi_azure_nextgen/network/get_express_route_gateway.py | virtual_hub | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter(name='virtualHub')
def virtual_hub(self) -> 'outputs.VirtualHubIdResponse':
'\n \n '
return pulumi.get(self, 'virtual_hub') |
def _fix_conf_defaults(config):
'Update some configuration defaults.'
config['sid'] = config.pop(CONF_MAC, None)
if (config.get(CONF_KEY) is None):
_LOGGER.warning('Key is not provided for gateway %s. Controlling the gateway will not be possible', config['sid'])
if (config.get(CONF_HOST) is None):
config.pop(CONF_PORT)
return config | -4,031,799,852,486,938,600 | Update some configuration defaults. | homeassistant/components/xiaomi_aqara.py | _fix_conf_defaults | phispi/home-assistant | python | def _fix_conf_defaults(config):
config['sid'] = config.pop(CONF_MAC, None)
if (config.get(CONF_KEY) is None):
_LOGGER.warning('Key is not provided for gateway %s. Controlling the gateway will not be possible', config['sid'])
if (config.get(CONF_HOST) is None):
config.pop(CONF_PORT)
return config |
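A sketch of what _fix_conf_defaults does to a single gateway entry; the key names follow the usual Home Assistant constant values ('mac', 'key', 'host', 'port'), and the MAC and key are placeholders.
conf = {'mac': '34:ce:00:fa:12:34', 'key': 'abcdefghijklmnop', 'port': 9898}
fixed = _fix_conf_defaults(conf)
# 'mac' is moved to 'sid', and because no 'host' is given the 'port' entry is dropped:
# fixed == {'key': 'abcdefghijklmnop', 'sid': '34:ce:00:fa:12:34'}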
def setup(hass, config):
'Set up the Xiaomi component.'
gateways = []
interface = 'any'
discovery_retry = 3
if (DOMAIN in config):
gateways = config[DOMAIN][CONF_GATEWAYS]
interface = config[DOMAIN][CONF_INTERFACE]
discovery_retry = config[DOMAIN][CONF_DISCOVERY_RETRY]
async def xiaomi_gw_discovered(service, discovery_info):
'Perform action when Xiaomi Gateway device(s) has been found.'
discovery.listen(hass, SERVICE_XIAOMI_GW, xiaomi_gw_discovered)
from xiaomi_gateway import XiaomiGatewayDiscovery
xiaomi = hass.data[PY_XIAOMI_GATEWAY] = XiaomiGatewayDiscovery(hass.add_job, gateways, interface)
_LOGGER.debug('Expecting %s gateways', len(gateways))
for k in range(discovery_retry):
_LOGGER.info('Discovering Xiaomi Gateways (Try %s)', (k + 1))
xiaomi.discover_gateways()
if (len(xiaomi.gateways) >= len(gateways)):
break
if (not xiaomi.gateways):
_LOGGER.error('No gateway discovered')
return False
xiaomi.listen()
_LOGGER.debug('Gateways discovered. Listening for broadcasts')
for component in ['binary_sensor', 'sensor', 'switch', 'light', 'cover', 'lock']:
discovery.load_platform(hass, component, DOMAIN, {}, config)
def stop_xiaomi(event):
'Stop Xiaomi Socket.'
_LOGGER.info('Shutting down Xiaomi Hub')
xiaomi.stop_listen()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_xiaomi)
def play_ringtone_service(call):
'Service to play ringtone through Gateway.'
ring_id = call.data.get(ATTR_RINGTONE_ID)
gateway = call.data.get(ATTR_GW_MAC)
kwargs = {'mid': ring_id}
ring_vol = call.data.get(ATTR_RINGTONE_VOL)
if (ring_vol is not None):
kwargs['vol'] = ring_vol
gateway.write_to_hub(gateway.sid, **kwargs)
def stop_ringtone_service(call):
'Service to stop playing ringtone on Gateway.'
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, mid=10000)
def add_device_service(call):
'Service to add a new sub-device within the next 30 seconds.'
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, join_permission='yes')
hass.components.persistent_notification.async_create('Join permission enabled for 30 seconds! Please press the pairing button of the new device once.', title='Xiaomi Aqara Gateway')
def remove_device_service(call):
'Service to remove a sub-device from the gateway.'
device_id = call.data.get(ATTR_DEVICE_ID)
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, remove_device=device_id)
gateway_only_schema = _add_gateway_to_schema(xiaomi, vol.Schema({}))
hass.services.register(DOMAIN, SERVICE_PLAY_RINGTONE, play_ringtone_service, schema=_add_gateway_to_schema(xiaomi, SERVICE_SCHEMA_PLAY_RINGTONE))
hass.services.register(DOMAIN, SERVICE_STOP_RINGTONE, stop_ringtone_service, schema=gateway_only_schema)
hass.services.register(DOMAIN, SERVICE_ADD_DEVICE, add_device_service, schema=gateway_only_schema)
hass.services.register(DOMAIN, SERVICE_REMOVE_DEVICE, remove_device_service, schema=_add_gateway_to_schema(xiaomi, SERVICE_SCHEMA_REMOVE_DEVICE))
return True | 5,895,890,946,076,640,000 | Set up the Xiaomi component. | homeassistant/components/xiaomi_aqara.py | setup | phispi/home-assistant | python | def setup(hass, config):
gateways = []
interface = 'any'
discovery_retry = 3
if (DOMAIN in config):
gateways = config[DOMAIN][CONF_GATEWAYS]
interface = config[DOMAIN][CONF_INTERFACE]
discovery_retry = config[DOMAIN][CONF_DISCOVERY_RETRY]
async def xiaomi_gw_discovered(service, discovery_info):
'Perform action when Xiaomi Gateway device(s) has been found.'
discovery.listen(hass, SERVICE_XIAOMI_GW, xiaomi_gw_discovered)
from xiaomi_gateway import XiaomiGatewayDiscovery
xiaomi = hass.data[PY_XIAOMI_GATEWAY] = XiaomiGatewayDiscovery(hass.add_job, gateways, interface)
_LOGGER.debug('Expecting %s gateways', len(gateways))
for k in range(discovery_retry):
_LOGGER.info('Discovering Xiaomi Gateways (Try %s)', (k + 1))
xiaomi.discover_gateways()
if (len(xiaomi.gateways) >= len(gateways)):
break
if (not xiaomi.gateways):
_LOGGER.error('No gateway discovered')
return False
xiaomi.listen()
_LOGGER.debug('Gateways discovered. Listening for broadcasts')
for component in ['binary_sensor', 'sensor', 'switch', 'light', 'cover', 'lock']:
discovery.load_platform(hass, component, DOMAIN, {}, config)
def stop_xiaomi(event):
'Stop Xiaomi Socket.'
_LOGGER.info('Shutting down Xiaomi Hub')
xiaomi.stop_listen()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_xiaomi)
def play_ringtone_service(call):
'Service to play ringtone through Gateway.'
ring_id = call.data.get(ATTR_RINGTONE_ID)
gateway = call.data.get(ATTR_GW_MAC)
kwargs = {'mid': ring_id}
ring_vol = call.data.get(ATTR_RINGTONE_VOL)
if (ring_vol is not None):
kwargs['vol'] = ring_vol
gateway.write_to_hub(gateway.sid, **kwargs)
def stop_ringtone_service(call):
'Service to stop playing ringtone on Gateway.'
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, mid=10000)
def add_device_service(call):
'Service to add a new sub-device within the next 30 seconds.'
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, join_permission='yes')
hass.components.persistent_notification.async_create('Join permission enabled for 30 seconds! Please press the pairing button of the new device once.', title='Xiaomi Aqara Gateway')
def remove_device_service(call):
'Service to remove a sub-device from the gateway.'
device_id = call.data.get(ATTR_DEVICE_ID)
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, remove_device=device_id)
gateway_only_schema = _add_gateway_to_schema(xiaomi, vol.Schema({}))
hass.services.register(DOMAIN, SERVICE_PLAY_RINGTONE, play_ringtone_service, schema=_add_gateway_to_schema(xiaomi, SERVICE_SCHEMA_PLAY_RINGTONE))
hass.services.register(DOMAIN, SERVICE_STOP_RINGTONE, stop_ringtone_service, schema=gateway_only_schema)
hass.services.register(DOMAIN, SERVICE_ADD_DEVICE, add_device_service, schema=gateway_only_schema)
hass.services.register(DOMAIN, SERVICE_REMOVE_DEVICE, remove_device_service, schema=_add_gateway_to_schema(xiaomi, SERVICE_SCHEMA_REMOVE_DEVICE))
return True |
def _add_gateway_to_schema(xiaomi, schema):
'Extend a voluptuous schema with a gateway validator.'
def gateway(sid):
'Convert sid to a gateway.'
sid = str(sid).replace(':', '').lower()
for gateway in xiaomi.gateways.values():
if (gateway.sid == sid):
return gateway
raise vol.Invalid('Unknown gateway sid {}'.format(sid))
gateways = list(xiaomi.gateways.values())
kwargs = {}
if (len(gateways) == 1):
kwargs['default'] = gateways[0]
return schema.extend({vol.Required(ATTR_GW_MAC, **kwargs): gateway}) | -9,154,849,926,144,047,000 | Extend a voluptuous schema with a gateway validator. | homeassistant/components/xiaomi_aqara.py | _add_gateway_to_schema | phispi/home-assistant | python | def _add_gateway_to_schema(xiaomi, schema):
def gateway(sid):
'Convert sid to a gateway.'
sid = str(sid).replace(':', '').lower()
for gateway in xiaomi.gateways.values():
if (gateway.sid == sid):
return gateway
raise vol.Invalid('Unknown gateway sid {}'.format(sid))
gateways = list(xiaomi.gateways.values())
kwargs = {}
if (len(gateways) == 1):
kwargs['default'] = gateways[0]
return schema.extend({vol.Required(ATTR_GW_MAC, **kwargs): gateway}) |
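A minimal usage sketch of the schema produced above, assuming a stand-in hub object with a single known gateway; FakeGateway and FakeHub are illustrative names, and the literal 'gw_mac' key stands in for the component's ATTR_GW_MAC constant.

import voluptuous as vol

class FakeGateway:
    def __init__(self, sid):
        self.sid = sid

class FakeHub:
    def __init__(self, gateways):
        self.gateways = gateways  # keyed by sid, mirroring xiaomi.gateways

hub = FakeHub({'34ce00fa1234': FakeGateway('34ce00fa1234')})
schema = _add_gateway_to_schema(hub, vol.Schema({}))
# colons and upper case in the supplied MAC/sid are normalised before lookup,
# and with exactly one known gateway it also becomes the schema default
resolved = schema({'gw_mac': '34:CE:00:FA:12:34'})['gw_mac']
assert resolved is hub.gateways['34ce00fa1234']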
async def xiaomi_gw_discovered(service, discovery_info):
'Perform action when Xiaomi Gateway device(s) has been found.' | -155,846,655,710,240,350 | Perform action when Xiaomi Gateway device(s) has been found. | homeassistant/components/xiaomi_aqara.py | xiaomi_gw_discovered | phispi/home-assistant | python | async def xiaomi_gw_discovered(service, discovery_info):
|
def stop_xiaomi(event):
'Stop Xiaomi Socket.'
_LOGGER.info('Shutting down Xiaomi Hub')
xiaomi.stop_listen() | -8,394,709,030,353,044,000 | Stop Xiaomi Socket. | homeassistant/components/xiaomi_aqara.py | stop_xiaomi | phispi/home-assistant | python | def stop_xiaomi(event):
_LOGGER.info('Shutting down Xiaomi Hub')
xiaomi.stop_listen() |
def play_ringtone_service(call):
'Service to play ringtone through Gateway.'
ring_id = call.data.get(ATTR_RINGTONE_ID)
gateway = call.data.get(ATTR_GW_MAC)
kwargs = {'mid': ring_id}
ring_vol = call.data.get(ATTR_RINGTONE_VOL)
if (ring_vol is not None):
kwargs['vol'] = ring_vol
gateway.write_to_hub(gateway.sid, **kwargs) | 6,053,461,574,489,661,000 | Service to play ringtone through Gateway. | homeassistant/components/xiaomi_aqara.py | play_ringtone_service | phispi/home-assistant | python | def play_ringtone_service(call):
ring_id = call.data.get(ATTR_RINGTONE_ID)
gateway = call.data.get(ATTR_GW_MAC)
kwargs = {'mid': ring_id}
ring_vol = call.data.get(ATTR_RINGTONE_VOL)
if (ring_vol is not None):
kwargs['vol'] = ring_vol
gateway.write_to_hub(gateway.sid, **kwargs) |
def stop_ringtone_service(call):
'Service to stop playing ringtone on Gateway.'
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, mid=10000) | 6,169,792,271,970,421,000 | Service to stop playing ringtone on Gateway. | homeassistant/components/xiaomi_aqara.py | stop_ringtone_service | phispi/home-assistant | python | def stop_ringtone_service(call):
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, mid=10000) |
def add_device_service(call):
'Service to add a new sub-device within the next 30 seconds.'
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, join_permission='yes')
hass.components.persistent_notification.async_create('Join permission enabled for 30 seconds! Please press the pairing button of the new device once.', title='Xiaomi Aqara Gateway') | -6,641,737,974,181,730,000 | Service to add a new sub-device within the next 30 seconds. | homeassistant/components/xiaomi_aqara.py | add_device_service | phispi/home-assistant | python | def add_device_service(call):
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, join_permission='yes')
hass.components.persistent_notification.async_create('Join permission enabled for 30 seconds! Please press the pairing button of the new device once.', title='Xiaomi Aqara Gateway') |
def remove_device_service(call):
'Service to remove a sub-device from the gateway.'
device_id = call.data.get(ATTR_DEVICE_ID)
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, remove_device=device_id) | 4,640,170,528,080,460,000 | Service to remove a sub-device from the gateway. | homeassistant/components/xiaomi_aqara.py | remove_device_service | phispi/home-assistant | python | def remove_device_service(call):
device_id = call.data.get(ATTR_DEVICE_ID)
gateway = call.data.get(ATTR_GW_MAC)
gateway.write_to_hub(gateway.sid, remove_device=device_id) |
def __init__(self, device, device_type, xiaomi_hub):
'Initialize the Xiaomi device.'
self._state = None
self._is_available = True
self._sid = device['sid']
self._name = '{}_{}'.format(device_type, self._sid)
self._type = device_type
self._write_to_hub = xiaomi_hub.write_to_hub
self._get_from_hub = xiaomi_hub.get_from_hub
self._device_state_attributes = {}
self._remove_unavailability_tracker = None
self._xiaomi_hub = xiaomi_hub
self.parse_data(device['data'], device['raw_data'])
self.parse_voltage(device['data'])
if (hasattr(self, '_data_key') and self._data_key):
self._unique_id = slugify('{}-{}'.format(self._data_key, self._sid))
else:
self._unique_id = slugify('{}-{}'.format(self._type, self._sid)) | 2,500,651,193,361,393,700 | Initialize the Xiaomi device. | homeassistant/components/xiaomi_aqara.py | __init__ | phispi/home-assistant | python | def __init__(self, device, device_type, xiaomi_hub):
self._state = None
self._is_available = True
self._sid = device['sid']
self._name = '{}_{}'.format(device_type, self._sid)
self._type = device_type
self._write_to_hub = xiaomi_hub.write_to_hub
self._get_from_hub = xiaomi_hub.get_from_hub
self._device_state_attributes = {}
self._remove_unavailability_tracker = None
self._xiaomi_hub = xiaomi_hub
self.parse_data(device['data'], device['raw_data'])
self.parse_voltage(device['data'])
if (hasattr(self, '_data_key') and self._data_key):
self._unique_id = slugify('{}-{}'.format(self._data_key, self._sid))
else:
self._unique_id = slugify('{}-{}'.format(self._type, self._sid)) |
async def async_added_to_hass(self):
'Start unavailability tracking.'
self._xiaomi_hub.callbacks[self._sid].append(self._add_push_data_job)
self._async_track_unavailable() | -9,045,418,221,189,626,000 | Start unavailability tracking. | homeassistant/components/xiaomi_aqara.py | async_added_to_hass | phispi/home-assistant | python | async def async_added_to_hass(self):
self._xiaomi_hub.callbacks[self._sid].append(self._add_push_data_job)
self._async_track_unavailable() |
@property
def name(self):
'Return the name of the device.'
return self._name | -4,231,536,673,663,769,600 | Return the name of the device. | homeassistant/components/xiaomi_aqara.py | name | phispi/home-assistant | python | @property
def name(self):
return self._name |
@property
def unique_id(self) -> str:
'Return a unique ID.'
return self._unique_id | -4,749,013,748,456,637,000 | Return a unique ID. | homeassistant/components/xiaomi_aqara.py | unique_id | phispi/home-assistant | python | @property
def unique_id(self) -> str:
return self._unique_id |
@property
def available(self):
'Return True if entity is available.'
return self._is_available | -7,264,764,334,597,754,000 | Return True if entity is available. | homeassistant/components/xiaomi_aqara.py | available | phispi/home-assistant | python | @property
def available(self):
return self._is_available |
@property
def should_poll(self):
'Return the polling state. No polling needed.'
return False | -8,466,736,641,829,833,000 | Return the polling state. No polling needed. | homeassistant/components/xiaomi_aqara.py | should_poll | phispi/home-assistant | python | @property
def should_poll(self):
return False |
@property
def device_state_attributes(self):
'Return the state attributes.'
return self._device_state_attributes | 7,697,970,802,956,560,000 | Return the state attributes. | homeassistant/components/xiaomi_aqara.py | device_state_attributes | phispi/home-assistant | python | @property
def device_state_attributes(self):
return self._device_state_attributes |
@callback
def _async_set_unavailable(self, now):
'Set state to UNAVAILABLE.'
self._remove_unavailability_tracker = None
self._is_available = False
self.async_schedule_update_ha_state() | 2,169,749,372,944,836,600 | Set state to UNAVAILABLE. | homeassistant/components/xiaomi_aqara.py | _async_set_unavailable | phispi/home-assistant | python | @callback
def _async_set_unavailable(self, now):
self._remove_unavailability_tracker = None
self._is_available = False
self.async_schedule_update_ha_state() |
@callback
def push_data(self, data, raw_data):
'Push from Hub.'
_LOGGER.debug('PUSH >> %s: %s', self, data)
was_unavailable = self._async_track_unavailable()
is_data = self.parse_data(data, raw_data)
is_voltage = self.parse_voltage(data)
if (is_data or is_voltage or was_unavailable):
self.async_schedule_update_ha_state() | 4,364,394,288,379,428,400 | Push from Hub. | homeassistant/components/xiaomi_aqara.py | push_data | phispi/home-assistant | python | @callback
def push_data(self, data, raw_data):
_LOGGER.debug('PUSH >> %s: %s', self, data)
was_unavailable = self._async_track_unavailable()
is_data = self.parse_data(data, raw_data)
is_voltage = self.parse_voltage(data)
if (is_data or is_voltage or was_unavailable):
self.async_schedule_update_ha_state() |
def parse_voltage(self, data):
'Parse battery level data sent by gateway.'
if ('voltage' not in data):
return False
max_volt = 3300
min_volt = 2800
voltage = data['voltage']
voltage = min(voltage, max_volt)
voltage = max(voltage, min_volt)
percent = (((voltage - min_volt) / (max_volt - min_volt)) * 100)
self._device_state_attributes[ATTR_BATTERY_LEVEL] = round(percent, 1)
return True | 5,407,283,607,935,144,000 | Parse battery level data sent by gateway. | homeassistant/components/xiaomi_aqara.py | parse_voltage | phispi/home-assistant | python | def parse_voltage(self, data):
if ('voltage' not in data):
return False
max_volt = 3300
min_volt = 2800
voltage = data['voltage']
voltage = min(voltage, max_volt)
voltage = max(voltage, min_volt)
percent = (((voltage - min_volt) / (max_volt - min_volt)) * 100)
self._device_state_attributes[ATTR_BATTERY_LEVEL] = round(percent, 1)
return True |
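The battery percentage above is a linear clamp between 2800 mV and 3300 mV. A standalone restatement of that arithmetic (battery_percent is a hypothetical helper, not part of the component):

def battery_percent(voltage_mv, min_volt=2800, max_volt=3300):
    # clamp the reported voltage to the expected range, then scale linearly
    voltage_mv = max(min(voltage_mv, max_volt), min_volt)
    return round((voltage_mv - min_volt) / (max_volt - min_volt) * 100, 1)

print(battery_percent(3005))  # 41.0
print(battery_percent(3400))  # 100.0 (clamped at max_volt)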
def parse_data(self, data, raw_data):
'Parse data sent by gateway.'
raise NotImplementedError() | -2,793,087,297,486,568,400 | Parse data sent by gateway. | homeassistant/components/xiaomi_aqara.py | parse_data | phispi/home-assistant | python | def parse_data(self, data, raw_data):
raise NotImplementedError() |
def gateway(sid):
'Convert sid to a gateway.'
sid = str(sid).replace(':', '').lower()
for gateway in xiaomi.gateways.values():
if (gateway.sid == sid):
return gateway
raise vol.Invalid('Unknown gateway sid {}'.format(sid)) | 7,615,367,604,917,559,000 | Convert sid to a gateway. | homeassistant/components/xiaomi_aqara.py | gateway | phispi/home-assistant | python | def gateway(sid):
sid = str(sid).replace(':', '').lower()
for gateway in xiaomi.gateways.values():
if (gateway.sid == sid):
return gateway
raise vol.Invalid('Unknown gateway sid {}'.format(sid)) |
def fake_method(self, name):
"This doesn't do anything.\n\n Args:\n name: str. Means nothing.\n\n Yields:\n tuple(str, str). The argument passed in but twice in a tuple.\n "
(yield (name, name)) | 1,632,981,890,375,594,500 | This doesn't do anything.
Args:
name: str. Means nothing.
Yields:
tuple(str, str). The argument passed in but twice in a tuple. | scripts/linters/test_files/invalid_python_three.py | fake_method | Aarjav-Jain/oppia | python | def fake_method(self, name):
"This doesn't do anything.\n\n Args:\n name: str. Means nothing.\n\n Yields:\n tuple(str, str). The argument passed in but twice in a tuple.\n "
(yield (name, name)) |
def calc_fall_flush_durations_2(filter_data, date):
'Left side sharp'
der_percent_threshold_left = 50
flow_percent_threshold_left = 80
'Right side mellow'
der_percent_threshold_right = 30
flow_percent_threshold_right = 80
duration = None
left = 0
right = 0
if (date or (date == 0)):
date = int(date)
(left_maxarray, left_minarray) = peakdet(filter_data[:date], 0.01)
(right_maxarray, right_minarray) = peakdet(filter_data[date:], 0.01)
if (not list(left_minarray)):
left = 0
else:
left = int(left_minarray[(- 1)][0])
if (not list(right_minarray)):
right = 0
else:
right = int(((date - 2) + right_minarray[0][0]))
if ((date - left) > 10):
'create spline, and find derivative'
x_axis_left = list(range(len(filter_data[left:date])))
spl_left = ip.UnivariateSpline(x_axis_left, filter_data[left:date], k=3, s=3)
spl_first_left = spl_left.derivative(1)
'check if derivative value falls below certain threshold'
spl_first_left_median = np.nanpercentile(spl_first_left(x_axis_left), der_percent_threshold_left)
'check if actual value falls below threshold, avoiding the rounded peak'
median_left = np.nanpercentile(list(set(filter_data[left:date])), flow_percent_threshold_left)
for (index_left, der) in enumerate(reversed(spl_first_left(x_axis_left))):
if ((der < spl_first_left_median) and (filter_data[(date - index_left)] < median_left)):
left = (date - index_left)
break
if ((right - date) > 10):
x_axis_right = list(range(len(filter_data[date:right])))
spl_right = ip.UnivariateSpline(x_axis_right, filter_data[date:right], k=3, s=3)
spl_first_right = spl_right.derivative(1)
spl_first_right_median = abs(np.nanpercentile(spl_first_right(x_axis_right), der_percent_threshold_right))
median_right = np.nanpercentile(list(set(filter_data[date:right])), flow_percent_threshold_right)
for (index_right, der) in enumerate(spl_first_right(x_axis_right)):
if ((abs(der) < spl_first_right_median) and (filter_data[(date + index_right)] < median_right)):
right = (date + index_right)
break
if left:
duration = int((date - left))
elif ((not left) and right):
duration = int((right - date))
else:
duration = 0
return (duration, left, right) | 8,728,510,604,129,855,000 | Left side sharp | utils/calc_fall_flush.py | calc_fall_flush_durations_2 | NoellePatterson/func-flow-plot | python | def calc_fall_flush_durations_2(filter_data, date):
der_percent_threshold_left = 50
flow_percent_threshold_left = 80
'Right side mellow'
der_percent_threshold_right = 30
flow_percent_threshold_right = 80
duration = None
left = 0
right = 0
if (date or (date == 0)):
date = int(date)
(left_maxarray, left_minarray) = peakdet(filter_data[:date], 0.01)
(right_maxarray, right_minarray) = peakdet(filter_data[date:], 0.01)
if (not list(left_minarray)):
left = 0
else:
left = int(left_minarray[(- 1)][0])
if (not list(right_minarray)):
right = 0
else:
right = int(((date - 2) + right_minarray[0][0]))
if ((date - left) > 10):
'create spline, and find derivative'
x_axis_left = list(range(len(filter_data[left:date])))
spl_left = ip.UnivariateSpline(x_axis_left, filter_data[left:date], k=3, s=3)
spl_first_left = spl_left.derivative(1)
'check if derivative value falls below certain threshold'
spl_first_left_median = np.nanpercentile(spl_first_left(x_axis_left), der_percent_threshold_left)
'check if actual value falls below threshold, avoiding the rounded peak'
median_left = np.nanpercentile(list(set(filter_data[left:date])), flow_percent_threshold_left)
for (index_left, der) in enumerate(reversed(spl_first_left(x_axis_left))):
if ((der < spl_first_left_median) and (filter_data[(date - index_left)] < median_left)):
left = (date - index_left)
break
if ((right - date) > 10):
x_axis_right = list(range(len(filter_data[date:right])))
spl_right = ip.UnivariateSpline(x_axis_right, filter_data[date:right], k=3, s=3)
spl_first_right = spl_right.derivative(1)
spl_first_right_median = abs(np.nanpercentile(spl_first_right(x_axis_right), der_percent_threshold_right))
median_right = np.nanpercentile(list(set(filter_data[date:right])), flow_percent_threshold_right)
for (index_right, der) in enumerate(spl_first_right(x_axis_right)):
if ((abs(der) < spl_first_right_median) and (filter_data[(date + index_right)] < median_right)):
right = (date + index_right)
break
if left:
duration = int((date - left))
elif ((not left) and right):
duration = int((right - date))
else:
duration = 0
return (duration, left, right) |
def wait_until_upload_url_changed(self, uploadproxy_url, timeout=TIMEOUT):
'\n Wait until upload proxy url is changed\n\n Args:\n timeout (int): Time to wait for CDI Config.\n\n Returns:\n bool: True if url is equal to uploadProxyURL.\n '
LOGGER.info(f'Wait for {self.kind} {self.name} to ensure current URL == uploadProxyURL')
samples = TimeoutSampler(wait_timeout=timeout, sleep=1, exceptions_dict=PROTOCOL_ERROR_EXCEPTION_DICT, func=self.api.get, field_selector=f'metadata.name=={self.name}')
for sample in samples:
if sample.items:
status = sample.items[0].status
current_url = status.uploadProxyURL
if (current_url == uploadproxy_url):
return | -8,378,396,817,678,230,000 | Wait until upload proxy url is changed
Args:
timeout (int): Time to wait for CDI Config.
Returns:
bool: True if url is equal to uploadProxyURL. | ocp_resources/cdi_config.py | wait_until_upload_url_changed | amastbau/openshift-python-wrapper | python | def wait_until_upload_url_changed(self, uploadproxy_url, timeout=TIMEOUT):
'\n Wait until upload proxy url is changed\n\n Args:\n timeout (int): Time to wait for CDI Config.\n\n Returns:\n bool: True if url is equal to uploadProxyURL.\n '
LOGGER.info(f'Wait for {self.kind} {self.name} to ensure current URL == uploadProxyURL')
samples = TimeoutSampler(wait_timeout=timeout, sleep=1, exceptions_dict=PROTOCOL_ERROR_EXCEPTION_DICT, func=self.api.get, field_selector=f'metadata.name=={self.name}')
for sample in samples:
if sample.items:
status = sample.items[0].status
current_url = status.uploadProxyURL
if (current_url == uploadproxy_url):
return |
def validate(coll, record, schemas):
'Validate a record for a given db\n\n Parameters\n ----------\n coll : str\n The name of the db in question\n record : dict\n The record to be validated\n schemas : dict\n The schema to validate against\n\n Returns\n -------\n rtn : bool\n True is valid\n errors: dict\n The errors encountered (if any)\n\n '
if (coll in schemas):
schema = copy.deepcopy(schemas[coll])
v = NoDescriptionValidator(schema)
return (v.validate(record), v.errors)
else:
return (True, ()) | 1,143,343,369,521,928,200 | Validate a record for a given db
Parameters
----------
coll : str
The name of the db in question
record : dict
The record to be validated
schemas : dict
The schema to validate against
Returns
-------
rtn : bool
True is valid
errors: dict
The errors encountered (if any) | regolith/schemas.py | validate | priyankaanehra/regolith | python | def validate(coll, record, schemas):
'Validate a record for a given db\n\n Parameters\n ----------\n coll : str\n The name of the db in question\n record : dict\n The record to be validated\n schemas : dict\n The schema to validate against\n\n Returns\n -------\n rtn : bool\n True is valid\n errors: dict\n The errors encountered (if any)\n\n '
if (coll in schemas):
schema = copy.deepcopy(schemas[coll])
v = NoDescriptionValidator(schema)
return (v.validate(record), v.errors)
else:
return (True, ()) |
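A small sketch of calling validate with a hand-rolled schema dictionary; the 'people' collection and its fields are made up, and NoDescriptionValidator is assumed to behave like a standard cerberus Validator apart from the extra rules shown below.

schemas = {
    'people': {
        'name': {'type': 'string', 'required': True},
        'age': {'type': 'integer'},
    }
}

ok, errors = validate('people', {'name': 'Ada', 'age': 36}, schemas)
# ok is True and errors is empty for a conforming record
ok, errors = validate('people', {'age': 'thirty-six'}, schemas)
# ok is False; errors reports the missing 'name' and the wrong 'age' type
ok, errors = validate('projects', {'title': 'regolith'}, schemas)
# collections without a schema are accepted as-is: (True, ())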
def _validate_description(self, description, field, value):
"Don't validate descriptions\n\n The rule's arguments are validated against this schema:\n {'type': 'string'}"
if False:
pass | 6,530,752,815,826,422,000 | Don't validate descriptions
The rule's arguments are validated against this schema:
{'type': 'string'} | regolith/schemas.py | _validate_description | priyankaanehra/regolith | python | def _validate_description(self, description, field, value):
"Don't validate descriptions\n\n The rule's arguments are validated against this schema:\n {'type': 'string'}"
if False:
pass |
def _validate_eallowed(self, eallowed, field, value):
"Test if value is in list\n The rule's arguments are validated against this schema:\n {'type': 'list'}\n "
if (value not in eallowed):
warn('"{}" is not in the preferred entries for "{}", please consider changing this entry to conform or add this to the ``eallowed`` field in the schema.'.format(value, field)) | 1,803,606,705,388,359,200 | Test if value is in list
The rule's arguments are validated against this schema:
{'type': 'list'} | regolith/schemas.py | _validate_eallowed | priyankaanehra/regolith | python | def _validate_eallowed(self, eallowed, field, value):
"Test if value is in list\n The rule's arguments are validated against this schema:\n {'type': 'list'}\n "
if (value not in eallowed):
warn('"{}" is not in the preferred entries for "{}", please consider changing this entry to conform or add this to the ``eallowed`` field in the schema.'.format(value, field)) |
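A sketch of the intended behaviour of eallowed: a value outside the preferred list still validates, but a UserWarning is emitted (the schema and value here are invented):

import warnings

schema = {'status': {'type': 'string', 'eallowed': ['proposed', 'started', 'finished']}}
v = NoDescriptionValidator(schema)
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    assert v.validate({'status': 'abandoned'})  # still passes validation
assert any('preferred entries' in str(w.message) for w in caught)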
def count_vocab_items(self, token: Token, counter: Dict[(str, Dict[(str, int)])]):
'\n The :class:`Vocabulary` needs to assign indices to whatever strings we see in the training\n data (possibly doing some frequency filtering and using an OOV, or out of vocabulary,\n token). This method takes a token and a dictionary of counts and increments counts for\n whatever vocabulary items are present in the token. If this is a single token ID\n representation, the vocabulary item is likely the token itself. If this is a token\n characters representation, the vocabulary items are all of the characters in the token.\n '
raise NotImplementedError | 7,749,317,807,429,429,000 | The :class:`Vocabulary` needs to assign indices to whatever strings we see in the training
data (possibly doing some frequency filtering and using an OOV, or out of vocabulary,
token). This method takes a token and a dictionary of counts and increments counts for
whatever vocabulary items are present in the token. If this is a single token ID
representation, the vocabulary item is likely the token itself. If this is a token
characters representation, the vocabulary items are all of the characters in the token. | allennlp/data/token_indexers/token_indexer.py | count_vocab_items | loopylangur/allennlp | python | def count_vocab_items(self, token: Token, counter: Dict[(str, Dict[(str, int)])]):
'\n The :class:`Vocabulary` needs to assign indices to whatever strings we see in the training\n data (possibly doing some frequency filtering and using an OOV, or out of vocabulary,\n token). This method takes a token and a dictionary of counts and increments counts for\n whatever vocabulary items are present in the token. If this is a single token ID\n representation, the vocabulary item is likely the token itself. If this is a token\n characters representation, the vocabulary items are all of the characters in the token.\n '
raise NotImplementedError |
def tokens_to_indices(self, tokens: List[Token], vocabulary: Vocabulary, index_name: str) -> Dict[(str, List[TokenType])]:
'\n Takes a list of tokens and converts them to one or more sets of indices.\n This could be just an ID for each token from the vocabulary.\n Or it could split each token into characters and return one ID per character.\n Or (for instance, in the case of byte-pair encoding) there might not be a clean\n mapping from individual tokens to indices.\n '
raise NotImplementedError | 2,723,525,293,100,898,300 | Takes a list of tokens and converts them to one or more sets of indices.
This could be just an ID for each token from the vocabulary.
Or it could split each token into characters and return one ID per character.
Or (for instance, in the case of byte-pair encoding) there might not be a clean
mapping from individual tokens to indices. | allennlp/data/token_indexers/token_indexer.py | tokens_to_indices | loopylangur/allennlp | python | def tokens_to_indices(self, tokens: List[Token], vocabulary: Vocabulary, index_name: str) -> Dict[(str, List[TokenType])]:
'\n Takes a list of tokens and converts them to one or more sets of indices.\n This could be just an ID for each token from the vocabulary.\n Or it could split each token into characters and return one ID per character.\n Or (for instance, in the case of byte-pair encoding) there might not be a clean\n mapping from individual tokens to indices.\n '
raise NotImplementedError |
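To make the indexing contract concrete, here is a toy indexer in the same spirit; it deliberately sidesteps the real TokenIndexer base class and Vocabulary, using plain strings and dicts, and all names in it are illustrative only.

from collections import defaultdict
from typing import Dict, List

class ToySingleIdIndexer:
    def count_vocab_items(self, token: str, counter: Dict[str, Dict[str, int]]) -> None:
        # one count per lower-cased surface form, in a single 'tokens' namespace
        counter['tokens'][token.lower()] += 1

    def tokens_to_indices(self, tokens: List[str], vocab: Dict[str, int], index_name: str) -> Dict[str, List[int]]:
        # unseen tokens fall back to index 0, playing the role of the OOV token
        return {index_name: [vocab.get(t.lower(), 0) for t in tokens]}

counter = defaultdict(lambda: defaultdict(int))
indexer = ToySingleIdIndexer()
for tok in ['The', 'cat', 'sat']:
    indexer.count_vocab_items(tok, counter)
vocab = {'the': 1, 'cat': 2, 'sat': 3}
print(indexer.tokens_to_indices(['the', 'dog'], vocab, 'tokens'))  # {'tokens': [1, 0]}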
def get_padding_token(self) -> TokenType:
'\n Deprecated. Please just implement the padding token in `as_padded_tensor` instead.\n TODO(Mark): remove in 1.0 release. This is only a concrete implementation to preserve\n backward compatability, otherwise it would be abstract.\n\n When we need to add padding tokens, what should they look like? This method returns a\n "blank" token of whatever type is returned by :func:`tokens_to_indices`.\n '
warnings.warn('Using a Field with get_padding_token as an inherited method, which will be depreciated in 1.0.0.Please implement as_padded_tensor instead.', FutureWarning)
return 0 | 9,106,309,190,863,320,000 | Deprecated. Please just implement the padding token in `as_padded_tensor` instead.
TODO(Mark): remove in 1.0 release. This is only a concrete implementation to preserve
backward compatability, otherwise it would be abstract.
When we need to add padding tokens, what should they look like? This method returns a
"blank" token of whatever type is returned by :func:`tokens_to_indices`. | allennlp/data/token_indexers/token_indexer.py | get_padding_token | loopylangur/allennlp | python | def get_padding_token(self) -> TokenType:
'\n Deprecated. Please just implement the padding token in `as_padded_tensor` instead.\n TODO(Mark): remove in 1.0 release. This is only a concrete implementation to preserve\n backward compatability, otherwise it would be abstract.\n\n When we need to add padding tokens, what should they look like? This method returns a\n "blank" token of whatever type is returned by :func:`tokens_to_indices`.\n '
warnings.warn('Using a Field with get_padding_token as an inherited method, which will be depreciated in 1.0.0.Please implement as_padded_tensor instead.', FutureWarning)
return 0 |
def get_padding_lengths(self, token: TokenType) -> Dict[(str, int)]:
'\n This method returns a padding dictionary for the given token that specifies lengths for\n all arrays that need padding. For example, for single ID tokens the returned dictionary\n will be empty, but for a token characters representation, this will return the number\n of characters in the token.\n '
raise NotImplementedError | -3,874,557,666,197,784,600 | This method returns a padding dictionary for the given token that specifies lengths for
all arrays that need padding. For example, for single ID tokens the returned dictionary
will be empty, but for a token characters representation, this will return the number
of characters in the token. | allennlp/data/token_indexers/token_indexer.py | get_padding_lengths | loopylangur/allennlp | python | def get_padding_lengths(self, token: TokenType) -> Dict[(str, int)]:
'\n This method returns a padding dictionary for the given token that specifies lengths for\n all arrays that need padding. For example, for single ID tokens the returned dictionary\n will be empty, but for a token characters representation, this will return the number\n of characters in the token.\n '
raise NotImplementedError |
def get_token_min_padding_length(self) -> int:
'\n This method returns the minimum padding length required for this TokenIndexer.\n For example, the minimum padding length of `SingleIdTokenIndexer` is the largest\n size of filter when using `CnnEncoder`.\n '
return self._token_min_padding_length | 5,854,117,235,276,605,000 | This method returns the minimum padding length required for this TokenIndexer.
For example, the minimum padding length of `SingleIdTokenIndexer` is the largest
size of filter when using `CnnEncoder`. | allennlp/data/token_indexers/token_indexer.py | get_token_min_padding_length | loopylangur/allennlp | python | def get_token_min_padding_length(self) -> int:
'\n This method returns the minimum padding length required for this TokenIndexer.\n For example, the minimum padding length of `SingleIdTokenIndexer` is the largest\n size of filter when using `CnnEncoder`.\n '
return self._token_min_padding_length |
def as_padded_tensor(self, tokens: Dict[(str, List[TokenType])], desired_num_tokens: Dict[(str, int)], padding_lengths: Dict[(str, int)]) -> Dict[(str, torch.Tensor)]:
'\n This method pads a list of tokens to ``desired_num_tokens`` and returns that padded list\n of input tokens as a torch Tensor. If the input token list is longer than ``desired_num_tokens``\n then it will be truncated.\n\n ``padding_lengths`` is used to provide supplemental padding parameters which are needed\n in some cases. For example, it contains the widths to pad characters to when doing\n character-level padding.\n\n Note that this method should be abstract, but it is implemented to allow backward compatability.\n '
if (not self.has_warned_for_as_padded_tensor):
warnings.warn('Using a Field with pad_token_sequence, which will be depreciated in 1.0.0.Please implement as_padded_tensor instead.', FutureWarning)
self.has_warned_for_as_padded_tensor = True
padded = self.pad_token_sequence(tokens, desired_num_tokens, padding_lengths)
return {key: torch.LongTensor(array) for (key, array) in padded.items()} | 6,763,238,428,948,606,000 | This method pads a list of tokens to ``desired_num_tokens`` and returns that padded list
of input tokens as a torch Tensor. If the input token list is longer than ``desired_num_tokens``
then it will be truncated.
``padding_lengths`` is used to provide supplemental padding parameters which are needed
in some cases. For example, it contains the widths to pad characters to when doing
character-level padding.
Note that this method should be abstract, but it is implemented to allow backward compatability. | allennlp/data/token_indexers/token_indexer.py | as_padded_tensor | loopylangur/allennlp | python | def as_padded_tensor(self, tokens: Dict[(str, List[TokenType])], desired_num_tokens: Dict[(str, int)], padding_lengths: Dict[(str, int)]) -> Dict[(str, torch.Tensor)]:
'\n This method pads a list of tokens to ``desired_num_tokens`` and returns that padded list\n of input tokens as a torch Tensor. If the input token list is longer than ``desired_num_tokens``\n then it will be truncated.\n\n ``padding_lengths`` is used to provide supplemental padding parameters which are needed\n in some cases. For example, it contains the widths to pad characters to when doing\n character-level padding.\n\n Note that this method should be abstract, but it is implemented to allow backward compatability.\n '
if (not self.has_warned_for_as_padded_tensor):
warnings.warn('Using a Field with pad_token_sequence, which will be depreciated in 1.0.0.Please implement as_padded_tensor instead.', FutureWarning)
self.has_warned_for_as_padded_tensor = True
padded = self.pad_token_sequence(tokens, desired_num_tokens, padding_lengths)
return {key: torch.LongTensor(array) for (key, array) in padded.items()} |
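A hedged sketch of the padding that as_padded_tensor performs for plain single-ID tokens: truncate or right-pad with zeros to the desired length, then wrap in a LongTensor (pad_ids is a stand-in helper, not the allennlp implementation):

import torch

def pad_ids(ids, desired_len, pad_value=0):
    # truncate or right-pad a list of token ids to a fixed length
    padded = (ids + [pad_value] * desired_len)[:desired_len]
    return torch.LongTensor(padded)

print(pad_ids([5, 9, 2], 5))  # tensor([5, 9, 2, 0, 0])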
def pad_token_sequence(self, tokens: Dict[(str, List[TokenType])], desired_num_tokens: Dict[(str, int)], padding_lengths: Dict[(str, int)]) -> Dict[(str, TokenType)]:
'\n Deprecated. Please use `as_padded_tensor` instead.\n TODO(Mark): remove in 1.0 release.\n '
raise NotImplementedError | 4,965,965,602,543,824,000 | Deprecated. Please use `as_padded_tensor` instead.
TODO(Mark): remove in 1.0 release. | allennlp/data/token_indexers/token_indexer.py | pad_token_sequence | loopylangur/allennlp | python | def pad_token_sequence(self, tokens: Dict[(str, List[TokenType])], desired_num_tokens: Dict[(str, int)], padding_lengths: Dict[(str, int)]) -> Dict[(str, TokenType)]:
'\n Deprecated. Please use `as_padded_tensor` instead.\n TODO(Mark): remove in 1.0 release.\n '
raise NotImplementedError |
def get_keys(self, index_name: str) -> List[str]:
'\n Return a list of the keys this indexer return from ``tokens_to_indices``.\n '
return [index_name] | -478,031,282,990,556,700 | Return a list of the keys this indexer return from ``tokens_to_indices``. | allennlp/data/token_indexers/token_indexer.py | get_keys | loopylangur/allennlp | python | def get_keys(self, index_name: str) -> List[str]:
'\n        Return a list of the keys this indexer return from ``tokens_to_indices``.\n        '
return [index_name] |
def run_executer(params, train_input_shapes=None, eval_input_shapes=None, train_input_fn=None, eval_input_fn=None):
'Runs Mask RCNN model on distribution strategy defined by the user.'
executer = tpu_executor.TPUEstimatorExecuter(unet_model.unet_model_fn, params, train_input_shapes=train_input_shapes, eval_input_shapes=eval_input_shapes)
if (FLAGS.mode == 'train'):
assert (train_input_fn is not None)
results = executer.train(train_input_fn)
elif (FLAGS.mode == 'eval'):
assert (eval_input_fn is not None)
results = executer.evaluate(eval_input_fn)
elif (FLAGS.mode == 'train_and_eval'):
assert (train_input_fn is not None)
assert (eval_input_fn is not None)
results = executer.train_and_eval(train_input_fn, eval_input_fn)
else:
raise ValueError('Mode must be one of `train`, `eval`, or `train_and_eval`')
return results | -3,124,367,094,866,476,500 | Runs Mask RCNN model on distribution strategy defined by the user. | models/official/unet3d/unet_main.py | run_executer | tensorflow/tpu-demos | python | def run_executer(params, train_input_shapes=None, eval_input_shapes=None, train_input_fn=None, eval_input_fn=None):
executer = tpu_executor.TPUEstimatorExecuter(unet_model.unet_model_fn, params, train_input_shapes=train_input_shapes, eval_input_shapes=eval_input_shapes)
if (FLAGS.mode == 'train'):
assert (train_input_fn is not None)
results = executer.train(train_input_fn)
elif (FLAGS.mode == 'eval'):
assert (eval_input_fn is not None)
results = executer.evaluate(eval_input_fn)
elif (FLAGS.mode == 'train_and_eval'):
assert (train_input_fn is not None)
assert (eval_input_fn is not None)
results = executer.train_and_eval(train_input_fn, eval_input_fn)
else:
raise ValueError('Mode must be one of `train`, `eval`, or `train_and_eval`')
return results |
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)')
(parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion'),)
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') | -7,860,622,762,592,880,000 | Add model-specific arguments to the parser. | models/transformer.py | add_args | NCTUMLlab/Adversarial-Masking-Transformers-for-Language-Understanding | python | @staticmethod
def add_args(parser):
parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)')
(parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion'),)
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') |
@classmethod
def build_model(cls, args, task):
'Build a new model instance.'
base_architecture(args)
if (not hasattr(args, 'max_source_positions')):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if (not hasattr(args, 'max_target_positions')):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
(src_dict, tgt_dict) = (task.source_dictionary, task.target_dictionary)
if (len(task.datasets) > 0):
src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
else:
src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if (src_dict != tgt_dict):
raise ValueError('--share-all-embeddings requires a joined dictionary')
if (args.encoder_embed_dim != args.decoder_embed_dim):
raise ValueError('--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if (args.decoder_embed_path and (args.decoder_embed_path != args.encoder_embed_path)):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(src_dict, args.encoder_embed_dim, args.encoder_embed_path)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(src_dict, args.encoder_embed_dim, args.encoder_embed_path)
decoder_embed_tokens = build_embedding(tgt_dict, args.decoder_embed_dim, args.decoder_embed_path)
bertencoder = BertModel.from_pretrained(args.bert_model_name)
args.bert_out_dim = bertencoder.hidden_size
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) | -8,093,440,201,363,817,000 | Build a new model instance. | models/transformer.py | build_model | NCTUMLlab/Adversarial-Masking-Transformers-for-Language-Understanding | python | @classmethod
def build_model(cls, args, task):
base_architecture(args)
if (not hasattr(args, 'max_source_positions')):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if (not hasattr(args, 'max_target_positions')):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
(src_dict, tgt_dict) = (task.source_dictionary, task.target_dictionary)
if (len(task.datasets) > 0):
src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
else:
src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if (src_dict != tgt_dict):
raise ValueError('--share-all-embeddings requires a joined dictionary')
if (args.encoder_embed_dim != args.decoder_embed_dim):
raise ValueError('--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if (args.decoder_embed_path and (args.decoder_embed_path != args.encoder_embed_path)):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(src_dict, args.encoder_embed_dim, args.encoder_embed_path)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(src_dict, args.encoder_embed_dim, args.encoder_embed_path)
decoder_embed_tokens = build_embedding(tgt_dict, args.decoder_embed_dim, args.decoder_embed_path)
bertencoder = BertModel.from_pretrained(args.bert_model_name)
args.bert_out_dim = bertencoder.hidden_size
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return TransformerModel(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) |
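For orientation, the build_embedding helper above boils down to an nn.Embedding with a padding index; a rough standalone equivalent with made-up sizes (the initialisation details may differ from the project's own Embedding helper):

import torch.nn as nn

vocab_size, embed_dim, padding_idx = 10000, 512, 1
emb = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx)
nn.init.normal_(emb.weight, mean=0, std=embed_dim ** -0.5)  # common fairseq-style init
nn.init.constant_(emb.weight[padding_idx], 0)                # keep the padding row at zero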
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)')
(parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion'),)
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') | -7,860,622,762,592,880,000 | Add model-specific arguments to the parser. | models/transformer.py | add_args | NCTUMLlab/Adversarial-Masking-Transformers-for-Language-Understanding | python | @staticmethod
def add_args(parser):
parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)')
(parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion'),)
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') |
@classmethod
def build_model(cls, args, task):
'Build a new model instance.'
base_architecture(args)
if (not hasattr(args, 'max_source_positions')):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if (not hasattr(args, 'max_target_positions')):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
(src_dict, tgt_dict) = (task.source_dictionary, task.target_dictionary)
if (len(task.datasets) > 0):
src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
else:
src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if (src_dict != tgt_dict):
raise ValueError('--share-all-embeddings requires a joined dictionary')
if (args.encoder_embed_dim != args.decoder_embed_dim):
raise ValueError('--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if (args.decoder_embed_path and (args.decoder_embed_path != args.encoder_embed_path)):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(src_dict, args.encoder_embed_dim, args.encoder_embed_path)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(src_dict, args.encoder_embed_dim, args.encoder_embed_path)
decoder_embed_tokens = build_embedding(tgt_dict, args.decoder_embed_dim, args.decoder_embed_path)
bertencoder = BertModel.from_pretrained(args.bert_model_name)
args.bert_out_dim = bertencoder.hidden_size
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) | 2,629,639,965,958,634,000 | Build a new model instance. | models/transformer.py | build_model | NCTUMLlab/Adversarial-Masking-Transformers-for-Language-Understanding | python | @classmethod
def build_model(cls, args, task):
base_architecture(args)
if (not hasattr(args, 'max_source_positions')):
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if (not hasattr(args, 'max_target_positions')):
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
(src_dict, tgt_dict) = (task.source_dictionary, task.target_dictionary)
if (len(task.datasets) > 0):
src_berttokenizer = next(iter(task.datasets.values())).berttokenizer
else:
src_berttokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
def build_embedding(dictionary, embed_dim, path=None):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx)
if path:
embed_dict = utils.parse_embedding(path)
utils.load_embedding(embed_dict, dictionary, emb)
return emb
if args.share_all_embeddings:
if (src_dict != tgt_dict):
raise ValueError('--share-all-embeddings requires a joined dictionary')
if (args.encoder_embed_dim != args.decoder_embed_dim):
raise ValueError('--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim')
if (args.decoder_embed_path and (args.decoder_embed_path != args.encoder_embed_path)):
raise ValueError('--share-all-embeddings not compatible with --decoder-embed-path')
encoder_embed_tokens = build_embedding(src_dict, args.encoder_embed_dim, args.encoder_embed_path)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = build_embedding(src_dict, args.encoder_embed_dim, args.encoder_embed_path)
decoder_embed_tokens = build_embedding(tgt_dict, args.decoder_embed_dim, args.decoder_embed_path)
bertencoder = BertModel.from_pretrained(args.bert_model_name)
args.bert_out_dim = bertencoder.hidden_size
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return TransformerS2Model(encoder, decoder, bertencoder, src_berttokenizer, args.mask_cls_sep, args) |
def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs):
"\n Run the forward pass for an encoder-decoder model.\n\n First feed a batch of source tokens through the encoder. Then, feed the\n encoder output and previous decoder outputs (i.e., input feeding/teacher\n forcing) to the decoder to produce the next outputs::\n\n encoder_out = self.encoder(src_tokens, src_lengths)\n return self.decoder(prev_output_tokens, encoder_out)\n\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (LongTensor): source sentence lengths of shape `(batch)`\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for input feeding/teacher forcing\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n "
bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad())
(bert_encoder_out, _) = self.bert_encoder(bert_input, output_all_encoded_layers=True, attention_mask=(~ bert_encoder_padding_mask))
bert_encoder_out = bert_encoder_out[self.bert_output_layer]
if self.mask_cls_sep:
bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls())
bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep())
bert_encoder_out = bert_encoder_out.permute(1, 0, 2).contiguous()
bert_encoder_out = {'bert_encoder_out': bert_encoder_out, 'bert_encoder_padding_mask': bert_encoder_padding_mask}
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out)
decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, bert_encoder_out=bert_encoder_out, **kwargs)
return decoder_out | -2,871,094,157,983,944,700 | Run the forward pass for an encoder-decoder model.
First feed a batch of source tokens through the encoder. Then, feed the
encoder output and previous decoder outputs (i.e., input feeding/teacher
forcing) to the decoder to produce the next outputs::
encoder_out = self.encoder(src_tokens, src_lengths)
return self.decoder(prev_output_tokens, encoder_out)
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (LongTensor): source sentence lengths of shape `(batch)`
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for input feeding/teacher forcing
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs | models/transformer.py | forward | NCTUMLlab/Adversarial-Masking-Transformers-for-Language-Understanding | python | def forward(self, src_tokens, src_lengths, prev_output_tokens, bert_input, **kwargs):
"\n Run the forward pass for an encoder-decoder model.\n\n First feed a batch of source tokens through the encoder. Then, feed the\n encoder output and previous decoder outputs (i.e., input feeding/teacher\n forcing) to the decoder to produce the next outputs::\n\n encoder_out = self.encoder(src_tokens, src_lengths)\n return self.decoder(prev_output_tokens, encoder_out)\n\n Args:\n src_tokens (LongTensor): tokens in the source language of shape\n `(batch, src_len)`\n src_lengths (LongTensor): source sentence lengths of shape `(batch)`\n prev_output_tokens (LongTensor): previous decoder outputs of shape\n `(batch, tgt_len)`, for input feeding/teacher forcing\n\n Returns:\n tuple:\n - the decoder's output of shape `(batch, tgt_len, vocab)`\n - a dictionary with any model-specific outputs\n "
bert_encoder_padding_mask = bert_input.eq(self.berttokenizer.pad())
(bert_encoder_out, _) = self.bert_encoder(bert_input, output_all_encoded_layers=True, attention_mask=(~ bert_encoder_padding_mask))
bert_encoder_out = bert_encoder_out[self.bert_output_layer]
if self.mask_cls_sep:
bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.cls())
bert_encoder_padding_mask += bert_input.eq(self.berttokenizer.sep())
bert_encoder_out = bert_encoder_out.permute(1, 0, 2).contiguous()
bert_encoder_out = {'bert_encoder_out': bert_encoder_out, 'bert_encoder_padding_mask': bert_encoder_padding_mask}
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, bert_encoder_out=bert_encoder_out)
decoder_out = self.decoder(prev_output_tokens, encoder_out=encoder_out, bert_encoder_out=bert_encoder_out, **kwargs)
return decoder_out |
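The forward pass above follows a fixed recipe: build a padding mask over the BERT input, optionally extend it so the [CLS] and [SEP] tokens are hidden as well, reshape the chosen BERT layer from batch-first to sequence-first, and hand the resulting dictionary to both the encoder and the decoder for fusion. The sketch below reproduces just that mask-and-reshape step in plain PyTorch; the special-token ids, the helper name build_bert_fusion_inputs, and the random tensor standing in for the BERT output are invented for illustration and are not part of the repository.

import torch

# Hypothetical special-token ids standing in for berttokenizer.pad()/cls()/sep().
PAD_ID, CLS_ID, SEP_ID = 0, 101, 102

def build_bert_fusion_inputs(bert_input, bert_layer_out, mask_cls_sep=True):
    # bert_input:     (batch, bert_len) LongTensor of BERT token ids
    # bert_layer_out: (batch, bert_len, hidden) hidden states of one BERT layer
    padding_mask = bert_input.eq(PAD_ID)  # True where attention should ignore the position
    if mask_cls_sep:
        # hide [CLS] and [SEP] as well, so only real source tokens are attended to
        padding_mask = padding_mask | bert_input.eq(CLS_ID) | bert_input.eq(SEP_ID)
    # (batch, len, hidden) -> (len, batch, hidden), the sequence-first layout used above
    bert_out = bert_layer_out.permute(1, 0, 2).contiguous()
    return {'bert_encoder_out': bert_out, 'bert_encoder_padding_mask': padding_mask}

# Toy usage: two sentences, BERT length 5, hidden size 8.
bert_input = torch.tensor([[101, 7592, 2088, 102, 0],
                           [101, 7592, 102, 0, 0]])
bert_layer_out = torch.randn(2, 5, 8)
fused = build_bert_fusion_inputs(bert_input, bert_layer_out)
print(fused['bert_encoder_padding_mask'])  # pad, [CLS] and [SEP] positions are True
print(fused['bert_encoder_out'].shape)     # torch.Size([5, 2, 8])

The sketch combines the three masks with the boolean | operator; the method above accumulates them with +=, but the intent in both cases is a logical OR.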
@staticmethod
def add_args(parser):
'Add model-specific arguments to the parser.'
parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)')
(parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion'),)
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') | -7,860,622,762,592,880,000 | Add model-specific arguments to the parser. | models/transformer.py | add_args | NCTUMLlab/Adversarial-Masking-Transformers-for-Language-Understanding | python | @staticmethod
def add_args(parser):
parser.add_argument('--activation-fn', choices=utils.get_available_activation_fns(), help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D', help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D', help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D', help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR', help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N', help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N', help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N', help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N', help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true', help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true', help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR', help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N', help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N', help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N', help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N', help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true', help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true', help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true', help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true', help='share encoder, decoder and output embeddings (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true', help='if set, disables positional embeddings (outside self attention)')
(parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR', help='comma separated list of adaptive softmax cutoff points. Must be used with adaptive_loss criterion'),)
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D', help='sets adaptive softmax dropout for the tail projections') |
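add_args is the hook a fairseq-style trainer calls while building the command-line parser, so every hyperparameter listed above becomes a flag that later shows up as an attribute of args. Below is a minimal standalone sketch of the same registration pattern; the TinyModel class, the subset of flags, and the default values are made up for illustration and do not reflect the repository's actual defaults (in fairseq, defaults normally come from the architecture functions rather than from add_args).

import argparse

class TinyModel:
    """Toy stand-in for a model class that exposes an add_args hook."""

    @staticmethod
    def add_args(parser):
        # each hyperparameter becomes one CLI flag, mirroring the pattern above
        parser.add_argument('--encoder-layers', type=int, metavar='N', default=6,
                            help='num encoder layers')
        parser.add_argument('--dropout', type=float, metavar='D', default=0.1,
                            help='dropout probability')
        parser.add_argument('--encoder-normalize-before', action='store_true',
                            help='apply layernorm before each encoder block')

parser = argparse.ArgumentParser()
TinyModel.add_args(parser)
args = parser.parse_args(['--encoder-layers', '12', '--encoder-normalize-before'])
print(args.encoder_layers, args.dropout, args.encoder_normalize_before)  # 12 0.1 True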