code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k)
---|---
def list_taxa(pdb_list, sleep_time=.1):
'''Given a list of PDB IDs, look up their associated species
This function digs through the search results returned
by the get_all_info() function and returns any information on
taxonomy included within the description.
The PDB website description of each entry includes the name
of the species (and sometimes details of organ or body part)
for each protein structure sample.
Parameters
----------
pdb_list : list of str
List of PDB IDs
sleep_time : float
Time (in seconds) to wait between requests. If this number is too small
the API will stop working, but it appears to vary among different systems
Returns
-------
taxa : list of str
A list of the names or classifications of species
associated with entries
Examples
--------
>>> crispr_query = make_query('crispr')
>>> crispr_results = do_search(crispr_query)
>>> print(list_taxa(crispr_results[:10]))
['Thermus thermophilus',
'Sulfolobus solfataricus P2',
'Hyperthermus butylicus DSM 5456',
'unidentified phage',
'Sulfolobus solfataricus P2',
'Pseudomonas aeruginosa UCBPP-PA14',
'Pseudomonas aeruginosa UCBPP-PA14',
'Pseudomonas aeruginosa UCBPP-PA14',
'Sulfolobus solfataricus',
'Thermus thermophilus HB8']
'''
if len(pdb_list)*sleep_time > 30:
warnings.warn("Because of API limitations, this function\
will take at least " + str(len(pdb_list)*sleep_time) + " seconds to return results.\
If you need greater speed, try modifying the optional argument sleep_time=.1, (although \
this may cause the search to time out)" )
taxa = []
for pdb_id in pdb_list:
all_info = get_all_info(pdb_id)
species_results = walk_nested_dict(all_info, 'Taxonomy', maxdepth=25,outputs=[])
first_result = walk_nested_dict(species_results,'@name',outputs=[])
if first_result:
taxa.append(first_result[-1])
else:
taxa.append('Unknown')
time.sleep(sleep_time)
return taxa | Given a list of PDB IDs, look up their associated species
This function digs through the search results returned
by the get_all_info() function and returns any information on
taxonomy included within the description.
The PDB website description of each entry includes the name
of the species (and sometimes details of organ or body part)
for each protein structure sample.
Parameters
----------
pdb_list : list of str
List of PDB IDs
sleep_time : float
Time (in seconds) to wait between requests. If this number is too small
the API will stop working, but it appears to vary among different systems
Returns
-------
taxa : list of str
A list of the names or classifications of species
associated with entries
Examples
--------
>>> crispr_query = make_query('crispr')
>>> crispr_results = do_search(crispr_query)
>>> print(list_taxa(crispr_results[:10]))
['Thermus thermophilus',
'Sulfolobus solfataricus P2',
'Hyperthermus butylicus DSM 5456',
'unidentified phage',
'Sulfolobus solfataricus P2',
'Pseudomonas aeruginosa UCBPP-PA14',
'Pseudomonas aeruginosa UCBPP-PA14',
'Pseudomonas aeruginosa UCBPP-PA14',
'Sulfolobus solfataricus',
'Thermus thermophilus HB8'] |
def _get_tmp_gcs_bucket(cls, writer_spec):
"""Returns bucket used for writing tmp files."""
if cls.TMP_BUCKET_NAME_PARAM in writer_spec:
return writer_spec[cls.TMP_BUCKET_NAME_PARAM]
return cls._get_gcs_bucket(writer_spec) | Returns bucket used for writing tmp files. |
def pow(base, exp):
"""Returns element-wise result of base element raised to powers from exp element.
Both inputs can be Symbol or scalar number.
Broadcasting is not supported. Use `broadcast_pow` instead.
`sym.pow` is being deprecated, please use `sym.power` instead.
Parameters
---------
base : Symbol or scalar
The base symbol
exp : Symbol or scalar
The exponent symbol
Returns
-------
Symbol or scalar
The bases in x raised to the exponents in y.
Examples
--------
>>> mx.sym.pow(2, 3)
8
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.pow(x, 2)
>>> z.eval(x=mx.nd.array([1,2]))[0].asnumpy()
array([ 1., 4.], dtype=float32)
>>> z = mx.sym.pow(3, y)
>>> z.eval(y=mx.nd.array([2,3]))[0].asnumpy()
array([ 9., 27.], dtype=float32)
>>> z = mx.sym.pow(x, y)
>>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([2,3]))[0].asnumpy()
array([ 9., 64.], dtype=float32)
"""
if isinstance(base, Symbol) and isinstance(exp, Symbol):
return _internal._Power(base, exp)
if isinstance(base, Symbol) and isinstance(exp, Number):
return _internal._PowerScalar(base, scalar=exp)
if isinstance(base, Number) and isinstance(exp, Symbol):
return _internal._RPowerScalar(exp, scalar=base)
if isinstance(base, Number) and isinstance(exp, Number):
return base**exp
else:
raise TypeError('types (%s, %s) not supported' % (str(type(base)), str(type(exp)))) | Returns element-wise result of base element raised to powers from exp element.
Both inputs can be Symbol or scalar number.
Broadcasting is not supported. Use `broadcast_pow` instead.
`sym.pow` is being deprecated, please use `sym.power` instead.
Parameters
---------
base : Symbol or scalar
The base symbol
exp : Symbol or scalar
The exponent symbol
Returns
-------
Symbol or scalar
The bases in x raised to the exponents in y.
Examples
--------
>>> mx.sym.pow(2, 3)
8
>>> x = mx.sym.Variable('x')
>>> y = mx.sym.Variable('y')
>>> z = mx.sym.pow(x, 2)
>>> z.eval(x=mx.nd.array([1,2]))[0].asnumpy()
array([ 1., 4.], dtype=float32)
>>> z = mx.sym.pow(3, y)
>>> z.eval(y=mx.nd.array([2,3]))[0].asnumpy()
array([ 9., 27.], dtype=float32)
>>> z = mx.sym.pow(x, y)
>>> z.eval(x=mx.nd.array([3,4]), y=mx.nd.array([2,3]))[0].asnumpy()
array([ 9., 64.], dtype=float32) |
def dropEvent(self, event):
"""
Listens for queries being dragged and dropped onto this tree.
:param event | <QDropEvent>
"""
# overload the current filtering options
data = event.mimeData()
if data.hasFormat('application/x-orb-table') and \
data.hasFormat('application/x-orb-query'):
tableName = self.tableTypeName()
if nstr(data.data('application/x-orb-table')) == tableName:
data = nstr(data.data('application/x-orb-query'))
query = Q.fromXmlString(data)
self.setQuery(query)
return
elif self.tableType() and data.hasFormat('application/x-orb-records'):
from projexui.widgets.xorbtreewidget import XOrbTreeWidget
records = XOrbTreeWidget.dataRestoreRecords(data)
for record in records:
if isinstance(record, self.tableType()):
self.setCurrentRecord(record)
return
super(XOrbRecordBox, self).dropEvent(event) | Listens for queries being dragged and dropped onto this tree.
:param event | <QDropEvent> |
def describe_numeric_1d(series, **kwargs):
"""Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).
Also create histograms (mini and full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys.
"""
# Format a number as a percentage. For example 0.25 will be turned to 25%.
_percentile_format = "{:.0%}"
stats = dict()
stats['type'] = base.TYPE_NUM
stats['mean'] = series.mean()
stats['std'] = series.std()
stats['variance'] = series.var()
stats['min'] = series.min()
stats['max'] = series.max()
stats['range'] = stats['max'] - stats['min']
# To avoid to compute it several times
_series_no_na = series.dropna()
for percentile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
# The dropna() is a workaround for https://github.com/pydata/pandas/issues/13098
stats[_percentile_format.format(percentile)] = _series_no_na.quantile(percentile)
stats['iqr'] = stats['75%'] - stats['25%']
stats['kurtosis'] = series.kurt()
stats['skewness'] = series.skew()
stats['sum'] = series.sum()
stats['mad'] = series.mad()
stats['cv'] = stats['std'] / stats['mean'] if stats['mean'] else np.NaN
stats['n_zeros'] = (len(series) - np.count_nonzero(series))
stats['p_zeros'] = stats['n_zeros'] * 1.0 / len(series)
# Histograms
stats['histogram'] = histogram(series, **kwargs)
stats['mini_histogram'] = mini_histogram(series, **kwargs)
return pd.Series(stats, name=series.name) | Compute summary statistics of a numerical (`TYPE_NUM`) variable (a Series).
Also create histograms (mini and full) of its distribution.
Parameters
----------
series : Series
The variable to describe.
Returns
-------
Series
The description of the variable as a Series with index being stats keys. |
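The percentile and dispersion bookkeeping above can be reproduced with plain pandas and numpy; a minimal sketch on a toy Series (the `histogram`, `mini_histogram`, and `base` helpers are package-specific and omitted here):
import numpy as np
import pandas as pd

series = pd.Series([1.0, 2.0, 2.5, 4.0, 10.0], name='toy')
stats = {}
# same percentile keys as above: '5%', '25%', '50%', '75%', '95%'
for percentile in np.array([0.05, 0.25, 0.5, 0.75, 0.95]):
    stats["{:.0%}".format(percentile)] = series.dropna().quantile(percentile)
stats['iqr'] = stats['75%'] - stats['25%']           # interquartile range
stats['cv'] = series.std() / series.mean()           # coefficient of variation
stats['n_zeros'] = len(series) - np.count_nonzero(series)
print(stats)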
def to_dict(self):
"""Return common list python object.
:returns: Dictionary of groups and data
:rtype: dict
"""
list_data = []
for key, value in list(self.data.items()):
row = list(key)
row.append(value)
list_data.append(row)
return {
'groups': self.groups,
'data': list_data
} | Return common list python object.
:returns: Dictionary of groups and data
:rtype: dict |
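The flattening step is easy to see in isolation; a small sketch assuming `self.data` maps tuple keys (one element per group) to an aggregated value:
groups = ['country', 'year']
data = {('DE', 2019): 41, ('FR', 2019): 37}

list_data = []
for key, value in list(data.items()):
    row = list(key)       # the tuple key supplies the leading columns
    row.append(value)     # the aggregated value becomes the last column
    list_data.append(row)

print({'groups': groups, 'data': list_data})
# {'groups': ['country', 'year'], 'data': [['DE', 2019, 41], ['FR', 2019, 37]]}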
def mptt_before_update(mapper, connection, instance):
""" Based on this example:
http://stackoverflow.com/questions/889527/move-node-in-nested-set
"""
node_id = getattr(instance, instance.get_pk_name())
table = _get_tree_table(mapper)
db_pk = instance.get_pk_column()
default_level = instance.get_default_level()
table_pk = getattr(table.c, db_pk.name)
mptt_move_inside = None
left_sibling = None
left_sibling_tree_id = None
if hasattr(instance, 'mptt_move_inside'):
mptt_move_inside = instance.mptt_move_inside
if hasattr(instance, 'mptt_move_before'):
(
right_sibling_left,
right_sibling_right,
right_sibling_parent,
right_sibling_level,
right_sibling_tree_id
) = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.parent_id,
table.c.level,
table.c.tree_id
]
).where(
table_pk == instance.mptt_move_before
)
).fetchone()
current_lvl_nodes = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.parent_id,
table.c.tree_id
]
).where(
and_(
table.c.level == right_sibling_level,
table.c.tree_id == right_sibling_tree_id,
table.c.lft < right_sibling_left
)
)
).fetchall()
if current_lvl_nodes:
(
left_sibling_left,
left_sibling_right,
left_sibling_parent,
left_sibling_tree_id
) = current_lvl_nodes[-1]
instance.parent_id = left_sibling_parent
left_sibling = {
'lft': left_sibling_left,
'rgt': left_sibling_right,
'is_parent': False
}
# if move_before to top level
elif not right_sibling_parent:
left_sibling_tree_id = right_sibling_tree_id - 1
# if placed after a particular node
if hasattr(instance, 'mptt_move_after'):
(
left_sibling_left,
left_sibling_right,
left_sibling_parent,
left_sibling_tree_id
) = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.parent_id,
table.c.tree_id
]
).where(
table_pk == instance.mptt_move_after
)
).fetchone()
instance.parent_id = left_sibling_parent
left_sibling = {
'lft': left_sibling_left,
'rgt': left_sibling_right,
'is_parent': False
}
""" Get subtree from node
SELECT id, name, level FROM my_tree
WHERE left_key >= $left_key AND right_key <= $right_key
ORDER BY left_key
"""
subtree = connection.execute(
select([table_pk])
.where(
and_(
table.c.lft >= instance.left,
table.c.rgt <= instance.right,
table.c.tree_id == instance.tree_id
)
).order_by(
table.c.lft
)
).fetchall()
subtree = [x[0] for x in subtree]
""" step 0: Initialize parameters.
Put there left and right position of moving node
"""
(
node_pos_left,
node_pos_right,
node_tree_id,
node_parent_id,
node_level
) = connection.execute(
select(
[
table.c.lft,
table.c.rgt,
table.c.tree_id,
table.c.parent_id,
table.c.level
]
).where(
table_pk == node_id
)
).fetchone()
# if instance just update w/o move
# XXX why this str() around parent_id comparison?
if not left_sibling \
and str(node_parent_id) == str(instance.parent_id) \
and not mptt_move_inside:
if left_sibling_tree_id is None:
return
# fix tree shorting
if instance.parent_id is not None:
(
parent_id,
parent_pos_right,
parent_pos_left,
parent_tree_id,
parent_level
) = connection.execute(
select(
[
table_pk,
table.c.rgt,
table.c.lft,
table.c.tree_id,
table.c.level
]
).where(
table_pk == instance.parent_id
)
).fetchone()
if node_parent_id is None and node_tree_id == parent_tree_id:
instance.parent_id = None
return
# delete from old tree
mptt_before_delete(mapper, connection, instance, False)
if instance.parent_id is not None:
""" Put there right position of new parent node (there moving node
should be moved)
"""
(
parent_id,
parent_pos_right,
parent_pos_left,
parent_tree_id,
parent_level
) = connection.execute(
select(
[
table_pk,
table.c.rgt,
table.c.lft,
table.c.tree_id,
table.c.level
]
).where(
table_pk == instance.parent_id
)
).fetchone()
# 'size' of moving node (including all it's sub nodes)
node_size = node_pos_right - node_pos_left + 1
# left sibling node
if not left_sibling:
left_sibling = {
'lft': parent_pos_left,
'rgt': parent_pos_right,
'is_parent': True
}
# insert subtree in exist tree
instance.tree_id = parent_tree_id
_insert_subtree(
table,
connection,
node_size,
node_pos_left,
node_pos_right,
parent_pos_left,
parent_pos_right,
subtree,
parent_tree_id,
parent_level,
node_level,
left_sibling,
table_pk
)
else:
# if insert after
if left_sibling_tree_id or left_sibling_tree_id == 0:
tree_id = left_sibling_tree_id + 1
connection.execute(
table.update(
table.c.tree_id > left_sibling_tree_id
).values(
tree_id=table.c.tree_id + 1
)
)
# if just insert
else:
tree_id = connection.scalar(
select(
[
func.max(table.c.tree_id) + 1
]
)
)
connection.execute(
table.update(
table_pk.in_(
subtree
)
).values(
lft=table.c.lft - node_pos_left + 1,
rgt=table.c.rgt - node_pos_left + 1,
level=table.c.level - node_level + default_level,
tree_id=tree_id
)
) | Based on this example:
http://stackoverflow.com/questions/889527/move-node-in-nested-set |
def pad_sequence_to_length(sequence: List,
desired_length: int,
default_value: Callable[[], Any] = lambda: 0,
padding_on_right: bool = True) -> List:
"""
Take a list of objects and pads it to the desired length, returning the padded list. The
original list is not modified.
Parameters
----------
sequence : List
A list of objects to be padded.
desired_length : int
Maximum length of each sequence. Longer sequences are truncated to this length, and
shorter ones are padded to it.
default_value: Callable, default=lambda: 0
Callable that outputs a default value (of any type) to use as padding values. This is
a lambda to avoid using the same object when the default value is more complex, like a
list.
padding_on_right : bool, default=True
When we add padding tokens (or truncate the sequence), should we do it on the right or
the left?
Returns
-------
padded_sequence : List
"""
# Truncates the sequence to the desired length.
if padding_on_right:
padded_sequence = sequence[:desired_length]
else:
padded_sequence = sequence[-desired_length:]
# Continues to pad with default_value() until we reach the desired length.
for _ in range(desired_length - len(padded_sequence)):
if padding_on_right:
padded_sequence.append(default_value())
else:
padded_sequence.insert(0, default_value())
return padded_sequence | Take a list of objects and pads it to the desired length, returning the padded list. The
original list is not modified.
Parameters
----------
sequence : List
A list of objects to be padded.
desired_length : int
Maximum length of each sequence. Longer sequences are truncated to this length, and
shorter ones are padded to it.
default_value: Callable, default=lambda: 0
Callable that outputs a default value (of any type) to use as padding values. This is
a lambda to avoid using the same object when the default value is more complex, like a
list.
padding_on_right : bool, default=True
When we add padding tokens (or truncate the sequence), should we do it on the right or
the left?
Returns
-------
padded_sequence : List |
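A short usage sketch, assuming the function above is in scope; the expected results follow directly from the truncate-then-pad logic:
print(pad_sequence_to_length([1, 2, 3], 5))                            # [1, 2, 3, 0, 0]
print(pad_sequence_to_length([1, 2, 3], 5, padding_on_right=False))    # [0, 0, 1, 2, 3]
print(pad_sequence_to_length([1, 2, 3, 4, 5], 3))                      # [1, 2, 3]
# pass a callable so every padding element is a fresh object
print(pad_sequence_to_length([[1]], 3, default_value=list))            # [[1], [], []]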
def _add_devices_from_config(args):
""" Add devices from config. """
config = _parse_config(args.config)
for device in config['devices']:
if args.default:
if device == "default":
raise ValueError('devicename "default" in config is not allowed if default param is set')
if config['devices'][device]['host'] == args.default:
raise ValueError('host set in default param must not be defined in config')
add(device, config['devices'][device]['host'], config['devices'][device].get('adbkey', ''),
config['devices'][device].get('adb_server_ip', ''), config['devices'][device].get('adb_server_port', 5037)) | Add devices from config. |
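The shape of the parsed config can be read off the lookups above; a hypothetical example (key names inferred from the code, the device name, host, and paths are made up):
config = {
    'devices': {
        'living_room_tv': {
            'host': '192.168.1.20:5555',
            'adbkey': '~/.android/adbkey',   # optional, defaults to ''
            'adb_server_ip': '',             # optional, defaults to ''
            'adb_server_port': 5037,         # optional, defaults to 5037
        },
    },
}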
def get_time(self):
"""Time of the TIFF file
Currently, only the file modification time is supported.
Note that the modification time of the TIFF file is
dependent on the file system and may have temporal
resolution as low as 3 seconds.
"""
if isinstance(self.path, pathlib.Path):
thetime = self.path.stat().st_mtime
else:
thetime = np.nan
return thetime | Time of the TIFF file
Currently, only the file modification time is supported.
Note that the modification time of the TIFF file is
dependent on the file system and may have temporal
resolution as low as 3 seconds. |
def where(self, where: str) -> 'SASdata':
"""
This method returns a clone of the SASdata object, with the where attribute set. The original SASdata object is not affected.
:param where: the where clause to apply
:return: SAS data object
"""
sd = SASdata(self.sas, self.libref, self.table, dsopts=dict(self.dsopts))
sd.HTML = self.HTML
sd.dsopts['where'] = where
return sd | This method returns a clone of the SASdata object, with the where attribute set. The original SASdata object is not affected.
:param where: the where clause to apply
:return: SAS data object |
def reassign(self, user_ids, requester):
"""Reassign this incident to a user or list of users
:param user_ids: A non-empty list of user ids
:param requester: The email address of the individual requesting the reassignment
"""
path = '{0}'.format(self.collection.name)
assignments = []
if not user_ids:
raise Error('Must pass at least one user id')
for user_id in user_ids:
ref = {
"assignee": {
"id": user_id,
"type": "user_reference"
}
}
assignments.append(ref)
data = {
"incidents": [
{
"id": self.id,
"type": "incident_reference",
"assignments": assignments
}
]
}
extra_headers = {"From": requester}
return self.pagerduty.request('PUT', path, data=_json_dumper(data), extra_headers=extra_headers) | Reassign this incident to a user or list of users
:param user_ids: A non-empty list of user ids
:param requester: The email address of the individual requesting the reassignment |
def Ctrl_c(self, dl = 0):
"""Ctrl + c 复制
"""
self.Delay(dl)
self.keyboard.press_key(self.keyboard.control_key)
self.keyboard.tap_key("c")
self.keyboard.release_key(self.keyboard.control_key) | Ctrl + c (copy) |
def API_GET(self, courseid=None): # pylint: disable=arguments-differ
"""
List courses available to the connected client. Returns a dict in the form
::
{
"courseid1":
{
"name": "Name of the course", #the name of the course
"require_password": False, #indicates if this course requires a password or not
"is_registered": False, #indicates if the user is registered to this course or not
"tasks": #only appears if is_registered is True
{
"taskid1": "name of task1",
"taskid2": "name of task2"
#...
},
"grade": 0.0 #the current grade in the course. Only appears if is_registered is True
}
#...
}
If you use the endpoint /api/v0/courses/the_course_id, this dict will contain one entry or the page will return 404 Not Found.
"""
output = []
if courseid is None:
courses = self.course_factory.get_all_courses()
else:
try:
courses = {courseid: self.course_factory.get_course(courseid)}
except:
raise APINotFound("Course not found")
username = self.user_manager.session_username()
user_info = self.database.users.find_one({"username": username})
for courseid, course in courses.items():
if self.user_manager.course_is_open_to_user(course, username, False) or course.is_registration_possible(user_info):
data = {
"id": courseid,
"name": course.get_name(self.user_manager.session_language()),
"require_password": course.is_password_needed_for_registration(),
"is_registered": self.user_manager.course_is_open_to_user(course, username, False)
}
if self.user_manager.course_is_open_to_user(course, username, False):
data["tasks"] = {taskid: task.get_name(self.user_manager.session_language()) for taskid, task in course.get_tasks().items()}
data["grade"] = self.user_manager.get_course_cache(username, course)["grade"]
output.append(data)
return 200, output | List courses available to the connected client. Returns a dict in the form
::
{
"courseid1":
{
"name": "Name of the course", #the name of the course
"require_password": False, #indicates if this course requires a password or not
"is_registered": False, #indicates if the user is registered to this course or not
"tasks": #only appears if is_registered is True
{
"taskid1": "name of task1",
"taskid2": "name of task2"
#...
},
"grade": 0.0 #the current grade in the course. Only appears if is_registered is True
}
#...
}
If you use the endpoint /api/v0/courses/the_course_id, this dict will contain one entry or the page will return 404 Not Found. |
def get_path(self, temp_ver):
"""
Get the path of the given version in this store
Args:
temp_ver TemplateVersion: version to look for
Returns:
str: The path to the template version inside the store
Raises:
RuntimeError: if the template is not in the store
"""
if temp_ver not in self:
raise RuntimeError(
'Template: {} not present'.format(temp_ver.name)
)
return self._prefixed(temp_ver.name) | Get the path of the given version in this store
Args:
temp_ver TemplateVersion: version to look for
Returns:
str: The path to the template version inside the store
Raises:
RuntimeError: if the template is not in the store |
def get_current_qualification_score(self, name, worker_id):
"""Return the current score for a worker, on a qualification with the
provided name.
"""
qtype = self.get_qualification_type_by_name(name)
if qtype is None:
raise QualificationNotFoundException(
'No Qualification exists with name "{}"'.format(name)
)
try:
score = self.get_qualification_score(qtype["id"], worker_id)
except (WorkerLacksQualification, RevokedQualification):
score = None
return {"qtype": qtype, "score": score} | Return the current score for a worker, on a qualification with the
provided name. |
def _is_prime(bit_size, n):
"""
An implementation of Miller–Rabin for checking if a number is prime.
:param bit_size:
An integer of the number of bits in the prime number
:param n:
An integer, the prime number
:return:
A boolean
"""
r = 0
s = n - 1
while s % 2 == 0:
r += 1
s //= 2
if bit_size >= 1300:
k = 2
elif bit_size >= 850:
k = 3
elif bit_size >= 650:
k = 4
elif bit_size >= 550:
k = 5
elif bit_size >= 450:
k = 6
for _ in range(k):
a = random.randrange(2, n - 1)
x = pow(a, s, n)
if x == 1 or x == n - 1:
continue
for _ in range(r - 1):
x = pow(x, 2, n)
if x == n - 1:
break
else:
return False
return True | An implementation of Miller–Rabin for checking if a number is prime.
:param bit_size:
An integer of the number of bits in the prime number
:param n:
An integer, the prime number
:return:
A boolean |
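A usage sketch, assuming `_is_prime` and the module-level `random` import are in scope. Note that the branch table above only assigns `k` for bit sizes of 450 and up, so smaller candidates would raise a NameError as written:
import random

# 512-bit odd candidate with the top bit forced, so bit_size >= 450 and k = 6
candidate = random.getrandbits(512) | (1 << 511) | 1
print(_is_prime(512, candidate))   # True for a probable prime, False otherwise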
def remove_gaps(A, B):
"""
skip column if either is a gap
"""
a_seq, b_seq = [], []
for a, b in zip(list(A), list(B)):
if a == '-' or a == '.' or b == '-' or b == '.':
continue
a_seq.append(a)
b_seq.append(b)
return ''.join(a_seq), ''.join(b_seq) | skip column if either is a gap |
def get_table_cache_key(db_alias, table):
"""
Generates a cache key from a SQL table.
:arg db_alias: Alias of the used database
:type db_alias: str or unicode
:arg table: Name of the SQL table
:type table: str or unicode
:return: A cache key
:rtype: str
"""
cache_key = '%s:%s' % (db_alias, table)
return sha1(cache_key.encode('utf-8')).hexdigest() | Generates a cache key from a SQL table.
:arg db_alias: Alias of the used database
:type db_alias: str or unicode
:arg table: Name of the SQL table
:type table: str or unicode
:return: A cache key
:rtype: str |
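A usage sketch; `sha1` in the function body is assumed to be `hashlib.sha1`, which makes the key a stable 40-character hex string:
from hashlib import sha1   # assumed source of the sha1 used above

key = get_table_cache_key('default', 'auth_user')
print(key)        # a 40-character hex digest, stable across processes
print(len(key))   # 40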
def domain_search(auth=None, **kwargs):
'''
Search domains
CLI Example:
.. code-block:: bash
salt '*' keystoneng.domain_search
salt '*' keystoneng.domain_search name=domain1
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.search_domains(**kwargs) | Search domains
CLI Example:
.. code-block:: bash
salt '*' keystoneng.domain_search
salt '*' keystoneng.domain_search name=domain1 |
def add_update_user(self, user, capacity=None):
# type: (Union[hdx.data.user.User,Dict,str],Optional[str]) -> None
"""Add new or update existing user in organization with new metadata. Capacity eg. member, admin
must be supplied either within the User object or dictionary or using the capacity argument (which takes
precedence).
Args:
user (Union[User,Dict,str]): Either a user id or user metadata either from a User object or a dictionary
capacity (Optional[str]): Capacity of user, e.g. member or admin. Defaults to None.
Returns:
None
"""
if isinstance(user, str):
user = hdx.data.user.User.read_from_hdx(user, configuration=self.configuration)
elif isinstance(user, dict):
user = hdx.data.user.User(user, configuration=self.configuration)
if isinstance(user, hdx.data.user.User):
users = self.data.get('users')
if users is None:
users = list()
self.data['users'] = users
if capacity is not None:
user['capacity'] = capacity
self._addupdate_hdxobject(users, 'name', user)
return
raise HDXError('Type %s cannot be added as a user!' % type(user).__name__) | Add new or update existing user in organization with new metadata. Capacity, e.g. member or admin,
must be supplied either within the User object or dictionary or using the capacity argument (which takes
precedence).
Args:
user (Union[User,Dict,str]): Either a user id or user metadata either from a User object or a dictionary
capacity (Optional[str]): Capacity of user, e.g. member or admin. Defaults to None.
Returns:
None |
def scale_vmss(access_token, subscription_id, resource_group, vmss_name, capacity):
'''Change the instance count of an existing VM Scale Set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
capacity (int): New number of VMs.
Returns:
HTTP response.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'?api-version=', COMP_API])
body = '{"sku":{"capacity":"' + str(capacity) + '"}}'
return do_patch(endpoint, body, access_token) | Change the instance count of an existing VM Scale Set.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
capacity (int): New number of VMs.
Returns:
HTTP response. |
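The PATCH body is plain JSON; building it with `json.dumps` instead of string concatenation is an equivalent, slightly safer sketch:
import json

capacity = 5
body = json.dumps({'sku': {'capacity': str(capacity)}})
print(body)   # {"sku": {"capacity": "5"}}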
def parse_env(envlist):
'''parse_env will parse a single line (with prefix like ENV removed) to
a list of commands in the format KEY=VALUE For example:
ENV PYTHONBUFFER 1 --> [PYTHONBUFFER=1]
::Notes
Docker: https://docs.docker.com/engine/reference/builder/#env
'''
if not isinstance(envlist, list):
envlist = [envlist]
exports = []
for env in envlist:
pieces = re.split("( |\\\".*?\\\"|'.*?')", env)
pieces = [p for p in pieces if p.strip()]
while len(pieces) > 0:
current = pieces.pop(0)
if current.endswith('='):
# Case 1: ['A='] --> A=
next = ""
# Case 2: ['A=', '"1 2"'] --> A=1 2
if len(pieces) > 0:
next = pieces.pop(0)
exports.append("%s%s" %(current, next))
# Case 2: ['A=B'] --> A=B
elif '=' in current:
exports.append(current)
# Case 3: ENV \\
elif current.endswith('\\'):
continue
# Case 4: ['A', 'B'] --> A=B
else:
next = pieces.pop(0)
exports.append("%s=%s" %(current, next))
return exports | parse_env will parse a single line (with prefix like ENV removed) to
a list of commands in the format KEY=VALUE. For example:
ENV PYTHONBUFFER 1 --> [PYTHONBUFFER=1]
::Notes
Docker: https://docs.docker.com/engine/reference/builder/#env |
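A few calls that exercise the documented cases, assuming the function and its module-level `re` import are in scope:
print(parse_env('PYTHONBUFFER 1'))       # ['PYTHONBUFFER=1']
print(parse_env('A=1'))                  # ['A=1']
print(parse_env(['A=1', 'B 2']))         # ['A=1', 'B=2']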
def ready(self):
"""Auto load Trionyx"""
models_config.auto_load_configs()
self.auto_load_app_modules(['layouts', 'signals'])
app_menu.auto_load_model_menu()
auto_register_search_models()
tabs.auto_generate_missing_tabs() | Auto load Trionyx |
def check_key(self, key, raise_error=True, *args, **kwargs):
"""
Checks whether the key is a valid formatoption
Parameters
----------
%(check_key.parameters.no_possible_keys|name)s
Returns
-------
%(check_key.returns)s
Raises
------
%(check_key.raises)s"""
return check_key(
key, possible_keys=list(self), raise_error=raise_error,
name='formatoption keyword', *args, **kwargs) | Checks whether the key is a valid formatoption
Parameters
----------
%(check_key.parameters.no_possible_keys|name)s
Returns
-------
%(check_key.returns)s
Raises
------
%(check_key.raises)s |
def relpath_to_modname(relpath):
"""Convert relative path to module name
Within a project, a path to the source file is uniquely identified with a
module name. Relative paths of the form 'foo/bar' are *not* converted to
module names 'foo.bar', because (1) they identify directories, not regular
files, and (2) already 'foo/bar/__init__.py' would claim that conversion.
Args:
relpath (str): Relative path from some location on sys.path
Example:
>>> relpath_to_modname('ballet/util/_util.py')
'ballet.util._util'
"""
# don't try to resolve!
p = pathlib.Path(relpath)
if p.name == '__init__.py':
p = p.parent
elif p.suffix == '.py':
p = p.with_suffix('')
else:
msg = 'Cannot convert a non-python file to a modname'
msg_detail = 'The relpath given is: {}'.format(relpath)
logger.error(msg + '\n' + msg_detail)
raise ValueError(msg)
return '.'.join(p.parts) | Convert relative path to module name
Within a project, a path to the source file is uniquely identified with a
module name. Relative paths of the form 'foo/bar' are *not* converted to
module names 'foo.bar', because (1) they identify directories, not regular
files, and (2) already 'foo/bar/__init__.py' would claim that conversion.
Args:
relpath (str): Relative path from some location on sys.path
Example:
>>> relpath_to_modname('ballet/util/_util.py')
'ballet.util._util' |
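Two more cases that follow from the branches above (a package `__init__.py` maps to the package name, and non-Python paths raise), assuming the function and its `logger` are in scope:
print(relpath_to_modname('ballet/util/__init__.py'))   # 'ballet.util'

try:
    relpath_to_modname('ballet/util/data.csv')
except ValueError as exc:
    print(exc)   # Cannot convert a non-python file to a modname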
def cosine_similarity(sent1: str, sent2: str) -> float:
"""
Calculates cosine similarity between 2 sentences/documents.
Thanks to @vpekar, see http://goo.gl/ykibJY
"""
WORD = re.compile(r'\w+')
def get_cosine(vec1, vec2):
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x]**2 for x in vec1.keys()])
sum2 = sum([vec2[x]**2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
def text_to_vector(text):
words = WORD.findall(text)
return Counter(words)
vector1 = text_to_vector(sent1)
vector2 = text_to_vector(sent2)
cosine = get_cosine(vector1, vector2)
return cosine | Calculates cosine similarity between 2 sentences/documents.
Thanks to @vpekar, see http://goo.gl/ykibJY |
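A quick check of the behaviour, assuming the function is in scope along with the module-level `re`, `math`, and `collections.Counter` imports: identical texts score 1.0, disjoint ones 0.0:
print(cosine_similarity('the cat sat', 'the cat sat'))              # 1.0
print(cosine_similarity('the cat sat', 'dogs bark loudly'))         # 0.0
print(round(cosine_similarity('the cat sat', 'the cat ran'), 3))    # 0.667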
def _create_tag_lowlevel(self, tag_name, message=None, force=True,
patch=False):
"""Create a tag on the toplevel or patch repo
If the tag exists, and force is False, no tag is made. If force is True,
and a tag exists, but it is a direct ancestor of the current commit,
and there is no difference in filestate between the current commit
and the tagged commit, no tag is made. Otherwise, the old tag is
overwritten to point at the current commit.
Returns True or False indicating whether the tag was actually committed
"""
# check if tag already exists, and if it does, if it is a direct
# ancestor, and there is NO difference in the files between the tagged
# state and current state
#
# This check is mainly to avoid re-creating the same tag over and over
# on what is essentially the same commit, since tagging will
# technically create a new commit, and update the working copy to it.
#
# Without this check, say you were releasing to three different
# locations, one right after another; the first would create the tag,
# and a new tag commit. The second would then recreate the exact same
# tag, but now pointing at the commit that made the first tag.
# The third would create the tag a THIRD time, but now pointing at the
# commit that created the 2nd tag.
tags = self.get_tags(patch=patch)
old_commit = tags.get(tag_name)
if old_commit is not None:
if not force:
return False
old_rev = old_commit['rev']
# ok, now check to see if direct ancestor...
if self.is_ancestor(old_rev, '.', patch=patch):
# ...and if filestates are same
altered = self.hg('status', '--rev', old_rev, '--rev', '.',
'--no-status')
if not altered or altered == ['.hgtags']:
force = False
if not force:
return False
tag_args = ['tag', tag_name]
if message:
tag_args += ['--message', message]
# we should be ok with ALWAYS having force flag on now, since we should
# have already checked if the commit exists.. but be paranoid, in case
# we've missed some edge case...
if force:
tag_args += ['--force']
self.hg(patch=patch, *tag_args)
return True | Create a tag on the toplevel or patch repo
If the tag exists, and force is False, no tag is made. If force is True,
and a tag exists, but it is a direct ancestor of the current commit,
and there is no difference in filestate between the current commit
and the tagged commit, no tag is made. Otherwise, the old tag is
overwritten to point at the current commit.
Returns True or False indicating whether the tag was actually committed |
def save_series(self) -> None:
"""Save time series data as defined by the actual XML `writer`
element.
>>> from hydpy.core.examples import prepare_full_example_1
>>> prepare_full_example_1()
>>> from hydpy import HydPy, TestIO, XMLInterface
>>> hp = HydPy('LahnH')
>>> with TestIO():
... hp.prepare_network()
... hp.init_models()
... interface = XMLInterface('single_run.xml')
... interface.update_options()
>>> interface.update_timegrids()
>>> series_io = interface.series_io
>>> series_io.prepare_series()
>>> hp.elements.land_dill.model.sequences.fluxes.pc.series[2, 3] = 9.0
>>> hp.nodes.lahn_2.sequences.sim.series[4] = 7.0
>>> with TestIO():
... series_io.save_series()
>>> import numpy
>>> with TestIO():
... os.path.exists(
... 'LahnH/series/output/land_lahn_2_flux_pc.npy')
... os.path.exists(
... 'LahnH/series/output/land_lahn_3_flux_pc.npy')
... numpy.load(
... 'LahnH/series/output/land_dill_flux_pc.npy')[13+2, 3]
... numpy.load(
... 'LahnH/series/output/lahn_2_sim_q_mean.npy')[13+4]
True
False
9.0
7.0
"""
hydpy.pub.sequencemanager.open_netcdf_writer(
flatten=hydpy.pub.options.flattennetcdf,
isolate=hydpy.pub.options.isolatenetcdf)
self.prepare_sequencemanager()
for sequence in self._iterate_sequences():
sequence.save_ext()
hydpy.pub.sequencemanager.close_netcdf_writer() | Save time series data as defined by the actual XML `writer`
element.
>>> from hydpy.core.examples import prepare_full_example_1
>>> prepare_full_example_1()
>>> from hydpy import HydPy, TestIO, XMLInterface
>>> hp = HydPy('LahnH')
>>> with TestIO():
... hp.prepare_network()
... hp.init_models()
... interface = XMLInterface('single_run.xml')
... interface.update_options()
>>> interface.update_timegrids()
>>> series_io = interface.series_io
>>> series_io.prepare_series()
>>> hp.elements.land_dill.model.sequences.fluxes.pc.series[2, 3] = 9.0
>>> hp.nodes.lahn_2.sequences.sim.series[4] = 7.0
>>> with TestIO():
... series_io.save_series()
>>> import numpy
>>> with TestIO():
... os.path.exists(
... 'LahnH/series/output/land_lahn_2_flux_pc.npy')
... os.path.exists(
... 'LahnH/series/output/land_lahn_3_flux_pc.npy')
... numpy.load(
... 'LahnH/series/output/land_dill_flux_pc.npy')[13+2, 3]
... numpy.load(
... 'LahnH/series/output/lahn_2_sim_q_mean.npy')[13+4]
True
False
9.0
7.0 |
def endpoint_delete(auth=None, **kwargs):
'''
Delete an endpoint
CLI Example:
.. code-block:: bash
salt '*' keystoneng.endpoint_delete id=3bee4bd8c2b040ee966adfda1f0bfca9
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
return cloud.delete_endpoint(**kwargs) | Delete an endpoint
CLI Example:
.. code-block:: bash
salt '*' keystoneng.endpoint_delete id=3bee4bd8c2b040ee966adfda1f0bfca9 |
def plot_options(cls, obj, percent_size):
"""
Given a holoviews object and a percentage size, apply heuristics
to compute a suitable figure size. For instance, scaling layouts
and grids linearly can result in unwieldy figure sizes when there
are a large number of elements. As ad hoc heuristics are used,
this functionality is kept separate from the plotting classes
themselves.
Used by the IPython Notebook display hooks and the save
utility. Note that this can be overridden explicitly per object
using the fig_size and size plot options.
"""
from .plot import MPLPlot
factor = percent_size / 100.0
obj = obj.last if isinstance(obj, HoloMap) else obj
options = Store.lookup_options(cls.backend, obj, 'plot').options
fig_size = options.get('fig_size', MPLPlot.fig_size)*factor
return dict({'fig_size':fig_size},
**MPLPlot.lookup_options(obj, 'plot').options) | Given a holoviews object and a percentage size, apply heuristics
to compute a suitable figure size. For instance, scaling layouts
and grids linearly can result in unwieldy figure sizes when there
are a large number of elements. As ad hoc heuristics are used,
this functionality is kept separate from the plotting classes
themselves.
Used by the IPython Notebook display hooks and the save
utility. Note that this can be overridden explicitly per object
using the fig_size and size plot options. |
def handle_signature(self, sig, signode):
"""Parse the signature *sig* into individual nodes and append them to the
*signode*. If ValueError is raised, parsing is aborted and the whole
*sig* string is put into a single desc_name node.
The return value is the value that identifies the object. IOW, it is
the identifier that will be used to reference this object, datum,
attribute, proc, etc. It is a tuple of "fullname" (including module and
class(es)) and the classes. See also :py:meth:`add_target_and_index`.
"""
if self._is_attr_like():
sig_match = chpl_attr_sig_pattern.match(sig)
if sig_match is None:
raise ValueError('Signature does not parse: {0}'.format(sig))
func_prefix, name_prefix, name, retann = sig_match.groups()
arglist = None
else:
sig_match = chpl_sig_pattern.match(sig)
if sig_match is None:
raise ValueError('Signature does not parse: {0}'.format(sig))
func_prefix, name_prefix, name, arglist, retann = \
sig_match.groups()
modname = self.options.get(
'module', self.env.temp_data.get('chpl:module'))
classname = self.env.temp_data.get('chpl:class')
if classname:
if name_prefix and name_prefix.startswith(classname):
fullname = name_prefix + name
# class name is given again in the signature
name_prefix = name_prefix[len(classname):].lstrip('.')
elif name_prefix:
# class name is given in the signature, but different
# (shouldn't happen)
fullname = classname + '.' + name_prefix + name
else:
# class name is not given in the signature
fullname = classname + '.' + name
else:
if name_prefix:
classname = name_prefix.rstrip('.')
fullname = name_prefix + name
else:
classname = ''
fullname = name
signode['module'] = modname
signode['class'] = classname
signode['fullname'] = fullname
sig_prefix = self.get_signature_prefix(sig)
if sig_prefix:
signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
# if func_prefix:
# signode += addnodes.desc_addname(func_prefix, func_prefix)
if name_prefix:
signode += addnodes.desc_addname(name_prefix, name_prefix)
anno = self.options.get('annotation')
signode += addnodes.desc_name(name, name)
if not arglist:
# If this needs an arglist, and parens were provided in the
# signature, add a parameterlist. Chapel supports paren-less
# functions and methods, which can act as computed properties. If
# arglist is the empty string, the signature included parens. If
# arglist is None, it did not include parens.
if self.needs_arglist() and arglist is not None:
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
if retann:
signode += addnodes.desc_type(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix
self._pseudo_parse_arglist(signode, arglist)
if retann:
signode += addnodes.desc_type(retann, retann)
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, name_prefix | Parse the signature *sig* into individual nodes and append them to the
*signode*. If ValueError is raised, parsing is aborted and the whole
*sig* string is put into a single desc_name node.
The return value is the value that identifies the object. IOW, it is
the identifier that will be used to reference this object, datum,
attribute, proc, etc. It is a tuple of "fullname" (including module and
class(es)) and the classes. See also :py:meth:`add_target_and_index`. |
def reindex_like(self, other, method=None, copy=True, limit=None,
tolerance=None):
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(axes=self._AXIS_ORDERS, method=method,
copy=copy, limit=limit,
tolerance=tolerance)
return self.reindex(**d) | Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium |
def find_ent_space_price(package, category, size, tier_level):
"""Find the space price for the given category, size, and tier
:param package: The Enterprise (Endurance) product package
:param category: The category of space (endurance, replication, snapshot)
:param size: The size for which a price is desired
:param tier_level: The endurance tier for which a price is desired
:return: Returns the matching price id, or raises ValueError if none is found
"""
if category == 'snapshot':
category_code = 'storage_snapshot_space'
elif category == 'replication':
category_code = 'performance_storage_replication'
else: # category == 'endurance'
category_code = 'performance_storage_space'
level = ENDURANCE_TIERS.get(tier_level)
for item in package['items']:
if int(item['capacity']) != size:
continue
price_id = _find_price_id(item['prices'], category_code, 'STORAGE_TIER_LEVEL', level)
if price_id:
return price_id
raise ValueError("Could not find price for %s storage space" % category) | Find the space price for the given category, size, and tier
:param package: The Enterprise (Endurance) product package
:param category: The category of space (endurance, replication, snapshot)
:param size: The size for which a price is desired
:param tier_level: The endurance tier for which a price is desired
:return: Returns the matching price id, or raises ValueError if none is found |
def fromXml(cls, elem):
"""
Converts the inputted element to a Python object by looking through
the IO addons for the element's tag.
:param elem | <xml.etree.ElementTree.Element>
:return <variant>
"""
if elem is None:
return None
addon = cls.byName(elem.tag)
if not addon:
raise RuntimeError('{0} is not a supported XML tag'.format(elem.tag))
return addon.load(elem) | Converts the inputted element to a Python object by looking through
the IO addons for the element's tag.
:param elem | <xml.etree.ElementTree.Element>
:return <variant> |
def mtr_tr_dense(sz):
"""Series of machine translation models.
All models are trained on sequences of 256 tokens.
You can use the dataset translate_enfr_wmt32k_packed.
154000 steps = 3 epochs.
Args:
sz: an integer
Returns:
a hparams
"""
n = 2 ** sz
hparams = mtf_bitransformer_base()
hparams.d_model = 1024
hparams.max_length = 256
hparams.batch_size = 128
hparams.d_ff = int(4096 * n)
hparams.d_kv = 128
hparams.encoder_num_heads = int(8 * n)
hparams.decoder_num_heads = int(8 * n)
# one epoch for translate_enfr_wmt32k_packed = 51400 steps
hparams.learning_rate_decay_steps = 51400
hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
hparams.mesh_shape = "batch:32"
hparams.label_smoothing = 0.1
hparams.layer_prepostprocess_dropout = 0.1
hparams.attention_dropout = 0.1
hparams.relu_dropout = 0.1
return hparams | Series of machine translation models.
All models are trained on sequences of 256 tokens.
You can use the dataset translate_enfr_wmt32k_packed.
154000 steps = 3 epochs.
Args:
sz: an integer
Returns:
a hparams |
def distance_to_closest(self, ps: Union["Units", List["Point2"], Set["Point2"]]) -> Union[int, float]:
""" This function assumes the 2d distance is meant """
assert ps
closest_distance_squared = math.inf
for p2 in ps:
if not isinstance(p2, Point2):
p2 = p2.position
distance = (self[0] - p2[0]) ** 2 + (self[1] - p2[1]) ** 2
if distance < closest_distance_squared:
closest_distance_squared = distance
return closest_distance_squared ** 0.5 | This function assumes the 2d distance is meant |
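The same minimum-distance computation sketched with plain tuples, without the sc2 `Point2`/`Units` types, to show the math in isolation:
import math

def distance_to_closest_2d(point, points):
    # compare squared distances and take the square root only once at the end
    best = min((point[0] - p[0]) ** 2 + (point[1] - p[1]) ** 2 for p in points)
    return math.sqrt(best)

print(distance_to_closest_2d((0, 0), [(3, 4), (1, 1), (6, 8)]))   # 1.4142...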
def metrics(self):
"""
Calculate and return the metrics.
"""
masterThrp, backupThrp = self.getThroughputs(self.instances.masterId)
r = self.instance_throughput_ratio(self.instances.masterId)
m = [
("{} Monitor metrics:".format(self), None),
("Delta", self.Delta),
("Lambda", self.Lambda),
("Omega", self.Omega),
("instances started", self.instances.started),
("ordered request counts",
{i: r[0] for i, r in self.numOrderedRequests.items()}),
("ordered request durations",
{i: r[1] for i, r in self.numOrderedRequests.items()}),
("master request latencies", self.masterReqLatencies),
("client avg request latencies", {i: self.getLatency(i)
for i in self.instances.ids}),
("throughput", {i: self.getThroughput(i)
for i in self.instances.ids}),
("master throughput", masterThrp),
("total requests", self.totalRequests),
("avg backup throughput", backupThrp),
("master throughput ratio", r)]
return m | Calculate and return the metrics. |
def set_dash(self, dashes, offset=0):
"""Sets the dash pattern to be used by :meth:`stroke`.
A dash pattern is specified by dashes, a list of positive values.
Each value provides the length of alternate "on" and "off"
portions of the stroke.
:obj:`offset` specifies an offset into the pattern
at which the stroke begins.
Each "on" segment will have caps applied
as if the segment were a separate sub-path.
In particular, it is valid to use an "on" length of 0
with :obj:`LINE_CAP_ROUND` or :obj:`LINE_CAP_SQUARE`
in order to distribute dots or squares along a path.
Note: The length values are in user-space units
as evaluated at the time of stroking.
This is not necessarily the same as the user space
at the time of :meth:`set_dash`.
If :obj:`dashes` is empty dashing is disabled.
If it is of length 1 a symmetric pattern is assumed
with alternating on and off portions of the size specified
by the single value.
:param dashes:
A list of floats specifying alternate lengths
of on and off stroke portions.
:type offset: float
:param offset:
An offset into the dash pattern at which the stroke should start.
:raises:
:exc:`CairoError`
if any value in dashes is negative,
or if all values are 0.
The context will be put into an error state.
"""
cairo.cairo_set_dash(
self._pointer, ffi.new('double[]', dashes), len(dashes), offset)
self._check_status() | Sets the dash pattern to be used by :meth:`stroke`.
A dash pattern is specified by dashes, a list of positive values.
Each value provides the length of alternate "on" and "off"
portions of the stroke.
:obj:`offset` specifies an offset into the pattern
at which the stroke begins.
Each "on" segment will have caps applied
as if the segment were a separate sub-path.
In particular, it is valid to use an "on" length of 0
with :obj:`LINE_CAP_ROUND` or :obj:`LINE_CAP_SQUARE`
in order to distribute dots or squares along a path.
Note: The length values are in user-space units
as evaluated at the time of stroking.
This is not necessarily the same as the user space
at the time of :meth:`set_dash`.
If :obj:`dashes` is empty dashing is disabled.
If it is of length 1 a symmetric pattern is assumed
with alternating on and off portions of the size specified
by the single value.
:param dashes:
A list of floats specifying alternate lengths
of on and off stroke portions.
:type offset: float
:param offset:
An offset into the dash pattern at which the stroke should start.
:raises:
:exc:`CairoError`
if any value in dashes is negative,
or if all values are 0.
The context will be put into an error state. |
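A usage sketch with cairocffi, which this wrapper is assumed to belong to: draw one horizontal line dashed with 4 units on and 2 units off:
import cairocffi as cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 120, 40)
ctx = cairo.Context(surface)
ctx.set_line_width(2)
ctx.set_dash([4, 2], 0)    # 4 units on, 2 units off, starting at the pattern's beginning
ctx.move_to(10, 20)
ctx.line_to(110, 20)
ctx.stroke()
surface.write_to_png('dashed.png')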
def search_cloud_integration_deleted_for_facet(self, facet, **kwargs): # noqa: E501
"""Lists the values of a specific facet over the customer's deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_cloud_integration_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
else:
(data) = self.search_cloud_integration_deleted_for_facet_with_http_info(facet, **kwargs) # noqa: E501
return data | Lists the values of a specific facet over the customer's deleted cloud integrations # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_cloud_integration_deleted_for_facet(facet, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str facet: (required)
:param FacetSearchRequestContainer body:
:return: ResponseContainerFacetResponse
If the method is called asynchronously,
returns the request thread. |
def add_permission(self, name):
"""
Adds a permission to the backend, model permission
:param name:
name of the permission: 'can_add','can_edit' etc...
"""
perm = self.find_permission(name)
if perm is None:
try:
perm = self.permission_model(name=name)
perm.save()
return perm
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_PERMISSION.format(str(e)))
return perm | Adds a permission to the backend, model permission
:param name:
name of the permission: 'can_add','can_edit' etc... |
def zeros_coefs(nmax, mmax, coef_type=scalar):
"""Returns a ScalarCoefs object or a VectorCoeffs object where each of the
coefficients is set to 0. The structure is such that *nmax* is th largest
*n* can be in c[n, m], and *mmax* is the largest *m* can be for any *n*.
(See *ScalarCoefs* and *VectorCoefs* for details.)
Examples::
>>> c = spherepy.zeros_coefs(5, 3, coef_type = spherepy.scalar)
>>> c = spherepy.zeros_coefs(5, 3) # same as above
>>> vc = spherepy.zeros_coefs(5, 3, coef_type = spherepy.vector)
Args:
nmax (int): Largest *n* value in the set of modes.
mmax (int): Largest abs(*m*) value in the set of modes.
coef_type (int, optional): Set to 0 for scalar, and 1 for vector.
The default option is scalar. If you would like to return a set of
vector spherical harmonic coefficients, the preferred way to do so
is vc = spherepy.zeros_coefs( 10, 12, coef_type = spherepy.vector).
Returns:
coefs: Returns a ScalarCoefs object if coef_type is either blank or
set to 0. Returns a VectorCoefs object if coef_type = 1.
Raises:
TypeError: If coef_type is anything but 0 or 1.
"""
if(mmax > nmax):
raise ValueError(err_msg['nmax_g_mmax'])
if(coef_type == scalar):
L = (nmax + 1) + mmax * (2 * nmax - mmax + 1)
vec = np.zeros(L, dtype=np.complex128)
return ScalarCoefs(vec, nmax, mmax)
elif(coef_type == vector):
L = (nmax + 1) + mmax * (2 * nmax - mmax + 1)
vec1 = np.zeros(L, dtype=np.complex128)
vec2 = np.zeros(L, dtype=np.complex128)
return VectorCoefs(vec1, vec2, nmax, mmax)
else:
raise TypeError(err_msg['ukn_coef_t']) | Returns a ScalarCoefs object or a VectorCoefs object where each of the
coefficients is set to 0. The structure is such that *nmax* is the largest
*n* can be in c[n, m], and *mmax* is the largest *m* can be for any *n*.
(See *ScalarCoefs* and *VectorCoefs* for details.)
Examples::
>>> c = spherepy.zeros_coefs(5, 3, coef_type = spherepy.scalar)
>>> c = spherepy.zeros_coefs(5, 3) # same as above
>>> vc = spherepy.zeros_coefs(5, 3, coef_type = spherepy.vector)
Args:
nmax (int): Largest *n* value in the set of modes.
mmax (int): Largest abs(*m*) value in the set of modes.
coef_type (int, optional): Set to 0 for scalar, and 1 for vector.
The default option is scalar. If you would like to return a set of
vector spherical harmonic coefficients, the preferred way to do so
is vc = spherepy.zeros_coefs( 10, 12, coef_type = spherepy.vector).
Returns:
coefs: Returns a ScalarCoefs object if coef_type is either blank or
set to 0. Returns a VectorCoefs object if coef_type = 1.
Raises:
TypeError: If coef_type is anything but 0 or 1. |
def xmoe_2d():
"""Two-dimensional hierarchical mixture of 16 experts."""
hparams = xmoe_top_2()
hparams.decoder_layers = ["att", "hmoe"] * 4
hparams.mesh_shape = "b0:2;b1:4"
hparams.outer_batch_size = 4
hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
hparams.moe_num_experts = [4, 4]
return hparams | Two-dimensional hierarchical mixture of 16 experts. |
def constraint_present(name, constraint_id, constraint_type, constraint_options=None, cibname=None):
'''
Ensure that a constraint is created
Should be run on one cluster node only
(there may be races)
Can only be run on a node with a functional pacemaker/corosync
name
Irrelevant, not used (recommended: {{formulaname}}__constraint_present_{{constraint_id}})
constraint_id
name for the constraint (try first to create manually to find out the autocreated name)
constraint_type
constraint type (location, colocation, order)
constraint_options
options for creating the constraint
cibname
use a cached CIB-file named like cibname instead of the live CIB
Example:
.. code-block:: yaml
haproxy_pcs__constraint_present_colocation-vip_galera-haproxy-clone-INFINITY:
pcs.constraint_present:
- constraint_id: colocation-vip_galera-haproxy-clone-INFINITY
- constraint_type: colocation
- constraint_options:
- 'add'
- 'vip_galera'
- 'with'
- 'haproxy-clone'
- cibname: cib_for_haproxy
'''
return _item_present(name=name,
item='constraint',
item_id=constraint_id,
item_type=constraint_type,
create=None,
extra_args=constraint_options,
cibname=cibname) | Ensure that a constraint is created
Should be run on one cluster node only
(there may be races)
Can only be run on a node with a functional pacemaker/corosync
name
Irrelevant, not used (recommended: {{formulaname}}__constraint_present_{{constraint_id}})
constraint_id
name for the constraint (try first to create manually to find out the autocreated name)
constraint_type
constraint type (location, colocation, order)
constraint_options
options for creating the constraint
cibname
use a cached CIB-file named like cibname instead of the live CIB
Example:
.. code-block:: yaml
haproxy_pcs__constraint_present_colocation-vip_galera-haproxy-clone-INFINITY:
pcs.constraint_present:
- constraint_id: colocation-vip_galera-haproxy-clone-INFINITY
- constraint_type: colocation
- constraint_options:
- 'add'
- 'vip_galera'
- 'with'
- 'haproxy-clone'
- cibname: cib_for_haproxy |
def bezier(self, points):
"""Draw a Bezier-curve.
:param points: ex.) ((5, 5), (6, 6), (7, 7))
:type points: list
"""
coordinates = pgmagick.CoordinateList()
for point in points:
x, y = float(point[0]), float(point[1])
coordinates.append(pgmagick.Coordinate(x, y))
self.drawer.append(pgmagick.DrawableBezier(coordinates)) | Draw a Bezier-curve.
:param points: ex.) ((5, 5), (6, 6), (7, 7))
:type points: list |
def delete(self, refobj):
"""Delete the content of the given refobj
:param refobj: the refobj that represents the content that should be deleted
:type refobj: refobj
:returns: None
:rtype: None
:raises: None
"""
refobjinter = self.get_refobjinter()
reference = refobjinter.get_reference(refobj)
if reference:
fullns = cmds.referenceQuery(reference, namespace=True)
cmds.file(removeReference=True, referenceNode=reference)
else:
parentns = common.get_namespace(refobj)
ns = cmds.getAttr("%s.namespace" % refobj)
fullns = ":".join((parentns.rstrip(":"), ns.lstrip(":")))
cmds.namespace(removeNamespace=fullns, deleteNamespaceContent=True) | Delete the content of the given refobj
:param refobj: the refobj that represents the content that should be deleted
:type refobj: refobj
:returns: None
:rtype: None
:raises: None |
def ebrisk(rupgetter, srcfilter, param, monitor):
"""
:param rupgetter:
a RuptureGetter instance
:param srcfilter:
a SourceFilter instance
:param param:
a dictionary of parameters
:param monitor:
:class:`openquake.baselib.performance.Monitor` instance
:returns:
an ArrayWrapper with shape (E, L, T, ...)
"""
riskmodel = param['riskmodel']
E = rupgetter.num_events
L = len(riskmodel.lti)
N = len(srcfilter.sitecol.complete)
e1 = rupgetter.first_event
with monitor('getting assets', measuremem=False):
with datastore.read(srcfilter.filename) as dstore:
assetcol = dstore['assetcol']
assets_by_site = assetcol.assets_by_site()
A = len(assetcol)
getter = getters.GmfGetter(rupgetter, srcfilter, param['oqparam'])
with monitor('getting hazard'):
getter.init() # instantiate the computers
hazard = getter.get_hazard() # sid -> (rlzi, sid, eid, gmv)
mon_risk = monitor('computing risk', measuremem=False)
mon_agg = monitor('aggregating losses', measuremem=False)
events = rupgetter.get_eid_rlz()
# numpy.testing.assert_equal(events['eid'], sorted(events['eid']))
eid2idx = dict(zip(events['eid'], range(e1, e1 + E)))
tagnames = param['aggregate_by']
shape = assetcol.tagcol.agg_shape((E, L), tagnames)
elt_dt = [('eid', U64), ('rlzi', U16), ('loss', (F32, shape[1:]))]
if param['asset_loss_table']:
alt = numpy.zeros((A, E, L), F32)
acc = numpy.zeros(shape, F32) # shape (E, L, T...)
if param['avg_losses']:
losses_by_A = numpy.zeros((A, L), F32)
else:
losses_by_A = 0
# NB: IMT-dependent weights are not supported in ebrisk
times = numpy.zeros(N) # risk time per site_id
num_events_per_sid = 0
epspath = param['epspath']
for sid, haz in hazard.items():
t0 = time.time()
assets_on_sid = assets_by_site[sid]
if len(assets_on_sid) == 0:
continue
num_events_per_sid += len(haz)
weights = getter.weights[haz['rlzi'], 0]
assets_by_taxo = get_assets_by_taxo(assets_on_sid, epspath)
eidx = numpy.array([eid2idx[eid] for eid in haz['eid']]) - e1
haz['eid'] = eidx + e1
with mon_risk:
out = riskmodel.get_output(assets_by_taxo, haz)
with mon_agg:
for a, asset in enumerate(assets_on_sid):
aid = asset['ordinal']
tagi = asset[tagnames] if tagnames else ()
tagidxs = tuple(idx - 1 for idx in tagi)
for lti, lt in enumerate(riskmodel.loss_types):
lratios = out[lt][a]
if lt == 'occupants':
losses = lratios * asset['occupants_None']
else:
losses = lratios * asset['value-' + lt]
if param['asset_loss_table']:
alt[aid, eidx, lti] = losses
acc[(eidx, lti) + tagidxs] += losses
if param['avg_losses']:
losses_by_A[aid, lti] += losses @ weights
times[sid] = time.time() - t0
if hazard:
num_events_per_sid /= len(hazard)
with monitor('building event loss table'):
elt = numpy.fromiter(
((event['eid'], event['rlz'], losses)
for event, losses in zip(events, acc) if losses.sum()), elt_dt)
agg = general.AccumDict(accum=numpy.zeros(shape[1:], F32)) # rlz->agg
for rec in elt:
agg[rec['rlzi']] += rec['loss'] * param['ses_ratio']
res = {'elt': elt, 'agg_losses': agg, 'times': times,
'events_per_sid': num_events_per_sid}
if param['avg_losses']:
res['losses_by_A'] = losses_by_A * param['ses_ratio']
if param['asset_loss_table']:
res['alt_eids'] = alt, events['eid']
return res | :param rupgetter:
a RuptureGetter instance
:param srcfilter:
a SourceFilter instance
:param param:
a dictionary of parameters
:param monitor:
:class:`openquake.baselib.performance.Monitor` instance
:returns:
an ArrayWrapper with shape (E, L, T, ...) |
def _get_seal_key_ntlm2(negotiate_flags, exported_session_key, magic_constant):
"""
3.4.5.3 SEALKEY
Calculates the seal_key used to seal (encrypt) messages. This is for authentication where
NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY has been negotiated. Will weaken the keys
if NTLMSSP_NEGOTIATE_128 is not negotiated, will try NEGOTIATE_56 and then will default
to the 40-bit key
@param negotiate_flags: The negotiate_flags structure sent by the server
@param exported_session_key: A 128-bit session key used to derive signing and sealing keys
@param magic_constant: A constant value set in the MS-NLMP documentation (constants.SignSealConstants)
@return seal_key: Key used to seal messages
"""
if negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_128:
seal_key = exported_session_key
elif negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_56:
seal_key = exported_session_key[:7]
else:
seal_key = exported_session_key[:5]
seal_key = hashlib.md5(seal_key + magic_constant).digest()
return seal_key | 3.4.5.3 SEALKEY
Calculates the seal_key used to seal (encrypt) messages. This is for authentication where
NTLMSSP_NEGOTIATE_EXTENDED_SESSIONSECURITY has been negotiated. Will weaken the keys
if NTLMSSP_NEGOTIATE_128 is not negotiated, will try NEGOTIATE_56 and then will default
to the 40-bit key
@param negotiate_flags: The negotiate_flags structure sent by the server
@param exported_session_key: A 128-bit session key used to derive signing and sealing keys
@param magic_constant: A constant value set in the MS-NLMP documentation (constants.SignSealConstants)
@return seal_key: Key used to seal messages |
def _stop_trial(self, trial, error=False, error_msg=None,
stop_logger=True):
"""Stops this trial.
Stops this trial, releasing all allocated resources. If stopping the
trial fails, the run will be marked as terminated in error, but no
exception will be thrown.
Args:
error (bool): Whether to mark this trial as terminated in error.
error_msg (str): Optional error message.
stop_logger (bool): Whether to shut down the trial logger.
"""
if stop_logger:
trial.close_logger()
if error:
self.set_status(trial, Trial.ERROR)
else:
self.set_status(trial, Trial.TERMINATED)
try:
trial.write_error_log(error_msg)
if hasattr(trial, "runner") and trial.runner:
if (not error and self._reuse_actors
and self._cached_actor is None):
logger.debug("Reusing actor for {}".format(trial.runner))
self._cached_actor = trial.runner
else:
logger.info(
"Destroying actor for trial {}. If your trainable is "
"slow to initialize, consider setting "
"reuse_actors=True to reduce actor creation "
"overheads.".format(trial))
trial.runner.stop.remote()
trial.runner.__ray_terminate__.remote()
except Exception:
logger.exception("Error stopping runner for Trial %s", str(trial))
self.set_status(trial, Trial.ERROR)
finally:
trial.runner = None | Stops this trial.
Stops this trial, releasing all allocated resources. If stopping the
trial fails, the run will be marked as terminated in error, but no
exception will be thrown.
Args:
error (bool): Whether to mark this trial as terminated in error.
error_msg (str): Optional error message.
stop_logger (bool): Whether to shut down the trial logger. |
def p_factor_unary_operators(self, p):
"""
term : SUB factor
| ADD factor
"""
p[0] = p[2]
if p[1] == '-':
p[0] = Instruction('-x', context={'x': p[0]}) | term : SUB factor
| ADD factor |
def prep_cwl(samples, workflow_fn, out_dir, out_file, integrations=None,
add_container_tag=None):
"""Output a CWL description with sub-workflows and steps.
"""
if add_container_tag is None:
container_tags = None
elif add_container_tag.lower() == "quay_lookup":
container_tags = {}
else:
container_tags = collections.defaultdict(lambda: add_container_tag)
step_dir = utils.safe_makedir(os.path.join(out_dir, "steps"))
get_retriever = GetRetriever(integrations, samples)
variables, keyvals = _flatten_samples(samples, out_file, get_retriever)
cur_remotes = _get_cur_remotes(keyvals)
file_estimates = _calc_input_estimates(keyvals, get_retriever)
out = _cwl_workflow_template(variables)
parent_wfs = []
step_parallelism = {}
steps, wfoutputs = workflow_fn(samples)
used_inputs = set([])
for cur in workflow.generate(variables, steps, wfoutputs):
if cur[0] == "step":
_, name, parallel, inputs, outputs, image, programs, disk, cores, no_files = cur
step_file = _write_tool(step_dir, name, inputs, outputs, parallel, image, programs,
file_estimates, disk, cores, samples, cur_remotes, no_files, container_tags)
out["steps"].append(_step_template(name, step_file, inputs, outputs, parallel, step_parallelism))
used_inputs |= set(x["id"] for x in inputs)
elif cur[0] == "expressiontool":
_, name, inputs, outputs, expression, parallel = cur
step_file = _write_expressiontool(step_dir, name, inputs, outputs, expression, parallel)
out["steps"].append(_step_template(name, step_file, inputs, outputs, parallel, step_parallelism))
used_inputs |= set(x["id"] for x in inputs)
elif cur[0] == "upload":
for output in cur[1]:
wf_output = copy.deepcopy(output)
if "outputSource" not in wf_output:
wf_output["outputSource"] = wf_output.pop("source")
wf_output = _clean_record(wf_output)
# Avoid input/output naming clashes
if wf_output["id"] in used_inputs:
wf_output["id"] = "%s_out" % wf_output["id"]
out["outputs"].append(wf_output)
elif cur[0] == "wf_start":
parent_wfs.append(out)
out = _cwl_workflow_template(cur[1])
elif cur[0] == "wf_finish":
_, name, parallel, inputs, outputs, scatter = cur
wf_out_file = "wf-%s.cwl" % name
with open(os.path.join(out_dir, wf_out_file), "w") as out_handle:
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
out = parent_wfs.pop(-1)
out["steps"].append(_step_template(name, wf_out_file, inputs, outputs, parallel,
step_parallelism, scatter))
used_inputs |= set(x["id"] for x in inputs)
else:
raise ValueError("Unexpected workflow value %s" % str(cur))
step_parallelism[name] = parallel
with open(out_file, "w") as out_handle:
out["inputs"] = [x for x in out["inputs"] if x["id"] in used_inputs]
yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)
sample_json = "%s-samples.json" % utils.splitext_plus(out_file)[0]
out_clean = _clean_final_outputs(copy.deepcopy({k: v for k, v in keyvals.items() if k in used_inputs}),
get_retriever)
with open(sample_json, "w") as out_handle:
json.dump(out_clean, out_handle, sort_keys=True, indent=4, separators=(',', ': '))
return out_file, sample_json | Output a CWL description with sub-workflows and steps. |
def charge_parent(self, mol, skip_standardize=False):
"""Return the charge parent of a given molecule.
The charge parent is the uncharged version of the fragment parent.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The charge parent molecule.
:rtype: rdkit.Chem.rdchem.Mol
"""
# TODO: All ionized acids and bases should be neutralised.
if not skip_standardize:
mol = self.standardize(mol)
fragment = self.fragment_parent(mol, skip_standardize=True)
if fragment:
uncharged = self.uncharge(fragment)
# During final standardization, the Reionizer ensures any remaining charges are in the right places
uncharged = self.standardize(uncharged)
return uncharged | Return the charge parent of a given molecule.
The charge parent is the uncharged version of the fragment parent.
:param mol: The input molecule.
:type mol: rdkit.Chem.rdchem.Mol
:param bool skip_standardize: Set to True if mol has already been standardized.
:returns: The charge parent molecule.
:rtype: rdkit.Chem.rdchem.Mol |
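A hypothetical usage sketch (not part of the source): `standardizer` stands in for an instance of whatever class defines charge_parent, and RDKit is assumed to be installed.
from rdkit import Chem

# 'standardizer' is a placeholder instance of the class that owns charge_parent above
mol = Chem.MolFromSmiles('c1ccccc1C(=O)[O-].[Na+]')   # sodium benzoate
parent = standardizer.charge_parent(mol)              # largest fragment, charges neutralised
print(Chem.MolToSmiles(parent))                       # expected: benzoic acid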
def check_expected_infos(self, test_method):
"""
This method is called after each test. It will read decorated
information and check if there are expected infos.
You can set expected infos by decorators :py:func:`.expected_info_messages`
and :py:func:`.allowed_info_messages`.
"""
f = lambda key, default=[]: getattr(test_method, key, default)
expected_info_messages = f(EXPECTED_INFO_MESSAGES)
allowed_info_messages = f(ALLOWED_INFO_MESSAGES)
self.check_infos(expected_info_messages, allowed_info_messages) | This method is called after each test. It will read decorated
information and check if there are expected infos.
You can set expected infos by decorators :py:func:`.expected_info_messages`
and :py:func:`.allowed_info_messages`. |
def server_add(s_name, s_ip, s_state=None, **connection_args):
'''
Add a server
Note: The default server state is ENABLED
CLI Example:
.. code-block:: bash
salt '*' netscaler.server_add 'serverName' 'serverIpAddress'
salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState'
'''
ret = True
if server_exists(s_name, **connection_args):
return False
nitro = _connect(**connection_args)
if nitro is None:
return False
server = NSServer()
server.set_name(s_name)
server.set_ipaddress(s_ip)
if s_state is not None:
server.set_state(s_state)
try:
NSServer.add(nitro, server)
except NSNitroError as error:
log.debug('netscaler module error - NSServer.add() failed: %s', error)
ret = False
_disconnect(nitro)
return ret | Add a server
Note: The default server state is ENABLED
CLI Example:
.. code-block:: bash
salt '*' netscaler.server_add 'serverName' 'serverIpAddress'
salt '*' netscaler.server_add 'serverName' 'serverIpAddress' 'serverState' |
def _set_tcp_keepalive(sock, opts):
'''
Ensure that TCP keepalives are set for the socket.
'''
if hasattr(socket, 'SO_KEEPALIVE'):
if opts.get('tcp_keepalive', False):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, 'SOL_TCP'):
if hasattr(socket, 'TCP_KEEPIDLE'):
tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
if tcp_keepalive_idle > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPIDLE,
int(tcp_keepalive_idle))
if hasattr(socket, 'TCP_KEEPCNT'):
tcp_keepalive_cnt = opts.get('tcp_keepalive_cnt', -1)
if tcp_keepalive_cnt > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPCNT,
int(tcp_keepalive_cnt))
if hasattr(socket, 'TCP_KEEPINTVL'):
tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
if tcp_keepalive_intvl > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPINTVL,
int(tcp_keepalive_intvl))
if hasattr(socket, 'SIO_KEEPALIVE_VALS'):
# Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
# TCP_KEEPINTVL. Instead, it has its own proprietary
# SIO_KEEPALIVE_VALS.
tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
# Windows doesn't support changing something equivalent to
# TCP_KEEPCNT.
if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
# Windows defaults may be found by using the link below.
# Search for 'KeepAliveTime' and 'KeepAliveInterval'.
# https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
# If one value is set and the other isn't, we still need
# to send both values to SIO_KEEPALIVE_VALS and they both
# need to be valid. So in that case, use the Windows
# default.
if tcp_keepalive_idle <= 0:
tcp_keepalive_idle = 7200
if tcp_keepalive_intvl <= 0:
tcp_keepalive_intvl = 1
# The values expected are in milliseconds, so multiply by
# 1000.
sock.ioctl(socket.SIO_KEEPALIVE_VALS, (
1, int(tcp_keepalive_idle * 1000),
int(tcp_keepalive_intvl * 1000)))
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0) | Ensure that TCP keepalives are set for the socket. |
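As a rough illustration of how the opts dictionary drives the socket options, here is a minimal sketch; it assumes the helper above is importable, and the option values are illustrative.
import socket

opts = {
    'tcp_keepalive': True,       # turn SO_KEEPALIVE on
    'tcp_keepalive_idle': 300,   # idle seconds before the first probe
    'tcp_keepalive_cnt': 5,      # unacknowledged probes before the connection drops
    'tcp_keepalive_intvl': 30,   # seconds between probes
}

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_set_tcp_keepalive(sock, opts)   # applies whichever settings the platform supports
sock.connect(('example.com', 80))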
def validation_step(self, Xi, yi, **fit_params):
"""Perform a forward step using batched data and return the
resulting loss.
The module is set to be in evaluation mode (e.g. dropout is
not applied).
Parameters
----------
Xi : input data
A batch of the input data.
yi : target data
A batch of the target data.
**fit_params : dict
Additional parameters passed to the ``forward`` method of
the module and to the ``self.train_split`` call.
"""
self.module_.eval()
with torch.no_grad():
y_pred = self.infer(Xi, **fit_params)
loss = self.get_loss(y_pred, yi, X=Xi, training=False)
return {
'loss': loss,
'y_pred': y_pred,
} | Perform a forward step using batched data and return the
resulting loss.
The module is set to be in evaluation mode (e.g. dropout is
not applied).
Parameters
----------
Xi : input data
A batch of the input data.
yi : target data
A batch of the target data.
**fit_params : dict
Additional parameters passed to the ``forward`` method of
the module and to the ``self.train_split`` call. |
def combine_dictionaries(a, b):
"""
Returns the combined dictionary; a's values are preferentially chosen.
"""
c = {}
for key in list(b.keys()): c[key]=b[key]
for key in list(a.keys()): c[key]=a[key]
return c | Returns the combined dictionary; a's values are preferentially chosen.
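A short doctest-style example (added for illustration) makes the precedence concrete:
>>> combine_dictionaries({'a': 1}, {'a': 2, 'b': 3})
{'a': 1, 'b': 3}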
def request_generic(self, act, coro, perform, complete):
"""
Performs an overlapped request (via `perform` callable) and saves
the token and the (`overlapped`, `perform`, `complete`) trio.
"""
overlapped = OVERLAPPED()
overlapped.object = act
self.add_token(act, coro, (overlapped, perform, complete))
rc, nbytes = perform(act, overlapped)
completion_key = c_long(0)
if rc == 0:
# ah geez, it didn't go into the iocp, we have a result!
pass
# ok this is weird, apparently this doesn't need to be requeued
# - need to investigate why (TODO)
#~ PostQueuedCompletionStatus(
#~ self.iocp, # HANDLE CompletionPort
#~ nbytes, # DWORD dwNumberOfBytesTransferred
#~ byref(completion_key), # ULONG_PTR dwCompletionKey
#~ overlapped # LPOVERLAPPED lpOverlapped
#~ )
elif rc != WSA_IO_PENDING:
self.remove_token(act)
raise SocketError(rc, "%s on %r" % (ctypes.FormatError(rc), act)) | Performs an overlapped request (via `perform` callable) and saves
the token and the (`overlapped`, `perform`, `complete`) trio. |
def compute_absolute_error(self, predicted_data, record, dataframe_record):
'''Calculate the absolute error for this case.'''
absolute_error = abs(record['DDG'] - predicted_data[self.ddg_analysis_type])
dataframe_record['AbsoluteError'] = absolute_error | Calculate the absolute error for this case. |
def teleport(self, agent_name, location=None, rotation=None):
"""Teleports the target agent to any given location, and applies a specific rotation.
Args:
agent_name (str): The name of the agent to teleport.
location (np.ndarray or list): XYZ coordinates (in meters) for the agent to be teleported to.
If no location is given, it isn't teleported, but may still be rotated. Defaults to None.
rotation (np.ndarray or list): A new rotation target for the agent.
If no rotation is given, it isn't rotated, but may still be teleported. Defaults to None.
"""
self.agents[agent_name].teleport(location * 100, rotation) # * 100 to convert m to cm
self.tick() | Teleports the target agent to any given location, and applies a specific rotation.
Args:
agent_name (str): The name of the agent to teleport.
location (np.ndarray or list): XYZ coordinates (in meters) for the agent to be teleported to.
If no location is given, it isn't teleported, but may still be rotated. Defaults to None.
rotation (np.ndarray or list): A new rotation target for the agent.
If no rotation is given, it isn't rotated, but may still be teleported. Defaults to None. |
def close(self):
"""
Closes the project, but keeps information on disk
"""
project_nodes_id = set([n.id for n in self.nodes])
for module in self.compute():
module_nodes_id = set([n.id for n in module.instance().nodes])
# We close the project only for the modules using it
if len(module_nodes_id & project_nodes_id):
yield from module.instance().project_closing(self)
yield from self._close_and_clean(False)
for module in self.compute():
module_nodes_id = set([n.id for n in module.instance().nodes])
# We close the project only for the modules using it
if len(module_nodes_id & project_nodes_id):
yield from module.instance().project_closed(self)
try:
if os.path.exists(self.tmp_working_directory()):
shutil.rmtree(self.tmp_working_directory())
except OSError:
pass | Closes the project, but keeps information on disk
def __clear_buffer_watch(self, bw):
"""
Used by L{dont_watch_buffer} and L{dont_stalk_buffer}.
@type bw: L{BufferWatch}
@param bw: Buffer watch identifier.
"""
# Get the PID and the start and end addresses of the buffer.
pid = bw.pid
start = bw.start
end = bw.end
# Get the base address and size in pages required for the buffer.
base = MemoryAddresses.align_address_to_page_start(start)
limit = MemoryAddresses.align_address_to_page_end(end)
pages = MemoryAddresses.get_buffer_size_in_pages(start, end - start)
# For each page, get the breakpoint and it's condition object.
# For each condition, remove the buffer.
# For each breakpoint, if no buffers are on watch, erase it.
cset = set() # condition objects
page_addr = base
pageSize = MemoryAddresses.pageSize
while page_addr < limit:
if self.has_page_breakpoint(pid, page_addr):
bp = self.get_page_breakpoint(pid, page_addr)
condition = bp.get_condition()
if condition not in cset:
if not isinstance(condition, _BufferWatchCondition):
# this shouldn't happen unless you tinkered with it
# or defined your own page breakpoints manually.
continue
cset.add(condition)
condition.remove(bw)
if condition.count() == 0:
try:
self.erase_page_breakpoint(pid, bp.get_address())
except WindowsError:
msg = "Cannot remove page breakpoint at address %s"
msg = msg % HexDump.address( bp.get_address() )
warnings.warn(msg, BreakpointWarning)
page_addr = page_addr + pageSize | Used by L{dont_watch_buffer} and L{dont_stalk_buffer}.
@type bw: L{BufferWatch}
@param bw: Buffer watch identifier. |
def get_roles(self):
"""
Returns:
Role instances according to task definition.
"""
if self.role.exist:
# return explicitly selected role
return [self.role]
else:
roles = []
if self.role_query_code:
# use given "role_query_code"
roles = RoleModel.objects.filter(**self.role_query_code)
elif self.unit.exist:
# get roles from selected unit or sub-units of it
if self.recursive_units:
# this returns a list, we're converting it to a Role generator!
roles = (RoleModel.objects.get(k) for k in
UnitModel.get_role_keys(self.unit.key))
else:
roles = RoleModel.objects.filter(unit=self.unit)
elif self.get_roles_from:
# get roles from selected predefined "get_roles_from" method
return ROLE_GETTER_METHODS[self.get_roles_from](RoleModel)
if self.abstract_role.exist and roles:
# apply abstract_role filtering on roles we got
if isinstance(roles, (list, types.GeneratorType)):
roles = [a for a in roles if a.abstract_role.key == self.abstract_role.key]
else:
roles = roles.filter(abstract_role=self.abstract_role)
else:
roles = RoleModel.objects.filter(abstract_role=self.abstract_role)
return roles | Returns:
Role instances according to task definition. |
def list_pr_comments(repo: GithubRepository, pull_id: int
) -> List[Dict[str, Any]]:
"""
References:
https://developer.github.com/v3/issues/comments/#list-comments-on-an-issue
"""
url = ("https://api.github.com/repos/{}/{}/issues/{}/comments"
"?access_token={}".format(repo.organization,
repo.name,
pull_id,
repo.access_token))
response = requests.get(url)
if response.status_code != 200:
raise RuntimeError(
'Comments get failed. Code: {}. Content: {}.'.format(
response.status_code, response.content))
payload = json.JSONDecoder().decode(response.content.decode())
return payload | References:
https://developer.github.com/v3/issues/comments/#list-comments-on-an-issue |
def get_http_method_arg_name(self):
"""
Return the HTTP function to call and the params/data argument name
"""
if self.method == 'get':
arg_name = 'params'
else:
arg_name = 'data'
return getattr(requests, self.method), arg_name | Return the HTTP function to call and the params/data argument name |
def dateadd(value: fields.DateTime(),
addend: fields.Int(validate=Range(min=1)),
unit: fields.Str(validate=OneOf(['minutes', 'days']))='days'):
"""Add a value to a date."""
value = value or dt.datetime.utcnow()
if unit == 'minutes':
delta = dt.timedelta(minutes=addend)
else:
delta = dt.timedelta(days=addend)
result = value + delta
return {'result': result} | Add a value to a date. |
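The webargs field annotations do not affect a plain Python call, so the behaviour looks roughly like this sketch (assuming the function above is in scope):
import datetime as dt

print(dateadd(dt.datetime(2020, 1, 1), 3))              # {'result': datetime.datetime(2020, 1, 4, 0, 0)}
print(dateadd(dt.datetime(2020, 1, 1), 90, 'minutes'))  # {'result': datetime.datetime(2020, 1, 1, 1, 30)}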
def _profile_module(self):
"""Runs statistical profiler on a module."""
with open(self._run_object, 'rb') as srcfile, _StatProfiler() as prof:
code = compile(srcfile.read(), self._run_object, 'exec')
prof.base_frame = inspect.currentframe()
try:
exec(code, self._globs, None)
except SystemExit:
pass
call_tree = prof.call_tree
return {
'objectName': self._object_name,
'sampleInterval': _SAMPLE_INTERVAL,
'runTime': prof.run_time,
'callStats': call_tree,
'totalSamples': call_tree.get('sampleCount', 0),
'timestamp': int(time.time())
} | Runs statistical profiler on a module. |
def diff(new, old):
"""
Compute the difference in items of two revisioned collections. If only
`new' is specified, it is assumed it is not an update. If both are set,
the removed items are returned first. Otherwise, the updated and edited
ones are returned.
:param set new: Set of new objects
:param set old: Set of old objects
:return: A tuple consisting of `(added, removed, is_update)`.
:rtype: tuple
"""
if old is not None:
is_update = True
removed = set(new.removed(old))
updated = set(new.updated(old))
else:
is_update = False
updated = new
removed = set()
return updated, removed, is_update | Compute the difference in items of two revisioned collections. If only
`new' is specified, it is assumed it is not an update. If both are set,
the removed items are returned first. Otherwise, the updated and edited
ones are returned.
:param set new: Set of new objects
:param set old: Set of old objects
:return: A tuple consisting of `(added, removed, is_update)`.
:rtype: tuple |
def get_translation_lookup(identifier, field, value):
"""
Mapper that takes a language field, its value and returns the
related lookup for Translation model.
"""
# Split by transformers
parts = field.split("__")
# Store transformers
transformers = parts[1:] if len(parts) > 1 else None
# defaults to "title" and default language
field_name = parts[0]
language = get_fallback_language()
name_parts = parts[0].split("_")
if len(name_parts) > 1:
supported_languages = get_supported_languages()
last_part = name_parts[-1]
if last_part in supported_languages:
# title_with_underscore_fr?
field_name = "_".join(name_parts[:-1])
language = last_part
else:
# title_with_underscore?
# Let's use default language
field_name = "_".join(name_parts)
value_lookup = (
"field_value"
if transformers is None
else "field_value__%s" % "__".join(transformers)
)
lookup = {"field_name": field_name, "identifier": identifier, "language": language}
lookup[value_lookup] = value
return lookup | Mapper that takes a language field, its value and returns the
related lookup for Translation model. |
def _augment_url_with_version(auth_url):
"""Optionally augment auth_url path with version suffix.
Check if path component already contains version suffix and if it does
not, append version suffix to the end of path, not erasing the previous
path contents, since keystone web endpoint (like /identity) could be
there. Keystone version needs to be added to endpoint because as of Kilo,
the identity URLs returned by Keystone might no longer contain API
versions, leaving the version choice up to the user.
"""
if has_in_url_path(auth_url, ["/v2.0", "/v3"]):
return auth_url
if get_keystone_version() >= 3:
return url_path_append(auth_url, "/v3")
else:
return url_path_append(auth_url, "/v2.0") | Optionally augment auth_url path with version suffix.
Check if path component already contains version suffix and if it does
not, append version suffix to the end of path, not erasing the previous
path contents, since keystone web endpoint (like /identity) could be
there. Keystone version needs to be added to endpoint because as of Kilo,
the identity URLs returned by Keystone might no longer contain API
versions, leaving the version choice up to the user. |
def django(line):
'''
>>> import pprint
>>> input_line1 = '[23/Aug/2017 11:35:25] INFO [app.middleware_log_req:50]View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }'
>>> output_line1 = django(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'loglevel': 'INFO',
'logname': '[app.middleware_log_req:50]',
'message': 'View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }',
'timestamp': '2017-08-23T11:35:25'},
'level': 'INFO',
'timestamp': '2017-08-23T11:35:25'}
>>> input_line2 = '[22/Sep/2017 06:32:15] INFO [app.function:6022] {"UUID": "c47f3530-9f5f-11e7-a559-917d011459f7", "timestamp":1506061932546, "misc": {"status": 200, "ready_state": 4, "end_time_ms": 1506061932546, "url": "/api/function?", "start_time_ms": 1506061932113, "response_length": 31, "status_message": "OK", "request_time_ms": 433}, "user": "root", "host_url": "localhost:8888", "message": "ajax success"}'
>>> output_line2 = django(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'loglevel': 'INFO',
'logname': '[app.function:6022]',
'message': {u'UUID': u'c47f3530-9f5f-11e7-a559-917d011459f7',
u'host_url': u'localhost:8888',
u'message': u'ajax success',
u'misc': {u'end_time_ms': 1506061932546L,
u'ready_state': 4,
u'request_time_ms': 433,
u'response_length': 31,
u'start_time_ms': 1506061932113L,
u'status': 200,
u'status_message': u'OK',
u'url': u'/api/function?'},
u'timestamp': 1506061932546L,
u'user': u'root'},
'timestamp': '2017-09-22T06:32:15'},
'level': 'INFO',
'timestamp': '2017-09-22T06:32:15'}
Case2:
[18/Sep/2017 05:40:36] ERROR [app.apps:78] failed to get the record, collection = Collection(Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True, serverselectiontimeoutms=3000), u'collection_cache'), u'function_dummy_version')
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/mongo_cache/mongocache.py", line 70, in __getitem__
result = self.collection.find_one({"_id": key})
OperationFailure: not authorized on collection_cache to execute command { find: "function", filter: { _id: "zydelig-cosine-20" }, limit: 1, singleBatch: true }
'''
#TODO we need to handle case2 logs
data = {}
log = re.findall(r'^(\[\d+/\w+/\d+ \d+:\d+:\d+\].*)', line)
if len(log) == 1:
data['timestamp'] = datetime.datetime.strptime(re.findall(r'(\d+/\w+/\d+ \d+:\d+:\d+)',\
log[0])[0],"%d/%b/%Y %H:%M:%S").isoformat()
data['loglevel'] = re.findall('[A-Z]+', log[0])[1]
data['logname'] = re.findall('\[\D+.\w+:\d+\]', log[0])[0]
message = re.findall('\{.+\}', log[0])
try:
if len(message) > 0:
message = json.loads(message[0])
else:
message = re.split(']', log[0])
message = ''.join(message[2:])
except ValueError:
message = re.split(']', log[0])
message = ''.join(message[2:])
data['message'] = message
return dict(
timestamp=data['timestamp'],
level=data['loglevel'],
data=data,
)
else:
return dict(
timestamp=datetime.datetime.isoformat(datetime.datetime.utcnow()),
data={'raw': line}
) | >>> import pprint
>>> input_line1 = '[23/Aug/2017 11:35:25] INFO [app.middleware_log_req:50]View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }'
>>> output_line1 = django(input_line1)
>>> pprint.pprint(output_line1)
{'data': {'loglevel': 'INFO',
'logname': '[app.middleware_log_req:50]',
'message': 'View func called:{"exception": null,"processing_time": 0.00011801719665527344, "url": "<url>",host": "localhost", "user": "testing", "post_contents": "", "method": "POST" }',
'timestamp': '2017-08-23T11:35:25'},
'level': 'INFO',
'timestamp': '2017-08-23T11:35:25'}
>>> input_line2 = '[22/Sep/2017 06:32:15] INFO [app.function:6022] {"UUID": "c47f3530-9f5f-11e7-a559-917d011459f7", "timestamp":1506061932546, "misc": {"status": 200, "ready_state": 4, "end_time_ms": 1506061932546, "url": "/api/function?", "start_time_ms": 1506061932113, "response_length": 31, "status_message": "OK", "request_time_ms": 433}, "user": "root", "host_url": "localhost:8888", "message": "ajax success"}'
>>> output_line2 = django(input_line2)
>>> pprint.pprint(output_line2)
{'data': {'loglevel': 'INFO',
'logname': '[app.function:6022]',
'message': {u'UUID': u'c47f3530-9f5f-11e7-a559-917d011459f7',
u'host_url': u'localhost:8888',
u'message': u'ajax success',
u'misc': {u'end_time_ms': 1506061932546L,
u'ready_state': 4,
u'request_time_ms': 433,
u'response_length': 31,
u'start_time_ms': 1506061932113L,
u'status': 200,
u'status_message': u'OK',
u'url': u'/api/function?'},
u'timestamp': 1506061932546L,
u'user': u'root'},
'timestamp': '2017-09-22T06:32:15'},
'level': 'INFO',
'timestamp': '2017-09-22T06:32:15'}
Case2:
[18/Sep/2017 05:40:36] ERROR [app.apps:78] failed to get the record, collection = Collection(Database(MongoClient(host=['localhost:27017'], document_class=dict, tz_aware=False, connect=True, serverselectiontimeoutms=3000), u'collection_cache'), u'function_dummy_version')
Traceback (most recent call last):
File "/usr/local/lib/python2.7/dist-packages/mongo_cache/mongocache.py", line 70, in __getitem__
result = self.collection.find_one({"_id": key})
OperationFailure: not authorized on collection_cache to execute command { find: "function", filter: { _id: "zydelig-cosine-20" }, limit: 1, singleBatch: true } |
def ekopw(fname):
"""
Open an existing E-kernel file for writing.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekopw_c.html
:param fname: Name of EK file.
:type fname: str
:return: Handle attached to EK file.
:rtype: int
"""
fname = stypes.stringToCharP(fname)
handle = ctypes.c_int()
libspice.ekopw_c(fname, ctypes.byref(handle))
return handle.value | Open an existing E-kernel file for writing.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekopw_c.html
:param fname: Name of EK file.
:type fname: str
:return: Handle attached to EK file.
:rtype: int |
def check_read_inputs(self, sampfrom, sampto, channels, physical,
smooth_frames, return_res):
"""
Ensure that input read parameters (from rdsamp) are valid for
the record
"""
# Data Type Check
if not hasattr(sampfrom, '__index__'):
raise TypeError('sampfrom must be an integer')
if not hasattr(sampto, '__index__'):
raise TypeError('sampto must be an integer')
if not isinstance(channels, list):
raise TypeError('channels must be a list of integers')
# Duration Ranges
if sampfrom < 0:
raise ValueError('sampfrom must be a non-negative integer')
if sampfrom > self.sig_len:
raise ValueError('sampfrom must be shorter than the signal length')
if sampto < 0:
raise ValueError('sampto must be a non-negative integer')
if sampto > self.sig_len:
raise ValueError('sampto must be shorter than the signal length')
if sampto <= sampfrom:
raise ValueError('sampto must be greater than sampfrom')
# Channel Ranges
if len(channels):
if min(channels) < 0:
raise ValueError('Input channels must all be non-negative integers')
if max(channels) > self.n_sig - 1:
raise ValueError('Input channels must all be lower than the total number of channels')
if return_res not in [64, 32, 16, 8]:
raise ValueError("return_res must be one of the following: 64, 32, 16, 8")
if physical is True and return_res == 8:
raise ValueError("return_res must be one of the following when physical is True: 64, 32, 16")
# Cannot expand multiple samples/frame for multi-segment records
if isinstance(self, MultiRecord):
if smooth_frames is False:
raise ValueError('This package version cannot expand all samples when reading multi-segment records. Must enable frame smoothing.') | Ensure that input read parameters (from rdsamp) are valid for
the record |
def switch_window(self, window_id: int):
"""
Switches currently active tmux window for given task. 0 is the default window
Args:
window_id: integer id of tmux window to use
"""
# windows are numbered sequentially 0, 1, 2, ...
# create any missing windows and make them point to the same directory
if window_id not in self.tmux_available_window_ids:
for i in range(max(self.tmux_available_window_ids)+1, window_id+1):
self._run_raw(f'tmux new-window -t {self.tmux_session} -d')
tmux_window = self.tmux_session + ':' + str(i)
cmd = shlex.quote(f'cd {self.taskdir}')
tmux_cmd = f'tmux send-keys -t {tmux_window} {cmd} Enter'
self._run_raw(tmux_cmd)
self.tmux_available_window_ids.append(i)
self.tmux_window_id = window_id | Switches currently active tmux window for given task. 0 is the default window
Args:
window_id: integer id of tmux window to use |
def transfer_config_dict(soap_object, data_dict):
"""
This is a utility function used in the certification modules to transfer
the data dicts above to SOAP objects. This avoids repetition and allows
us to store all of our variable configuration here rather than in
each certification script.
"""
for key, val in data_dict.items():
# Transfer each key to the matching attribute on the SOAP object.
setattr(soap_object, key, val) | This is a utility function used in the certification modules to transfer
the data dicts above to SOAP objects. This avoids repetition and allows
us to store all of our variable configuration here rather than in
each certification script. |
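A minimal sketch of the intent, with a plain namespace standing in for the SOAP object (the attribute names are illustrative only):
from types import SimpleNamespace

soap_object = SimpleNamespace()                 # stand-in for a real SOAP client object
data_dict = {'MailingDate': '2024-01-15', 'ImageType': 'PDF'}   # illustrative keys

transfer_config_dict(soap_object, data_dict)
print(soap_object.MailingDate)                  # '2024-01-15'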
def version_range(guid, version, before=None, app_versions=None):
"""Returns all values after (and including) `version` for the app `guid`"""
if app_versions is None:
app_versions = validator.constants.APPROVED_APPLICATIONS
app_key = None
# Support for shorthand instead of full GUIDs.
for app_guid, app_name in APPLICATIONS.items():
if app_name == guid:
guid = app_guid
break
for key in app_versions.keys():
if app_versions[key]['guid'] == guid:
app_key = key
break
if not app_key or version not in app_versions[app_key]['versions']:
raise Exception('Bad GUID or version provided for version range: %s'
% version)
all_versions = app_versions[app_key]['versions']
version_pos = all_versions.index(version)
before_pos = None
if before is not None and before in all_versions:
before_pos = all_versions.index(before)
return all_versions[version_pos:before_pos] | Returns all values after (and including) `version` for the app `guid` |
def pip_install(self, reqs):
"""Install dependencies into this env by calling pip in a subprocess"""
if not reqs:
return
log.info('Calling pip to install %s', reqs)
check_call([
sys.executable, '-m', 'pip', 'install', '--ignore-installed',
'--prefix', self.path] + list(reqs)) | Install dependencies into this env by calling pip in a subprocess |
def slugable(self):
"""
A node is slugable in the following cases:
1 - Node doesn't have children.
2 - Node has children but its page doesn't have a regex.
3 - Node has children, its page has regex but it doesn't show it.
4 - Node has children, its page shows its regex and node has a default value for regex.
5 - Node doesn't have a page but it isn't hidden in the url.
"""
if self.page:
if self.is_leaf_node():
return True
if not self.is_leaf_node() and not self.page.regex:
return True
if not self.is_leaf_node() and self.page.regex and not self.page.show_regex:
return True
if not self.is_leaf_node() and self.page.regex and self.page.show_regex and self.value_regex:
return True
elif not self.is_leaf_node() and not self.hide_in_url:
return True
return False | A node is slugable in the following cases:
1 - Node doesn't have children.
2 - Node has children but its page doesn't have a regex.
3 - Node has children, its page has regex but it doesn't show it.
4 - Node has children, its page shows its regex and node has a default value for regex.
5 - Node doesn't have a page but it isn't hidden in the url.
def setup_plugins(extra_plugin_dir=None):
"""Loads any additional plugins."""
if os.path.isdir(PLUGINS_DIR):
load_plugins([PLUGINS_DIR])
if extra_plugin_dir:
load_plugins(extra_plugin_dir) | Loads any additional plugins. |
def authenticate_user(username, password):
"""
Authenticate a username and password against our database
:param username:
:param password:
:return: authenticated username
"""
user_model = Query()
user = db.get(user_model.username == username)
if not user:
logger.warning("User %s not found", username)
return False
if user['password'] == hash_password(password, user.get('salt')):
return user['username']
return False | Authenticate a username and password against our database
:param username:
:param password:
:return: authenticated username |
def mft_mirror_offset(self):
"""
Returns:
int: Mirror MFT Table offset from the beginning of the partition \
in bytes
"""
return self.bpb.bytes_per_sector * \
self.bpb.sectors_per_cluster * self.extended_bpb.mft_mirror_cluster | Returns:
int: Mirror MFT Table offset from the beginning of the partition \
in bytes |
def __process_by_ccore(self):
"""!
@brief Performs processing using CCORE (C/C++ part of pyclustering library).
"""
ccore_metric = metric_wrapper.create_instance(self.__metric)
self.__score = wrapper.silhoeutte(self.__data, self.__clusters, ccore_metric.get_pointer()) | !
@brief Performs processing using CCORE (C/C++ part of pyclustering library). |
def _get_boolean(data, position, dummy0, dummy1):
"""Decode a BSON true/false to python True/False."""
end = position + 1
return data[position:end] == b"\x01", end | Decode a BSON true/false to python True/False. |
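For instance (doctest-style, assuming the function above is in scope; the two dummy arguments are ignored):
>>> _get_boolean(b"\x01", 0, None, None)
(True, 1)
>>> _get_boolean(b"\x00", 0, None, None)
(False, 1)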
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if _RE_PATTERN_INCLUDE.match(elided):
return elided
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
# Replace quoted strings and digit separators. Both single quotes
# and double quotes are processed in the same loop, otherwise
# nested quotes wouldn't work.
collapsed = ''
while True:
# Find the first quote character
match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
if not match:
collapsed += elided
break
head, quote, tail = match.groups()
if quote == '"':
# Collapse double quoted strings
second_quote = tail.find('"')
if second_quote >= 0:
collapsed += head + '""'
elided = tail[second_quote + 1:]
else:
# Unmatched double quote, don't bother processing the rest
# of the line since this is probably a multiline string.
collapsed += elided
break
else:
# Found single quote, check nearby text to eliminate digit separators.
#
# There is no special handling for floating point here, because
# the integer/fractional/exponent parts would all be parsed
# correctly as long as there are digits on both sides of the
# separator. So we are fine as long as we don't see something
# like "0.'3" (gcc 4.9.0 will not allow this literal).
if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
collapsed += head + match_literal.group(1).replace("'", '')
elided = match_literal.group(2)
else:
second_quote = tail.find('\'')
if second_quote >= 0:
collapsed += head + "''"
elided = tail[second_quote + 1:]
else:
# Unmatched single quote
collapsed += elided
break
return collapsed | Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings. |
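A doctest-style illustration of the collapsing, assuming the surrounding cpplint helpers (Match, Search and the include-line regex) are in scope:
>>> _CollapseStrings('printf("%d", 0x1F);')
'printf("", 0x1F);'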
def get_variables(self) -> Set[str]:
"""Find all the variables specified in a format string.
This returns a list of all the different variables specified in a format string,
that is the variables inside the braces.
"""
variables = set()
for cmd in self._cmd:
for var in self.__formatter.parse(cmd):
logger.debug("Checking variable: %s", var)
# creates and requires are special class values
if var[1] is not None and var[1] not in ["creates", "requires"]:
variables.add(var[1])
return variables | Find all the variables specified in a format string.
This returns a list of all the different variables specified in a format string,
that is the variables inside the braces. |
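The method relies on string.Formatter().parse, which yields (literal_text, field_name, format_spec, conversion) tuples; a standalone sketch of that behaviour (the command string and names are illustrative):
import string

formatter = string.Formatter()
cmd = "convert {input} {output} --log {creates}"

found = set()
for literal_text, field_name, format_spec, conversion in formatter.parse(cmd):
    # 'creates' and 'requires' are treated as special keys and skipped, as above
    if field_name is not None and field_name not in ["creates", "requires"]:
        found.add(field_name)

print(found)  # {'input', 'output'}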
def can_user_access_build(param_name):
"""Determines if the current user can access the build ID in the request.
Args:
param_name: Parameter name to use for getting the build ID from the
request. Will fetch from GET or POST requests.
Returns:
The build the user has access to.
"""
build_id = (
request.args.get(param_name, type=int) or
request.form.get(param_name, type=int) or
request.json[param_name])
if not build_id:
logging.debug('Build ID in param_name=%r was missing', param_name)
abort(400)
ops = operations.UserOps(current_user.get_id())
build, user_is_owner = ops.owns_build(build_id)
if not build:
logging.debug('Could not find build_id=%r', build_id)
abort(404)
if current_user.is_authenticated() and not user_is_owner:
# Assume the user should be able to access the build but can't because
# the cache is out of date. This forces the cache to repopulate, any
# outstanding user invitations to be completed, hopefully resulting in
# the user having access to the build.
ops.evict()
claim_invitations(current_user)
build, user_is_owner = ops.owns_build(build_id)
if not user_is_owner:
if current_user.is_authenticated() and current_user.superuser:
pass
elif request.method != 'GET':
logging.debug('No way to log in user via modifying request')
abort(403)
elif build.public:
pass
elif current_user.is_authenticated():
logging.debug('User does not have access to this build')
abort(flask.Response('You cannot access this build', 403))
else:
logging.debug('Redirecting user to login to get build access')
abort(login.unauthorized())
elif not login_fresh():
logging.debug('User login is old; forcing refresh')
abort(login.needs_refresh())
return build | Determines if the current user can access the build ID in the request.
Args:
param_name: Parameter name to use for getting the build ID from the
request. Will fetch from GET or POST requests.
Returns:
The build the user has access to. |
def to_python(self, value):
"""Convert the value to the appropriate timezone."""
# pylint: disable=newstyle
value = super(LinkedTZDateTimeField, self).to_python(value)
if not value:
return value
return value.astimezone(self.timezone) | Convert the value to the appropriate timezone. |
def _get_metricsmgr_cmd(self, metricsManagerId, sink_config_file, port):
''' get the command to start the metrics manager processes '''
metricsmgr_main_class = 'org.apache.heron.metricsmgr.MetricsManager'
metricsmgr_cmd = [os.path.join(self.heron_java_home, 'bin/java'),
# We could not rely on the default -Xmx setting, which could be very big,
# for instance, the default -Xmx in Twitter mesos machine is around 18GB
'-Xmx1024M',
'-XX:+PrintCommandLineFlags',
'-verbosegc',
'-XX:+PrintGCDetails',
'-XX:+PrintGCTimeStamps',
'-XX:+PrintGCDateStamps',
'-XX:+PrintGCCause',
'-XX:+UseGCLogFileRotation',
'-XX:NumberOfGCLogFiles=5',
'-XX:GCLogFileSize=100M',
'-XX:+PrintPromotionFailure',
'-XX:+PrintTenuringDistribution',
'-XX:+PrintHeapAtGC',
'-XX:+HeapDumpOnOutOfMemoryError',
'-XX:+UseConcMarkSweepGC',
'-XX:+PrintCommandLineFlags',
'-Xloggc:log-files/gc.metricsmgr.log',
'-Djava.net.preferIPv4Stack=true',
'-cp',
self.metrics_manager_classpath,
metricsmgr_main_class,
'--id=' + metricsManagerId,
'--port=' + str(port),
'--topology=' + self.topology_name,
'--cluster=' + self.cluster,
'--role=' + self.role,
'--environment=' + self.environment,
'--topology-id=' + self.topology_id,
'--system-config-file=' + self.heron_internals_config_file,
'--override-config-file=' + self.override_config_file,
'--sink-config-file=' + sink_config_file]
return Command(metricsmgr_cmd, self.shell_env) | get the command to start the metrics manager processes |
def generate_ast(path):
"""Generate an Abstract Syntax Tree using the ast module.
Args:
path(str): The path to the file e.g. example/foo/bar.py
"""
if os.path.isfile(path):
with open(path, 'r') as f:
try:
tree = ast.parse(f.read())
return PytTransformer().visit(tree)
except SyntaxError: # pragma: no cover
global recursive
if not recursive:
_convert_to_3(path)
recursive = True
return generate_ast(path)
else:
raise SyntaxError('The ast module can not parse the file'
' and the python 2 to 3 conversion'
' also failed.')
raise IOError('Input needs to be a file. Path: ' + path) | Generate an Abstract Syntax Tree using the ast module.
Args:
path(str): The path to the file e.g. example/foo/bar.py |
def set_params(w, src):
"""
Set source parameters.
"""
params = extract_source_params(src)
# this is done because for characteristic sources geometry is in
# 'surface' attribute
params.update(extract_geometry_params(src))
mfd_pars, rate_pars = extract_mfd_params(src)
params.update(mfd_pars)
params.update(rate_pars)
strikes, dips, rakes, np_weights = extract_source_nodal_planes(src)
params.update(strikes)
params.update(dips)
params.update(rakes)
params.update(np_weights)
hds, hdsw = extract_source_hypocentral_depths(src)
params.update(hds)
params.update(hdsw)
pstrikes, pdips = extract_source_planes_strikes_dips(src)
params.update(pstrikes)
params.update(pdips)
params['sourcetype'] = striptag(src.tag)
w.record(**params) | Set source parameters. |
def make_cashed(self):
"""
Enables caching of descend queries
"""
self._descendance_cash = [dict() for _ in self.graph]
self.descend = self._descend_cashed | Enables caching of descend queries
def verify_file_exists(file_name, file_location):
"""
Function to verify if a file exists
Args:
file_name: The name of file to check
file_location: The location of the file, derived from the os module
Returns: returns boolean True or False
"""
return __os.path.isfile(__os.path.join(file_location, file_name)) | Function to verify if a file exists
Args:
file_name: The name of file to check
file_location: The location of the file, derived from the os module
Returns: returns boolean True or False |
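A trivial usage sketch (the path is illustrative, assuming the function above is importable):
if verify_file_exists('hosts', '/etc'):
    print('found /etc/hosts')
else:
    print('missing')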
def deactivate_lvm_volume_group(block_device):
'''
Deactivate any volume group associated with an LVM physical volume.
:param block_device: str: Full path to LVM physical volume
'''
vg = list_lvm_volume_group(block_device)
if vg:
cmd = ['vgchange', '-an', vg]
check_call(cmd) | Deactivate any volume group associated with an LVM physical volume.
:param block_device: str: Full path to LVM physical volume |
def list_vdirs(site, app=_DEFAULT_APP):
'''
Get all configured IIS virtual directories for the specified site, or for
the combination of site and application.
Args:
site (str): The IIS site name.
app (str): The IIS application.
Returns:
dict: A dictionary of the virtual directory names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_vdirs site
'''
ret = dict()
ps_cmd = ['Get-WebVirtualDirectory',
'-Site', r"'{0}'".format(site),
'-Application', r"'{0}'".format(app),
'|', "Select-Object PhysicalPath, @{ Name = 'name';",
r"Expression = { $_.path.Split('/')[-1] } }"]
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
for item in items:
ret[item['name']] = {'sourcepath': item['physicalPath']}
if not ret:
log.warning('No vdirs found in output: %s', cmd_ret)
return ret | Get all configured IIS virtual directories for the specified site, or for
the combination of site and application.
Args:
site (str): The IIS site name.
app (str): The IIS application.
Returns:
dict: A dictionary of the virtual directory names and properties.
CLI Example:
.. code-block:: bash
salt '*' win_iis.list_vdirs site |
def generate_property_names(self):
"""
Means that keys of the object must follow this definition.
.. code-block:: python
{
'propertyNames': {
'maxLength': 3,
},
}
Valid keys of the object for this definition are foo, bar, ... but not foobar, for example.
"""
property_names_definition = self._definition.get('propertyNames', {})
if property_names_definition is True:
pass
elif property_names_definition is False:
self.create_variable_keys()
with self.l('if {variable}_keys:'):
self.l('raise JsonSchemaException("{name} must not be there")')
else:
self.create_variable_is_dict()
with self.l('if {variable}_is_dict:'):
self.create_variable_with_length()
with self.l('if {variable}_len != 0:'):
self.l('{variable}_property_names = True')
with self.l('for {variable}_key in {variable}:'):
with self.l('try:'):
self.generate_func_code_block(
property_names_definition,
'{}_key'.format(self._variable),
self._variable_name,
clear_variables=True,
)
with self.l('except JsonSchemaException:'):
self.l('{variable}_property_names = False')
with self.l('if not {variable}_property_names:'):
self.l('raise JsonSchemaException("{name} must be named by propertyName definition")') | Means that keys of the object must follow this definition.
.. code-block:: python
{
'propertyNames': {
'maxLength': 3,
},
}
Valid keys of the object for this definition are foo, bar, ... but not foobar, for example.
def encode(self, sequence):
"""Returns a tuple (binary reprensentation, default sequence, polymorphisms list)"""
polymorphisms = []
defaultSequence = ''
binSequence = array.array(self.forma.typecode)
b = 0
i = 0
trueI = 0 #not inc in case if poly
poly = set()
while i < len(sequence)-1:
b = b | self.forma[self.charToBin[sequence[i]]]
if sequence[i+1] == '/' :
poly.add(sequence[i])
i += 2
else :
binSequence.append(b)
if len(poly) > 0 :
poly.add(sequence[i])
polymorphisms.append((trueI, poly))
poly = set()
bb = 0
while b % 2 != 0 :
b = b // 2  # integer division so b stays an int under Python 3
defaultSequence += sequence[i]
b = 0
i += 1
trueI += 1
if i < len(sequence) :
b = b | self.forma[self.charToBin[sequence[i]]]
binSequence.append(b)
if len(poly) > 0 :
if sequence[i] not in poly :
poly.add(sequence[i])
polymorphisms.append((trueI, poly))
defaultSequence += sequence[i]
return (binSequence, defaultSequence, polymorphisms) | Returns a tuple (binary representation, default sequence, polymorphisms list) |
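To make the bit-packing idea concrete, a simplified standalone sketch assuming a one-bit-per-base mapping rather than the class's actual forma/charToBin tables; an 'A/G' position ORs both bits and is reported as a polymorphism.
char_to_bit = {'A': 1, 'C': 2, 'G': 4, 'T': 8}  # illustrative mapping only

def encode_simple(sequence):
    bits, polymorphisms = [], []
    i = 0
    while i < len(sequence):
        alleles = {sequence[i]}
        while i + 1 < len(sequence) and sequence[i + 1] == '/':
            alleles.add(sequence[i + 2])  # consume the 'X/Y' pair
            i += 2
        packed = 0
        for allele in alleles:
            packed |= char_to_bit[allele]
        bits.append(packed)
        if len(alleles) > 1:
            polymorphisms.append((len(bits) - 1, alleles))
        i += 1
    return bits, polymorphisms

print(encode_simple('ATA/GT'))  # ([1, 8, 5, 8], [(2, {'A', 'G'})]) -- set order may vary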
def from_dict(cls, pref, prefix = None):
""" Create a Prefix object from a dict.
Suitable for creating Prefix objects from XML-RPC input.
"""
if prefix is None:
prefix = Prefix()
prefix.id = pref['id']
if pref['vrf_id'] is not None: # VRF is not mandatory
prefix.vrf = VRF.get(pref['vrf_id'])
prefix.family = pref['family']
prefix.prefix = pref['prefix']
prefix.display_prefix = pref['display_prefix']
prefix.description = pref['description']
prefix.comment = pref['comment']
prefix.node = pref['node']
if pref['pool_id'] is not None: # Pool is not mandatory
prefix.pool = Pool.get(pref['pool_id'])
prefix.type = pref['type']
prefix.indent = pref['indent']
prefix.country = pref['country']
prefix.order_id = pref['order_id']
prefix.customer_id = pref['customer_id']
prefix.external_key = pref['external_key']
prefix.authoritative_source = pref['authoritative_source']
prefix.alarm_priority = pref['alarm_priority']
prefix.monitor = pref['monitor']
prefix.vlan = pref['vlan']
prefix.added = pref['added']
prefix.last_modified = pref['last_modified']
prefix.total_addresses = int(pref['total_addresses'])
prefix.used_addresses = int(pref['used_addresses'])
prefix.free_addresses = int(pref['free_addresses'])
prefix.status = pref['status']
prefix.avps = pref['avps']
prefix.expires = pref['expires']
prefix.inherited_tags = {}
for tag_name in pref['inherited_tags']:
tag = Tag.from_dict({'name': tag_name })
prefix.inherited_tags[tag_name] = tag
prefix.tags = {}
for tag_name in pref['tags']:
tag = Tag.from_dict({'name': tag_name })
prefix.tags[tag_name] = tag
if 'match' in pref:
prefix.match = pref['match']
if 'display' in pref:
prefix.display = pref['display']
if 'children' in pref:
prefix.children = pref['children']
return prefix | Create a Prefix object from a dict.
Suitable for creating Prefix objects from XML-RPC input. |
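A minimal self-contained illustration of the same dict-to-object pattern with a toy class (not nipap's Prefix); fields beyond id/prefix/description are omitted.
class ToyPrefix:
    """Toy stand-in used only to illustrate the factory pattern above."""
    @classmethod
    def from_dict(cls, row, prefix=None):
        obj = prefix if prefix is not None else cls()
        for field in ('id', 'prefix', 'description'):
            setattr(obj, field, row.get(field))
        return obj

p = ToyPrefix.from_dict({'id': 1, 'prefix': '10.0.0.0/24', 'description': 'lab net'})
print(p.prefix)  # 10.0.0.0/24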
def validate(retval, func, args):
# type: (int, Any, Tuple[Any, Any]) -> Optional[Tuple[Any, Any]]
""" Validate the returned value of a Xlib or XRANDR function. """
if retval != 0 and not ERROR.details:
return args
err = "{}() failed".format(func.__name__)
details = {"retval": retval, "args": args}
raise ScreenShotError(err, details=details) | Validate the returned value of a Xlib or XRANDR function. |
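This checker follows ctypes' errcheck protocol, which calls errcheck(result, func, args) after every foreign-function invocation. A hedged wiring sketch, assuming an X11 shared library is available and that validate (plus the ERROR/ScreenShotError globals it relies on) is in scope from the surrounding module; nothing is actually invoked here.
import ctypes
import ctypes.util

x11 = ctypes.util.find_library("X11")
if x11:
    xlib = ctypes.cdll.LoadLibrary(x11)
    # Every later call to xlib.XFlush would now be vetted by validate().
    xlib.XFlush.errcheck = validate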
def raw(self, module, method='GET', data=None):
'''
Submits or requests raw input
'''
request = self.session
url = 'http://%s:%s/%s' % (self.host, self.port, module)
if self.verbose:
print(data)
if method=='GET':
response = request.get(url)
elif method=='POST':
response = request.post(url,data)
elif method=='PUT':
response = request.put(url,data)
elif method=='DELETE':
response = request.delete(url)
else:
return {'error' : 'No such request method %s' % method}
return response | Submits or requests raw input |
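A hedged usage sketch of what raw('pools', 'GET') boils down to with the requests library; the host, port and 'pools' module name are made up.
import requests

session = requests.Session()
try:
    response = session.get('http://127.0.0.1:8080/pools', timeout=2)  # illustrative endpoint
    print(response.status_code)
except requests.exceptions.RequestException as exc:
    print('request failed:', exc)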
def update_subscription(self, update_parameters, subscription_id):
"""UpdateSubscription.
[Preview API] Update an existing subscription. Depending on the type of subscription and permissions, the caller can update the description, filter settings, channel (delivery) settings and more.
:param :class:`<NotificationSubscriptionUpdateParameters> <azure.devops.v5_0.notification.models.NotificationSubscriptionUpdateParameters>` update_parameters:
:param str subscription_id:
:rtype: :class:`<NotificationSubscription> <azure.devops.v5_0.notification.models.NotificationSubscription>`
"""
route_values = {}
if subscription_id is not None:
route_values['subscriptionId'] = self._serialize.url('subscription_id', subscription_id, 'str')
content = self._serialize.body(update_parameters, 'NotificationSubscriptionUpdateParameters')
response = self._send(http_method='PATCH',
location_id='70f911d6-abac-488c-85b3-a206bf57e165',
version='5.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('NotificationSubscription', response) | UpdateSubscription.
[Preview API] Update an existing subscription. Depending on the type of subscription and permissions, the caller can update the description, filter settings, channel (delivery) settings and more.
:param :class:`<NotificationSubscriptionUpdateParameters> <azure.devops.v5_0.notification.models.NotificationSubscriptionUpdateParameters>` update_parameters:
:param str subscription_id:
:rtype: :class:`<NotificationSubscription> <azure.devops.v5_0.notification.models.NotificationSubscription>` |
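A hedged sketch of driving this method through the azure-devops Python SDK; the organisation URL, personal access token, subscription id, the description field and the get_notification_client accessor name are assumptions, not verified against a specific SDK release.
from azure.devops.connection import Connection
from azure.devops.v5_0.notification.models import NotificationSubscriptionUpdateParameters
from msrest.authentication import BasicAuthentication

connection = Connection(base_url='https://dev.azure.com/yourorg',  # hypothetical organisation
                        creds=BasicAuthentication('', 'personal-access-token'))
client = connection.clients.get_notification_client()  # accessor name assumed
params = NotificationSubscriptionUpdateParameters(description='Renamed subscription')
updated = client.update_subscription(params, '12345678-aaaa-bbbb-cccc-1234567890ab')
print(updated.description)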