def add_user(name, profile='github'):
'''
Add a GitHub user.
name
The user for which to obtain information.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.add_user github-handle
'''
client = _get_client(profile)
organization = client.get_organization(
_get_config_value(profile, 'org_name')
)
try:
github_named_user = client.get_user(name)
except UnknownObjectException:
log.exception("Resource not found")
return False
headers, data = organization._requester.requestJsonAndCheck(
"PUT",
organization.url + "/memberships/" + github_named_user._identity
)
return data.get('state') == 'pending'
def actionAngleTorus_xvFreqs_c(pot,jr,jphi,jz,
angler,anglephi,anglez,
tol=0.003):
"""
NAME:
actionAngleTorus_xvFreqs_c
PURPOSE:
compute configuration (x,v) and frequencies of a set of angles on a single torus
INPUT:
pot - Potential object or list thereof
jr - radial action (scalar)
jphi - azimuthal action (scalar)
jz - vertical action (scalar)
angler - radial angle (array [N])
anglephi - azimuthal angle (array [N])
anglez - vertical angle (array [N])
tol= (0.003) goal for |dJ|/|J| along the torus
OUTPUT:
(R,vR,vT,z,vz,phi,Omegar,Omegaphi,Omegaz,flag)
HISTORY:
2015-08-05/07 - Written - Bovy (UofT)
"""
#Parse the potential
from galpy.orbit.integrateFullOrbit import _parse_pot
npot, pot_type, pot_args= _parse_pot(pot,potfortorus=True)
#Set up result arrays
R= numpy.empty(len(angler))
vR= numpy.empty(len(angler))
vT= numpy.empty(len(angler))
z= numpy.empty(len(angler))
vz= numpy.empty(len(angler))
phi= numpy.empty(len(angler))
Omegar= numpy.empty(1)
Omegaphi= numpy.empty(1)
Omegaz= numpy.empty(1)
flag= ctypes.c_int(0)
#Set up the C code
ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
actionAngleTorus_xvFreqsFunc= _lib.actionAngleTorus_xvFreqs
actionAngleTorus_xvFreqsFunc.argtypes=\
[ctypes.c_double,
ctypes.c_double,
ctypes.c_double,
ctypes.c_int,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_int,
ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.c_double,
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
ctypes.POINTER(ctypes.c_int)]
#Array requirements, first store old order
f_cont= [angler.flags['F_CONTIGUOUS'],
anglephi.flags['F_CONTIGUOUS'],
anglez.flags['F_CONTIGUOUS']]
angler= numpy.require(angler,dtype=numpy.float64,requirements=['C','W'])
anglephi= numpy.require(anglephi,dtype=numpy.float64,requirements=['C','W'])
anglez= numpy.require(anglez,dtype=numpy.float64,requirements=['C','W'])
R= numpy.require(R,dtype=numpy.float64,requirements=['C','W'])
vR= numpy.require(vR,dtype=numpy.float64,requirements=['C','W'])
vT= numpy.require(vT,dtype=numpy.float64,requirements=['C','W'])
z= numpy.require(z,dtype=numpy.float64,requirements=['C','W'])
vz= numpy.require(vz,dtype=numpy.float64,requirements=['C','W'])
phi= numpy.require(phi,dtype=numpy.float64,requirements=['C','W'])
Omegar= numpy.require(Omegar,dtype=numpy.float64,requirements=['C','W'])
Omegaphi= numpy.require(Omegaphi,dtype=numpy.float64,requirements=['C','W'])
Omegaz= numpy.require(Omegaz,dtype=numpy.float64,requirements=['C','W'])
#Run the C code
actionAngleTorus_xvFreqsFunc(ctypes.c_double(jr),
ctypes.c_double(jphi),
ctypes.c_double(jz),
ctypes.c_int(len(angler)),
angler,
anglephi,
anglez,
ctypes.c_int(npot),
pot_type,
pot_args,
ctypes.c_double(tol),
R,vR,vT,z,vz,phi,
Omegar,Omegaphi,Omegaz,
ctypes.byref(flag))
#Reset input arrays
if f_cont[0]: angler= numpy.asfortranarray(angler)
if f_cont[1]: anglephi= numpy.asfortranarray(anglephi)
if f_cont[2]: anglez= numpy.asfortranarray(anglez)
return (R,vR,vT,z,vz,phi,Omegar[0],Omegaphi[0],Omegaz[0],flag.value)
def to_dict(cls, obj):
'''Serialises the object, by default serialises anything
that isn't prefixed with __, isn't in the blacklist, and isn't
callable.
'''
return {
k: getattr(obj, k)
for k in dir(obj)
if cls.serialisable(k, obj)
}
def create_folder_structure(self):
"""Creates a folder structure based on the project and batch name.
Project - Batch-name - Raw-data-dir
The info_df JSON-file will be stored in the Project folder.
The summary-files will be saved in the Batch-name folder.
The raw data (including exported cycles and ica-data) will be saved to
the Raw-data-dir.
"""
self.info_file, directories = create_folder_structure(self.project,
self.name)
self.project_dir, self.batch_dir, self.raw_dir = directories
logger.debug("create folders:" + str(directories))
@contextmanager  # from contextlib; the `with timed_operation(...)` usage in the docstring relies on this
def timed_operation(msg, log_start=False):
"""
Surround a context with a timer.
Args:
msg(str): the log to print.
log_start(bool): whether to print also at the beginning.
Example:
.. code-block:: python
with timed_operation('Good Stuff'):
time.sleep(1)
Will print:
.. code-block:: python
Good stuff finished, time:1sec.
"""
assert len(msg)
if log_start:
logger.info('Start {} ...'.format(msg))
start = timer()
yield
msg = msg[0].upper() + msg[1:]
logger.info('{} finished, time:{:.4f} sec.'.format(
msg, timer() - start))
def mcast_ip_mask(ip_addr_and_mask, return_tuple=True):
"""
Function to check if an address is multicast and that the CIDR mask is good
Args:
ip_addr_and_mask: Multicast IP address and mask in the following format 239.1.1.1/24
return_tuple: Set to True it returns a IP and mask in a tuple, set to False returns True or False
Returns: see return_tuple for return options
"""
regex_mcast_ip_and_mask = __re.compile(r"^(((2[2-3][4-9])|(23[0-3]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))\.((25[0-5])|(2[0-4][0-9])|(1[0-9][0-9])|([1-9]?[0-9]))/((3[0-2])|([1-2][0-9])|[3-9]))$")
if return_tuple:
while not regex_mcast_ip_and_mask.match(ip_addr_and_mask):
print("Not a good multicast IP and CIDR mask combo.")
print("Please try again.")
ip_addr_and_mask = input("Please enter a multicast IP address and mask in the following format x.x.x.x/x: ")
ip_cidr_split = ip_addr_and_mask.split("/")
ip_addr = ip_cidr_split[0]
cidr = ip_cidr_split[1]
return ip_addr, cidr
elif not return_tuple:
if not regex_mcast_ip_and_mask.match(ip_addr_and_mask):
return False
else:
return True
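# Illustrative calls for mcast_ip_mask above (the addresses are made-up
# examples, not taken from the source). With return_tuple=False the function
# simply reports whether the string is a valid multicast address/mask combo.
print(mcast_ip_mask("239.1.1.1/24"))                         # ('239.1.1.1', '24')
print(mcast_ip_mask("224.0.0.251/32", return_tuple=False))   # True
print(mcast_ip_mask("10.0.0.1/24", return_tuple=False))      # False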
def join_room(self, room_id_or_alias):
"""Performs /join/$room_id
Args:
room_id_or_alias (str): The room ID or room alias to join.
"""
if not room_id_or_alias:
raise MatrixError("No alias or room ID to join.")
path = "/join/%s" % quote(room_id_or_alias)
return self._send("POST", path)
def split_header_words(header_values):
r"""Parse header values into a list of lists containing key,value pairs.
The function knows how to deal with ",", ";" and "=" as well as quoted
values after "=". A list of space separated tokens are parsed as if they
were separated by ";".
If the header_values passed as argument contains multiple values, then they
are treated as if they were a single value separated by comma ",".
This means that this function is useful for parsing header fields that
follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
the requirement for tokens).
headers = #header
header = (token | parameter) *( [";"] (token | parameter))
token = 1*<any CHAR except CTLs or separators>
separators = "(" | ")" | "<" | ">" | "@"
| "," | ";" | ":" | "\" | <">
| "/" | "[" | "]" | "?" | "="
| "{" | "}" | SP | HT
quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
qdtext = <any TEXT except <">>
quoted-pair = "\" CHAR
parameter = attribute "=" value
attribute = token
value = token | quoted-string
Each header is represented by a list of key/value pairs. The value for a
simple token (not part of a parameter) is None. Syntactically incorrect
headers will not necessarily be parsed as you would want.
This is easier to describe with some examples:
>>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
[[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
>>> split_header_words(['text/html; charset="iso-8859-1"'])
[[('text/html', None), ('charset', 'iso-8859-1')]]
>>> split_header_words([r'Basic realm="\"foo\bar\""'])
[[('Basic', None), ('realm', '"foobar"')]]
"""
assert not isinstance(header_values, str)
result = []
for text in header_values:
orig_text = text
pairs = []
while text:
m = HEADER_TOKEN_RE.search(text)
if m:
text = unmatched(m)
name = m.group(1)
m = HEADER_QUOTED_VALUE_RE.search(text)
if m: # quoted value
text = unmatched(m)
value = m.group(1)
value = HEADER_ESCAPE_RE.sub(r"\1", value)
else:
m = HEADER_VALUE_RE.search(text)
if m: # unquoted value
text = unmatched(m)
value = m.group(1)
value = value.rstrip()
else:
# no value, a lone token
value = None
pairs.append((name, value))
elif text.lstrip().startswith(","):
# concatenated headers, as per RFC 2616 section 4.2
text = text.lstrip()[1:]
if pairs: result.append(pairs)
pairs = []
else:
# skip junk
non_junk, nr_junk_chars = re.subn(r"^[=\s;]*", "", text)
assert nr_junk_chars > 0, (
"split_header_words bug: '%s', '%s', %s" %
(orig_text, text, pairs))
text = non_junk
if pairs: result.append(pairs)
return result
def MRA(biomf, sampleIDs=None, transform=None):
"""
Calculate the mean relative abundance percentage.
:type biomf: A BIOM file.
:param biomf: OTU table format.
:type sampleIDs: list
:param sampleIDs: A list of sample id's from BIOM format OTU table.
:param transform: Mathematical function which is used to transform smax to another
format. By default, the function has been set to None.
:rtype: dict
:return: A dictionary keyed on OTUID's and their mean relative abundance for a given
number of sampleIDs.
"""
ra = relative_abundance(biomf, sampleIDs)
if transform is not None:
ra = {sample: {otuID: transform(abd) for otuID, abd in ra[sample].items()}
for sample in ra.keys()}
otuIDs = biomf.ids(axis="observation")
return mean_otu_pct_abundance(ra, otuIDs)
def run_shell_command(commands, **kwargs):
"""Run a shell command."""
p = subprocess.Popen(commands,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs)
output, error = p.communicate()
return p.returncode, output, error
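# A small usage sketch for run_shell_command above; the command is an
# arbitrary example, and output/error come back as raw bytes because no
# text/encoding option is passed to Popen.
returncode, output, error = run_shell_command(["echo", "hello"])
print(returncode, output)   # 0 b'hello\n'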
def xml_filter(self, content):
r"""Filter and preprocess xml content
:param content: xml content
:rtype: str
"""
content = utils.strip_whitespace(content, True) if self.__options['strip'] else content.strip()
if not self.__options['encoding']:
encoding = self.guess_xml_encoding(content) or self.__encoding
self.set_options(encoding=encoding)
if self.__options['encoding'].lower() != self.__encoding:
# Convert to the detected encoding and strip the XML declaration header
content = self.strip_xml_header(content.decode(self.__options['encoding'], errors=self.__options['errors']))
if self.__options['unescape']:
content = utils.html_entity_decode(content)
return content
def load_simple_endpoint(category, name):
'''fetches the entry point for a plugin and calls it with the given
aux_info'''
for ep in pkg_resources.iter_entry_points(category):
if ep.name == name:
return ep.load()
raise KeyError(name)
def removeTab(self, index):
"""
Removes the view at the given index and disconnects it from the \
panel.
:param index | <int>
"""
view = self.widget(index)
if isinstance(view, XView):
try:
view.windowTitleChanged.disconnect(self.refreshTitles)
view.sizeConstraintChanged.disconnect(self.adjustSizeConstraint)
except:
pass
return super(XViewPanel, self).removeTab(index)
def readACTIONRECORD(self):
""" Read a SWFActionRecord """
action = None
actionCode = self.readUI8()
if actionCode != 0:
actionLength = self.readUI16() if actionCode >= 0x80 else 0
#print "0x%x"%actionCode, actionLength
action = SWFActionFactory.create(actionCode, actionLength)
action.parse(self)
return action
def cutR_seq(seq, cutR, max_palindrome):
"""Cut genomic sequence from the right.
Parameters
----------
seq : str
Nucleotide sequence to be cut from the right
cutR : int
cutR - max_palindrome = how many nucleotides to cut from the right.
Negative cutR implies complementary palindromic insertions.
max_palindrome : int
Length of the maximum palindromic insertion.
Returns
-------
seq : str
Nucleotide sequence after being cut from the right
Examples
--------
>>> cutR_seq('TGCGCCAGCAGTGAGTC', 0, 4)
'TGCGCCAGCAGTGAGTCGACT'
>>> cutR_seq('TGCGCCAGCAGTGAGTC', 8, 4)
'TGCGCCAGCAGTG'
"""
complement_dict = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'} #can include lower case if wanted
if cutR < max_palindrome:
seq = seq + ''.join([complement_dict[nt] for nt in seq[cutR - max_palindrome:]][::-1]) #reverse complement palindrome insertions
else:
seq = seq[:len(seq) - cutR + max_palindrome] #deletions
return seq
def to_tgt(self):
"""
Returns the native format of an AS_REP message and the sessionkey in EncryptionKey native format
"""
enc_part = EncryptedData({'etype': 1, 'cipher': b''})
tgt_rep = {}
tgt_rep['pvno'] = krb5_pvno
tgt_rep['msg-type'] = MESSAGE_TYPE.KRB_AS_REP.value
tgt_rep['crealm'] = self.server.realm.to_string()
tgt_rep['cname'] = self.client.to_asn1()[0]
tgt_rep['ticket'] = Ticket.load(self.ticket.to_asn1()).native
tgt_rep['enc-part'] = enc_part.native
t = EncryptionKey(self.key.to_asn1()).native
return tgt_rep, t
def from_string(cls, link):
"""Return a new SheetUrl instance from parsed URL string.
>>> SheetUrl.from_string('https://docs.google.com/spreadsheets/d/spam')
<SheetUrl id='spam' gid=0>
"""
ma = cls._pattern.search(link)
if ma is None:
raise ValueError(link)
id = ma.group('id')
return cls(id)
def run_with(self, inputs, options):
""" Store the run parameters (inputs and options)
"""
self._inputs = inputs
self._options = options
def from_json(cls, data):
"""Create a Design Day from a dictionary.
Args:
data = {
"name": string,
"day_type": string,
"location": ladybug Location schema,
"dry_bulb_condition": ladybug DryBulbCondition schema,
"humidity_condition": ladybug HumidityCondition schema,
"wind_condition": ladybug WindCondition schema,
"sky_condition": ladybug SkyCondition schema}
"""
required_keys = ('name', 'day_type', 'location', 'dry_bulb_condition',
'humidity_condition', 'wind_condition', 'sky_condition')
for key in required_keys:
assert key in data, 'Required key "{}" is missing!'.format(key)
return cls(data['name'], data['day_type'], Location.from_json(data['location']),
DryBulbCondition.from_json(data['dry_bulb_condition']),
HumidityCondition.from_json(data['humidity_condition']),
WindCondition.from_json(data['wind_condition']),
SkyCondition.from_json(data['sky_condition']))
def do_b0(self, line):
"""Send the Master a BinaryInput (group 2) value of False at index 6. Command syntax is: b0"""
self.application.apply_update(opendnp3.Binary(False), index=6)
def get_base(vpc, **conn):
"""
The base will return:
- ARN
- Region
- Name
- Id
- Tags
- IsDefault
- InstanceTenancy
- CidrBlock
- CidrBlockAssociationSet
- Ipv6CidrBlockAssociationSet
- DhcpOptionsId
- Attributes
- _version
:param vpc:
:param conn:
:return:
"""
# Get the base:
base_result = describe_vpcs(VpcIds=[vpc["id"]], **conn)[0]
# The name of the VPC is in the tags:
vpc_name = None
for t in base_result.get("Tags", []):
if t["Key"] == "Name":
vpc_name = t["Value"]
dhcp_opts = None
# Get the DHCP Options:
if base_result.get("DhcpOptionsId"):
# There should only be exactly 1 attached to a VPC:
dhcp_opts = describe_dhcp_options(DhcpOptionsIds=[base_result["DhcpOptionsId"]], **conn)[0]["DhcpOptionsId"]
# Get the Attributes:
attributes = {}
attr_vals = [
("EnableDnsHostnames", "enableDnsHostnames"),
("EnableDnsSupport", "enableDnsSupport")
]
for attr, query in attr_vals:
attributes[attr] = describe_vpc_attribute(VpcId=vpc["id"], Attribute=query, **conn)[attr]
vpc.update({
'name': vpc_name,
'region': conn["region"],
'tags': base_result.get("Tags", []),
'is_default': base_result["IsDefault"],
'instance_tenancy': base_result["InstanceTenancy"],
'dhcp_options_id': dhcp_opts,
'cidr_block': base_result["CidrBlock"],
'cidr_block_association_set': base_result.get("CidrBlockAssociationSet", []),
'ipv6_cidr_block_association_set': base_result.get("Ipv6CidrBlockAssociationSet", []),
'attributes': attributes,
'_version': 1
})
return vpc
def attributes(self):
"""List of attributes available for the dataset (cached)."""
if self._attributes is None:
self._filters, self._attributes = self._fetch_configuration()
return self._attributes
def _fw_rule_create(self, drvr_name, data, cache):
"""Firewall Rule create routine.
This function updates its local cache with rule parameters.
It checks if local cache has information about the Policy
associated with the rule. If not, it means a restart has happened.
It retrieves the policy associated with the FW by calling
OpenStack APIs and calls the policy create internal routine.
"""
tenant_id = data.get('firewall_rule').get('tenant_id')
fw_rule = data.get('firewall_rule')
rule = self._fw_rule_decode_store(data)
fw_pol_id = fw_rule.get('firewall_policy_id')
rule_id = fw_rule.get('id')
if tenant_id not in self.fwid_attr:
self.fwid_attr[tenant_id] = FwMapAttr(tenant_id)
self.fwid_attr[tenant_id].store_rule(rule_id, rule)
if not cache:
self._check_create_fw(tenant_id, drvr_name)
self.tenant_db.store_rule_tenant(rule_id, tenant_id)
if fw_pol_id is not None and not (
self.fwid_attr[tenant_id].is_policy_present(fw_pol_id)):
pol_data = self.os_helper.get_fw_policy(fw_pol_id)
if pol_data is not None:
self.fw_policy_create(pol_data, cache=cache)
def execute_function(function_request):
"""
Given a request created by
`beanstalk_dispatch.common.create_request_body`, executes the
request. This function is to be run on a beanstalk worker.
"""
dispatch_table = getattr(settings, 'BEANSTALK_DISPATCH_TABLE', None)
if dispatch_table is None:
raise BeanstalkDispatchError('No beanstalk dispatch table configured')
for key in (FUNCTION, ARGS, KWARGS):
if key not in function_request.keys():
raise BeanstalkDispatchError(
'Please provide a {} argument'.format(key))
function_path = dispatch_table.get(
function_request[FUNCTION], ''
)
if function_path:
runnable = locate(function_path)
if not runnable:
raise BeanstalkDispatchError(
'Unable to locate function: {}'.format(function_path))
args = function_request[ARGS]
kwargs = function_request[KWARGS]
if inspect.isclass(runnable):
if issubclass(runnable, SafeTask):
task = runnable()
else:
raise BeanstalkDispatchError(
'Requested task is not a SafeTask subclass: {}'.format(
function_request[FUNCTION]))
else:
task = SafeTask()
task.run = runnable
task.process(*args, **kwargs)
else:
raise BeanstalkDispatchError(
'Requested function not found: {}'.format(
function_request[FUNCTION]))
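# A minimal sketch of the request shape execute_function expects (the table
# entry and dotted path below are hypothetical, not from the source): the
# dispatch table maps a short key to an importable callable, and the request
# supplies that key plus positional and keyword arguments.
example_request = {
    FUNCTION: 'send_welcome_email',   # key defined in settings.BEANSTALK_DISPATCH_TABLE
    ARGS: ['[email protected]'],
    KWARGS: {'resend': False},
}
execute_function(example_request)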
def _create_alignment_button(self):
"""Creates vertical alignment button"""
iconnames = ["AlignTop", "AlignCenter", "AlignBottom"]
bmplist = [icons[iconname] for iconname in iconnames]
self.alignment_tb = _widgets.BitmapToggleButton(self, bmplist)
self.alignment_tb.SetToolTipString(_(u"Alignment"))
self.Bind(wx.EVT_BUTTON, self.OnAlignment, self.alignment_tb)
self.AddControl(self.alignment_tb)
def heartbeat(self):
"""
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
externally. This allows at the system level to monitor what is
actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
"""
try:
with create_session() as session:
job = session.query(BaseJob).filter_by(id=self.id).one()
make_transient(job)
session.commit()
if job.state == State.SHUTDOWN:
self.kill()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
if not is_unit_test:
# Figure out how long to sleep for
sleep_for = 0
if job.latest_heartbeat:
seconds_remaining = self.heartrate - \
(timezone.utcnow() - job.latest_heartbeat)\
.total_seconds()
sleep_for = max(0, seconds_remaining)
sleep(sleep_for)
# Update last heartbeat time
with create_session() as session:
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.latest_heartbeat = timezone.utcnow()
session.merge(job)
session.commit()
self.heartbeat_callback(session=session)
self.log.debug('[heartbeat]')
except OperationalError as e:
self.log.error("Scheduler heartbeat got an exception: %s", str(e))
def write(self, data):
"""
Intercepted method for writing data.
:param data:
Data to write
:returns:
Whatever the original method returns
:raises:
Whatever the original method raises
This method updates the internal digest object with the new data
and then proceeds to call the original write method.
"""
# Intercept the write method (that's what @direct does) and both write
# the data using the original write method (using proxiee(self).write)
# and update the hash of the data written so far (using
# proxy.state(self).digest).
proxy.state(self).digest.update(data)
return proxy.original(self).write(data)
def read(self):
'''Read from any of the connections that need it'''
# We'll check all living connections
connections = [c for c in self.connections() if c.alive()]
if not connections:
# If there are no connections, obviously we return no messages, but
# we should wait the duration of the timeout
time.sleep(self._timeout)
return []
# Not all connections need to be written to, so we'll only concern
# ourselves with those that require writes
writes = [c for c in connections if c.pending()]
try:
readable, writable, exceptable = select.select(
connections, writes, connections, self._timeout)
except exceptions.ConnectionClosedException:
logger.exception('Tried selecting on closed client')
return []
except select.error:
logger.exception('Error running select')
return []
# If we returned because the timeout interval passed, log it and return
if not (readable or writable or exceptable):
logger.debug('Timed out...')
return []
responses = []
# For each readable socket, we'll try to read some responses
for conn in readable:
try:
for res in conn.read():
# We'll capture heartbeats and respond to them automatically
if (isinstance(res, Response) and res.data == HEARTBEAT):
logger.info('Sending heartbeat to %s', conn)
conn.nop()
logger.debug('Setting last_recv_timestamp')
self.last_recv_timestamp = time.time()
continue
elif isinstance(res, Error):
nonfatal = (
exceptions.FinFailedException,
exceptions.ReqFailedException,
exceptions.TouchFailedException
)
if not isinstance(res.exception(), nonfatal):
# If it's not any of the non-fatal exceptions, then
# we have to close this connection
logger.error(
'Closing %s: %s', conn, res.exception())
self.close_connection(conn)
responses.append(res)
logger.debug('Setting last_recv_timestamp')
self.last_recv_timestamp = time.time()
except exceptions.NSQException:
logger.exception('Failed to read from %s', conn)
self.close_connection(conn)
except socket.error:
logger.exception('Failed to read from %s', conn)
self.close_connection(conn)
# For each writable socket, flush some data out
for conn in writable:
try:
conn.flush()
except socket.error:
logger.exception('Failed to flush %s', conn)
self.close_connection(conn)
# For each connection with an exception, try to close it and remove it
# from our connections
for conn in exceptable:
self.close_connection(conn)
return responses
def remove_tag(self, tag):
"""Remove tag from existing device tags
:param tag: the tag to be removed from the list
:raises ValueError: If tag does not exist in list
"""
tags = self.get_tags()
tags.remove(tag)
post_data = TAGS_TEMPLATE.format(connectware_id=self.get_connectware_id(),
tags=escape(",".join(tags)))
self._conn.put('/ws/DeviceCore', post_data)
# Invalidate cache
self._device_json = None
def write_info(dirs, parallel, config):
"""Write cluster or local filesystem resources, spinning up cluster if not present.
"""
if parallel["type"] in ["ipython"] and not parallel.get("run_local"):
out_file = _get_cache_file(dirs, parallel)
if not utils.file_exists(out_file):
sys_config = copy.deepcopy(config)
minfos = _get_machine_info(parallel, sys_config, dirs, config)
with open(out_file, "w") as out_handle:
yaml.safe_dump(minfos, out_handle, default_flow_style=False, allow_unicode=False)
def set_up(self):
"""Set up your applications and the test environment."""
self.path.profile = self.path.gen.joinpath("profile")
if not self.path.profile.exists():
self.path.profile.mkdir()
self.python = hitchpylibrarytoolkit.project_build(
"strictyaml",
self.path,
self.given["python version"],
{"ruamel.yaml": self.given["ruamel version"]},
).bin.python
self.example_py_code = (
ExamplePythonCode(self.python, self.path.gen)
.with_code(self.given.get("code", ""))
.with_setup_code(
self.given.get("setup", "")
)
.with_terminal_size(160, 100)
.with_strings(
yaml_snippet_1=self.given.get("yaml_snippet_1"),
yaml_snippet=self.given.get("yaml_snippet"),
yaml_snippet_2=self.given.get("yaml_snippet_2"),
modified_yaml_snippet=self.given.get("modified_yaml_snippet"),
)
)
def check_aggregations_privacy(self, aggregations_params):
""" Check per-field privacy rules in aggregations.
Privacy is checked by making sure user has access to the fields
used in aggregations.
"""
fields = self.get_aggregations_fields(aggregations_params)
fields_dict = dictset.fromkeys(fields)
fields_dict['_type'] = self.view.Model.__name__
try:
validate_data_privacy(self.view.request, fields_dict)
except wrappers.ValidationError as ex:
raise JHTTPForbidden(
'Not enough permissions to aggregate on '
'fields: {}'.format(ex))
def Maybe(validator):
"""
Wraps the given validator callable, only using it for the given value if it
is not ``None``.
"""
@wraps(Maybe)
def built(value):
if value is not None:
return validator(value)
return built
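# Illustrative use of Maybe above: wrap a validator so that None passes
# through untouched while any other value is still validated/coerced.
maybe_int = Maybe(int)
print(maybe_int(None))    # None -> validator skipped
print(maybe_int('42'))    # 42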
def pif_list(call=None):
'''
Get a list of PIFs (physical network interfaces)
.. code-block:: bash
salt-cloud -f pif_list myxen
'''
if call != 'function':
raise SaltCloudSystemExit(
'This function must be called with -f, --function argument.'
)
ret = {}
session = _get_session()
pifs = session.xenapi.PIF.get_all()
for pif in pifs:
record = session.xenapi.PIF.get_record(pif)
ret[record['uuid']] = record
return ret
def all(self, *, collection, attribute, word, func=None, operation=None):
""" Performs a filter with the OData 'all' keyword on the collection
For example:
q.all(collection='email_addresses', attribute='address',
operation='eq', word='[email protected]')
will transform to a filter such as:
emailAddresses/all(a:a/address eq '[email protected]')
:param str collection: the collection to apply the any keyword on
:param str attribute: the attribute of the collection to check
:param str word: the word to check
:param str func: the logical function to apply to the attribute
inside the collection
:param str operation: the logical operation to apply to the
attribute inside the collection
:rtype: Query
"""
return self.iterable('all', collection=collection, attribute=attribute,
word=word, func=func, operation=operation)
def get_new_connection(self, connection_params):
"""
Receives a dictionary connection_params to setup
a connection to the database.
Dictionary correct setup is made through the
get_connection_params method.
TODO: This needs to be made more generic to accept
other MongoClient parameters.
"""
name = connection_params.pop('name')
es = connection_params.pop('enforce_schema')
connection_params['document_class'] = OrderedDict
# connection_params['tz_aware'] = True
# To prevent leaving unclosed connections behind,
# client_conn must be closed before a new connection
# is created.
if self.client_connection is not None:
self.client_connection.close()
self.client_connection = Database.connect(**connection_params)
database = self.client_connection[name]
self.djongo_connection = DjongoClient(database, es)
return self.client_connection[name]
def is_tp(self, atol=None, rtol=None):
"""Test if a channel is completely-positive (CP)"""
choi = _to_choi(self.rep, self._data, *self.dim)
return self._is_tp_helper(choi, atol, rtol)
def density(self, *args):
""" Mean density in g/cc
"""
M = self.mass(*args) * MSUN
V = 4./3 * np.pi * (self.radius(*args) * RSUN)**3
return M/V
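# Worked check of the formula above, independent of the class: with solar
# values in CGS units the mean density comes out near the familiar ~1.41 g/cc
# for the Sun (MSUN in grams, RSUN in centimetres).
import numpy as np
MSUN = 1.989e33   # g
RSUN = 6.957e10   # cm
rho_sun = MSUN / (4. / 3 * np.pi * RSUN ** 3)
print(round(rho_sun, 2))   # ~1.41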
def load_manual_sequence_file(self, ident, seq_file, copy_file=False, outdir=None, set_as_representative=False):
"""Load a manual sequence, given as a FASTA file and optionally set it as the representative sequence.
Also store it in the sequences attribute.
Args:
ident (str): Sequence ID
seq_file (str): Path to sequence FASTA file
copy_file (bool): If the FASTA file should be copied to the protein's sequences folder or the ``outdir``, if
protein folder has not been set
outdir (str): Path to output directory
set_as_representative (bool): If this sequence should be set as the representative one
Returns:
SeqProp: Sequence that was loaded into the ``sequences`` attribute
"""
if copy_file:
if not outdir:
outdir = self.sequence_dir
if not outdir:
raise ValueError('Output directory must be specified')
shutil.copy(seq_file, outdir)
seq_file = op.join(outdir, seq_file)
manual_sequence = SeqProp(id=ident, sequence_path=seq_file, seq=None)
self.sequences.append(manual_sequence)
if set_as_representative:
self.representative_sequence = manual_sequence
return self.sequences.get_by_id(ident)
def kong_61_2007():
r"""Kong 61 pt Hankel filter, as published in [Kong07]_.
Taken from file ``FilterModules.f90`` provided with 1DCSEM_.
License: `Apache License, Version 2.0,
<http://www.apache.org/licenses/LICENSE-2.0>`_.
"""
dlf = DigitalFilter('Kong 61', 'kong_61_2007')
dlf.base = np.array([
2.3517745856009100e-02, 2.6649097336355482e-02,
3.0197383422318501e-02, 3.4218118311666032e-02,
3.8774207831722009e-02, 4.3936933623407420e-02,
4.9787068367863938e-02, 5.6416139503777350e-02,
6.3927861206707570e-02, 7.2439757034251456e-02,
8.2084998623898800e-02, 9.3014489210663506e-02,
1.0539922456186430e-01, 1.1943296826671961e-01,
1.3533528323661270e-01, 1.5335496684492850e-01,
1.7377394345044520e-01, 1.9691167520419400e-01,
2.2313016014842979e-01, 2.5283959580474641e-01,
2.8650479686019009e-01, 3.2465246735834979e-01,
3.6787944117144239e-01, 4.1686201967850839e-01,
4.7236655274101469e-01, 5.3526142851899028e-01,
6.0653065971263342e-01, 6.8728927879097224e-01,
7.7880078307140488e-01, 8.8249690258459546e-01,
1.0000000000000000e+00, 1.1331484530668261e+00,
1.2840254166877421e+00, 1.4549914146182010e+00,
1.6487212707001280e+00, 1.8682459574322221e+00,
2.1170000166126748e+00, 2.3988752939670981e+00,
2.7182818284590451e+00, 3.0802168489180310e+00,
3.4903429574618419e+00, 3.9550767229205772e+00,
4.4816890703380636e+00, 5.0784190371800806e+00,
5.7546026760057307e+00, 6.5208191203301116e+00,
7.3890560989306504e+00, 8.3728974881272649e+00,
9.4877358363585262e+00, 1.0751013186076360e+01,
1.2182493960703470e+01, 1.3804574186067100e+01,
1.5642631884188170e+01, 1.7725424121461639e+01,
2.0085536923187671e+01, 2.2759895093526730e+01,
2.5790339917193059e+01, 2.9224283781234941e+01,
3.3115451958692312e+01, 3.7524723159601002e+01,
4.2521082000062783e+01])
dlf.factor = np.array([1.1331484530668261])
dlf.j0 = np.array([
1.4463210615326699e+02, -1.1066222143752420e+03,
3.7030010025325978e+03, -6.8968188464424520e+03,
7.1663544112656937e+03, -2.4507884783377681e+03,
-4.0166567754046082e+03, 6.8623845298546094e+03,
-5.0013321011775661e+03, 2.1291291365196648e+03,
-1.3845222435542289e+03, 2.1661554291595580e+03,
-2.2260393789657141e+03, 8.0317156013986391e+02,
1.0142221718890841e+03, -1.9350455051432630e+03,
1.6601169447226580e+03, -7.5159684285420133e+02,
-9.0315984178183285e+01, 5.0705574889546148e+02,
-5.1207646422722519e+02, 2.9722959494490038e+02,
-5.0248319908072993e+01, -1.2290725861955920e+02,
1.9695244755899429e+02, -1.9175679966946601e+02,
1.4211755630338590e+02, -7.7463216543224149e+01,
1.7638009334931201e+01, 2.8855056499202671e+01,
-5.9225643887809561e+01, 7.5987941373668960e+01,
-8.1687962781233580e+01, 8.0599209238447102e+01,
-7.4895905328771619e+01, 6.7516291538794434e+01,
-5.9325033647358048e+01, 5.1617042242841528e+01,
-4.4664967446820263e+01, 3.8366152052928278e+01,
-3.3308787868993100e+01, 2.8278671651033459e+01,
-2.4505863388620480e+01, 2.0469632532079750e+01,
-1.7074034940700429e+01, 1.4206119215530070e+01,
-1.0904435643084650e+01, 8.7518389425802283e+00,
-6.7721665239085622e+00, 4.5096884588095891e+00,
-3.2704247166629590e+00, 2.6827195063720430e+00,
-1.8406031821386459e+00, 9.1586697140412443e-01,
-3.2436011485890798e-01, 8.0675176189581893e-02,
-1.2881307195759690e-02, 7.0489137468452920e-04,
2.3846917590855061e-04, -6.9102205995825531e-05,
6.7792635718095777e-06])
dlf.j1 = np.array([
4.6440396425864918e+01, -4.5034239857914162e+02,
1.7723440076223640e+03, -3.7559735516994660e+03,
4.4736494009764137e+03, -2.2476603569606068e+03,
-1.5219842155931799e+03, 3.4904608559273802e+03,
-2.4814243247472318e+03, 5.7328164634108396e+02,
5.3132044837659631e-01, 6.8895205008006235e+02,
-1.2012013872160269e+03, 7.9679138423597340e+02,
4.9874460187939818e+01, -5.6367338332457007e+02,
4.7971936503711203e+02, -5.8979702298044558e+01,
-3.1935800954986922e+02, 4.5762551999442371e+02,
-3.7239927283248380e+02, 1.8255852885279569e+02,
-2.3504740340815669e-01, -1.1588151583545380e+02,
1.5740956677133170e+02, -1.4334746114883359e+02,
9.9857411013284818e+01, -4.8246322019171487e+01,
2.0371404343057380e+00, 3.3003938094974323e+01,
-5.5476151884197712e+01, 6.7354852323852583e+01,
-7.0735403363284121e+01, 6.8872932663164747e+01,
-6.3272750944993042e+01, 5.6501568721817442e+01,
-4.8706577819918110e+01, 4.1737211284663481e+01,
-3.4776621242200903e+01, 2.9161717578906430e+01,
-2.3886749056000909e+01, 1.9554007583544220e+01,
-1.5966397353366460e+01, 1.2429310210239199e+01,
-1.0139180791868180e+01, 7.4716493393871861e+00,
-5.5509479014742613e+00, 4.3380799768234208e+00,
-2.5911516181746550e+00, 1.6300524630626780e+00,
-1.4041567266387460e+00, 7.5225141726873213e-01,
4.6808777208492733e-02, -3.6630197849601159e-01,
2.8948389902792782e-01, -1.3705521898064801e-01,
4.6292091649913013e-02, -1.1721281347435180e-02,
2.2002397354029149e-03, -2.8146036357227600e-04,
1.8788896009128770e-05])
return dlf
def feed_ssldata(self, data):
"""Feed SSL record level data into the pipe.
The data must be a bytes instance. It is OK to send an empty bytes
instance. This can be used to get ssldata for a handshake initiated by
this endpoint.
Return a (ssldata, appdata) tuple. The ssldata element is a list of
buffers containing SSL data that needs to be sent to the remote SSL.
The appdata element is a list of buffers containing plaintext data that
needs to be forwarded to the application. The appdata list may contain
an empty buffer indicating an SSL "close_notify" alert. This alert must
be acknowledged by calling :meth:`shutdown`.
"""
if self._state == self.S_UNWRAPPED:
# If unwrapped, pass plaintext data straight through.
return ([], [data] if data else [])
ssldata = []; appdata = []
self._need_ssldata = False
if data:
self._incoming.write(data)
try:
if self._state == self.S_DO_HANDSHAKE:
# Call do_handshake() until it doesn't raise anymore.
self._sslobj.do_handshake()
self._state = self.S_WRAPPED
if self._handshake_cb:
self._handshake_cb()
if self._state == self.S_WRAPPED:
# Main state: read data from SSL until close_notify
while True:
chunk = self._sslobj.read(self.bufsize)
appdata.append(chunk)
if not chunk: # close_notify
break
if self._state == self.S_SHUTDOWN:
# Call shutdown() until it doesn't raise anymore.
self._sslobj.unwrap()
self._sslobj = None
self._state = self.S_UNWRAPPED
if self._shutdown_cb:
self._shutdown_cb()
if self._state == self.S_UNWRAPPED:
# Drain possible plaintext data after close_notify.
appdata.append(self._incoming.read())
except (ssl.SSLError, sslcompat.CertificateError) as e:
if getattr(e, 'errno', None) not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE, ssl.SSL_ERROR_SYSCALL):
if self._state == self.S_DO_HANDSHAKE and self._handshake_cb:
self._handshake_cb(e)
raise
self._need_ssldata = e.errno == ssl.SSL_ERROR_WANT_READ
# Check for record level data that needs to be sent back.
# Happens for the initial handshake and renegotiations.
if self._outgoing.pending:
ssldata.append(self._outgoing.read())
return (ssldata, appdata)
def build_from_generator(cls,
generator,
target_size,
max_subtoken_length=None,
reserved_tokens=None):
"""Builds a SubwordTextEncoder from the generated text.
Args:
generator: yields text.
target_size: int, approximate vocabulary size to create.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
Returns:
SubwordTextEncoder with `vocab_size` approximately `target_size`.
"""
token_counts = collections.defaultdict(int)
for item in generator:
for tok in tokenizer.encode(native_to_unicode(item)):
token_counts[tok] += 1
encoder = cls.build_to_target_size(
target_size, token_counts, 1, 1e3,
max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
return encoder
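# A hedged usage sketch for build_from_generator above (the corpus is made
# up): any iterable of text can serve as the generator, and target_size is
# only an approximate goal for the resulting vocabulary size.
corpus = ['the quick brown fox', 'jumps over the lazy dog'] * 100
encoder = SubwordTextEncoder.build_from_generator(corpus, target_size=2 ** 8)
ids = encoder.encode('the quick fox')
print(encoder.decode(ids))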
async def _get_difference(self, channel_id, pts_date):
"""
Get the difference for this `channel_id` if any, then load entities.
Calls :tl:`updates.getDifference`, which fills the entities cache
(always done by `__call__`) and lets us know about the full entities.
"""
# Fetch since the last known pts/date before this update arrived,
# in order to fetch this update at full, including its entities.
self.client._log[__name__].debug('Getting difference for entities')
if channel_id:
try:
where = await self.client.get_input_entity(channel_id)
except ValueError:
return
result = await self.client(functions.updates.GetChannelDifferenceRequest(
channel=where,
filter=types.ChannelMessagesFilterEmpty(),
pts=pts_date, # just pts
limit=100,
force=True
))
else:
result = await self.client(functions.updates.GetDifferenceRequest(
pts=pts_date[0],
date=pts_date[1],
qts=0
))
if isinstance(result, (types.updates.Difference,
types.updates.DifferenceSlice,
types.updates.ChannelDifference,
types.updates.ChannelDifferenceTooLong)):
self.original_update._entities.update({
utils.get_peer_id(x): x for x in
itertools.chain(result.users, result.chats)
})
if not self._load_entities():
self.client._log[__name__].info(
'Could not find all entities for update.pts = %s',
getattr(self.original_update, 'pts', None)
)
def benchmark(self, func, gpu_args, instance, times, verbose):
"""benchmark the kernel instance"""
logging.debug('benchmark ' + instance.name)
logging.debug('thread block dimensions x,y,z=%d,%d,%d', *instance.threads)
logging.debug('grid dimensions x,y,z=%d,%d,%d', *instance.grid)
time = None
try:
time = self.dev.benchmark(func, gpu_args, instance.threads, instance.grid, times)
except Exception as e:
#some launches may fail because too many registers are required
#to run the kernel given the current thread block size
#the desired behavior is to simply skip over this configuration
#and proceed to try the next one
skippable_exceptions = ["too many resources requested for launch", "OUT_OF_RESOURCES", "INVALID_WORK_GROUP_SIZE"]
if any([skip_str in str(e) for skip_str in skippable_exceptions]):
logging.debug('benchmark fails due to runtime failure too many resources required')
if verbose:
print("skipping config", instance.name, "reason: too many resources requested for launch")
else:
logging.debug('benchmark encountered runtime failure: ' + str(e))
print("Error while benchmarking:", instance.name)
raise e
return time
def rollback(self, dt):
"""
Roll provided date backward to next offset only if not on offset.
"""
if not self.onOffset(dt):
businesshours = self._get_business_hours_by_sec
if self.n >= 0:
dt = self._prev_opening_time(
dt) + timedelta(seconds=businesshours)
else:
dt = self._next_opening_time(
dt) + timedelta(seconds=businesshours)
return dt
def rsa_private_key_pkcs1_to_pkcs8(pkcs1_key):
"""Convert a PKCS1-encoded RSA private key to PKCS8."""
algorithm = RsaAlgorithmIdentifier()
algorithm["rsaEncryption"] = RSA_ENCRYPTION_ASN1_OID
pkcs8_key = PKCS8PrivateKey()
pkcs8_key["version"] = 0
pkcs8_key["privateKeyAlgorithm"] = algorithm
pkcs8_key["privateKey"] = pkcs1_key
return encoder.encode(pkcs8_key)
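# Hedged usage sketch: DER-encoded PKCS#1 private-key bytes go in, DER-encoded
# PKCS#8 bytes come out (the file name below is just an example path).
with open('private_key_pkcs1.der', 'rb') as fh:
    pkcs1_der = fh.read()
pkcs8_der = rsa_private_key_pkcs1_to_pkcs8(pkcs1_der)
print(len(pkcs8_der), 'bytes of PKCS#8 DER')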
def _load_data_alignment(self, chain1, chain2):
"""
Extract the sequences from the PDB file, perform the alignment,
and load the coordinates of the CA of the common residues.
"""
parser = PDB.PDBParser(QUIET=True)
ppb = PDB.PPBuilder()
structure1 = parser.get_structure(chain1, self.pdb1)
structure2 = parser.get_structure(chain2, self.pdb2)
seq1 = str(ppb.build_peptides(structure1)[0].get_sequence())
seq2 = str(ppb.build_peptides(structure2)[0].get_sequence())
# Alignment parameters taken from PconsFold renumbering script.
align = pairwise2.align.globalms(seq1, seq2, 2, -1, -0.5, -0.1)[0]
indexes = set(i for i, (s1, s2) in enumerate(zip(align[0], align[1]))
if s1 != '-' and s2 != '-')
coord1 = np.hstack([np.concatenate((r['CA'].get_coord(), (1,)))[:, None]
for i, r in enumerate(structure1.get_residues())
if i in indexes and 'CA' in r]).astype(DTYPE,
copy=False)
coord2 = np.hstack([np.concatenate((r['CA'].get_coord(), (1,)))[:, None]
for i, r in enumerate(structure2.get_residues())
if i in indexes and 'CA' in r]).astype(DTYPE,
copy=False)
self.coord1 = coord1
self.coord2 = coord2
self.N = len(seq1)
def export_coreml(self, filename):
"""
Save the model in Core ML format.
See Also
--------
save
Examples
--------
>>> model.export_coreml('./myModel.mlmodel')
"""
import coremltools
from coremltools.proto.FeatureTypes_pb2 import ArrayFeatureType
from .._mxnet import _mxnet_utils
prob_name = self.target + 'Probability'
def get_custom_model_spec():
from coremltools.models.neural_network import NeuralNetworkBuilder
from coremltools.models.datatypes import Array, Dictionary, String
input_name = 'output1'
input_length = self._feature_extractor.output_length
builder = NeuralNetworkBuilder([(input_name, Array(input_length,))],
[(prob_name, Dictionary(String))],
'classifier')
ctx = _mxnet_utils.get_mxnet_context()[0]
input_name, output_name = input_name, 0
for i, cur_layer in enumerate(self._custom_classifier):
W = cur_layer.weight.data(ctx).asnumpy()
nC, nB = W.shape
Wb = cur_layer.bias.data(ctx).asnumpy()
builder.add_inner_product(name="inner_product_"+str(i),
W=W,
b=Wb,
input_channels=nB,
output_channels=nC,
has_bias=True,
input_name=str(input_name),
output_name='inner_product_'+str(output_name))
if cur_layer.act:
builder.add_activation("activation"+str(i), 'RELU', 'inner_product_'+str(output_name), str(output_name))
input_name = i
output_name = i + 1
last_output = builder.spec.neuralNetworkClassifier.layers[-1].output[0]
builder.add_softmax('softmax', last_output, self.target)
builder.set_class_labels(self.classes)
builder.set_input([input_name], [(input_length,)])
builder.set_output([self.target], [(self.num_classes,)])
return builder.spec
top_level_spec = coremltools.proto.Model_pb2.Model()
top_level_spec.specificationVersion = 3
# Set input
desc = top_level_spec.description
input = desc.input.add()
input.name = self.feature
input.type.multiArrayType.dataType = ArrayFeatureType.ArrayDataType.Value('FLOAT32')
input.type.multiArrayType.shape.append(15600)
# Set outputs
prob_output = desc.output.add()
prob_output.name = prob_name
label_output = desc.output.add()
label_output.name = 'classLabel'
desc.predictedFeatureName = 'classLabel'
desc.predictedProbabilitiesName = prob_name
if type(self.classes[0]) == int:
# Class labels are ints
prob_output.type.dictionaryType.int64KeyType.MergeFromString(b'')
label_output.type.int64Type.MergeFromString(b'')
else: # Class are strings
prob_output.type.dictionaryType.stringKeyType.MergeFromString(b'')
label_output.type.stringType.MergeFromString(b'')
pipeline = top_level_spec.pipelineClassifier.pipeline
# Add the preprocessing model
preprocessing_model = pipeline.models.add()
preprocessing_model.customModel.className = 'TCSoundClassifierPreprocessing'
preprocessing_model.specificationVersion = 3
preprocessing_input = preprocessing_model.description.input.add()
preprocessing_input.CopyFrom(input)
preprocessed_output = preprocessing_model.description.output.add()
preprocessed_output.name = 'preprocessed_data'
preprocessed_output.type.multiArrayType.dataType = ArrayFeatureType.ArrayDataType.Value('DOUBLE')
preprocessed_output.type.multiArrayType.shape.append(1)
preprocessed_output.type.multiArrayType.shape.append(96)
preprocessed_output.type.multiArrayType.shape.append(64)
# Add the feature extractor, updating its input name
feature_extractor_spec = self._feature_extractor.get_spec()
pipeline.models.add().CopyFrom(feature_extractor_spec)
pipeline.models[-1].description.input[0].name = preprocessed_output.name
pipeline.models[-1].neuralNetwork.layers[0].input[0] = preprocessed_output.name
# Add the custom neural network
pipeline.models.add().CopyFrom(get_custom_model_spec())
# Set key type for the probability dictionary
prob_output_type = pipeline.models[-1].description.output[0].type.dictionaryType
if type(self.classes[0]) == int:
prob_output_type.int64KeyType.MergeFromString(b'')
else: # String labels
prob_output_type.stringKeyType.MergeFromString(b'')
mlmodel = coremltools.models.MLModel(top_level_spec)
mlmodel.save(filename) | Save the model in Core ML format.
See Also
--------
save
Examples
--------
>>> model.export_coreml('./myModel.mlmodel') |
def splitSymbol(self, index):
"""Give relevant values for computations:
(insertSymbol, copySymbol, dist0flag)
"""
#determine insert and copy upper bits from table
row = [0,0,1,1,2,2,1,3,2,3,3][index>>6]
col = [0,1,0,1,0,1,2,0,2,1,2][index>>6]
#determine inserts and copy sub codes
insertLengthCode = row<<3 | index>>3&7
if row: insertLengthCode -= 8
copyLengthCode = col<<3 | index&7
return (
Symbol(self.insertLengthAlphabet, insertLengthCode),
Symbol(self.copyLengthAlphabet, copyLengthCode),
row==0
) | Give relevant values for computations:
(insertSymbol, copySymbol, dist0flag) |
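To make the bit arithmetic in splitSymbol easier to follow, here is a minimal standalone trace for one hypothetical index value; the Symbol and alphabet objects belong to the surrounding decoder and are not needed to follow the lookup.

index = 200
row = [0, 0, 1, 1, 2, 2, 1, 3, 2, 3, 3][index >> 6]            # -> 1
col = [0, 1, 0, 1, 0, 1, 2, 0, 2, 1, 2][index >> 6]            # -> 1
insert_code = (row << 3 | index >> 3 & 7) - (8 if row else 0)  # -> 1
copy_code = col << 3 | index & 7                               # -> 8
dist0flag = row == 0                                           # -> False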
def affiliation_history(self):
"""Unordered list of IDs of all affiliations the author was
affiliated with according to Scopus.
"""
affs = self._json.get('affiliation-history', {}).get('affiliation')
try:
return [d['@id'] for d in affs]
except TypeError: # No affiliation history
return None | Unordered list of IDs of all affiliations the author was
affiliated with according to Scopus. |
def import_crud(app):
'''
Import crud module and register all model cruds which it contains
'''
try:
app_path = import_module(app).__path__
except (AttributeError, ImportError):
return None
try:
imp.find_module('crud', app_path)
except ImportError:
return None
module = import_module("%s.crud" % app)
return module | Import crud module and register all model cruds which it contains |
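A minimal usage sketch for import_crud, assuming a Django-style project; the app names below are hypothetical, and apps without a crud submodule are simply skipped because the function returns None for them.

crud_modules = {}
for app in ['blog', 'shop']:      # hypothetical installed app names
    module = import_crud(app)
    if module is not None:        # only apps that actually ship a crud module
        crud_modules[app] = module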
def to_url(self):
"""Serialize as a URL for a GET request."""
base_url = urlparse(self.url)
if PY3:
query = parse_qs(base_url.query)
for k, v in self.items():
query.setdefault(k, []).append(to_utf8_optional_iterator(v))
scheme = base_url.scheme
netloc = base_url.netloc
path = base_url.path
params = base_url.params
fragment = base_url.fragment
else:
query = parse_qs(to_utf8(base_url.query))
for k, v in self.items():
query.setdefault(to_utf8(k), []).append(to_utf8_optional_iterator(v))
scheme = to_utf8(base_url.scheme)
netloc = to_utf8(base_url.netloc)
path = to_utf8(base_url.path)
params = to_utf8(base_url.params)
fragment = to_utf8(base_url.fragment)
url = (scheme, netloc, path, params, urlencode(query, True), fragment)
return urlunparse(url) | Serialize as a URL for a GET request. |
def get_activities_for_project(self, module=None, **kwargs):
"""Get the related activities of a project.
:param str module: Stages of a given module
:return: JSON
"""
_module_id = kwargs.get('module', module)
_activities_url = ACTIVITIES_URL.format(module_id=_module_id)
return self._request_api(url=_activities_url).json() | Get the related activities of a project.
:param str module: Stages of a given module
:return: JSON |
def _options_request(self, url, **kwargs):
''' a method to catch and report http options request connectivity errors '''
# construct request kwargs
request_kwargs = {
'method': 'OPTIONS',
'url': url
}
for key, value in kwargs.items():
request_kwargs[key] = value
# send request and handle response
return self._request(**request_kwargs) | a method to catch and report http options request connectivity errors |
def pathconf(path,
os_name=os.name,
isdir_fnc=os.path.isdir,
pathconf_fnc=getattr(os, 'pathconf', None),
pathconf_names=getattr(os, 'pathconf_names', ())):
'''
Get all pathconf variables for given path.
:param path: absolute fs path
:type path: str
:returns: dictionary containing pathconf keys and their values (both str)
:rtype: dict
'''
if pathconf_fnc and pathconf_names:
return {key: pathconf_fnc(path, key) for key in pathconf_names}
if os_name == 'nt':
maxpath = 246 if isdir_fnc(path) else 259 # 260 minus <END>
else:
maxpath = 255 # conservative sane default
return {
'PC_PATH_MAX': maxpath,
'PC_NAME_MAX': maxpath - len(path),
} | Get all pathconf variables for given path.
:param path: absolute fs path
:type path: str
:returns: dictionary containing pathconf keys and their values (both str)
:rtype: dict |
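A small usage sketch for pathconf; on POSIX systems with os.pathconf available the values come straight from the OS, otherwise the conservative fallbacks above are returned, so .get() is used in case a key is absent.

import os

limits = pathconf(os.getcwd())
print(limits.get('PC_NAME_MAX'))   # maximum filename length for this path
print(limits.get('PC_PATH_MAX'))   # maximum path length for this path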
def add_instruction(self, specification):
"""Add an instruction specification
:param specification: a specification with a key
:data:`knittingpattern.Instruction.TYPE`
.. seealso:: :meth:`as_instruction`
"""
instruction = self.as_instruction(specification)
self._type_to_instruction[instruction.type] = instruction | Add an instruction specification
:param specification: a specification with a key
:data:`knittingpattern.Instruction.TYPE`
.. seealso:: :meth:`as_instruction` |
def _gen_success_message(publish_output):
"""
Generate detailed success message for published applications.
Parameters
----------
publish_output : dict
Output from serverlessrepo publish_application
Returns
-------
str
Detailed success message
"""
application_id = publish_output.get('application_id')
details = json.dumps(publish_output.get('details'), indent=2)
if CREATE_APPLICATION in publish_output.get('actions'):
return "Created new application with the following metadata:\n{}".format(details)
return 'The following metadata of application "{}" has been updated:\n{}'.format(application_id, details) | Generate detailed success message for published applications.
Parameters
----------
publish_output : dict
Output from serverlessrepo publish_application
Returns
-------
str
Detailed success message |
def dfbool2intervals(df,colbool):
"""
df[colbool] contains boolean values
"""
df.index=range(len(df))
intervals=bools2intervals(df[colbool])
for intervali,interval in enumerate(intervals):
df.loc[interval[0]:interval[1],f'{colbool} interval id']=intervali
df.loc[interval[0]:interval[1],f'{colbool} interval start']=interval[0]
df.loc[interval[0]:interval[1],f'{colbool} interval stop']=interval[1]
df.loc[interval[0]:interval[1],f'{colbool} interval length']=interval[1]-interval[0]+1
df.loc[interval[0]:interval[1],f'{colbool} interval within index']=range(interval[1]-interval[0]+1)
df[f'{colbool} interval index']=df.index
return df | df[colbool] contains boolean values
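dfbool2intervals relies on a bools2intervals helper that is not shown here; the following is a minimal sketch of what such a helper might look like, assuming it returns inclusive (start, stop) index pairs for each run of True values, which matches the inclusive .loc slicing above.

def bools2intervals_sketch(bools):
    # Hypothetical helper: collect inclusive (start, stop) positions for runs of True.
    intervals, start = [], None
    for i, flag in enumerate(list(bools) + [False]):   # sentinel closes a trailing run
        if flag and start is None:
            start = i
        elif not flag and start is not None:
            intervals.append((start, i - 1))
            start = None
    return intervals

# bools2intervals_sketch([False, True, True, False, True]) -> [(1, 2), (4, 4)]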
def get_instance(self, payload):
"""
Build an instance of AuthTypeCallsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.AuthTypeCallsInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.AuthTypeCallsInstance
"""
return AuthTypeCallsInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
domain_sid=self._solution['domain_sid'],
) | Build an instance of AuthTypeCallsInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.AuthTypeCallsInstance
:rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.AuthTypeCallsInstance |
def evaluate(self, global_state: GlobalState, post=False) -> List[GlobalState]:
"""Performs the mutation for this instruction.
:param global_state:
:param post:
:return:
"""
# Generalize some ops
log.debug("Evaluating {}".format(self.op_code))
op = self.op_code.lower()
if self.op_code.startswith("PUSH"):
op = "push"
elif self.op_code.startswith("DUP"):
op = "dup"
elif self.op_code.startswith("SWAP"):
op = "swap"
elif self.op_code.startswith("LOG"):
op = "log"
instruction_mutator = (
getattr(self, op + "_", None)
if not post
else getattr(self, op + "_" + "post", None)
)
if instruction_mutator is None:
raise NotImplementedError
if self.iprof is None:
result = instruction_mutator(global_state)
else:
start_time = datetime.now()
result = instruction_mutator(global_state)
end_time = datetime.now()
self.iprof.record(op, start_time, end_time)
return result | Performs the mutation for this instruction.
:param global_state:
:param post:
:return: |
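The method above dispatches on a mangled opcode name via getattr; a toy sketch of the same lookup pattern, with hypothetical handler names, shows how 'PUSH1' ends up in a shared push_ handler and how a missing handler surfaces as NotImplementedError.

class ToyInstruction:
    # Hypothetical handlers following the "<op>_" naming convention used above.
    def push_(self, state):
        return state + ['pushed']
    def add_(self, state):
        return state + ['added']

    def evaluate(self, op_code, state):
        op = op_code.lower()
        if op_code.startswith("PUSH"):
            op = "push"                      # PUSH1..PUSH32 share one handler
        handler = getattr(self, op + "_", None)
        if handler is None:
            raise NotImplementedError(op_code)
        return handler(state)

# ToyInstruction().evaluate("PUSH1", []) -> ['pushed']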
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ', '.')
return re.sub('[^A-Za-z0-9.]+', '-', version) | Convert an arbitrary string to a standard version string |
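A few illustrative inputs for safe_version; the expected outputs assume the normalization rules of packaging.version, and the last one falls through to the character-replacement branch because it is not a valid version at all.

>>> safe_version('1.0')
'1.0'
>>> safe_version('2.1-rc1')
'2.1rc1'
>>> safe_version('peanut butter')
'peanut.butter'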
def dump_to_stream(self, cnf, stream, **opts):
"""
:param cnf: Configuration data to dump
:param stream: Config file or file like object write to
:param opts: optional keyword parameters
"""
tree = container_to_etree(cnf, **opts)
etree_write(tree, stream) | :param cnf: Configuration data to dump
:param stream: Config file or file like object write to
:param opts: optional keyword parameters |
def save_split_next(self):
"""
Save out blurbs created from "blurb split".
They don't have dates, so we have to get creative.
"""
filenames = []
# the "date" MUST have a leading zero.
# this ensures these files sort after all
# newly created blurbs.
width = int(math.ceil(math.log(len(self), 10))) + 1
i = 1
blurb = Blurbs()
while self:
metadata, body = self.pop()
metadata['date'] = str(i).rjust(width, '0')
if 'release date' in metadata:
del metadata['release date']
blurb.append((metadata, body))
filename = blurb._extract_next_filename()
blurb.save(filename)
blurb.clear()
filenames.append(filename)
i += 1
return filenames | Save out blurbs created from "blurb split".
They don't have dates, so we have to get creative. |
def destroy(self):
"""
Destroy and close the App.
:return:
None.
:note:
Once destroyed an App can no longer be used.
"""
# if this is the main_app - set the _main_app class variable to `None`.
if self == App._main_app:
App._main_app = None
self.tk.destroy() | Destroy and close the App.
:return:
None.
:note:
Once destroyed an App can no longer be used. |
def get_electron_number(self, charge=0):
"""Return the number of electrons.
Args:
charge (int): Charge of the molecule.
Returns:
int:
"""
atomic_number = constants.elements['atomic_number'].to_dict()
return sum([atomic_number[atom] for atom in self['atom']]) - charge | Return the number of electrons.
Args:
charge (int): Charge of the molecule.
Returns:
int: |
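A worked example of the arithmetic, assuming a hypothetical molecule whose 'atom' column lists the element symbols of water: hydrogen contributes 1 electron and oxygen 8.

# atoms ['H', 'H', 'O'] -> atomic numbers 1 + 1 + 8 = 10
# molecule.get_electron_number()          -> 10 (neutral)
# molecule.get_electron_number(charge=1)  -> 9  (cation)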
def measure_impedance(self, sampling_window_ms, n_sampling_windows,
delay_between_windows_ms, interleave_samples, rms,
state):
'''
Measure voltage across load of each of the following control board
feedback circuits:
- Reference _(i.e., attenuated high-voltage amplifier output)_.
- Load _(i.e., voltage across DMF device)_.
The measured voltage _(i.e., ``V2``)_ can be used to compute the
impedance of the measured load, the input voltage _(i.e., ``V1``)_,
etc.
Parameters
----------
sampling_window_ms : float
Length of sampling window (in milliseconds) for each
RMS/peak-to-peak voltage measurement.
n_sampling_windows : int
Number of RMS/peak-to-peak voltage measurements to take.
delay_between_windows_ms : float
Delay (in milliseconds) between RMS/peak-to-peak voltage
measurements.
interleave_samples : bool
If ``True``, interleave RMS/peak-to-peak measurements for analog
channels.
For example, ``[<i_0>, <j_0>, <i_1>, <j_1>, ..., <i_n>, <j_n>]``
where ``i`` and ``j`` correspond to two different analog channels.
If ``False``, all measurements for each analog channel are taken
together. For example, ``[<i_0>, ..., <i_n>, <j_0>, ..., <j_n>]``
where ``i`` and ``j`` correspond to two different analog channels.
rms : bool
If ``True``, an RMS voltage measurement is collected for each
sampling window.
Otherwise, peak-to-peak measurements are collected.
state : list
State of device channels. Length should be equal to the number of
device channels.
Returns
-------
:class:`FeedbackResults`
'''
state_ = uint8_tVector()
for i in range(0, len(state)):
state_.append(int(state[i]))
buffer = np.array(Base.measure_impedance(self,
sampling_window_ms,
n_sampling_windows,
delay_between_windows_ms,
interleave_samples,
rms,
state_))
return self.measure_impedance_buffer_to_feedback_result(buffer) | Measure voltage across load of each of the following control board
feedback circuits:
- Reference _(i.e., attenuated high-voltage amplifier output)_.
- Load _(i.e., voltage across DMF device)_.
The measured voltage _(i.e., ``V2``)_ can be used to compute the
impedance of the measured load, the input voltage _(i.e., ``V1``)_,
etc.
Parameters
----------
sampling_window_ms : float
Length of sampling window (in milliseconds) for each
RMS/peak-to-peak voltage measurement.
n_sampling_windows : int
Number of RMS/peak-to-peak voltage measurements to take.
delay_between_windows_ms : float
Delay (in milliseconds) between RMS/peak-to-peak voltage
measurements.
interleave_samples : bool
If ``True``, interleave RMS/peak-to-peak measurements for analog
channels.
For example, ``[<i_0>, <j_0>, <i_1>, <j_1>, ..., <i_n>, <j_n>]``
where ``i`` and ``j`` correspond to two different analog channels.
If ``False``, all measurements for each analog channel are taken
together. For example, ``[<i_0>, ..., <i_n>, <j_0>, ..., <j_n>]``
where ``i`` and ``j`` correspond to two different analog channels.
rms : bool
If ``True``, an RMS voltage measurement is collected for each
sampling window.
Otherwise, peak-to-peak measurements are collected.
state : list
State of device channels. Length should be equal to the number of
device channels.
Returns
-------
:class:`FeedbackResults` |
def _make_index_list(num_samples, num_params, num_groups=None):
"""Identify indices of input sample associated with each trajectory
For each trajectory, identifies the indexes of the input sample which
is a function of the number of factors/groups and the number of samples
Arguments
---------
num_samples : int
The number of trajectories
num_params : int
The number of parameters
num_groups : int
The number of groups
Returns
-------
list of numpy.ndarray
Example
-------
>>> BruteForce()._make_index_list(num_samples=4, num_params=3,
num_groups=2)
[np.array([0, 1, 2]), np.array([3, 4, 5]), np.array([6, 7, 8]),
np.array([9, 10, 11])]
"""
if num_groups is None:
num_groups = num_params
index_list = []
for j in range(num_samples):
index_list.append(np.arange(num_groups + 1) + j * (num_groups + 1))
return index_list | Identify indices of input sample associated with each trajectory
For each trajectory, identifies the indexes of the input sample which
is a function of the number of factors/groups and the number of samples
Arguments
---------
num_samples : int
The number of trajectories
num_params : int
The number of parameters
num_groups : int
The number of groups
Returns
-------
list of numpy.ndarray
Example
-------
>>> BruteForce()._make_index_list(num_samples=4, num_params=3,
num_groups=2)
[np.array([0, 1, 2]), np.array([3, 4, 5]), np.array([6, 7, 8]),
np.array([9, 10, 11])] |
def write_data(self, buf):
"""Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
"""
result = self.devh.controlMsg(
usb.ENDPOINT_OUT + usb.TYPE_CLASS + usb.RECIP_INTERFACE,
usb.REQ_SET_CONFIGURATION, buf, value=0x200, timeout=50)
if result != len(buf):
raise IOError('pywws.device_libusb.USBDevice.write_data failed')
return True | Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool |
def SegmentSum(a, ids, *args):
"""
Segmented sum op.
"""
func = lambda idxs: reduce(np.add, a[idxs])
return seg_map(func, a, ids), | Segmented sum op. |
def _readoct(self, length, start):
"""Read bits and interpret as an octal string."""
if length % 3:
raise InterpretError("Cannot convert to octal unambiguously - "
"not multiple of 3 bits.")
if not length:
return ''
# Get main octal bit by converting from int.
# Strip starting 0 or 0o depending on Python version.
end = oct(self._readuint(length, start))[LEADING_OCT_CHARS:]
if end.endswith('L'):
end = end[:-1]
middle = '0' * (length // 3 - len(end))
return middle + end | Read bits and interpret as an octal string. |
def add_prefix(self):
""" Add prefix according to the specification.
The following keys can be used:
vrf ID of VRF to place the prefix in
prefix the prefix to add if already known
family address family (4 or 6)
description A short description
expires Expiry time of assignment
comment Longer comment
node Hostname of node
type Type of prefix; reservation, assignment, host
status Status of prefix; assigned, reserved, quarantine
pool ID of pool
country Country where the prefix is used
order_id Order identifier
customer_id Customer identifier
vlan VLAN ID
alarm_priority Alarm priority of prefix
monitor If the prefix should be monitored or not
from-prefix A prefix the prefix is to be allocated from
from-pool A pool (ID) the prefix is to be allocated from
prefix_length Prefix length of allocated prefix
"""
p = Prefix()
# Sanitize input parameters
if 'vrf' in request.json:
try:
if request.json['vrf'] is None or len(unicode(request.json['vrf'])) == 0:
p.vrf = None
else:
p.vrf = VRF.get(int(request.json['vrf']))
except ValueError:
return json.dumps({'error': 1, 'message': "Invalid VRF ID '%s'" % request.json['vrf']})
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
if 'description' in request.json:
p.description = validate_string(request.json, 'description')
if 'expires' in request.json:
p.expires = validate_string(request.json, 'expires')
if 'comment' in request.json:
p.comment = validate_string(request.json, 'comment')
if 'node' in request.json:
p.node = validate_string(request.json, 'node')
if 'status' in request.json:
p.status = validate_string(request.json, 'status')
if 'type' in request.json:
p.type = validate_string(request.json, 'type')
if 'pool' in request.json:
if request.json['pool'] is not None:
try:
p.pool = Pool.get(int(request.json['pool']))
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
if 'country' in request.json:
p.country = validate_string(request.json, 'country')
if 'order_id' in request.json:
p.order_id = validate_string(request.json, 'order_id')
if 'customer_id' in request.json:
p.customer_id = validate_string(request.json, 'customer_id')
if 'alarm_priority' in request.json:
p.alarm_priority = validate_string(request.json, 'alarm_priority')
if 'monitor' in request.json:
p.monitor = request.json['monitor']
if 'vlan' in request.json:
p.vlan = request.json['vlan']
if 'tags' in request.json:
p.tags = request.json['tags']
if 'avps' in request.json:
p.avps = request.json['avps']
# arguments
args = {}
if 'from_prefix' in request.json:
args['from-prefix'] = request.json['from_prefix']
if 'from_pool' in request.json:
try:
args['from-pool'] = Pool.get(int(request.json['from_pool']))
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
if 'family' in request.json:
args['family'] = request.json['family']
if 'prefix_length' in request.json:
args['prefix_length'] = request.json['prefix_length']
# manual allocation?
if args == {}:
if 'prefix' in request.json:
p.prefix = request.json['prefix']
try:
p.save(args)
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(p, cls=NipapJSONEncoder) | Add prefix according to the specification.
The following keys can be used:
vrf ID of VRF to place the prefix in
prefix the prefix to add if already known
family address family (4 or 6)
description A short description
expires Expiry time of assignment
comment Longer comment
node Hostname of node
type Type of prefix; reservation, assignment, host
status Status of prefix; assigned, reserved, quarantine
pool ID of pool
country Country where the prefix is used
order_id Order identifier
customer_id Customer identifier
vlan VLAN ID
alarm_priority Alarm priority of prefix
monitor If the prefix should be monitored or not
from-prefix A prefix the prefix is to be allocated from
from-pool A pool (ID) the prefix is to be allocated from
prefix_length Prefix length of allocated prefix |
def _select_list_view(self, model, **kwargs):
"""
:param model:
:param fields_convert_map: it's different from ListView
:param kwargs:
:return:
"""
from uliweb import request
# add download fields process
fields = kwargs.pop('fields', None)
meta = kwargs.pop('meta', 'Table')
if 'download' in request.GET:
if 'download_fields' in kwargs:
fields = kwargs.pop('download_fields', fields)
if 'download_meta' in kwargs:
meta = kwargs.pop('download_meta')
else:
if hasattr(model, 'Download'):
meta = 'Download'
else:
meta = meta
view = functions.SelectListView(model, fields=fields, meta=meta, **kwargs)
return view | :param model:
:param fields_convert_map: it's different from ListView
:param kwargs:
:return: |
def _validate_row_label(label, column_type_map):
"""
Validate a row label column.
Parameters
----------
label : str
Name of the row label column.
column_type_map : dict[str, type]
Dictionary mapping the name of each column in an SFrame to the type of
the values in the column.
"""
if not isinstance(label, str):
raise TypeError("The row label column name must be a string.")
if not label in column_type_map.keys():
raise ToolkitError("Row label column not found in the dataset.")
if not column_type_map[label] in (str, int):
raise TypeError("Row labels must be integers or strings.") | Validate a row label column.
Parameters
----------
label : str
Name of the row label column.
column_type_map : dict[str, type]
Dictionary mapping the name of each column in an SFrame to the type of
the values in the column. |
def line(ax, p1, p2, permutation=None, **kwargs):
"""
Draws a line on `ax` from p1 to p2.
Parameters
----------
ax: Matplotlib AxesSubplot, None
The subplot to draw on.
p1: 2-tuple
The (x,y) starting coordinates
p2: 2-tuple
The (x,y) ending coordinates
kwargs:
Any kwargs to pass through to Matplotlib.
"""
pp1 = project_point(p1, permutation=permutation)
pp2 = project_point(p2, permutation=permutation)
ax.add_line(Line2D((pp1[0], pp2[0]), (pp1[1], pp2[1]), **kwargs)) | Draws a line on `ax` from p1 to p2.
Parameters
----------
ax: Matplotlib AxesSubplot, None
The subplot to draw on.
p1: 2-tuple
The (x,y) starting coordinates
p2: 2-tuple
The (x,y) ending coordinates
kwargs:
Any kwargs to pass through to Matplotlib. |
def sliced(seq, n):
"""Yield slices of length *n* from the sequence *seq*.
>>> list(sliced((1, 2, 3, 4, 5, 6), 3))
[(1, 2, 3), (4, 5, 6)]
If the length of the sequence is not divisible by the requested slice
length, the last slice will be shorter.
>>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
[(1, 2, 3), (4, 5, 6), (7, 8)]
This function will only work for iterables that support slicing.
For non-sliceable iterables, see :func:`chunked`.
"""
return takewhile(bool, (seq[i: i + n] for i in count(0, n))) | Yield slices of length *n* from the sequence *seq*.
>>> list(sliced((1, 2, 3, 4, 5, 6), 3))
[(1, 2, 3), (4, 5, 6)]
If the length of the sequence is not divisible by the requested slice
length, the last slice will be shorter.
>>> list(sliced((1, 2, 3, 4, 5, 6, 7, 8), 3))
[(1, 2, 3), (4, 5, 6), (7, 8)]
This function will only work for iterables that support slicing.
For non-sliceable iterables, see :func:`chunked`. |
def set_raw_tag_data(filename, data, act=True, verbose=False):
"Replace the ID3 tag in FILENAME with DATA."
check_tag_data(data)
with open(filename, "rb+") as file:
try:
(cls, offset, length) = stagger.tags.detect_tag(file)
except stagger.NoTagError:
(offset, length) = (0, 0)
if length > 0:
verb(verbose, "{0}: replaced tag with {1} bytes of data"
.format(filename, len(data)))
else:
verb(verbose, "{0}: created tag with {1} bytes of data"
.format(filename, len(data)))
if act:
stagger.fileutil.replace_chunk(file, offset, length, data) | Replace the ID3 tag in FILENAME with DATA. |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'from_') and self.from_ is not None:
_dict['from'] = self.from_
if hasattr(self, 'to') and self.to is not None:
_dict['to'] = self.to
if hasattr(self, 'speaker') and self.speaker is not None:
_dict['speaker'] = self.speaker
if hasattr(self, 'confidence') and self.confidence is not None:
_dict['confidence'] = self.confidence
if hasattr(self, 'final_results') and self.final_results is not None:
_dict['final'] = self.final_results
return _dict | Return a json dictionary representing this model. |
def get_constantvalue(self):
"""
the constant pool index for this field, or None if this is not a
contant field
reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.2
""" # noqa
buff = self.get_attribute("ConstantValue")
if buff is None:
return None
with unpack(buff) as up:
(cval_ref, ) = up.unpack_struct(_H)
return cval_ref | the constant pool index for this field, or None if this is not a
constant field
reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.2 |
def find_slack_bus(sub_network):
"""Find the slack bus in a connected sub-network."""
gens = sub_network.generators()
if len(gens) == 0:
logger.warning("No generators in sub-network {}, better hope power is already balanced".format(sub_network.name))
sub_network.slack_generator = None
sub_network.slack_bus = sub_network.buses_i()[0]
else:
slacks = gens[gens.control == "Slack"].index
if len(slacks) == 0:
sub_network.slack_generator = gens.index[0]
sub_network.network.generators.loc[sub_network.slack_generator,"control"] = "Slack"
logger.debug("No slack generator found in sub-network {}, using {} as the slack generator".format(sub_network.name, sub_network.slack_generator))
elif len(slacks) == 1:
sub_network.slack_generator = slacks[0]
else:
sub_network.slack_generator = slacks[0]
sub_network.network.generators.loc[slacks[1:],"control"] = "PV"
logger.debug("More than one slack generator found in sub-network {}, using {} as the slack generator".format(sub_network.name, sub_network.slack_generator))
sub_network.slack_bus = gens.bus[sub_network.slack_generator]
#also put it into the dataframe
sub_network.network.sub_networks.at[sub_network.name,"slack_bus"] = sub_network.slack_bus
logger.info("Slack bus for sub-network {} is {}".format(sub_network.name, sub_network.slack_bus)) | Find the slack bus in a connected sub-network. |
def writeClient(self, fd, sdClass=None, **kw):
"""write out client module to file descriptor.
Parameters and Keywords arguments:
fd -- file descriptor
sdClass -- service description class name
imports -- list of imports
readerclass -- class name of ParsedSoap reader
writerclass -- class name of SoapWriter writer
"""
sdClass = sdClass or ServiceDescription
assert issubclass(sdClass, ServiceDescription), \
'parameter sdClass must subclass ServiceDescription'
# header = '%s \n# %s.py \n# generated by %s\n%s\n'\
# %('#'*50, self.getClientModuleName(), self.__module__, '#'*50)
print >>fd, '#'*50
print >>fd, '# file: %s.py' %self.getClientModuleName()
print >>fd, '# '
print >>fd, '# client stubs generated by "%s"' %self.__class__
print >>fd, '# %s' %' '.join(sys.argv)
print >>fd, '# '
print >>fd, '#'*50
self.services = []
for service in self._wsdl.services:
sd = sdClass(self._addressing, do_extended=self.do_extended,
wsdl=self._wsdl)
if len(self._wsdl.types) > 0:
sd.setTypesModuleName(self.getTypesModuleName(),
self.getTypesModulePath())
# sd.setMessagesModuleName(self.getMessagesModuleName(),
# self.getMessagesModulePath())
self.gatherNamespaces()
sd.fromWsdl(service, **kw)
sd.write(fd)
self.services.append(sd) | write out client module to file descriptor.
Parameters and Keywords arguments:
fd -- file descriptor
sdClass -- service description class name
imports -- list of imports
readerclass -- class name of ParsedSoap reader
writerclass -- class name of SoapWriter writer |
def cli_help(context, command_name, general_parser, command_parsers):
"""
Outputs help information.
See :py:mod:`swiftly.cli.help` for context usage information.
See :py:class:`CLIHelp` for more information.
:param context: The :py:class:`swiftly.cli.context.CLIContext` to
use.
:param command_name: The command_name to output help information
for, or set to None or an empty string to output the general
help information.
:param general_parser: The
:py:class:`swiftly.cli.optionparser.OptionParser` for general
usage.
:param command_parsers: A dict of (name, :py:class:`CLICommand`)
for specific command usage.
"""
if command_name == 'for':
command_name = 'fordo'
with context.io_manager.with_stdout() as stdout:
if not command_name:
general_parser.print_help(stdout)
elif command_name in command_parsers:
command_parsers[command_name].option_parser.print_help(stdout)
else:
raise ReturnCode('unknown command %r' % command_name) | Outputs help information.
See :py:mod:`swiftly.cli.help` for context usage information.
See :py:class:`CLIHelp` for more information.
:param context: The :py:class:`swiftly.cli.context.CLIContext` to
use.
:param command_name: The command_name to output help information
for, or set to None or an empty string to output the general
help information.
:param general_parser: The
:py:class:`swiftly.cli.optionparser.OptionParser` for general
usage.
:param command_parsers: A dict of (name, :py:class:`CLICommand`)
for specific command usage. |
def manage(cls, entity, unit_of_work):
"""
Manages the given entity under the given Unit Of Work.
If `entity` is already managed by the given Unit Of Work, nothing
is done.
:raises ValueError: If the given entity is already under management
by a different Unit Of Work.
"""
if hasattr(entity, '__everest__'):
if not unit_of_work is entity.__everest__.unit_of_work:
raise ValueError('Trying to register an entity that has been '
'registered with another session!')
else:
entity.__everest__ = cls(entity, unit_of_work) | Manages the given entity under the given Unit Of Work.
If `entity` is already managed by the given Unit Of Work, nothing
is done.
:raises ValueError: If the given entity is already under management
by a different Unit Of Work. |
def make_value_from_env(self, param, value_type, function):
"""
get environment variable
"""
value = os.getenv(param)
if value is None:
self.notify_user("Environment variable `%s` undefined" % param)
return self.value_convert(value, value_type) | get environment variable |
def acquire(self):
"""
Acquire the lock, if possible. If the lock is in use, it check again
every `delay` seconds. It does this until it either gets the lock or
exceeds `timeout` number of seconds, in which case it throws
an exception.
"""
start_time = time.time()
while True:
try:
self.fd = os.open(self.lockfile,
os.O_CREAT | os.O_EXCL | os.O_RDWR)
break
except (OSError,) as e:
if e.errno != errno.EEXIST:
raise
if (time.time() - start_time) >= self.timeout:
raise FileLockException("%s: Timeout occurred." %
self.lockfile)
time.sleep(self.delay)
self.is_locked = True | Acquire the lock, if possible. If the lock is in use, it checks again
every `delay` seconds. It does this until it either gets the lock or
exceeds `timeout` number of seconds, in which case it throws
an exception. |
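A usage sketch for the lock; the constructor signature and cleanup steps below are assumptions (a real implementation would normally expose a release() method and context-manager hooks), and only the attribute names come from the code above.

import os

lock = FileLock('/tmp/example.lock', timeout=10, delay=0.05)   # hypothetical constructor signature
try:
    lock.acquire()            # blocks, retrying every `delay` seconds, up to `timeout`
    # ... work on the shared resource ...
finally:
    if lock.is_locked:        # minimal cleanup sketch; a release() method would wrap this
        os.close(lock.fd)
        os.unlink(lock.lockfile)
        lock.is_locked = False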
def snapshot_list(self):
'''
This command will list all the snapshots taken.
'''
NO_SNAPSHOTS_TAKEN = 'No snapshots have been taken yet!'
output = self._run_vagrant_command(['snapshot', 'list'])
if NO_SNAPSHOTS_TAKEN in output:
return []
else:
return output.splitlines() | This command will list all the snapshots taken. |
def worker_bonus(self, chosen_hit, auto, amount, reason='',
assignment_ids=None):
''' Bonus worker '''
if self.config.has_option('Shell Parameters', 'bonus_message'):
reason = self.config.get('Shell Parameters', 'bonus_message')
while not reason:
user_input = raw_input("Type the reason for the bonus. Workers "
"will see this message: ")
reason = user_input
# Bonus already-bonused workers if the user explicitly lists their
# assignment IDs
override_status = True
if chosen_hit:
override_status = False
workers = self.amt_services.get_workers("Approved", chosen_hit)
if not workers:
print "No approved workers for HIT", chosen_hit
return
print 'bonusing workers for HIT', chosen_hit
elif len(assignment_ids) == 1:
workers = [self.amt_services.get_worker(assignment_ids[0])]
if not workers:
print "No submissions found for requested assignment ID"
return
else:
workers = self.amt_services.get_workers("Approved")
if not workers:
print "No approved workers found."
return
workers = [worker for worker in workers if \
worker['assignmentId'] in assignment_ids]
for worker in workers:
assignment_id = worker['assignmentId']
try:
init_db()
part = Participant.query.\
filter(Participant.assignmentid == assignment_id).\
filter(Participant.workerid == worker['workerId']).\
filter(Participant.endhit != None).\
one()
if auto:
amount = part.bonus
status = part.status
if amount <= 0:
print "bonus amount <=$0, no bonus given for assignment", assignment_id
elif status == 7 and not override_status:
print "bonus already awarded for assignment", assignment_id
else:
success = self.amt_services.bonus_worker(assignment_id,
amount, reason)
if success:
print "gave bonus of $" + str(amount) + " for assignment " + \
assignment_id
part.status = 7
db_session.add(part)
db_session.commit()
db_session.remove()
else:
print "*** failed to bonus assignment", assignment_id
except Exception as e:
print e
print "*** failed to bonus assignment", assignment_id | Bonus worker |
def get_field_cache(self, cache_type='es'):
"""Return a list of fields' mappings"""
if cache_type == 'kibana':
try:
search_results = urlopen(self.get_url).read().decode('utf-8')
except HTTPError: # as e:
# self.pr_err("get_field_cache(kibana), HTTPError: %s" % e)
return []
index_pattern = json.loads(search_results)
# Results look like: {"_index":".kibana","_type":"index-pattern","_id":"aaa*","_version":6,"found":true,"_source":{"title":"aaa*","fields":"<what we want>"}} # noqa
fields_str = index_pattern['_source']['fields']
return json.loads(fields_str)
elif cache_type == 'es' or cache_type.startswith('elastic'):
search_results = urlopen(self.es_get_url).read().decode('utf-8')
es_mappings = json.loads(search_results)
# Results look like: {"<index_name>":{"mappings":{"<doc_type>":{"<field_name>":{"full_name":"<field_name>","mapping":{"<sub-field_name>":{"type":"date","index_name":"<sub-field_name>","boost":1.0,"index":"not_analyzed","store":false,"doc_values":false,"term_vector":"no","norms":{"enabled":false},"index_options":"docs","index_analyzer":"_date/16","search_analyzer":"_date/max","postings_format":"default","doc_values_format":"default","similarity":"default","fielddata":{},"ignore_malformed":false,"coerce":true,"precision_step":16,"format":"dateOptionalTime","null_value":null,"include_in_all":false,"numeric_resolution":"milliseconds","locale":""}}}, # noqa
# now convert the mappings into the .kibana format
field_cache = []
for (index_name, val) in iteritems(es_mappings):
if index_name != self.index: # only get non-'.kibana' indices
# self.pr_dbg("index: %s" % index_name)
m_dict = es_mappings[index_name]['mappings']
# self.pr_dbg('m_dict %s' % m_dict)
mappings = self.get_index_mappings(m_dict)
# self.pr_dbg('mappings %s' % mappings)
field_cache.extend(mappings)
field_cache = self.dedup_field_cache(field_cache)
return field_cache
self.pr_err("Unknown cache type: %s" % cache_type)
return None | Return a list of fields' mappings |
def delete_message(self, id, remove):
"""
Delete a message.
Delete messages from this conversation. Note that this only affects this
user's view of the conversation. If all messages are deleted, the
conversation will be as well (equivalent to DELETE)
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# REQUIRED - remove
"""Array of message ids to be deleted"""
data["remove"] = remove
self.logger.debug("POST /api/v1/conversations/{id}/remove_messages with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/conversations/{id}/remove_messages".format(**path), data=data, params=params, no_data=True) | Delete a message.
Delete messages from this conversation. Note that this only affects this
user's view of the conversation. If all messages are deleted, the
conversation will be as well (equivalent to DELETE) |
def rlgt(self, time=None, times=1,
disallow_sibling_lgts=False):
""" Uses class LGT to perform random lateral gene transfer on
ultrametric tree """
lgt = LGT(self.copy())
for _ in range(times):
lgt.rlgt(time, disallow_sibling_lgts)
return lgt.tree | Uses class LGT to perform random lateral gene transfer on
ultrametric tree |
def populate(self, priority, address, rtr, data):
"""
data bytes (high + low)
1 + 2 = current temp
3 + 4 = min temp
5 + 6 = max temp
:return: None
"""
assert isinstance(data, bytes)
self.needs_no_rtr(rtr)
self.needs_data(data, 6)
self.set_attributes(priority, address, rtr)
self.cur = (((data[0] << 8)| data[1]) / 32 ) * 0.0625
self.min = (((data[2] << 8) | data[3]) / 32 ) * 0.0625
self.max = (((data[4] << 8) | data[5]) / 32 ) * 0.0625 | data bytes (high + low)
1 + 2 = current temp
3 + 4 = min temp
5 + 6 = max temp
:return: None |
def construct_inlines(self):
"""
Returns the inline formset instances
"""
inline_formsets = []
for inline_class in self.get_inlines():
inline_instance = inline_class(self.model, self.request, self.object, self.kwargs, self)
inline_formset = inline_instance.construct_formset()
inline_formsets.append(inline_formset)
return inline_formsets | Returns the inline formset instances |
def _pcca_connected_isa(evec, n_clusters):
"""
PCCA+ spectral clustering method using the inner simplex algorithm.
Clusters the first n_cluster eigenvectors of a transition matrix in order to cluster the states.
This function assumes that the state space is fully connected, i.e. the transition matrix whose
eigenvectors are used is supposed to have only one eigenvalue 1, and the corresponding first
eigenvector (evec[:,0]) must be constant.
Parameters
----------
eigenvectors : ndarray
A matrix with the sorted eigenvectors in the columns. The stationary eigenvector should
be first, then the one to the slowest relaxation process, etc.
n_clusters : int
Number of clusters to group to.
Returns
-------
(chi, rot_mat)
chi : ndarray (n x m)
A matrix containing the probability or membership of each state to be assigned to each cluster.
The rows sum to 1.
rot_mat : ndarray (m x m)
A rotation matrix that rotates the dominant eigenvectors to yield the PCCA memberships, i.e.:
chi = np.dot(evec, rot_matrix)
References
----------
[1] P. Deuflhard and M. Weber, Robust Perron cluster analysis in conformation dynamics.
in: Linear Algebra Appl. 398C M. Dellnitz and S. Kirkland and M. Neumann and C. Schuette (Editors)
Elsevier, New York, 2005, pp. 161-184
"""
(n, m) = evec.shape
# do we have enough eigenvectors?
if n_clusters > m:
raise ValueError("Cannot cluster the (" + str(n) + " x " + str(m)
+ " eigenvector matrix to " + str(n_clusters) + " clusters.")
# check if the first, and only the first eigenvector is constant
diffs = np.abs(np.max(evec, axis=0) - np.min(evec, axis=0))
assert diffs[0] < 1e-6, "First eigenvector is not constant. This indicates that the transition matrix " \
"is not connected or the eigenvectors are incorrectly sorted. Cannot do PCCA."
assert diffs[1] > 1e-6, "An eigenvector after the first one is constant. " \
"Probably the eigenvectors are incorrectly sorted. Cannot do PCCA."
# local copy of the eigenvectors
c = evec[:, list(range(n_clusters))]
ortho_sys = np.copy(c)
max_dist = 0.0
# representative states
ind = np.zeros(n_clusters, dtype=np.int32)
# select the first representative as the most outlying point
for (i, row) in enumerate(c):
if np.linalg.norm(row, 2) > max_dist:
max_dist = np.linalg.norm(row, 2)
ind[0] = i
# translate coordinates to make the first representative the origin
ortho_sys -= c[ind[0], None]
# select the other m-1 representatives using a Gram-Schmidt orthogonalization
for k in range(1, n_clusters):
max_dist = 0.0
temp = np.copy(ortho_sys[ind[k - 1]])
# select next farthest point that is not yet a representative
for (i, row) in enumerate(ortho_sys):
row -= np.dot(np.dot(temp, np.transpose(row)), temp)
distt = np.linalg.norm(row, 2)
if distt > max_dist and i not in ind[0:k]:
max_dist = distt
ind[k] = i
ortho_sys /= np.linalg.norm(ortho_sys[ind[k]], 2)
# print "Final selection ", ind
# obtain transformation matrix of eigenvectors to membership matrix
rot_mat = np.linalg.inv(c[ind])
#print "Rotation matrix \n ", rot_mat
# compute membership matrix
chi = np.dot(c, rot_mat)
#print "chi \n ", chi
return (chi, rot_mat) | PCCA+ spectral clustering method using the inner simplex algorithm.
Clusters the first n_cluster eigenvectors of a transition matrix in order to cluster the states.
This function assumes that the state space is fully connected, i.e. the transition matrix whose
eigenvectors are used is supposed to have only one eigenvalue 1, and the corresponding first
eigenvector (evec[:,0]) must be constant.
Parameters
----------
eigenvectors : ndarray
A matrix with the sorted eigenvectors in the columns. The stationary eigenvector should
be first, then the one to the slowest relaxation process, etc.
n_clusters : int
Number of clusters to group to.
Returns
-------
(chi, rot_mat)
chi : ndarray (n x m)
A matrix containing the probability or membership of each state to be assigned to each cluster.
The rows sum to 1.
rot_mat : ndarray (m x m)
A rotation matrix that rotates the dominant eigenvectors to yield the PCCA memberships, i.e.:
chi = np.dot(evec, rot_matrix)
References
----------
[1] P. Deuflhard and M. Weber, Robust Perron cluster analysis in conformation dynamics.
in: Linear Algebra Appl. 398C M. Dellnitz and S. Kirkland and M. Neumann and C. Schuette (Editors)
Elsevier, New York, 2005, pp. 161-184 |
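A small numerical check of the final rotation step, assuming the representative rows have already been chosen: with rot_mat = inv(c[ind]) the representatives get crisp unit memberships and, because the first eigenvector is constant, every row of chi sums to 1.

import numpy as np

# Toy eigenvector matrix: constant first column, 4 states, 2 clusters.
c = np.array([[1.0,  1.0],
              [1.0,  0.5],
              [1.0, -0.4],
              [1.0, -1.0]])
ind = np.array([0, 3])            # hypothetical representative states
rot_mat = np.linalg.inv(c[ind])
chi = np.dot(c, rot_mat)
print(chi[ind])                   # identity rows: each representative belongs fully to one cluster
print(chi.sum(axis=1))            # every row sums to 1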
def all_subclasses(cls):
"""Recursively returns all the subclasses of the provided class.
"""
subclasses = cls.__subclasses__()
descendants = (descendant for subclass in subclasses
for descendant in all_subclasses(subclass))
return set(subclasses) | set(descendants) | Recursively returns all the subclasses of the provided class. |
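A quick illustration with a hypothetical three-level hierarchy; the recursion picks up grandchildren as well as direct subclasses.

class Base: pass
class Child(Base): pass
class Grandchild(Child): pass

# all_subclasses(Base) -> {Child, Grandchild}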
def generate_daily(day_end_hour, use_dst,
calib_data, hourly_data, daily_data, process_from):
"""Generate daily summaries from calibrated and hourly data."""
start = daily_data.before(datetime.max)
if start is None:
start = datetime.min
start = calib_data.after(start + SECOND)
if process_from:
if start:
start = min(start, process_from)
else:
start = process_from
if start is None:
return start
# round to start of this day, in local time
start = timezone.local_replace(
start, use_dst=use_dst, hour=day_end_hour, minute=0, second=0)
del daily_data[start:]
stop = calib_data.before(datetime.max)
acc = DayAcc()
def dailygen(inputdata):
"""Internal generator function"""
day_start = start
count = 0
while day_start <= stop:
count += 1
if count % 30 == 0:
logger.info("daily: %s", day_start.isoformat(' '))
else:
logger.debug("daily: %s", day_start.isoformat(' '))
day_end = day_start + DAY
if use_dst:
# day might be 23 or 25 hours long
day_end = timezone.local_replace(
day_end + HOURx3, use_dst=use_dst, hour=day_end_hour)
acc.reset()
for data in inputdata[day_start:day_end]:
acc.add_raw(data)
for data in hourly_data[day_start:day_end]:
acc.add_hourly(data)
new_data = acc.result()
if new_data:
new_data['start'] = day_start
yield new_data
day_start = day_end
daily_data.update(dailygen(calib_data))
return start | Generate daily summaries from calibrated and hourly data. |
def double_ell_distance (mjr0, mnr0, pa0, mjr1, mnr1, pa1, dx, dy):
"""Given two ellipses separated by *dx* and *dy*, compute their separation in
terms of σ. Based on Pineau et al (2011A&A...527A.126P).
The "0" ellipse is taken to be centered at (0, 0), while the "1"
ellipse is centered at (dx, dy).
"""
# 1. We need to rotate the frame so that ellipse 1 lies on the X axis.
theta = -np.arctan2 (dy, dx)
# 2. We also need to express these rotated ellipses in "biv" format.
sx0, sy0, cxy0 = ellbiv (mjr0, mnr0, pa0 + theta)
sx1, sy1, cxy1 = ellbiv (mjr1, mnr1, pa1 + theta)
# 3. Their convolution is:
sx, sy, cxy = bivconvolve (sx0, sy0, cxy0, sx1, sy1, cxy1)
# 4. The separation between the centers is still just:
d = np.sqrt (dx**2 + dy**2)
# 5. The effective sigma in the purely X direction, taking into account
# the covariance term, is:
sigma_eff = sx * np.sqrt (1 - (cxy / (sx * sy))**2)
# 6. Therefore the answer is:
return d / sigma_eff | Given two ellipses separated by *dx* and *dy*, compute their separation in
terms of σ. Based on Pineau et al (2011A&A...527A.126P).
The "0" ellipse is taken to be centered at (0, 0), while the "1"
ellipse is centered at (dx, dy). |
def escape(s, quote=True):
"""
Replace special characters "&", "<" and ">" to HTML-safe sequences.
If the optional flag quote is true (the default), the quotation mark
characters, both double quote (") and single quote (') characters are also
translated.
"""
assert not isinstance(s, bytes), 'Pass a unicode string'
if quote:
return s.translate(_escape_map_full)
return s.translate(_escape_map) | Replace special characters "&", "<" and ">" to HTML-safe sequences.
If the optional flag quote is true (the default), the quotation mark
characters, both double quote (") and single quote (') characters are also
translated. |
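escape relies on module-level _escape_map and _escape_map_full tables that are not shown; this is a minimal sketch of how such str.translate maps might be built to match the behaviour the docstring describes (the exact entity used for the single quote is an assumption).

_escape_map = str.maketrans({'&': '&amp;', '<': '&lt;', '>': '&gt;'})
_escape_map_full = str.maketrans({'&': '&amp;', '<': '&lt;', '>': '&gt;',
                                  '"': '&quot;', "'": '&#x27;'})

# escape('<a href="x">') -> '&lt;a href=&quot;x&quot;&gt;'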
def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
of the `progress_filter` argument.
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise UnrecognizedFormat(
"%s is not a compressed or uncompressed tar file" % (filename,)
)
try:
tarobj.chown = lambda *args: None # don't do any chowning!
for member in tarobj:
name = member.name
# don't extract absolute paths or ones with .. in them
if not name.startswith('/') and '..' not in name:
prelim_dst = os.path.join(extract_dir, *name.split('/'))
final_dst = progress_filter(name, prelim_dst)
# If progress_filter returns None, then we do not extract
# this file
# TODO: Do we really need to limit to just these file types?
# tarobj.extract() will handle all files on all platforms,
# turning file types that aren't allowed on that platform into
# regular files.
if final_dst and (member.isfile() or member.isdir() or
member.islnk() or member.issym()):
tarobj.extract(member, extract_dir)
if final_dst != prelim_dst:
shutil.move(prelim_dst, final_dst)
return True
finally:
tarobj.close() | Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
of the `progress_filter` argument. |
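A usage sketch for unpack_tarfile; the archive name and the custom filter are hypothetical and rely only on the documented behaviour that a progress_filter returning None skips extraction of that member.

def skip_docs(name, dst):
    return None if name.startswith('docs/') else dst   # None -> do not extract this member

unpack_tarfile('example-1.0.tar.gz', 'build/example', progress_filter=skip_docs)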
def add_new_data_port(self):
"""Add a new port with default values and select it"""
try:
new_data_port_ids = gui_helper_state_machine.add_data_port_to_selected_states('OUTPUT', int, [self.model])
if new_data_port_ids:
self.select_entry(new_data_port_ids[self.model.state])
except ValueError:
pass | Add a new port with default values and select it |
def predict(self, pairs):
"""Predicts the learned metric between input pairs. (For now it just
calls decision function).
Returns the learned metric value between samples in every pair. It should
ideally be low for similar samples and high for dissimilar samples.
Parameters
----------
pairs : array-like, shape=(n_pairs, 2, n_features) or (n_pairs, 2)
3D Array of pairs to predict, with each row corresponding to two
points, or 2D array of indices of pairs if the metric learner uses a
preprocessor.
Returns
-------
y_predicted : `numpy.ndarray` of floats, shape=(n_constraints,)
The predicted learned metric value between samples in every pair.
"""
check_is_fitted(self, ['threshold_', 'transformer_'])
return 2 * (- self.decision_function(pairs) <= self.threshold_) - 1 | Predicts the learned metric between input pairs. (For now it just
calls decision function).
Returns the learned metric value between samples in every pair. It should
ideally be low for similar samples and high for dissimilar samples.
Parameters
----------
pairs : array-like, shape=(n_pairs, 2, n_features) or (n_pairs, 2)
3D Array of pairs to predict, with each row corresponding to two
points, or 2D array of indices of pairs if the metric learner uses a
preprocessor.
Returns
-------
y_predicted : `numpy.ndarray` of floats, shape=(n_constraints,)
The predicted learned metric value between samples in every pair. |
def render_latex(latex: str) -> PIL.Image: # pragma: no cover
"""
Convert a single page LaTeX document into an image.
To display the returned image, `img.show()`
Required external dependencies: `pdflatex` (with `qcircuit` package),
and `poppler` (for `pdftocairo`).
Args:
A LaTeX document as a string.
Returns:
A PIL Image
Raises:
OSError: If an external dependency is not installed.
"""
tmpfilename = 'circ'
with tempfile.TemporaryDirectory() as tmpdirname:
tmppath = os.path.join(tmpdirname, tmpfilename)
with open(tmppath + '.tex', 'w') as latex_file:
latex_file.write(latex)
subprocess.run(["pdflatex",
"-halt-on-error",
"-output-directory={}".format(tmpdirname),
"{}".format(tmpfilename+'.tex')],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
check=True)
subprocess.run(['pdftocairo',
'-singlefile',
'-png',
'-q',
tmppath + '.pdf',
tmppath])
img = PIL.Image.open(tmppath + '.png')
return img | Convert a single page LaTeX document into an image.
To display the returned image, `img.show()`
Required external dependencies: `pdflatex` (with `qcircuit` package),
and `poppler` (for `pdftocairo`).
Args:
A LaTeX document as a string.
Returns:
A PIL Image
Raises:
OSError: If an external dependency is not installed. |