def indentLine(self, block, autoIndent):
""" Indent line.
Return filler or null.
"""
indent = None
if indent is None:
indent = self.tryMatchedAnchor(block, autoIndent)
if indent is None:
indent = self.tryCComment(block)
if indent is None and not autoIndent:
indent = self.tryCppComment(block)
if indent is None:
indent = self.trySwitchStatement(block)
if indent is None:
indent = self.tryAccessModifiers(block)
if indent is None:
indent = self.tryBrace(block)
if indent is None:
indent = self.tryCKeywords(block, block.text().lstrip().startswith('{'))
if indent is None:
indent = self.tryCondition(block)
if indent is None:
indent = self.tryStatement(block)
if indent is not None:
return indent
else:
dbg("Nothing matched")
return self._prevNonEmptyBlockIndent(block)
def severity_level(self, value):
"""The severity_level property.
Args:
value (int): The property value.
"""
if value == self._defaults['severityLevel'] and 'severityLevel' in self._values:
del self._values['severityLevel']
else:
self._values['severityLevel'] = value
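The setter above stores a value only when it differs from the default and removes the stored override when the value is reset to the default. A minimal self-contained sketch of the same pattern (the `_Props` class and its attribute layout are hypothetical, not the original API):

class _Props:
    def __init__(self):
        self._defaults = {'severityLevel': 0}
        self._values = {}

    @property
    def severity_level(self):
        return self._values.get('severityLevel', self._defaults['severityLevel'])

    @severity_level.setter
    def severity_level(self, value):
        if value == self._defaults['severityLevel'] and 'severityLevel' in self._values:
            del self._values['severityLevel']
        else:
            self._values['severityLevel'] = value

p = _Props()
p.severity_level = 3
assert p._values == {'severityLevel': 3}
p.severity_level = 0  # resetting to the default removes the stored override
assert p._values == {}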
def getBlocks(sentences, n):
"""
Get blocks of n sentences together.
:param sentences: List of strings where each string is a sentence.
:type sentences: list
:param n: Maximum blocksize for sentences, i.e. a block will be composed of
``n`` sentences.
:type n: int.
:returns: Blocks of n sentences.
:rtype: list-of-lists
.. code-block:: python
import rnlp
example = "Hello there. How are you? I am fine."
sentences = rnlp.getSentences(example)
# ['Hello there', 'How are you', 'I am fine']
blocks = rnlp.getBlocks(sentences, 2)
# with 1: [['Hello there'], ['How are you'], ['I am fine']]
# with 2: [['Hello there', 'How are you'], ['I am fine']]
# with 3: [['Hello there', 'How are you', 'I am fine']]
"""
blocks = []
for i in range(0, len(sentences), n):
blocks.append(sentences[i:(i+n)])
return blocks
def create_groups(self, container):
"""CreateGroups.
:param :class:`<object> <azure.devops.v5_0.identity.models.object>` container:
:rtype: [Identity]
"""
content = self._serialize.body(container, 'object')
response = self._send(http_method='POST',
location_id='5966283b-4196-4d57-9211-1b68f41ec1c2',
version='5.0',
content=content)
return self._deserialize('[Identity]', self._unwrap_collection(response))
def get_post_alter_table_index_foreign_key_sql(self, diff):
"""
:param diff: The table diff
:type diff: orator.dbal.table_diff.TableDiff
:rtype: list
"""
if not isinstance(diff.from_table, Table):
raise DBALException(
"Sqlite platform requires for alter table the table"
"diff with reference to original table schema"
)
sql = []
if diff.new_name:
table_name = diff.get_new_name()
else:
table_name = diff.get_name(self)
for index in self._get_indexes_in_altered_table(diff).values():
if index.is_primary():
continue
sql.append(
self.get_create_index_sql(index, table_name.get_quoted_name(self))
)
return sql
def _scan_block(self, cfg_job):
"""
Scan a basic block starting at a specific address
:param CFGJob cfg_job: The CFGJob instance.
:return: a list of successors
:rtype: list
"""
addr = cfg_job.addr
current_func_addr = cfg_job.func_addr
# Fix the function address
# This is for rare cases where we cannot successfully determine the end boundary of a previous function, and
# as a consequence, our analysis mistakenly thinks the previous function goes all the way across the boundary,
# resulting in the second function missing from the function manager.
if addr in self._function_addresses_from_symbols:
current_func_addr = addr
if self._addr_hooked_or_syscall(addr):
entries = self._scan_procedure(cfg_job, current_func_addr)
else:
entries = self._scan_irsb(cfg_job, current_func_addr)
return entries
def mail_logger(app, level = None):
"""
Get mail logger
Returns a configured mail logger instance, ready to be attached to the app.
Important: app.config['DEBUG'] must be False!
:param app: application instance
:param level: mail errors of this level
:return: SMTPHandler
"""
credentials = None
if app.config['MAIL_USERNAME'] and app.config['MAIL_PASSWORD']:
credentials = (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
secure = None
if app.config['MAIL_USE_TLS']:
secure = tuple()
# @todo: move to configuration
config = dict(
mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
fromaddr=app.config['MAIL_DEFAULT_SENDER'],
toaddrs=app.config['ADMINS'],
credentials = credentials,
subject='Application exception',
secure = secure,
timeout=1.0
)
mail_handler = SMTPHandler(**config)
if level is None: level = logging.ERROR
mail_handler.setLevel(level)
mail_log_format = '''
Message type: %(levelname)s
Location: %(pathname)s:%(lineno)d
Module: %(module)s
Function: %(funcName)s
Time: %(asctime)s
Message:
%(message)s
'''
mail_handler.setFormatter(logging.Formatter(mail_log_format))
return mail_handler
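A hedged usage sketch for attaching the handler to a Flask-style app; the config keys mirror those read by mail_logger() above, and the hostnames and addresses are placeholders:

import logging
from flask import Flask

app = Flask(__name__)
app.config.update(
    MAIL_SERVER='smtp.example.com', MAIL_PORT=25,
    MAIL_USERNAME=None, MAIL_PASSWORD=None, MAIL_USE_TLS=False,
    MAIL_DEFAULT_SENDER='noreply@example.com', ADMINS=['ops@example.com'],
)
if not app.debug:  # the docstring requires DEBUG to be False
    app.logger.addHandler(mail_logger(app, level=logging.ERROR))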
def _check_node_parameters(self, **kwargs):
"""See discussion in issue #840."""
if 'fqdn' in kwargs:
kwargs['fqdn'].pop('autopopulate', '')
kwargs['fqdn'].pop('addressFamily', '')
if 'fqdn' in self.__dict__:
self.__dict__['fqdn'].pop('autopopulate', '')
self.__dict__['fqdn'].pop('addressFamily', '')
if 'state' in kwargs:
if kwargs['state'] != 'user-up' and kwargs['state'] != \
'user-down':
kwargs.pop('state')
if 'state' in self.__dict__:
if self.__dict__['state'] != 'user-up' and self.__dict__['state'] \
!= 'user-down':
self.__dict__.pop('state')
if 'session' in kwargs:
if kwargs['session'] != 'user-enabled' and kwargs['session'] != \
'user-disabled':
kwargs.pop('session')
if 'session' in self.__dict__:
if self.__dict__['session'] != 'user-enabled' and \
self.__dict__['session'] != 'user-disabled':
self.__dict__.pop('session')
# Until we implement sanity checks for __dict__ this needs to stay here
self.__dict__.pop('ephemeral', '')
self.__dict__.pop('address', '')
return kwargs
def projection(radius=5e-6, sphere_index=1.339, medium_index=1.333,
wavelength=550e-9, pixel_size=1e-7, grid_size=(80, 80),
center=(39.5, 39.5)):
"""Optical path difference projection of a dielectric sphere
Parameters
----------
radius: float
Radius of the sphere [m]
sphere_index: float
Refractive index of the sphere
medium_index: float
Refractive index of the surrounding medium
wavelength: float
Vacuum wavelength of the imaging light [m]
pixel_size: float
Pixel size [m]
grid_size: tuple of floats
Resulting image size in x and y [px]
center: tuple of floats
Center position in image coordinates [px]
Returns
-------
qpi: qpimage.QPImage
Quantitative phase data set
"""
# grid
x = np.arange(grid_size[0]).reshape(-1, 1)
y = np.arange(grid_size[1]).reshape(1, -1)
cx, cy = center
# sphere location
rpx = radius / pixel_size
r = rpx**2 - (x - cx)**2 - (y - cy)**2
# distance
z = np.zeros_like(r)
rvalid = r > 0
z[rvalid] = 2 * np.sqrt(r[rvalid]) * pixel_size
# phase = delta_n * 2PI * z / wavelength
phase = (sphere_index - medium_index) * 2 * np.pi * z / wavelength
meta_data = {"pixel size": pixel_size,
"wavelength": wavelength,
"medium index": medium_index,
"sim center": center,
"sim radius": radius,
"sim index": sphere_index,
"sim model": "projection",
}
qpi = qpimage.QPImage(data=phase, which_data="phase",
meta_data=meta_data)
return qpi
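As a quick sanity check of the phase formula used above: the optical path through the sphere centre is 2 * radius, so the peak phase should be (sphere_index - medium_index) * 2 * pi * 2 * radius / wavelength. A standalone computation with the default parameters:

import numpy as np

radius, sphere_index, medium_index, wavelength = 5e-6, 1.339, 1.333, 550e-9
phase_max = (sphere_index - medium_index) * 2 * np.pi * 2 * radius / wavelength
print(round(phase_max, 3))  # ~0.685 rad at the sphere centre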
def _strip_footnote_definitions(self, text):
"""A footnote definition looks like this:
[^note-id]: Text of the note.
May include one or more indented paragraphs.
Where,
- The 'note-id' can be pretty much anything, though typically it
is the number of the footnote.
- The first paragraph may start on the next line, like so:
[^note-id]:
Text of the note.
"""
less_than_tab = self.tab_width - 1
footnote_def_re = re.compile(r'''
^[ ]{0,%d}\[\^(.+)\]: # id = \1
[ \t]*
( # footnote text = \2
# First line need not start with the spaces.
(?:\s*.*\n+)
(?:
(?:[ ]{%d} | \t) # Subsequent lines must be indented.
.*\n+
)*
)
# Lookahead for non-space at line-start, or end of doc.
(?:(?=^[ ]{0,%d}\S)|\Z)
''' % (less_than_tab, self.tab_width, self.tab_width),
re.X | re.M)
return footnote_def_re.sub(self._extract_footnote_def_sub, text)
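To see what the pattern matches on its own, here is a standalone sketch that rebuilds the same regex with an assumed tab_width of 4 and runs findall over a small Markdown snippet (the `_extract_footnote_def_sub` callback belongs to the original class and is not reproduced here):

import re

tab_width = 4  # assumed default
less_than_tab = tab_width - 1
footnote_def_re = re.compile(r'''
    ^[ ]{0,%d}\[\^(.+)\]:   # id = \1
    [ \t]*
    (                       # footnote text = \2
        (?:\s*.*\n+)
        (?:
            (?:[ ]{%d} | \t)    # Subsequent lines must be indented.
            .*\n+
        )*
    )
    # Lookahead for non-space at line-start, or end of doc.
    (?:(?=^[ ]{0,%d}\S)|\Z)
    ''' % (less_than_tab, tab_width, tab_width), re.X | re.M)

text = "Some prose.[^1]\n[^1]: The footnote text.\n    It continues indented.\nBack to prose.\n"
print(footnote_def_re.findall(text))
# [('1', 'The footnote text.\n    It continues indented.\n')]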
def _get_self_bounds(self):
"""
Computes the bounds of the object itself (not including its children)
in the form [[lat_min, lon_min], [lat_max, lon_max]].
"""
if not self.embed:
raise ValueError('Cannot compute bounds of non-embedded GeoJSON.')
data = json.loads(self.data)
if 'features' not in data.keys():
# Catch case when GeoJSON is just a single Feature or a geometry.
if not (isinstance(data, dict) and 'geometry' in data.keys()):
# Catch case when GeoJSON is just a geometry.
data = {'type': 'Feature', 'geometry': data}
data = {'type': 'FeatureCollection', 'features': [data]}
bounds = [[None, None], [None, None]]
for feature in data['features']:
for point in iter_points(feature.get('geometry', {}).get('coordinates', {})): # noqa
bounds = [
[
none_min(bounds[0][0], point[1]),
none_min(bounds[0][1], point[0]),
],
[
none_max(bounds[1][0], point[1]),
none_max(bounds[1][1], point[0]),
],
]
return bounds
def decrease_posts_count_after_post_unaproval(sender, instance, **kwargs):
""" Decreases the member's post count after a post unaproval.
This receiver handles the unaproval of a forum post: the posts count associated with the post's
author is decreased.
"""
if not instance.pk:
# Do not consider posts being created.
return
profile, dummy = ForumProfile.objects.get_or_create(user=instance.poster)
try:
old_instance = instance.__class__._default_manager.get(pk=instance.pk)
except ObjectDoesNotExist: # pragma: no cover
# This should never happen (except with django loaddata command)
return
if old_instance and old_instance.approved is True and instance.approved is False:
profile.posts_count = F('posts_count') - 1
profile.save()
def _GetDirectory(self):
"""Retrieves the directory.
Returns:
LVMDirectory: a directory or None if not available.
"""
if self.entry_type != definitions.FILE_ENTRY_TYPE_DIRECTORY:
return None
return LVMDirectory(self._file_system, self.path_spec)
def get_cities_by_name(self, name):
"""Get a list of city dictionaries with the given name.
City names cannot be used as keys, as they are not unique.
"""
if name not in self.cities_by_names:
if self.cities_items is None:
self.cities_items = list(self.get_cities().items())
self.cities_by_names[name] = [dict({gid: city})
for gid, city in self.cities_items if city['name'] == name]
return self.cities_by_names[name]
def remove(self, *args):
"""Remove the instance tied to the field from all the indexes
For the parameters, see BaseIndex.remove
"""
args = self.prepare_args(args)
for index in self._indexes:
index.remove(*args)
def estimateBIsochrone(pot,R,z,phi=None):
"""
NAME:
estimateBIsochrone
PURPOSE:
Estimate a good value for the scale of the isochrone potential by matching the slope of the rotation curve
INPUT:
pot- Potential instance or list thereof
R,z - coordinates (if these are arrays, the median estimated delta is returned, i.e., if this is an orbit)
phi= (None) azimuth to use for non-axisymmetric potentials (array if R and z are arrays)
OUTPUT:
b if 1 R,Z given
bmin,bmedian,bmax if multiple R given
HISTORY:
2013-09-12 - Written - Bovy (IAS)
2016-02-20 - Changed input order to allow physical conversions - Bovy (UofT)
2016-06-28 - Added phi= keyword for non-axisymmetric potential - Bovy (UofT)
"""
if pot is None: #pragma: no cover
raise IOError("pot= needs to be set to a Potential instance or list thereof")
if isinstance(R,nu.ndarray):
if phi is None: phi= [None for r in R]
bs= nu.array([estimateBIsochrone(pot,R[ii],z[ii],phi=phi[ii],
use_physical=False)
for ii in range(len(R))])
return nu.array([nu.amin(bs[True^nu.isnan(bs)]),
nu.median(bs[True^nu.isnan(bs)]),
nu.amax(bs[True^nu.isnan(bs)])])
else:
r2= R**2.+z**2
r= math.sqrt(r2)
dlvcdlr= dvcircdR(pot,r,phi=phi,use_physical=False)/vcirc(pot,r,phi=phi,use_physical=False)*r
try:
b= optimize.brentq(lambda x: dlvcdlr-(x/math.sqrt(r2+x**2.)-0.5*r2/(r2+x**2.)),
0.01,100.)
except: #pragma: no cover
b= nu.nan
return b
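A hedged usage sketch, assuming galpy is installed and that estimateBIsochrone is importable from galpy.actionAngle (as in recent galpy releases); R and z are given in galpy's internal units here:

from galpy.potential import MWPotential2014
from galpy.actionAngle import estimateBIsochrone

b = estimateBIsochrone(MWPotential2014, 1.1, 0.1)  # single (R, z) point -> single b
print(b)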
def _safe_sparse_mask(tensor: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
"""
In PyTorch 1.0, Tensor._sparse_mask was changed to Tensor.sparse_mask.
This wrapper allows AllenNLP to (temporarily) work with both 1.0 and 0.4.1.
"""
# pylint: disable=protected-access
try:
return tensor.sparse_mask(mask)
except AttributeError:
# TODO(joelgrus): remove this and/or warn at some point
return tensor._sparse_mask(mask)
def get_small_molecule_name(self, hms_lincs_id):
"""Get the name of a small molecule from the LINCS sm metadata.
Parameters
----------
hms_lincs_id : str
The HMS LINCS ID of the small molecule.
Returns
-------
str
The name of the small molecule.
"""
entry = self._get_entry_by_id(self._sm_data, hms_lincs_id)
if not entry:
return None
name = entry['Name']
return name
def _normalize(x, cmin=None, cmax=None, clip=True):
"""Normalize an array from the range [cmin, cmax] to [0,1],
with optional clipping."""
if not isinstance(x, np.ndarray):
x = np.array(x)
if cmin is None:
cmin = x.min()
if cmax is None:
cmax = x.max()
if cmin == cmax:
return .5 * np.ones(x.shape)
else:
cmin, cmax = float(cmin), float(cmax)
y = (x - cmin) * 1. / (cmax - cmin)
if clip:
y = np.clip(y, 0., 1.)
return y
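A short usage sketch, assuming _normalize from above is in scope:

import numpy as np

x = np.array([2., 4., 6., 10.])
print(_normalize(x))                    # [0.   0.25 0.5  1.  ]
print(_normalize(x, cmin=0., cmax=5.))  # [0.4 0.8 1.  1. ] -- values above cmax are clipped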
def get_connectable_volume_templates(self, start=0, count=-1, filter='', query='', sort=''):
"""
Gets the storage volume templates that are available on the specified networks based on the storage system
port's expected network connectivity. If there are no storage volume templates that meet the specified
connectivity criteria, an empty collection will be returned.
Returns:
list: Storage volume templates.
"""
uri = self.URI + "/connectable-volume-templates"
get_uri = self._client.build_query_uri(start=start, count=count, filter=filter,
query=query, sort=sort, uri=uri)
return self._client.get(get_uri)
def add_uuid(dom, uuid):
"""
Add ``<mods:identifier>`` with `uuid`.
"""
mods_tag = get_mods_tag(dom)
uuid_tag = dhtmlparser.HTMLElement(
"mods:identifier",
{"type": "uuid"},
[dhtmlparser.HTMLElement(uuid)]
)
insert_tag(uuid_tag, dom.find("mods:identifier"), mods_tag)
def simplified_pos(pos, tagset=None):
"""
Return a simplified POS tag for a full POS tag `pos` belonging to a tagset `tagset`. By default the WordNet
tagset is assumed.
Does the following conversion by default:
- all N... (noun) tags to 'N'
- all V... (verb) tags to 'V'
- all ADJ... (adjective) tags to 'ADJ'
- all ADV... (adverb) tags to 'ADV'
- all other to None
Does the following conversion with `tagset=='penn'`:
- all N... (noun) tags to 'N'
- all V... (verb) tags to 'V'
- all JJ... (adjective) tags to 'ADJ'
- all RB... (adverb) tags to 'ADV'
- all other to None
"""
if tagset == 'penn':
if pos.startswith('N') or pos.startswith('V'):
return pos[0]
elif pos.startswith('JJ'):
return 'ADJ'
elif pos.startswith('RB'):
return 'ADV'
else:
return None
else: # default: WordNet, STTS or unknown
if pos.startswith('N') or pos.startswith('V'):
return pos[0]
elif pos.startswith('ADJ') or pos.startswith('ADV'):
return pos[:3]
else:
return None
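A short usage sketch, assuming simplified_pos from above is in scope:

print(simplified_pos('NN'))                  # 'N'   (WordNet-style default)
print(simplified_pos('VBD', tagset='penn'))  # 'V'
print(simplified_pos('JJ', tagset='penn'))   # 'ADJ'
print(simplified_pos('RBR', tagset='penn'))  # 'ADV'
print(simplified_pos('DT', tagset='penn'))   # None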
def get_response_data(self, response, parse_json=True):
"""
Get response data or throw an appropriate exception
:param response: requests response object
:param parse_json: if True, response will be parsed as JSON
:return: response data, either as json or as a regular response.content object
"""
if response.status_code in (requests.codes.ok, requests.codes.created):
if parse_json:
return response.json()
return response.content
elif response.status_code == requests.codes.bad_request:
response_json = response.json()
raise BadRequestException(response_json.get("error", False) or response_json.get("errors",
_("Bad Request: {text}").format(text=response.text)))
elif response.status_code == requests.codes.not_found:
raise NotFoundException(_("Resource not found: {url}").format(url=response.url))
elif response.status_code == requests.codes.internal_server_error:
raise ServerErrorException(_("Internal server error"))
elif response.status_code in (requests.codes.unauthorized, requests.codes.forbidden):
raise AuthErrorException(_("Access denied"))
elif response.status_code == requests.codes.too_many_requests:
raise RateLimitException(_(response.text))
else:
raise ServerErrorException(_("Unknown error occurred"))
def samples_to_records(samples, default_keys=None):
"""Convert samples into output CWL records.
"""
from bcbio.pipeline import run_info
RECORD_CONVERT_TO_LIST = set(["config__algorithm__tools_on", "config__algorithm__tools_off",
"reference__genome_context"])
all_keys = _get_all_cwlkeys(samples, default_keys)
out = []
for data in samples:
for raw_key in sorted(list(all_keys)):
key = raw_key.split("__")
if tz.get_in(key, data) is None:
data = tz.update_in(data, key, lambda x: None)
if raw_key not in data["cwl_keys"]:
data["cwl_keys"].append(raw_key)
if raw_key in RECORD_CONVERT_TO_LIST:
val = tz.get_in(key, data)
if not val: val = []
elif not isinstance(val, (list, tuple)): val = [val]
data = tz.update_in(data, key, lambda x: val)
# Booleans are problematic for CWL serialization, convert into string representation
if isinstance(tz.get_in(key, data), bool):
data = tz.update_in(data, key, lambda x: str(tz.get_in(key, data)))
data["metadata"] = run_info.add_metadata_defaults(data.get("metadata", {}))
out.append(data)
return out
def p_single_line_if(p):
""" if_inline : if_then_part statements %prec ID
| if_then_part co_statements_co %prec NEWLINE
| if_then_part statements_co %prec NEWLINE
| if_then_part co_statements %prec ID
"""
cond_ = p[1]
stat_ = p[2]
p[0] = make_sentence('IF', cond_, stat_, lineno=p.lineno(1))
def DeactivateCard(self, card):
"""Deactivate a card."""
if hasattr(card, 'connection'):
card.connection.disconnect()
if self.parent.apdutracerpanel is not None:
card.connection.deleteObserver(self.parent.apdutracerpanel)
delattr(card, 'connection')
self.dialogpanel.OnDeactivateCard(card)
def init_states(self,
source_encoded: mx.sym.Symbol,
source_encoded_lengths: mx.sym.Symbol,
source_encoded_max_length: int) -> List[mx.sym.Symbol]:
"""
Returns a list of symbolic states that represent the initial states of this decoder.
Used for inference.
:param source_encoded: Encoded source. Shape: (batch_size, source_encoded_max_length, encoder_depth).
:param source_encoded_lengths: Lengths of encoded source sequences. Shape: (batch_size,).
:param source_encoded_max_length: Size of encoder time dimension.
:return: List of symbolic initial states.
"""
pass
def _warning_for_deprecated_user_based_rules(rules):
"""Warning user based policy enforcement used in the rule but the rule
doesn't support it.
"""
for rule in rules:
# We will skip the warning for the resources which support user based
# policy enforcement.
if [resource for resource in USER_BASED_RESOURCES
if resource in rule[0]]:
continue
if 'user_id' in KEY_EXPR.findall(rule[1]):
LOG.warning(_LW("The user_id attribute isn't supported in the "
"rule '%s'. All the user_id based policy "
"enforcement will be removed in the "
"future."), rule[0]) | Warning user based policy enforcement used in the rule but the rule
doesn't support it. |
def restore_breakpoints_state(cls, breakpoints_state_list):
"""Restore the state of breakpoints given a list provided by
backup_breakpoints_state(). If the list of breakpoints has changed
since the backup, missing or added breakpoints are ignored.
breakpoints_state_list is a list of tuple. Each tuple is of form:
(breakpoint_number, enabled, condition)
"""
for breakpoint_state in breakpoints_state_list:
bp = cls.breakpoints_by_number[breakpoint_state[0]]
if bp:
bp.enabled = breakpoint_state[1]
bp.condition = breakpoint_state[2]
cls.update_active_breakpoint_flag()
return
def out_of_date(self):
"""Check if our local latest sha matches the remote latest sha"""
try:
latest_remote_sha = self.pr_commits(self.pull_request.refresh(True))[-1].sha
print("Latest remote sha: {}".format(latest_remote_sha))
try:
print("Ratelimit remaining: {}".format(self.github.ratelimit_remaining))
except Exception:
print("Failed to look up ratelimit remaining")
return self.last_sha != latest_remote_sha
except IndexError:
return False
def add_child_resource_client(self, res_name, res_spec):
"""Add a resource client to the container and start the resource connection"""
res_spec = dict(res_spec)
res_spec['name'] = res_name
res = self.client_resource_factory(
res_spec, parent=self, logger=self._logger)
self.children[resource.escape_name(res_name)] = res
self._children_dirty = True
res.set_ioloop(self.ioloop)
res.start()
return res
def map_files(self, base_dir, all_components_list):
"""Apply `assert_single_path_by_glob()` to all elements of `all_components_list`.
Each element of `all_components_list` should be a tuple of path components, including
wildcards. The elements of each tuple are joined, and interpreted as a glob expression relative
to `base_dir`. The resulting glob should match exactly one path.
:return: List of matched paths, one per element of `all_components_list`.
:raises: :class:`ArchiveFileMapper.ArchiveFileMappingError` if more or less than one path was
matched by one of the glob expressions interpreted from `all_components_list`.
"""
mapped_paths = []
for components_tupled in all_components_list:
with_base = [base_dir] + list(components_tupled)
# Results are known to exist, since they match a glob.
mapped_paths.append(self.assert_single_path_by_glob(with_base))
return mapped_paths
def _pypsa_load_timeseries_aggregated_at_lv_station(network, timesteps):
"""
Aggregates load time series per sector and LV grid.
Parameters
----------
network : Network
The eDisGo grid topology model overall container
timesteps : array_like
Timesteps is an array-like object with entries of type
:pandas:`pandas.Timestamp<timestamp>` specifying which time steps
to export to pypsa representation and use in power flow analysis.
Returns
-------
tuple of :pandas:`pandas.DataFrame<dataframe>`
Tuple of size two containing DataFrames that represent
1. 'p_set' of aggregated Load per sector at each LV station
2. 'q_set' of aggregated Load per sector at each LV station
"""
# ToDo: Load.pypsa_timeseries is not differentiated by sector so this
# function will not work (either change here and in
# add_aggregated_lv_components or in Load class)
load_p = []
load_q = []
for lv_grid in network.mv_grid.lv_grids:
# Determine aggregated load at LV stations
load = {}
for lo in lv_grid.graph.nodes_by_attribute('load'):
for sector, val in lo.consumption.items():
load.setdefault(sector, {})
load[sector].setdefault('timeseries_p', [])
load[sector].setdefault('timeseries_q', [])
load[sector]['timeseries_p'].append(
lo.pypsa_timeseries('p').rename(repr(lo)).to_frame().loc[
timesteps])
load[sector]['timeseries_q'].append(
lo.pypsa_timeseries('q').rename(repr(lo)).to_frame().loc[
timesteps])
for sector, val in load.items():
load_p.append(
pd.concat(val['timeseries_p'], axis=1).sum(axis=1).rename(
'_'.join(['Load', sector, repr(lv_grid)])).to_frame())
load_q.append(
pd.concat(val['timeseries_q'], axis=1).sum(axis=1).rename(
'_'.join(['Load', sector, repr(lv_grid)])).to_frame())
return load_p, load_q
def submission(self):
"""Return the Submission object this comment belongs to."""
if not self._submission: # Comment not from submission
self._submission = self.reddit_session.get_submission(
url=self._fast_permalink)
return self._submission
def _reorder_types(self, types_script):
"""
Takes type scripts and reorders them to avoid 'type does not exist' exceptions
"""
self._logger.debug('Running types definitions scripts')
self._logger.debug('Reordering types definitions scripts to avoid "type does not exist" exceptions')
_type_statements = sqlparse.split(types_script)
# TODO: move up to classes
_type_statements_dict = {}  # dictionary that stores statements with type and order
type_unordered_scripts = [] # scripts to execute without order
type_drop_scripts = [] # drop scripts to execute first
for _type_statement in _type_statements:
_type_statement_parsed = sqlparse.parse(_type_statement)
if len(_type_statement_parsed) > 0: # can be empty parsed object so need to check
# we need only type declarations to be ordered
if _type_statement_parsed[0].get_type() == 'CREATE':
_type_body_r = r'\bcreate\s+\b(?:type|domain)\s+\b(\w+\.\w+|\w+)\b'
_type_name = re.compile(_type_body_r, flags=re.IGNORECASE).findall(_type_statement)[0]
_type_statements_dict[str(_type_name)] = \
{'script': _type_statement, 'deps': []}
elif _type_statement_parsed[0].get_type() == 'DROP':
type_drop_scripts.append(_type_statement)
else:
type_unordered_scripts.append(_type_statement)
# now let's add dependant types to dictionary with types
# _type_statements_list = [] # list of statements to be ordered
for _type_key in _type_statements_dict.keys():
for _type_key_sub, _type_value in _type_statements_dict.items():
if _type_key != _type_key_sub:
if pgpm.lib.utils.misc.find_whole_word(_type_key)(_type_value['script']):
_type_value['deps'].append(_type_key)
# now let's add order to type scripts and put them ordered to list
_deps_unresolved = True
_type_script_order = 0
_type_names = []
type_ordered_scripts = [] # ordered list with scripts to execute
while _deps_unresolved:
for k, v in _type_statements_dict.items():
if not v['deps']:
_type_names.append(k)
v['order'] = _type_script_order
_type_script_order += 1
if not v['script'] in type_ordered_scripts:
type_ordered_scripts.append(v['script'])
else:
_dep_exists = True
for _dep in v['deps']:
if _dep not in _type_names:
_dep_exists = False
if _dep_exists:
_type_names.append(k)
v['order'] = _type_script_order
_type_script_order += 1
if not v['script'] in type_ordered_scripts:
type_ordered_scripts.append(v['script'])
else:
v['order'] = -1
_deps_unresolved = False
for k, v in _type_statements_dict.items():
if v['order'] == -1:
_deps_unresolved = True
return type_drop_scripts, type_ordered_scripts, type_unordered_scripts
def circumradius(self):
'''
Distance from the circumcenter to all the vertices in
the Triangle, float.
'''
return (self.a * self.b * self.c) / (self.area * 4)
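As a quick check of the formula R = abc / (4 * area): a 3-4-5 right triangle has area 6, so R = 60 / 24 = 2.5, i.e. half the hypotenuse, as expected for a right triangle. A standalone sketch (the `_Tri` stub is hypothetical; the real class exposes `a`, `b`, `c` and `area` the same way):

class _Tri:
    a, b, c, area = 3.0, 4.0, 5.0, 6.0

    @property
    def circumradius(self):
        return (self.a * self.b * self.c) / (self.area * 4)

print(_Tri().circumradius)  # 2.5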
def plot_file_distances(dist_matrix):
"""
Plots dist_matrix
Parameters
----------
dist_matrix: np.ndarray
"""
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111)
ax.matshow(dist_matrix, interpolation='nearest',
cmap=plt.cm.get_cmap('PuBu'))
def run():
"""
Start the BaseHTTPServer and serve requests forever.
"""
server_address = (args.listen_addr, args.listen_port)
httpd = YHSM_VALServer(server_address, YHSM_VALRequestHandler)
my_log_message(args, syslog.LOG_INFO, "Serving requests to 'http://%s:%s%s' (YubiHSM: '%s')" \
% (args.listen_addr, args.listen_port, args.serve_url, args.device))
httpd.serve_forever()
def browse(i):
"""
Input: {
data_uoa - data UOA of the repo
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
}
"""
o=i.get('out','')
duoa=i.get('data_uoa','')
if duoa=='':
return {'return':1, 'error':'repository UOA is not specified'}
# Get configuration (not from Cache - can be outdated info!)
# r=ck.load_repo_info_from_cache({'repo_uoa':duoa})
r=ck.access({'action':'load',
'module_uoa':work['self_module_uoa'],
'data_uoa':duoa})
if r['return']>0: return r
p=r.get('dict',{}).get('path','')
d=r['dict']
dn=r.get('data_name','')
shared=d.get('shared','')
url=d.get('url','')
if shared!='git' and url=='':
return {'return':1, 'error':'this repository is not shared!'}
import webbrowser
webbrowser.open(url)
return {'return':0}
def write(self, filename):
"""Write the XML job description to a file."""
txt = self.tostring()
with open(filename, 'w') as f:
f.write(txt)
def _get_pq_array_construct(self):
""" Returns a construct for an array of PQ load data.
"""
bus_no = integer.setResultsName("bus_no")
s_rating = real.setResultsName("s_rating") # MVA
v_rating = real.setResultsName("v_rating") # kV
p = real.setResultsName("p") # p.u.
q = real.setResultsName("q") # p.u.
v_max = Optional(real).setResultsName("v_max") # p.u.
v_min = Optional(real).setResultsName("v_min") # p.u.
# Allow conversion to impedance
z_conv = Optional(boolean).setResultsName("z_conv")
status = Optional(boolean).setResultsName("status")
pq_data = bus_no + s_rating + v_rating + p + q + v_max + \
v_min + z_conv + status + scolon
pq_data.setParseAction(self.push_pq)
pq_array = Literal("PQ.con") + "=" + "[" + "..." + \
ZeroOrMore(pq_data + Optional("]" + scolon))
return pq_array
def make_headers(worksheet):
"""
Make headers from worksheet
"""
headers = {}
cell_idx = 0
while cell_idx < worksheet.ncols:
cell_type = worksheet.cell_type(0, cell_idx)
if cell_type == 1:
header = slughifi(worksheet.cell_value(0, cell_idx))
if not header.startswith("_"):
headers[cell_idx] = header
cell_idx += 1
return headers
def symbol_leading_char(self):
"""Return the symbol leading char attribute of the BFD file being
processed.
"""
if not self._ptr:
raise BfdException("BFD not initialized")
return _bfd.get_bfd_attribute(
self._ptr, BfdAttributes.SYMBOL_LEADING_CHAR)
def flow_coef_bd(CIJ):
'''
Computes the flow coefficient for each node and averaged over the
network, as described in Honey et al. (2007) PNAS. The flow coefficient
is similar to betweenness centrality, but works on a local
neighborhood. It is mathematically related to the clustering
coefficient (cc) at each node as, fc+cc <= 1.
Parameters
----------
CIJ : NxN np.ndarray
binary directed connection matrix
Returns
-------
fc : Nx1 np.ndarray
flow coefficient for each node
FC : float
average flow coefficient over the network
total_flo : int
number of paths that "flow" across the central node
'''
N = len(CIJ)
fc = np.zeros((N,))
total_flo = np.zeros((N,))
max_flo = np.zeros((N,))
# loop over nodes
for v in range(N):
# find neighbors - note: both incoming and outgoing connections
nb, = np.where(CIJ[v, :] + CIJ[:, v].T)
fc[v] = 0
if np.where(nb)[0].size:
CIJflo = -CIJ[np.ix_(nb, nb)]
for i in range(len(nb)):
for j in range(len(nb)):
if CIJ[nb[i], v] and CIJ[v, nb[j]]:
CIJflo[i, j] += 1
total_flo[v] = np.sum(
(CIJflo == 1) * np.logical_not(np.eye(len(nb))))
max_flo[v] = len(nb) * len(nb) - len(nb)
fc[v] = total_flo[v] / max_flo[v]
fc[np.isnan(fc)] = 0
FC = np.mean(fc)
return fc, FC, total_flo
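A small usage sketch, assuming flow_coef_bd from above (which follows the bctpy implementation) is in scope:

import numpy as np

# Directed 3-cycle 0 -> 1 -> 2 -> 0: each node relays exactly one two-step
# path between its two neighbours, so fc = 0.5 everywhere.
CIJ = np.array([[0, 1, 0],
                [0, 0, 1],
                [1, 0, 0]])
fc, FC, total_flo = flow_coef_bd(CIJ)
print(fc)         # [0.5 0.5 0.5]
print(FC)         # 0.5
print(total_flo)  # [1. 1. 1.]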
def _set_mct_state(self, v, load=False):
"""
Setter method for mct_state, mapped from YANG variable /mct_state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mct_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mct_state() directly.
YANG Description: MCT Operational Information
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mct_state.mct_state, is_container='container', presence=False, yang_name="mct-state", rest_name="mct-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'nsm-mct', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mct_state must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=mct_state.mct_state, is_container='container', presence=False, yang_name="mct-state", rest_name="mct-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'nsm-mct', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-nsm-operational', defining_module='brocade-nsm-operational', yang_type='container', is_config=True)""",
})
self.__mct_state = t
if hasattr(self, '_set'):
self._set()
def attr_name(self):
"Returns attribute name for this facet"
return self.schema.name if self.schema else self.field.name
def join(C, *args, **kwargs):
"""join a list of url elements, and include any keyword arguments, as a new URL"""
u = C('/'.join([str(arg).strip('/') for arg in args]), **kwargs)
return u
def put(self, f, digest=None):
"""
Upload a blob
:param f:
File object to be uploaded (required to support seek if digest is
not provided).
:param digest:
Optional SHA-1 hex digest of the file contents. Gets computed
before actual upload if not provided, which requires an extra file
read.
:return:
The hex digest of the uploaded blob if not provided in the call.
Otherwise a boolean indicating if the blob has been newly created.
"""
if digest:
actual_digest = digest
else:
actual_digest = self._compute_digest(f)
created = self.conn.client.blob_put(self.container_name,
actual_digest, f)
if digest:
return created
return actual_digest
def parse(self, request, bundle_errors=False):
"""Parses argument value(s) from the request, converting according to
the argument's type.
:param request: The flask request object to parse arguments from
:param bundle_errors: Do not abort when first error occurs, return a
dict with the name of the argument and the error message to be
bundled
"""
source = self.source(request)
results = []
# Sentinels
_not_found = False
_found = True
for operator in self.operators:
name = self.name + operator.replace("=", "", 1)
if name in source:
# Account for MultiDict and regular dict
if hasattr(source, "getlist"):
values = source.getlist(name)
else:
values = source.get(name)
if not (isinstance(values, collections.MutableSequence) and self.action == 'append'):
values = [values]
for value in values:
if hasattr(value, "strip") and self.trim:
value = value.strip()
if hasattr(value, "lower") and not self.case_sensitive:
value = value.lower()
if hasattr(self.choices, "__iter__"):
self.choices = [choice.lower()
for choice in self.choices]
try:
value = self.convert(value, operator)
except Exception as error:
if self.ignore:
continue
return self.handle_validation_error(error, bundle_errors)
if self.choices and value not in self.choices:
if current_app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
return self.handle_validation_error(
ValueError(u"{0} is not a valid choice".format(
value)), bundle_errors)
self.handle_validation_error(
ValueError(u"{0} is not a valid choice".format(
value)), bundle_errors)
if name in request.unparsed_arguments:
request.unparsed_arguments.pop(name)
results.append(value)
if not results and self.required:
if isinstance(self.location, six.string_types):
error_msg = u"Missing required parameter in {0}".format(
_friendly_location.get(self.location, self.location)
)
else:
friendly_locations = [_friendly_location.get(loc, loc)
for loc in self.location]
error_msg = u"Missing required parameter in {0}".format(
' or '.join(friendly_locations)
)
if current_app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
return self.handle_validation_error(ValueError(error_msg), bundle_errors)
self.handle_validation_error(ValueError(error_msg), bundle_errors)
if not results:
if callable(self.default):
return self.default(), _not_found
else:
return self.default, _not_found
if self.action == 'append':
return results, _found
if self.action == 'store' or len(results) == 1:
return results[0], _found
return results, _found
def _sanitise(*args):
"""Take an arg or the key portion of a kwarg and check that it is in the
set of allowed GPG options and flags, and that it has the correct
type. Then, attempt to escape any unsafe characters. If an option is not
allowed, drop it with a logged warning. Returns a string of all the
sanitised, allowed options.
Each new option that we support that is not a boolean, but instead has
some additional inputs following it, i.e. "--encrypt-file foo.txt", will
need some basic safety checks added here.
GnuPG has three-hundred and eighteen commandline flags. Also, not all
implementations of OpenPGP parse PGP packets and headers in the same way,
so there is added potential there for messing with calls to GPG.
For information on the PGP message format specification, see
:rfc:`1991`.
If you're asking, "Is this *really* necessary?": No, not really -- we could
just follow the security precautions recommended by `this xkcd`__.
__ https://xkcd.com/1181/
:param str args: (optional) The boolean arguments which will be passed to
the GnuPG process.
:rtype: str
:returns: ``sanitised``
"""
## see TODO file, tag :cleanup:sanitise:
def _check_option(arg, value):
"""Check that a single ``arg`` is an allowed option.
If it is allowed, quote out any escape characters in ``value``, and
add the pair to :ivar:`sanitised`. Otherwise, drop them.
:param str arg: The arguments which will be passed to the GnuPG
process, and, optionally their corresponding values.
The values are any additional arguments following the
GnuPG option or flag. For example, if we wanted to
pass ``"--encrypt --recipient [email protected]"`` to
GnuPG, then ``"--encrypt"`` would be an arg without a
value, and ``"--recipient"`` would also be an arg,
with a value of ``"[email protected]"``.
:ivar list checked: The sanitised, allowed options and values.
:rtype: str
:returns: A string of the items in ``checked``, delimited by spaces.
"""
checked = str()
none_options = _get_options_group("none_options")
hex_options = _get_options_group("hex_options")
hex_or_none_options = _get_options_group("hex_or_none_options")
if not _util._py3k:
if not isinstance(arg, list) and isinstance(arg, unicode):
arg = str(arg)
try:
flag = _is_allowed(arg)
assert flag is not None, "_check_option(): got None for flag"
except (AssertionError, ProtectedOption) as error:
log.warn("_check_option(): %s" % str(error))
else:
checked += (flag + ' ')
if _is_string(value):
values = value.split(' ')
for v in values:
## these can be handled separately, without _fix_unsafe(),
## because they are only allowed if they pass the regex
if (flag in none_options) and (v is None):
continue
if flag in hex_options:
if _is_hex(v): checked += (v + " ")
else:
log.debug("'%s %s' not hex." % (flag, v))
if (flag in hex_or_none_options) and (v is None):
log.debug("Allowing '%s' for all keys" % flag)
continue
elif flag in ['--keyserver']:
host = _check_keyserver(v)
if host:
log.debug("Setting keyserver: %s" % host)
checked += (v + " ")
else: log.debug("Dropping keyserver: %s" % v)
continue
## the rest are strings, filenames, etc, and should be
## shell escaped:
val = _fix_unsafe(v)
try:
assert not val is None
assert not val.isspace()
assert not v is None
assert not v.isspace()
except:
log.debug("Dropping %s %s" % (flag, v))
continue
if flag in ['--encrypt', '--encrypt-files', '--decrypt',
'--decrypt-files', '--import', '--verify']:
if ( (_util._is_file(val))
or
((flag == '--verify') and (val == '-')) ):
checked += (val + " ")
else:
log.debug("%s not file: %s" % (flag, val))
elif flag in ['--cipher-algo', '--personal-cipher-prefs',
'--personal-cipher-preferences']:
legit_algos = _check_preferences(val, 'cipher')
if legit_algos: checked += (legit_algos + " ")
else: log.debug("'%s' is not cipher" % val)
elif flag in ['--compress-algo', '--compression-algo',
'--personal-compress-prefs',
'--personal-compress-preferences']:
legit_algos = _check_preferences(val, 'compress')
if legit_algos: checked += (legit_algos + " ")
else: log.debug("'%s' not compress algo" % val)
elif flag == '--trust-model':
legit_models = _check_preferences(val, 'trust')
if legit_models: checked += (legit_models + " ")
else: log.debug("%r is not a trust model", val)
elif flag == '--pinentry-mode':
legit_modes = _check_preferences(val, 'pinentry')
if legit_modes: checked += (legit_modes + " ")
else: log.debug("%r is not a pinentry mode", val)
else:
checked += (val + " ")
log.debug("_check_option(): No checks for %s" % val)
return checked.rstrip(' ')
is_flag = lambda x: x.startswith('--')
def _make_filo(args_string):
filo = arg.split(' ')
filo.reverse()
log.debug("_make_filo(): Converted to reverse list: %s" % filo)
return filo
def _make_groups(filo):
groups = {}
while len(filo) >= 1:
last = filo.pop()
if is_flag(last):
log.debug("Got arg: %s" % last)
if last == '--verify':
groups[last] = str(filo.pop())
## accept the read-from-stdin arg:
if len(filo) >= 1 and filo[len(filo)-1] == '-':
groups[last] += str(' - ') ## gross hack
filo.pop()
else:
groups[last] = str()
while len(filo) > 1 and not is_flag(filo[len(filo)-1]):
log.debug("Got value: %s" % filo[len(filo)-1])
groups[last] += (filo.pop() + " ")
else:
if len(filo) == 1 and not is_flag(filo[0]):
log.debug("Got value: %s" % filo[0])
groups[last] += filo.pop()
else:
log.warn("_make_groups(): Got solitary value: %s" % last)
groups["xxx"] = last
return groups
def _check_groups(groups):
log.debug("Got groups: %s" % groups)
checked_groups = []
for a,v in groups.items():
v = None if len(v) == 0 else v
safe = _check_option(a, v)
if safe is not None and not safe.strip() == "":
log.debug("Appending option: %s" % safe)
checked_groups.append(safe)
else:
log.warn("Dropped option: '%s %s'" % (a,v))
return checked_groups
if args is not None:
option_groups = {}
for arg in args:
## if we're given a string with a bunch of options in it split
## them up and deal with them separately
if (not _util._py3k and isinstance(arg, basestring)) \
or (_util._py3k and isinstance(arg, str)):
log.debug("Got arg string: %s" % arg)
if arg.find(' ') > 0:
filo = _make_filo(arg)
option_groups.update(_make_groups(filo))
else:
option_groups.update({ arg: "" })
elif isinstance(arg, list):
log.debug("Got arg list: %s" % arg)
arg.reverse()
option_groups.update(_make_groups(arg))
else:
log.warn("Got non-str/list arg: '%s', type '%s'"
% (arg, type(arg)))
checked = _check_groups(option_groups)
sanitised = ' '.join(x for x in checked)
return sanitised
else:
log.debug("Got None for args") | Take an arg or the key portion of a kwarg and check that it is in the
set of allowed GPG options and flags, and that it has the correct
type. Then, attempt to escape any unsafe characters. If an option is not
allowed, drop it with a logged warning. Returns a dictionary of all
sanitised, allowed options.
Each new option that we support that is not a boolean, but instead has
some additional inputs following it, i.e. "--encrypt-file foo.txt", will
need some basic safety checks added here.
GnuPG has three-hundred and eighteen commandline flags. Also, not all
implementations of OpenPGP parse PGP packets and headers in the same way,
so there is added potential there for messing with calls to GPG.
For information on the PGP message format specification, see
:rfc:`1991`.
If you're asking, "Is this *really* necessary?": No, not really -- we could
just follow the security precautions recommended by `this xkcd`__.
__ https://xkcd.com/1181/
:param str args: (optional) The boolean arguments which will be passed to
the GnuPG process.
:rtype: str
:returns: ``sanitised`` |
def collect_genv(self, include_local=True, include_global=True):
"""
Returns a copy of the global environment with all the local variables copied back into it.
"""
e = type(self.genv)()
if include_global:
e.update(self.genv)
if include_local:
for k, v in self.lenv.items():
e['%s_%s' % (self.obj.name.lower(), k)] = v
return e
def postcode(self):
"""
:example '101-1212'
"""
return "%03d-%04d" % (self.generator.random.randint(0, 999),
self.generator.random.randint(0, 9999))
def kallisto_general_stats_table(self):
""" Take the parsed stats from the Kallisto report and add it to the
basic stats table at the top of the report """
headers = OrderedDict()
headers['fragment_length'] = {
'title': 'Frag Length',
'description': 'Estimated average fragment length',
'min': 0,
'suffix': 'bp',
'scale': 'RdYlGn'
}
headers['percent_aligned'] = {
'title': '% Aligned',
'description': '% processed reads that were pseudoaligned',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'YlGn'
}
headers['pseudoaligned_reads'] = {
'title': '{} Aligned'.format(config.read_count_prefix),
'description': 'Pseudoaligned reads ({})'.format(config.read_count_desc),
'min': 0,
'scale': 'PuRd',
'modify': lambda x: x * config.read_count_multiplier,
'shared_key': 'read_count'
}
self.general_stats_addcols(self.kallisto_data, headers)
def playback(cls, filename):
"""
.. testcode::
import io
import json
import requests
import httpretty
with httpretty.record('/tmp/ip.json'):
data = requests.get('https://httpbin.org/ip').json()
with io.open('/tmp/ip.json') as fd:
assert data == json.load(fd)
:param filename: a string
:returns: a `context-manager <https://docs.python.org/3/reference/datamodel.html#context-managers>`_
"""
cls.enable()
data = json.loads(open(filename).read())
for item in data:
uri = item['request']['uri']
method = item['request']['method']
body = item['response']['body']
headers = item['response']['headers']
cls.register_uri(method, uri, body=body, forcing_headers=headers)
yield
cls.disable()
def merge_nodes(self, keep_node, kill_node):
"""
Merge two nodes in the graph.
Takes two nodes and merges them together, merging their links by
combining the two link lists and summing the weights of links which
point to the same node.
All links in the graph pointing to ``kill_node`` will be merged
into ``keep_node``.
Links belonging to ``kill_node`` which point to targets not in
``self.node_list`` will not be merged into ``keep_node``
Args:
keep_node (Node): node to be kept
kill_node (Node): node to be deleted
Returns: None
Example:
>>> from blur.markov.node import Node
>>> node_1 = Node('One')
>>> node_2 = Node('Two')
>>> node_3 = Node('Three')
>>> node_1.add_link(node_3, 7)
>>> node_2.add_link(node_1, 1)
>>> node_2.add_link(node_2, 3)
>>> node_3.add_link(node_2, 5)
>>> graph = Graph([node_1, node_2, node_3])
>>> print([node.value for node in graph.node_list])
['One', 'Two', 'Three']
>>> graph.merge_nodes(node_2, node_3)
>>> print([node.value for node in graph.node_list])
['One', 'Two']
>>> for link in graph.node_list[1].link_list:
... print('{} {}'.format(link.target.value, link.weight))
One 1
Two 8
"""
# Merge links from kill_node to keep_node
for kill_link in kill_node.link_list:
if kill_link.target in self.node_list:
keep_node.add_link(kill_link.target, kill_link.weight)
# Merge any links in the graph pointing to kill_node into links
# pointing to keep_node
for node in self.node_list:
for link in node.link_list:
if link.target == kill_node:
node.add_link(keep_node, link.weight)
break
# Remove kill_node from the graph
self.remove_node(kill_node)
def get_close_matches(word, possibilities, n=None, cutoff=0.6):
"""Overrides `difflib.get_close_match` to controle argument `n`."""
if n is None:
n = settings.num_close_matches
    return difflib_get_close_matches(word, possibilities, n, cutoff) | Overrides `difflib.get_close_matches` to control argument `n`. |
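For reference, a minimal sketch of the standard-library call the helper above wraps; the inputs here are illustrative only:

import difflib

# n caps how many close matches come back; cutoff is the minimum similarity ratio.
matches = difflib.get_close_matches('appel', ['ape', 'apple', 'peach', 'puppy'], n=2, cutoff=0.6)
print(matches)  # ['apple', 'ape']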
def search(self, query, page=None, per_page=1000, mentions=3, data=False):
"""
Retrieve all objects that make a search query.
Will loop through all pages that match unless you provide
the number of pages you'd like to restrict the search to.
Example usage:
>> documentcloud.documents.search('salazar')
"""
# If the user provides a page, search it and stop there
if page:
document_list = self._get_search_page(
query,
page=page,
per_page=per_page,
mentions=mentions,
data=data,
)
# If the user doesn't provide a page keep looping until you have
# everything
else:
page = 1
document_list = []
# Loop through all the search pages and fetch everything
while True:
results = self._get_search_page(
query,
page=page,
per_page=per_page,
mentions=mentions,
data=data,
)
if results:
document_list += results
page += 1
else:
break
# Convert the JSON objects from the API into Python objects
obj_list = []
for doc in document_list:
doc['_connection'] = self._connection
obj = Document(doc)
obj_list.append(obj)
# Pass it back out
return obj_list | Retrieve all objects that make a search query.
Will loop through all pages that match unless you provide
the number of pages you'd like to restrict the search to.
Example usage:
>> documentcloud.documents.search('salazar') |
def main(args=None):
"""
Create a private key and a certificate and write them to a file.
"""
if args is None:
args = sys.argv[1:]
o = Options()
try:
o.parseOptions(args)
    except usage.UsageError as e:
raise SystemExit(str(e))
else:
return createSSLCertificate(o) | Create a private key and a certificate and write them to a file. |
def is_img_id_valid(img_id):
"""
Checks if img_id is valid.
"""
    t = re.sub(r'[^a-z0-9_:\-\.]', '', img_id, flags=re.IGNORECASE)
t = re.sub(r'\.+', '.', t)
if img_id != t or img_id.count(':') != 1:
return False
profile, base_name = img_id.split(':', 1)
if not profile or not base_name:
return False
try:
get_profile_configs(profile)
except ValueError:
return False
return True | Checks if img_id is valid. |
def update_pypsa_storage_timeseries(network, storages_to_update=None,
timesteps=None):
"""
Updates storage time series in pypsa representation.
This function overwrites p_set and q_set of storage_unit_t attribute of
pypsa network.
Be aware that if you call this function with `timesteps` and thus overwrite
current time steps it may lead to inconsistencies in the pypsa network
since only storage time series are updated but none of the other time
series or the snapshots attribute of the pypsa network. Use the function
:func:`update_pypsa_timeseries` to change the time steps you want to
analyse in the power flow analysis.
This function will also raise an error when a storage that is currently
not in the pypsa representation is added.
Parameters
----------
network : Network
The eDisGo grid topology model overall container
storages_to_update : :obj:`list`, optional
List with all storages (of type :class:`~.grid.components.Storage`)
that need to be updated. If None all storages are updated depending on
mode. See :meth:`~.tools.pypsa_io.to_pypsa` for more information.
timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
Timesteps specifies which time steps of the storage time series to
export to pypsa representation. If None all time steps currently
existing in pypsa representation are updated. If not None current time
steps are overwritten by given time steps. Default: None.
"""
_update_pypsa_timeseries_by_type(
network, type='storage', components_to_update=storages_to_update,
timesteps=timesteps) | Updates storage time series in pypsa representation.
This function overwrites p_set and q_set of storage_unit_t attribute of
pypsa network.
Be aware that if you call this function with `timesteps` and thus overwrite
current time steps it may lead to inconsistencies in the pypsa network
since only storage time series are updated but none of the other time
series or the snapshots attribute of the pypsa network. Use the function
:func:`update_pypsa_timeseries` to change the time steps you want to
analyse in the power flow analysis.
This function will also raise an error when a storage that is currently
not in the pypsa representation is added.
Parameters
----------
network : Network
The eDisGo grid topology model overall container
storages_to_update : :obj:`list`, optional
List with all storages (of type :class:`~.grid.components.Storage`)
that need to be updated. If None all storages are updated depending on
mode. See :meth:`~.tools.pypsa_io.to_pypsa` for more information.
timesteps : :pandas:`pandas.DatetimeIndex<datetimeindex>` or :pandas:`pandas.Timestamp<timestamp>`
Timesteps specifies which time steps of the storage time series to
export to pypsa representation. If None all time steps currently
existing in pypsa representation are updated. If not None current time
steps are overwritten by given time steps. Default: None. |
def freeze_matrix(script, all_layers=False):
""" Freeze the current transformation matrix into the coordinates of the
vertices of the mesh (and set this matrix to the identity).
In other words it applies in a definitive way the current matrix to the
vertex coordinates.
Args:
script: the FilterScript object or script filename to write
the filter to.
all_layers (bool): If selected the filter will be applied to all
visible mesh layers.
"""
filter_xml = ''.join([
' <filter name="Freeze Current Matrix">\n',
' <Param name="allLayers" ',
'value="%s" ' % str(all_layers).lower(),
'description="Apply to all visible Layers" ',
'type="RichBool" ',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None | Freeze the current transformation matrix into the coordinates of the
vertices of the mesh (and set this matrix to the identity).
In other words it applies in a definitive way the current matrix to the
vertex coordinates.
Args:
script: the FilterScript object or script filename to write
the filter to.
all_layers (bool): If selected the filter will be applied to all
visible mesh layers. |
def _expand_options(cls, options, backend=None):
"""
        Validates and expands dictionaries of options indexed by
type[.group][.label] keys into separate style, plot, norm and
output options.
opts._expand_options({'Image': dict(cmap='viridis', show_title=False)})
returns
{'Image': {'plot': dict(show_title=False), 'style': dict(cmap='viridis')}}
"""
current_backend = Store.current_backend
try:
backend_options = Store.options(backend=backend or current_backend)
except KeyError as e:
raise Exception('The %s backend is not loaded. Please load the backend using hv.extension.' % str(e))
expanded = {}
if isinstance(options, list):
options = merge_options_to_dict(options)
for objspec, options in options.items():
objtype = objspec.split('.')[0]
if objtype not in backend_options:
raise ValueError('%s type not found, could not apply options.'
% objtype)
obj_options = backend_options[objtype]
expanded[objspec] = {g: {} for g in obj_options.groups}
for opt, value in options.items():
found = False
valid_options = []
for g, group_opts in sorted(obj_options.groups.items()):
if opt in group_opts.allowed_keywords:
expanded[objspec][g][opt] = value
found = True
break
valid_options += group_opts.allowed_keywords
if found: continue
cls._options_error(opt, objtype, backend, valid_options)
        return expanded | Validates and expands dictionaries of options indexed by
type[.group][.label] keys into separate style, plot, norm and
output options.
opts._expand_options({'Image': dict(cmap='viridis', show_title=False)})
returns
{'Image': {'plot': dict(show_title=False), 'style': dict(cmap='viridis')}} |
def describe_splits(self, cfName, start_token, end_token, keys_per_split):
"""
experimental API for hadoop/parallel query support.
may change violently and without warning.
returns list of token strings such that first subrange is (list[0], list[1]],
next is (list[1], list[2]], etc.
Parameters:
- cfName
- start_token
- end_token
- keys_per_split
"""
self._seqid += 1
d = self._reqs[self._seqid] = defer.Deferred()
self.send_describe_splits(cfName, start_token, end_token, keys_per_split)
return d | experimental API for hadoop/parallel query support.
may change violently and without warning.
returns list of token strings such that first subrange is (list[0], list[1]],
next is (list[1], list[2]], etc.
Parameters:
- cfName
- start_token
- end_token
- keys_per_split |
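The returned tokens describe half-open subranges, as the docstring above explains. A small sketch of interpreting such a list (the token values are made up for illustration):

# Hypothetical token list returned by describe_splits.
tokens = ['0', '100', '200', '300']

# Consecutive pairs form the subranges (tokens[i], tokens[i + 1]].
subranges = list(zip(tokens[:-1], tokens[1:]))
print(subranges)  # [('0', '100'), ('100', '200'), ('200', '300')]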
def export_avg_losses(ekey, dstore):
"""
:param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object
"""
dskey = ekey[0]
oq = dstore['oqparam']
dt = oq.loss_dt()
name, value, tags = _get_data(dstore, dskey, oq.hazard_stats().items())
writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
assets = get_assets(dstore)
for tag, values in zip(tags, value.transpose(1, 0, 2)):
dest = dstore.build_fname(name, tag, 'csv')
array = numpy.zeros(len(values), dt)
for l, lt in enumerate(dt.names):
array[lt] = values[:, l]
writer.save(compose_arrays(assets, array), dest)
return writer.getsaved() | :param ekey: export key, i.e. a pair (datastore key, fmt)
:param dstore: datastore object |
def addFreetextAnnot(self, rect, text, fontsize=12, fontname=None, color=None, rotate=0):
"""Add a 'FreeText' annotation in rectangle 'rect'."""
CheckParent(self)
val = _fitz.Page_addFreetextAnnot(self, rect, text, fontsize, fontname, color, rotate)
if not val: return
val.thisown = True
val.parent = weakref.proxy(self)
self._annot_refs[id(val)] = val
return val | Add a 'FreeText' annotation in rectangle 'rect'. |
def effects(self, cursor=None, order='asc', limit=10, sse=False):
"""Retrieve the effects JSON from this instance's Horizon server.
Retrieve the effects JSON response for the account associated with
this :class:`Address`.
:param cursor: A paging token, specifying where to start returning records from.
When streaming this can be set to "now" to stream object created since your request time.
:type cursor: int, str
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:param bool sse: Use the SSE client for connecting to Horizon.
"""
return self.horizon.account_effects(
self.address, cursor=cursor, order=order, limit=limit, sse=sse) | Retrieve the effects JSON from this instance's Horizon server.
Retrieve the effects JSON response for the account associated with
this :class:`Address`.
:param cursor: A paging token, specifying where to start returning records from.
When streaming this can be set to "now" to stream object created since your request time.
:type cursor: int, str
:param str order: The order in which to return rows, "asc" or "desc".
:param int limit: Maximum number of records to return.
:param bool sse: Use the SSE client for connecting to Horizon. |
def create_button(self, style=Gtk.ReliefStyle.NORMAL):
"""
        This is a generalized method for creating Gtk.Button
"""
btn = Gtk.Button()
btn.set_relief(style)
        return btn | This is a generalized method for creating Gtk.Button |
def prepare_env(self):
"""
Manages reading environment metadata files under ``private_data_dir`` and merging/updating
with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily
"""
try:
passwords = self.loader.load_file('env/passwords', Mapping)
self.expect_passwords = {
re.compile(pattern, re.M): password
for pattern, password in iteritems(passwords)
}
except ConfigurationError:
output.debug('Not loading passwords')
self.expect_passwords = dict()
self.expect_passwords[pexpect.TIMEOUT] = None
self.expect_passwords[pexpect.EOF] = None
try:
# seed env with existing shell env
self.env = os.environ.copy()
envvars = self.loader.load_file('env/envvars', Mapping)
if envvars:
self.env.update({k:six.text_type(v) for k, v in envvars.items()})
if self.envvars and isinstance(self.envvars, dict):
self.env.update({k:six.text_type(v) for k, v in self.envvars.items()})
except ConfigurationError:
output.debug("Not loading environment vars")
# Still need to pass default environment to pexpect
self.env = os.environ.copy()
try:
self.settings = self.loader.load_file('env/settings', Mapping)
except ConfigurationError:
output.debug("Not loading settings")
self.settings = dict()
try:
self.ssh_key_data = self.loader.load_file('env/ssh_key', string_types)
except ConfigurationError:
output.debug("Not loading ssh key")
self.ssh_key_data = None
self.idle_timeout = self.settings.get('idle_timeout', None)
self.job_timeout = self.settings.get('job_timeout', None)
self.pexpect_timeout = self.settings.get('pexpect_timeout', 5)
self.process_isolation = self.settings.get('process_isolation', self.process_isolation)
self.process_isolation_executable = self.settings.get('process_isolation_executable', self.process_isolation_executable)
self.process_isolation_path = self.settings.get('process_isolation_path', self.process_isolation_path)
self.process_isolation_hide_paths = self.settings.get('process_isolation_hide_paths', self.process_isolation_hide_paths)
self.process_isolation_show_paths = self.settings.get('process_isolation_show_paths', self.process_isolation_show_paths)
self.process_isolation_ro_paths = self.settings.get('process_isolation_ro_paths', self.process_isolation_ro_paths)
self.pexpect_use_poll = self.settings.get('pexpect_use_poll', True)
self.suppress_ansible_output = self.settings.get('suppress_ansible_output', self.quiet)
self.directory_isolation_cleanup = bool(self.settings.get('directory_isolation_cleanup', True))
if 'AD_HOC_COMMAND_ID' in self.env or not os.path.exists(self.project_dir):
self.cwd = self.private_data_dir
else:
if self.directory_isolation_path is not None:
self.cwd = self.directory_isolation_path
else:
self.cwd = self.project_dir
if 'fact_cache' in self.settings:
if 'fact_cache_type' in self.settings:
if self.settings['fact_cache_type'] == 'jsonfile':
self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache'])
else:
self.fact_cache = os.path.join(self.artifact_dir, self.settings['fact_cache']) | Manages reading environment metadata files under ``private_data_dir`` and merging/updating
with existing values so the :py:class:`ansible_runner.runner.Runner` object can read and use them easily |
def generate_reciprocal_vectors_squared(a1, a2, a3, encut):
"""
    Generate reciprocal vector magnitudes within the cutoff along the specified
lattice vectors.
Args:
a1: Lattice vector a (in Bohrs)
a2: Lattice vector b (in Bohrs)
a3: Lattice vector c (in Bohrs)
encut: Reciprocal vector energy cutoff
Returns:
[[g1^2], [g2^2], ...] Square of reciprocal vectors (1/Bohr)^2
        determined by a1, a2, a3 and whose magnitude is less than gcut^2.
"""
for vec in genrecip(a1, a2, a3, encut):
        yield np.dot(vec, vec) | Generate reciprocal vector magnitudes within the cutoff along the specified
lattice vectors.
Args:
a1: Lattice vector a (in Bohrs)
a2: Lattice vector b (in Bohrs)
a3: Lattice vector c (in Bohrs)
encut: Reciprocal vector energy cutoff
Returns:
[[g1^2], [g2^2], ...] Square of reciprocal vectors (1/Bohr)^2
determined by a1, a2, a3 and whose magnitude is less than gcut^2. |
def check_uid_validity(key, email):
"""Check that a the email belongs to the given key. Also check the trust
level of this connection. Only if the trust level is high enough (>=4) the
email is assumed to belong to the key.
:param key: the GPG key to which the email should belong
:type key: gpg.gpgme._gpgme_key
:param email: the email address that should belong to the key
:type email: str
:returns: whether the key can be assumed to belong to the given email
:rtype: bool
"""
def check(key_uid):
return (email == key_uid.email and
not key_uid.revoked and
not key_uid.invalid and
key_uid.validity >= gpg.constants.validity.FULL)
    return any(check(u) for u in key.uids) | Check that the email belongs to the given key. Also check the trust
level of this connection. Only if the trust level is high enough (>=4) the
email is assumed to belong to the key.
:param key: the GPG key to which the email should belong
:type key: gpg.gpgme._gpgme_key
:param email: the email address that should belong to the key
:type email: str
:returns: whether the key can be assumed to belong to the given email
:rtype: bool |
def block_lengths(self):
"""Gets the lengths of the blocks.
Note: This works with the property structure `_lengths_cache` to avoid
having to recompute these values each time they are needed.
"""
if self._lengths_cache is None:
# The first column will have the correct lengths. We have an
# invariant that requires that all blocks be the same length in a
# row of blocks.
self._lengths_cache = np.array(
[obj.length().get() for obj in self._partitions_cache.T[0]]
if len(self._partitions_cache.T) > 0
else []
)
return self._lengths_cache | Gets the lengths of the blocks.
Note: This works with the property structure `_lengths_cache` to avoid
having to recompute these values each time they are needed. |
def get_main_chain_layers(self):
"""Return a list of layer IDs in the main chain."""
main_chain = self.get_main_chain()
ret = []
for u in main_chain:
for v, layer_id in self.adj_list[u]:
if v in main_chain and u in main_chain:
ret.append(layer_id)
return ret | Return a list of layer IDs in the main chain. |
def format_query_result(self, query_result, query_path, return_type=list, preceding_depth=None):
""" Formats the query result based on the return type requested.
:param query_result: (dict or str or list), yaml query result
:param query_path: (str, list(str)), representing query path
:param return_type: type, return type of object user desires
:param preceding_depth: int, the depth to which we want to encapsulate back up config tree
-1 : defaults to entire tree
:return: (dict, OrderedDict, str, list), specified return type
"""
if type(query_result) != return_type:
converted_result = self.format_with_handler(query_result, return_type)
else:
converted_result = query_result
converted_result = self.add_preceding_dict(converted_result, query_path, preceding_depth)
return converted_result | Formats the query result based on the return type requested.
:param query_result: (dict or str or list), yaml query result
:param query_path: (str, list(str)), representing query path
:param return_type: type, return type of object user desires
:param preceding_depth: int, the depth to which we want to encapsulate back up config tree
-1 : defaults to entire tree
:return: (dict, OrderedDict, str, list), specified return type |
def sort(self, func=None):
"""
Sorts 'self.data' in-place. Argument:
- func : optional, default 'None' --
- If 'func' not given, sorting will be in ascending
order.
- If 'func' given, it will determine the sort order.
'func' must be a two-argument comparison function
which returns -1, 0, or 1, to mean before, same,
or after ordering."""
if func:
self.data.sort(func)
else:
self.data.sort() | Sorts 'self.data' in-place. Argument:
- func : optional, default 'None' --
- If 'func' not given, sorting will be in ascending
order.
- If 'func' given, it will determine the sort order.
'func' must be a two-argument comparison function
which returns -1, 0, or 1, to mean before, same,
or after ordering. |
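The two-argument comparison function described above follows the Python 2 cmp convention. A sketch of such a comparator, shown with functools.cmp_to_key so the same idea also works on Python 3 (the container class itself is assumed; only the comparator is illustrated):

import functools

def descending(a, b):
    # Return -1, 0, or 1 to mean before, same, or after; here: larger values first.
    return (a < b) - (a > b)

data = [3, 1, 2]
data.sort(key=functools.cmp_to_key(descending))
print(data)  # [3, 2, 1]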
def dist_abs(
self,
src,
tar,
metric='euclidean',
cost=(1, 1, 0.5, 0.5),
layout='QWERTY',
):
"""Return the typo distance between two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
metric : str
Supported values include: ``euclidean``, ``manhattan``,
``log-euclidean``, and ``log-manhattan``
cost : tuple
A 4-tuple representing the cost of the four possible edits:
inserts, deletes, substitutions, and shift, respectively (by
default: (1, 1, 0.5, 0.5)) The substitution & shift costs should be
significantly less than the cost of an insertion & deletion unless
a log metric is used.
layout : str
Name of the keyboard layout to use (Currently supported:
``QWERTY``, ``Dvorak``, ``AZERTY``, ``QWERTZ``)
Returns
-------
float
Typo distance
Raises
------
ValueError
char not found in any keyboard layouts
Examples
--------
>>> cmp = Typo()
>>> cmp.dist_abs('cat', 'hat')
1.5811388
>>> cmp.dist_abs('Niall', 'Neil')
2.8251407
>>> cmp.dist_abs('Colin', 'Cuilen')
3.4142137
>>> cmp.dist_abs('ATCG', 'TAGC')
2.5
>>> cmp.dist_abs('cat', 'hat', metric='manhattan')
2.0
>>> cmp.dist_abs('Niall', 'Neil', metric='manhattan')
3.0
>>> cmp.dist_abs('Colin', 'Cuilen', metric='manhattan')
3.5
>>> cmp.dist_abs('ATCG', 'TAGC', metric='manhattan')
2.5
>>> cmp.dist_abs('cat', 'hat', metric='log-manhattan')
0.804719
>>> cmp.dist_abs('Niall', 'Neil', metric='log-manhattan')
2.2424533
>>> cmp.dist_abs('Colin', 'Cuilen', metric='log-manhattan')
2.2424533
>>> cmp.dist_abs('ATCG', 'TAGC', metric='log-manhattan')
2.3465736
"""
ins_cost, del_cost, sub_cost, shift_cost = cost
if src == tar:
return 0.0
if not src:
return len(tar) * ins_cost
if not tar:
return len(src) * del_cost
keyboard = self._keyboard[layout]
lowercase = {item for sublist in keyboard[0] for item in sublist}
uppercase = {item for sublist in keyboard[1] for item in sublist}
def _kb_array_for_char(char):
"""Return the keyboard layout that contains ch.
Parameters
----------
char : str
The character to lookup
Returns
-------
tuple
A keyboard
Raises
------
ValueError
char not found in any keyboard layouts
"""
if char in lowercase:
return keyboard[0]
elif char in uppercase:
return keyboard[1]
raise ValueError(char + ' not found in any keyboard layouts')
def _substitution_cost(char1, char2):
cost = sub_cost
cost *= metric_dict[metric](char1, char2) + shift_cost * (
_kb_array_for_char(char1) != _kb_array_for_char(char2)
)
return cost
def _get_char_coord(char, kb_array):
"""Return the row & column of char in the keyboard.
Parameters
----------
char : str
The character to search for
kb_array : tuple of tuples
The array of key positions
Returns
-------
tuple
The row & column of the key
"""
for row in kb_array: # pragma: no branch
if char in row:
return kb_array.index(row), row.index(char)
def _euclidean_keyboard_distance(char1, char2):
row1, col1 = _get_char_coord(char1, _kb_array_for_char(char1))
row2, col2 = _get_char_coord(char2, _kb_array_for_char(char2))
return ((row1 - row2) ** 2 + (col1 - col2) ** 2) ** 0.5
def _manhattan_keyboard_distance(char1, char2):
row1, col1 = _get_char_coord(char1, _kb_array_for_char(char1))
row2, col2 = _get_char_coord(char2, _kb_array_for_char(char2))
return abs(row1 - row2) + abs(col1 - col2)
def _log_euclidean_keyboard_distance(char1, char2):
return log(1 + _euclidean_keyboard_distance(char1, char2))
def _log_manhattan_keyboard_distance(char1, char2):
return log(1 + _manhattan_keyboard_distance(char1, char2))
metric_dict = {
'euclidean': _euclidean_keyboard_distance,
'manhattan': _manhattan_keyboard_distance,
'log-euclidean': _log_euclidean_keyboard_distance,
'log-manhattan': _log_manhattan_keyboard_distance,
}
d_mat = np_zeros((len(src) + 1, len(tar) + 1), dtype=np_float32)
for i in range(len(src) + 1):
d_mat[i, 0] = i * del_cost
for j in range(len(tar) + 1):
d_mat[0, j] = j * ins_cost
for i in range(len(src)):
for j in range(len(tar)):
d_mat[i + 1, j + 1] = min(
d_mat[i + 1, j] + ins_cost, # ins
d_mat[i, j + 1] + del_cost, # del
d_mat[i, j]
+ (
_substitution_cost(src[i], tar[j])
if src[i] != tar[j]
else 0
), # sub/==
)
return d_mat[len(src), len(tar)] | Return the typo distance between two strings.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
metric : str
Supported values include: ``euclidean``, ``manhattan``,
``log-euclidean``, and ``log-manhattan``
cost : tuple
A 4-tuple representing the cost of the four possible edits:
inserts, deletes, substitutions, and shift, respectively (by
default: (1, 1, 0.5, 0.5)) The substitution & shift costs should be
significantly less than the cost of an insertion & deletion unless
a log metric is used.
layout : str
Name of the keyboard layout to use (Currently supported:
``QWERTY``, ``Dvorak``, ``AZERTY``, ``QWERTZ``)
Returns
-------
float
Typo distance
Raises
------
ValueError
char not found in any keyboard layouts
Examples
--------
>>> cmp = Typo()
>>> cmp.dist_abs('cat', 'hat')
1.5811388
>>> cmp.dist_abs('Niall', 'Neil')
2.8251407
>>> cmp.dist_abs('Colin', 'Cuilen')
3.4142137
>>> cmp.dist_abs('ATCG', 'TAGC')
2.5
>>> cmp.dist_abs('cat', 'hat', metric='manhattan')
2.0
>>> cmp.dist_abs('Niall', 'Neil', metric='manhattan')
3.0
>>> cmp.dist_abs('Colin', 'Cuilen', metric='manhattan')
3.5
>>> cmp.dist_abs('ATCG', 'TAGC', metric='manhattan')
2.5
>>> cmp.dist_abs('cat', 'hat', metric='log-manhattan')
0.804719
>>> cmp.dist_abs('Niall', 'Neil', metric='log-manhattan')
2.2424533
>>> cmp.dist_abs('Colin', 'Cuilen', metric='log-manhattan')
2.2424533
>>> cmp.dist_abs('ATCG', 'TAGC', metric='log-manhattan')
2.3465736 |
def makeBasicSolution(self,EndOfPrdvP,aNrm,interpolator):
'''
Given end of period assets and end of period marginal value, construct
the basic solution for this period.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aNrm : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
interpolator : function
A function that constructs and returns a consumption function.
Returns
-------
solution_now : ConsumerSolution
The solution to this period's consumption-saving problem, with a
consumption function, marginal value function, and minimum m.
'''
cNrm,mNrm = self.getPointsForInterpolation(EndOfPrdvP,aNrm)
solution_now = self.usePointsForInterpolation(cNrm,mNrm,interpolator)
return solution_now | Given end of period assets and end of period marginal value, construct
the basic solution for this period.
Parameters
----------
EndOfPrdvP : np.array
Array of end-of-period marginal values.
aNrm : np.array
Array of end-of-period asset values that yield the marginal values
in EndOfPrdvP.
interpolator : function
A function that constructs and returns a consumption function.
Returns
-------
solution_now : ConsumerSolution
The solution to this period's consumption-saving problem, with a
consumption function, marginal value function, and minimum m. |
def compute_between_collection_interval_duration(self, prefix):
"""Calculates BETWEEN-collection intervals for the current collection and measure type
and takes their mean.
:param str prefix: Prefix for the key entry in self.measures.
Negative intervals (for overlapping clusters) are counted as 0 seconds. Intervals are
calculated as being the difference between the ending time of the last word in a collection
and the start time of the first word in the subsequent collection.
Note that these intervals are not necessarily silences, and may include asides, filled
pauses, words from the examiner, etc.
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_between_collection_interval_duration_mean:
average interval duration separating clusters
"""
durations = [] # duration of each collection
for collection in self.collection_list:
# Entry, with timing, in timed_response for first word in collection
start = collection[0].start_time
# Entry, with timing, in timed_response for last word in collection
end = collection[-1].end_time
durations.append((start, end))
        # calculate between-collection intervals
interstices = [durations[i + 1][0] - durations[i][1] for i, d in enumerate(durations[:-1])]
# Replace negative interstices (for overlapping clusters) with
# interstices of duration 0
for i, entry in enumerate(interstices):
if interstices[i] < 0:
interstices[i] = 0
self.measures[prefix + 'between_collection_interval_duration_mean'] = get_mean(interstices) \
if len(interstices) > 0 else 'NA'
if not self.quiet:
            print()
            print(self.current_similarity_measure + " between-" + self.current_collection_type + " durations")
table = [(self.current_collection_type + " 1 (start,end)", "Interval",
self.current_collection_type + " 2 (start,end)")] + \
[(str(d1), str(i1), str(d2)) for d1, i1, d2 in zip(durations[:-1], interstices, durations[1:])]
print_table(table)
            print()
            print("Mean " + self.current_similarity_measure + " between-" + self.current_collection_type + " duration",
                  self.measures[prefix + 'between_collection_interval_duration_mean']) | Calculates BETWEEN-collection intervals for the current collection and measure type
and takes their mean.
:param str prefix: Prefix for the key entry in self.measures.
Negative intervals (for overlapping clusters) are counted as 0 seconds. Intervals are
calculated as being the difference between the ending time of the last word in a collection
and the start time of the first word in the subsequent collection.
Note that these intervals are not necessarily silences, and may include asides, filled
pauses, words from the examiner, etc.
Adds the following measures to the self.measures dictionary:
- TIMING_(similarity_measure)_(collection_type)_between_collection_interval_duration_mean:
average interval duration separating clusters |
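A small worked example of the interval computation described above, with made-up (start, end) times in seconds; an overlap between collections yields a negative gap, which is clamped to 0:

# Hypothetical collection boundaries: (start of first word, end of last word).
durations = [(0.0, 1.2), (1.0, 2.5), (3.0, 4.1)]

# Gap between collection i and i + 1: next start minus current end, clamped at 0.
interstices = [max(0, durations[i + 1][0] - durations[i][1]) for i in range(len(durations) - 1)]
print(interstices)                          # [0, 0.5]
print(sum(interstices) / len(interstices))  # mean gap: 0.25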
def query(cls, url=urljoin(config.API_URL, 'stac/search'), **kwargs):
""" Get request """
logger.debug('Query URL: %s, Body: %s' % (url, json.dumps(kwargs)))
response = requests.post(url, data=json.dumps(kwargs))
# API error
if response.status_code != 200:
raise SatSearchError(response.text)
return response.json() | Get request |
def user_roles_exists(name, roles, database, user=None, password=None, host=None,
port=None, authdb=None):
'''
Checks if a user of a MongoDB database has specified roles
CLI Examples:
.. code-block:: bash
salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017
.. code-block:: bash
salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017
'''
try:
roles = _to_dict(roles)
except Exception:
return 'Roles provided in wrong format'
users = user_list(user, password, host, port, database, authdb)
if isinstance(users, six.string_types):
return 'Failed to connect to mongo database'
for user in users:
if name == dict(user).get('user'):
for role in roles:
# if the role was provided in the shortened form, we convert it to a long form
if not isinstance(role, dict):
role = {'role': role, 'db': database}
if role not in dict(user).get('roles', []):
return False
return True
return False | Checks if a user of a MongoDB database has specified roles
CLI Examples:
.. code-block:: bash
salt '*' mongodb.user_roles_exists johndoe '["readWrite"]' dbname admin adminpwd localhost 27017
.. code-block:: bash
salt '*' mongodb.user_roles_exists johndoe '[{"role": "readWrite", "db": "dbname" }, {"role": "read", "db": "otherdb"}]' dbname admin adminpwd localhost 27017 |
def set_public_domain(self, public_domain=None):
"""Sets the public domain flag.
:param public_domain: the public domain status
:type public_domain: ``boolean``
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.*
"""
if public_domain is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA['public_domain'])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(public_domain, metadata, array=False):
self._my_map['publicDomain'] = public_domain
else:
raise InvalidArgument() | Sets the public domain flag.
:param public_domain: the public domain status
:type public_domain: ``boolean``
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
*compliance: mandatory -- This method must be implemented.* |
def flatten_all(nested_iterable):
"""Flatten arbitrary depth of nesting. Good for unknown nesting structure
iterable object.
Example::
>>> list(flatten_all([[1, 2], "abc", [3, ["x", "y", "z"]], 4]))
[1, 2, "abc", 3, "x", "y", "z", 4]
    **Note**
    Flattens a list of arbitrary nesting depth into a one-dimensional list.
    hasattr(i, "__iter__") is used to test whether an item is iterable because it
    performs better than any alternative, e.g. isinstance(i, collections.Iterable).
"""
for item in nested_iterable:
if hasattr(item, "__iter__") and not isinstance(item, string_types):
for i in flatten_all(item):
yield i
else:
            yield item | Flatten arbitrary depth of nesting. Useful for iterables with an
unknown nesting structure.
Example::
>>> list(flatten_all([[1, 2], "abc", [3, ["x", "y", "z"]], 4]))
[1, 2, "abc", 3, "x", "y", "z", 4]
**Note**
Flattens a list of arbitrary nesting depth into a one-dimensional list.
hasattr(i, "__iter__") is used to test whether an item is iterable because it
performs better than any alternative, e.g. isinstance(i, collections.Iterable). |
def nailgunned_stdio(cls, sock, env, handle_stdin=True):
"""Redirects stdio to the connected socket speaking the nailgun protocol."""
# Determine output tty capabilities from the environment.
stdin_isatty, stdout_isatty, stderr_isatty = NailgunProtocol.isatty_from_env(env)
is_tty_capable = all((stdin_isatty, stdout_isatty, stderr_isatty))
if is_tty_capable:
with cls._tty_stdio(env) as finalizer:
yield finalizer
else:
with cls._pipe_stdio(
sock,
stdin_isatty,
stdout_isatty,
stderr_isatty,
handle_stdin
) as finalizer:
yield finalizer | Redirects stdio to the connected socket speaking the nailgun protocol. |
def submit(recaptcha_response, private_key, remoteip):
"""
Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_response -- The value of reCAPTCHA response from the form
private_key -- your reCAPTCHA private key
remoteip -- the user's ip address
"""
params = urlencode({
"secret": private_key,
"response": recaptcha_response,
"remoteip": remoteip,
})
if not PY2:
params = params.encode("utf-8")
response = recaptcha_request(params)
data = json.loads(response.read().decode("utf-8"))
response.close()
return RecaptchaResponse(
is_valid=data["success"],
error_codes=data.get("error-codes")
) | Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_response -- The value of reCAPTCHA response from the form
private_key -- your reCAPTCHA private key
remoteip -- the user's ip address |
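A sketch of calling the function above from a request handler; the key, response token, and IP address are placeholders, and the call performs a real HTTP request to the reCAPTCHA verification endpoint:

# Placeholder values; in practice these come from the submitted form and your reCAPTCHA account.
result = submit(
    recaptcha_response='form-response-token',
    private_key='your-secret-key',
    remoteip='203.0.113.7',
)
if result.is_valid:
    print('captcha passed')
else:
    print('captcha failed:', result.error_codes)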
def encode(self):
"""Encode this matrix in binary suitable for including in a PDF"""
return '{:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f}'.format(
self.a, self.b, self.c, self.d, self.e, self.f
).encode() | Encode this matrix in binary suitable for including in a PDF |
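A minimal sketch of the output for an identity transform, using a stand-in object that only carries the six coefficients the method reads:

from types import SimpleNamespace

# Stand-in for the matrix object above; a..f are the six PDF transformation coefficients.
m = SimpleNamespace(a=1, b=0, c=0, d=1, e=0, f=0)
encoded = '{:.6f} {:.6f} {:.6f} {:.6f} {:.6f} {:.6f}'.format(m.a, m.b, m.c, m.d, m.e, m.f).encode()
print(encoded)  # b'1.000000 0.000000 0.000000 1.000000 0.000000 0.000000'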
def el_is_empty(el):
"""Return ``True`` if tuple ``el`` represents an empty XML element."""
if len(el) == 1 and not isinstance(el[0], (list, tuple)):
return True
subels_are_empty = []
for subel in el:
if isinstance(subel, (list, tuple)):
subels_are_empty.append(el_is_empty(subel))
else:
subels_are_empty.append(not bool(subel))
return all(subels_are_empty) | Return ``True`` if tuple ``el`` represents an empty XML element. |
def _connect(self):
"""Establish connection to MySQL Database."""
if self._connParams:
self._conn = MySQLdb.connect(**self._connParams)
else:
self._conn = MySQLdb.connect('') | Establish connection to MySQL Database. |
def construct_pipeline_block_lambda(env='',
generated=None,
previous_env=None,
region='us-east-1',
region_subnets=None,
settings=None,
pipeline_data=None):
"""Create the Pipeline JSON from template.
This handles the common repeatable patterns in a pipeline, such as
judgement, infrastructure, tagger and qe.
Args:
env (str): Deploy environment name, e.g. dev, stage, prod.
generated (gogoutils.Generator): Gogo Application name generator.
previous_env (str): The previous deploy environment to use as
Trigger.
region (str): AWS Region to deploy to.
settings (dict): Environment settings from configurations.
region_subnets (dict): Subnets for a Region, e.g.
{'us-west-2': ['us-west-2a', 'us-west-2b', 'us-west-2c']}.
Returns:
dict: Pipeline JSON template rendered with configurations.
"""
LOG.info('%s block for [%s].', env, region)
if env.startswith('prod'):
template_name = 'pipeline/pipeline_{}_lambda.json.j2'.format(env)
else:
template_name = 'pipeline/pipeline_stages_lambda.json.j2'
LOG.debug('%s info:\n%s', env, pformat(settings))
gen_app_name = generated.app_name()
user_data = generate_encoded_user_data(
env=env,
region=region,
generated=generated,
group_name=generated.project,
)
# Use different variable to keep template simple
instance_security_groups = sorted(DEFAULT_EC2_SECURITYGROUPS[env])
instance_security_groups.append(gen_app_name)
instance_security_groups.extend(settings['security_group']['instance_extras'])
instance_security_groups = remove_duplicate_sg(instance_security_groups)
LOG.info('Instance security groups to attach: %s', instance_security_groups)
data = copy.deepcopy(settings)
data['app'].update({
'appname': gen_app_name,
'repo_name': generated.repo,
'group_name': generated.project,
'environment': env,
'region': region,
'az_dict': json.dumps(region_subnets),
'previous_env': previous_env,
'encoded_user_data': user_data,
'instance_security_groups': json.dumps(instance_security_groups),
'promote_restrict': pipeline_data['promote_restrict'],
'owner_email': pipeline_data['owner_email'],
'function_name': pipeline_data['lambda']['handler']
})
LOG.debug('Block data:\n%s', pformat(data))
pipeline_json = get_template(template_file=template_name, data=data, formats=generated)
return pipeline_json | Create the Pipeline JSON from template.
This handles the common repeatable patterns in a pipeline, such as
judgement, infrastructure, tagger and qe.
Args:
env (str): Deploy environment name, e.g. dev, stage, prod.
generated (gogoutils.Generator): Gogo Application name generator.
previous_env (str): The previous deploy environment to use as
Trigger.
region (str): AWS Region to deploy to.
settings (dict): Environment settings from configurations.
region_subnets (dict): Subnets for a Region, e.g.
{'us-west-2': ['us-west-2a', 'us-west-2b', 'us-west-2c']}.
Returns:
dict: Pipeline JSON template rendered with configurations. |
def compute_svd(X, n_components, n_iter, random_state, engine):
"""Computes an SVD with k components."""
# Determine what SVD engine to use
if engine == 'auto':
engine = 'sklearn'
# Compute the SVD
if engine == 'fbpca':
if FBPCA_INSTALLED:
U, s, V = fbpca.pca(X, k=n_components, n_iter=n_iter)
else:
raise ValueError('fbpca is not installed; please install it if you want to use it')
elif engine == 'sklearn':
U, s, V = extmath.randomized_svd(
X,
n_components=n_components,
n_iter=n_iter,
random_state=random_state
)
else:
raise ValueError("engine has to be one of ('auto', 'fbpca', 'sklearn')")
U, V = extmath.svd_flip(U, V)
return U, s, V | Computes an SVD with k components. |
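A sketch of calling the function above with the scikit-learn backend on a small random matrix; only the factor shapes are shown, since the values depend on the random state:

import numpy as np

# compute_svd is the function defined above.
X = np.random.RandomState(0).rand(20, 10)
U, s, V = compute_svd(X, n_components=3, n_iter=5, random_state=0, engine='sklearn')
print(U.shape, s.shape, V.shape)  # (20, 3) (3,) (3, 10)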
def get_include_path():
""" Default include path using a tricky sys
calls.
"""
f1 = os.path.basename(sys.argv[0]).lower() # script filename
f2 = os.path.basename(sys.executable).lower() # Executable filename
# If executable filename and script name are the same, we are
if f1 == f2 or f2 == f1 + '.exe': # under a "compiled" python binary
result = os.path.dirname(os.path.realpath(sys.executable))
else:
result = os.path.dirname(os.path.realpath(__file__))
    return result | Default include path using tricky sys
calls. |
def export_obj_str(surface, **kwargs):
""" Exports surface(s) as a .obj file (string).
Keyword Arguments:
* ``vertex_spacing``: size of the triangle edge in terms of surface points sampled. *Default: 2*
* ``vertex_normals``: if True, then computes vertex normals. *Default: False*
* ``parametric_vertices``: if True, then adds parameter space vertices. *Default: False*
* ``update_delta``: use multi-surface evaluation delta for all surfaces. *Default: True*
:param surface: surface or surfaces to be saved
:type surface: abstract.Surface or multi.SurfaceContainer
:return: contents of the .obj file generated
:rtype: str
"""
# Get keyword arguments
vertex_spacing = int(kwargs.get('vertex_spacing', 1))
include_vertex_normal = kwargs.get('vertex_normals', False)
include_param_vertex = kwargs.get('parametric_vertices', False)
update_delta = kwargs.get('update_delta', True)
# Input validity checking
if surface.pdimension != 2:
raise exch.GeomdlException("Can only export surfaces")
if vertex_spacing < 1:
raise exch.GeomdlException("Vertex spacing should be bigger than zero")
# Create the string and start adding triangulated surface points
line = "# Generated by geomdl\n"
vertex_offset = 0 # count the vertices to update the face numbers correctly
# Initialize lists for geometry data
str_v = [] # vertices
str_vn = [] # vertex normals
str_vp = [] # parameter space vertices
str_f = [] # faces
# Loop through SurfaceContainer object
for srf in surface:
# Set surface evaluation delta
if update_delta:
srf.sample_size_u = surface.sample_size_u
srf.sample_size_v = surface.sample_size_v
# Tessellate surface
srf.tessellate(vertex_spacing=vertex_spacing)
vertices = srf.tessellator.vertices
triangles = srf.tessellator.faces
# Collect vertices
for vert in vertices:
temp = "v " + str(vert.x) + " " + str(vert.y) + " " + str(vert.z) + "\n"
str_v.append(temp)
# Collect parameter space vertices
if include_param_vertex:
for vert in vertices:
temp = "vp " + str(vert.uv[0]) + " " + str(vert.uv[1]) + "\n"
str_vp.append(temp)
# Compute vertex normals
if include_vertex_normal:
for vert in vertices:
sn = operations.normal(srf, vert.uv)
temp = "vn " + str(sn[1][0]) + " " + str(sn[1][1]) + " " + str(sn[1][2]) + "\n"
str_vn.append(temp)
# Collect faces (1-indexed)
for t in triangles:
vl = t.data
temp = "f " + \
str(vl[0] + 1 + vertex_offset) + " " + \
str(vl[1] + 1 + vertex_offset) + " " + \
str(vl[2] + 1 + vertex_offset) + "\n"
str_f.append(temp)
# Update vertex offset
vertex_offset = len(str_v)
# Write all collected data to the return string
for lv in str_v:
line += lv
for lvn in str_vn:
line += lvn
for lvp in str_vp:
line += lvp
for lf in str_f:
line += lf
return line | Exports surface(s) as a .obj file (string).
Keyword Arguments:
* ``vertex_spacing``: size of the triangle edge in terms of surface points sampled. *Default: 2*
* ``vertex_normals``: if True, then computes vertex normals. *Default: False*
* ``parametric_vertices``: if True, then adds parameter space vertices. *Default: False*
* ``update_delta``: use multi-surface evaluation delta for all surfaces. *Default: True*
:param surface: surface or surfaces to be saved
:type surface: abstract.Surface or multi.SurfaceContainer
:return: contents of the .obj file generated
:rtype: str |
def token(self):
"""
Token function. Contains 2 hacks:
1. Injects ';' into blocks where the last property
leaves out the ;
2. Strips out whitespace from nonsignificant locations
to ease parsing.
"""
if self.next_:
t = self.next_
self.next_ = None
return t
while True:
t = self.lexer.token()
if not t:
return t
if t.type == 't_ws' and (
self.pretok or
(self.last and self.last.type not in self.significant_ws)):
continue
self.pretok = False
if t.type == 't_bclose' and self.last and self.last.type not in ['t_bopen', 't_bclose'] and self.last.type != 't_semicolon' \
and not (hasattr(t, 'lexer') and (t.lexer.lexstate == 'escapequotes' or t.lexer.lexstate == 'escapeapostrophe')):
self.next_ = t
tok = lex.LexToken()
tok.type = 't_semicolon'
tok.value = ';'
tok.lineno = t.lineno
tok.lexpos = t.lexpos
self.last = tok
self.lexer.in_property_decl = False
return tok
self.last = t
break
return t | Token function. Contains 2 hacks:
1. Injects ';' into blocks where the last property
leaves out the ;
2. Strips out whitespace from nonsignificant locations
to ease parsing. |
def listener(self, acceptor, wrapper):
"""
Listens for new connections to the manager's endpoint. Once a
new connection is received, a TCPTendril object is generated
for it and it is passed to the acceptor, which must initialize
the state of the connection. If no acceptor is given, no new
connections can be initialized.
:param acceptor: If given, specifies a callable that will be
called with each newly received TCPTendril;
that callable is responsible for initial
acceptance of the connection and for setting
up the initial state of the connection. If
not given, no new connections will be
accepted by the TCPTendrilManager.
:param wrapper: A callable taking, as its first argument, a
socket.socket object. The callable must
return a valid proxy for the socket.socket
object, which will subsequently be used to
communicate on the connection.
"""
# If we have no acceptor, there's nothing for us to do here
if not acceptor:
# Not listening on anything
self.local_addr = None
# Just sleep in a loop
while True:
gevent.sleep(600)
return # Pragma: nocover
# OK, set up the socket
sock = socket.socket(self.addr_family, socket.SOCK_STREAM)
with utils.SocketCloser(sock):
# Set up SO_REUSEADDR
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind to our endpoint
sock.bind(self.endpoint)
# Get the assigned port number
self.local_addr = sock.getsockname()
# Call any wrappers
if wrapper:
sock = wrapper(sock)
# Initiate listening
sock.listen(self.backlog)
# OK, now go into an accept loop with an error threshold of 10
closer = utils.SocketCloser(sock, 10,
ignore=[application.RejectConnection])
while True:
with closer:
cli, addr = sock.accept()
# OK, the connection has been accepted; construct a
# Tendril for it
tend = TCPTendril(self, cli, addr)
# Set up the application
with utils.SocketCloser(cli):
tend.application = acceptor(tend)
# Make sure we track the new tendril, but only if
# the acceptor doesn't throw any exceptions
self._track_tendril(tend)
# Start the tendril
tend._start() | Listens for new connections to the manager's endpoint. Once a
new connection is received, a TCPTendril object is generated
for it and it is passed to the acceptor, which must initialize
the state of the connection. If no acceptor is given, no new
connections can be initialized.
:param acceptor: If given, specifies a callable that will be
called with each newly received TCPTendril;
that callable is responsible for initial
acceptance of the connection and for setting
up the initial state of the connection. If
not given, no new connections will be
accepted by the TCPTendrilManager.
:param wrapper: A callable taking, as its first argument, a
socket.socket object. The callable must
return a valid proxy for the socket.socket
object, which will subsequently be used to
communicate on the connection. |
def bug_activity(self, bug_id):
"""Get the activity of a bug in HTML format.
:param bug_id: bug identifier
"""
params = {
self.PBUG_ID: bug_id
}
response = self.call(self.CGI_BUG_ACTIVITY, params)
return response | Get the activity of a bug in HTML format.
:param bug_id: bug identifier |
def completions(self):
"""
Return :class:`classes.Completion` objects. Those objects contain
information about the completions, more than just names.
:return: Completion objects, sorted by name.
:rtype: list of :class:`classes.Completion`
"""
debug.speed('completions start')
comps = self._evaluator.complete()
debug.speed('completions end')
return sorted(comps, key=lambda x: (x.name.lower())) | Return :class:`classes.Completion` objects. Those objects contain
information about the completions, more than just names.
:return: Completion objects, sorted by name.
:rtype: list of :class:`classes.Completion` |
def to_dict(self):
"""Convert back to the pstats dictionary representation (used for saving back as pstats binary file)"""
if self.subcall is not None:
if isinstance(self.subcall, dict):
subcalls = self.subcall
else:
subcalls = {}
for s in self.subcall:
subcalls.update(s.to_dict())
return {(self.filename, self.line_number, self.name): \
(self.ncalls, self.nonrecursive_calls, self.own_time_s, self.cummulative_time_s, subcalls)}
else:
return {(self.filename, self.line_number, self.name): \
(self.ncalls, self.nonrecursive_calls, self.own_time_s, self.cummulative_time_s)} | Convert back to the pstats dictionary representation (used for saving back as pstats binary file) |
def currentVersion(self):
""" returns the current version of the site """
if self._currentVersion is None:
self.__init(self._url)
return self._currentVersion | returns the current version of the site |
def _qteMouseClicked(self, widgetObj):
"""
Update the Qtmacs internal focus state as the result of a mouse click.
|Args|
* ``new`` (**QWidget**): the widget that received the focus.
|Returns|
* **None**
|Raises|
* **None**
"""
# ------------------------------------------------------------
# The following cases for widgetObj have to be distinguished:
# 1: not part of the Qtmacs widget hierarchy
# 2: part of the Qtmacs widget hierarchy but not registered
# 3: registered with Qtmacs and an applet
# 4: registered with Qtmacs and anything but an applet
# ------------------------------------------------------------
# Case 1: return immediately if widgetObj is not part of the
# Qtmacs widget hierarchy; otherwise, declare the applet
# containing the widgetObj active.
app = qteGetAppletFromWidget(widgetObj)
if app is None:
return
else:
self._qteActiveApplet = app
# Case 2: unregistered widgets are activated immediately.
if not hasattr(widgetObj, '_qteAdmin'):
self._qteActiveApplet.qteMakeWidgetActive(widgetObj)
else:
if app._qteAdmin.isQtmacsApplet:
# Case 3: widgetObj is a QtmacsApplet instance; do not
# focus any of its widgets as the focus manager will
# take care of it.
self._qteActiveApplet.qteMakeWidgetActive(None)
else:
# Case 4: widgetObj was registered with qteAddWidget
# and can thus be focused directly.
self._qteActiveApplet.qteMakeWidgetActive(widgetObj)
# Trigger the focus manager.
self._qteFocusManager() | Update the Qtmacs internal focus state as the result of a mouse click.
|Args|
* ``new`` (**QWidget**): the widget that received the focus.
|Returns|
* **None**
|Raises|
* **None** |
def colors(self, color_code):
"""Change the foreground and background colors for subsequently printed characters.
None resets colors to their original values (when class was instantiated).
Since setting a color requires including both foreground and background codes (merged), setting just the
foreground color resets the background color to black, and vice versa.
This function first gets the current background and foreground colors, merges in the requested color code, and
sets the result.
However if we need to remove just the foreground color but leave the background color the same (or vice versa)
such as when {/red} is used, we must merge the default foreground color with the current background color. This
is the reason for those negative values.
:param int color_code: Color code from WINDOWS_CODES.
"""
if color_code is None:
color_code = WINDOWS_CODES['/all']
# Get current color code.
current_fg, current_bg = self.colors
# Handle special negative codes. Also determine the final color code.
if color_code == WINDOWS_CODES['/fg']:
final_color_code = self.default_fg | current_bg # Reset the foreground only.
elif color_code == WINDOWS_CODES['/bg']:
final_color_code = current_fg | self.default_bg # Reset the background only.
elif color_code == WINDOWS_CODES['/all']:
final_color_code = self.default_fg | self.default_bg # Reset both.
elif color_code == WINDOWS_CODES['bgblack']:
final_color_code = current_fg # Black background.
else:
new_is_bg = color_code in self.ALL_BG_CODES
final_color_code = color_code | (current_fg if new_is_bg else current_bg)
# Set new code.
self._kernel32.SetConsoleTextAttribute(self._stream_handle, final_color_code) | Change the foreground and background colors for subsequently printed characters.
None resets colors to their original values (when class was instantiated).
Since setting a color requires including both foreground and background codes (merged), setting just the
foreground color resets the background color to black, and vice versa.
This function first gets the current background and foreground colors, merges in the requested color code, and
sets the result.
However if we need to remove just the foreground color but leave the background color the same (or vice versa)
such as when {/red} is used, we must merge the default foreground color with the current background color. This
is the reason for those negative values.
:param int color_code: Color code from WINDOWS_CODES. |
def softmax(x: np.ndarray,
b: float = 1.0) -> np.ndarray:
r"""
Standard softmax function:
.. math::
        P_i = \frac {e ^ {\beta \cdot x_i}} { \sum_{j} e ^ {\beta \cdot x_j} }
Args:
x: vector (``numpy.array``) of values
b: exploration parameter :math:`\beta`, or inverse temperature
[Daw2009], or :math:`1/t`; see below
Returns:
vector of probabilities corresponding to the input values
where:
- :math:`t` is temperature (towards infinity: all actions equally likely;
towards zero: probability of action with highest value tends to 1)
- Temperature is not used directly as optimizers may take it to zero,
giving an infinity; use inverse temperature instead.
- [Daw2009] Daw ND, "Trial-by-trial data analysis using computational
methods", 2009/2011; in "Decision Making, Affect, and Learning: Attention
and Performance XXIII"; Delgado MR, Phelps EA, Robbins TW (eds),
Oxford University Press.
"""
constant = np.mean(x)
products = x * b - constant
# ... softmax is invariant to addition of a constant: Daw article and
# http://www.faqs.org/faqs/ai-faq/neural-nets/part2/section-12.html#b
# noinspection PyUnresolvedReferences
if products.max() > sys.float_info.max_exp:
# ... max_exp for base e; max_10_exp for base 10
log.warning("OVERFLOW in softmax(): x = {}, b = {}, constant = {}, "
"x*b - constant = {}".format(x, b, constant, products))
# map the maximum to 1, other things to zero
n = len(x)
index_of_max = np.argmax(products)
answer = np.zeros(n)
answer[index_of_max] = 1.0
else:
# noinspection PyUnresolvedReferences
exponented = np.exp(products)
answer = exponented / np.sum(exponented)
return answer | r"""
Standard softmax function:
.. math::
P_i = \frac {e ^ {\beta \cdot x_i}} { \sum_{j} e ^ {\beta \cdot x_j} }
Args:
x: vector (``numpy.array``) of values
b: exploration parameter :math:`\beta`, or inverse temperature
[Daw2009], or :math:`1/t`; see below
Returns:
vector of probabilities corresponding to the input values
where:
- :math:`t` is temperature (towards infinity: all actions equally likely;
towards zero: probability of action with highest value tends to 1)
- Temperature is not used directly as optimizers may take it to zero,
giving an infinity; use inverse temperature instead.
- [Daw2009] Daw ND, "Trial-by-trial data analysis using computational
methods", 2009/2011; in "Decision Making, Affect, and Learning: Attention
and Performance XXIII"; Delgado MR, Phelps EA, Robbins TW (eds),
Oxford University Press. |
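A quick numerical check of the function above: the outputs are non-negative, sum to 1, and a larger inverse temperature b sharpens the distribution (values shown are approximate):

import numpy as np

# softmax is the function defined above.
x = np.array([1.0, 2.0, 3.0])
p = softmax(x)              # roughly [0.090, 0.245, 0.665]
p_sharp = softmax(x, b=5.0)
print(round(p.sum(), 6))    # 1.0
print(p_sharp.argmax())     # 2 -- the largest input dominates even more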
def get(self, key, value):
"""Get single app by one of id or name
Supports resource cache
Keyword Args:
id (str): Full app id
name (str): App name
Returns:
App: Corresponding App resource instance
Raises:
TypeError: No or multiple keyword arguments provided
ValueError: No matching app found on server
"""
if key == 'id':
# Server returns 204 instead of 404 for a non-existent app id
response = self._swimlane.request('get', 'app/{}'.format(value))
if response.status_code == 204:
raise ValueError('No app with id "{}"'.format(value))
return App(
self._swimlane,
response.json()
)
else:
# Workaround for lack of support for get by name
# Holdover from previous driver support, to be fixed as part of 3.x
for app in self.list():
if value and value == app.name:
return app
# No matching app found
raise ValueError('No app with name "{}"'.format(value)) | Get single app by one of id or name
Supports resource cache
Keyword Args:
id (str): Full app id
name (str): App name
Returns:
App: Corresponding App resource instance
Raises:
TypeError: No or multiple keyword arguments provided
ValueError: No matching app found on server |