def simple_db_engine(reader=None, srnos=None):
"""engine that gets values from the simple excel 'db'"""
if reader is None:
reader = dbreader.Reader()
logger.debug("No reader provided. Creating one myself.")
info_dict = dict()
info_dict["filenames"] = [reader.get_cell_name(srno) for srno in srnos]
info_dict["masses"] = [reader.get_mass(srno) for srno in srnos]
info_dict["total_masses"] = [reader.get_total_mass(srno) for srno in srnos]
info_dict["loadings"] = [reader.get_loading(srno) for srno in srnos]
info_dict["fixed"] = [reader.inspect_hd5f_fixed(srno) for srno in srnos]
info_dict["labels"] = [reader.get_label(srno) for srno in srnos]
info_dict["cell_type"] = [reader.get_cell_type(srno) for srno in srnos]
info_dict["raw_file_names"] = []
info_dict["cellpy_file_names"] = []
logger.debug("created info-dict")
for key in list(info_dict.keys()):
logger.debug("%s: %s" % (key, str(info_dict[key])))
_groups = [reader.get_group(srno) for srno in srnos]
logger.debug(">\ngroups: %s" % str(_groups))
groups = helper.fix_groups(_groups)
info_dict["groups"] = groups
my_timer_start = time.time()
filename_cache = []
info_dict = helper.find_files(info_dict, filename_cache)
my_timer_end = time.time()
if (my_timer_end - my_timer_start) > 5.0:
logger.info(
"The function _find_files was very slow. "
"Save your info_df so you don't have to run it again!"
)
info_df = pd.DataFrame(info_dict)
info_df = info_df.sort_values(["groups", "filenames"])
info_df = helper.make_unique_groups(info_df)
info_df["labels"] = info_df["filenames"].apply(helper.create_labels)
info_df.set_index("filenames", inplace=True)
return info_df
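# A minimal usage sketch, assuming the module's dbreader/helper dependencies are
# importable; the serial numbers below are purely illustrative.
info_df = simple_db_engine(srnos=[42, 43, 44])
print(info_df[["masses", "loadings", "groups"]])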
def djfrontend_jquery_formset(version=None):
"""
Returns the jQuery Dynamic Formset plugin file according to version number.
If TEMPLATE_DEBUG is True, the full file is returned; otherwise the minified file is returned.
"""
if version is None:
version = getattr(settings, 'DJFRONTEND_JQUERY_FORMSET', DJFRONTEND_JQUERY_FORMSET_DEFAULT)
if getattr(settings, 'TEMPLATE_DEBUG', False):
template = '<script src="{static}djfrontend/js/jquery/jquery.formset/{v}/jquery.formset.js"></script>'
else:
template = (
'<script src="//cdnjs.cloudflare.com/ajax/libs/jquery.formset/{v}/jquery.formset.min.js"></script>\n'
'<script>window.jQuery.fn.formset || document.write(\'<script src="{static}djfrontend/js/jquery/jquery.formset/{v}/jquery.formset.min.js"><\/script>\')</script>')
return format_html(template, static=_static_url, v=version)
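# Hedged sketch of direct use; the version string is illustrative and the settings
# names are the ones read above. With settings.TEMPLATE_DEBUG = True this renders the
# local, unminified script tag; otherwise the CDN tag plus a local fallback.
html = djfrontend_jquery_formset('1.2.1')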
def get_project_totals(entries, date_headers, hour_type=None, overtime=False,
total_column=False, by='user'):
"""
Yield hour totals grouped by user and date, optionally including overtime.
"""
totals = [0 for date in date_headers]
rows = []
for thing, thing_entries in groupby(entries, lambda x: x[by]):
name, thing_id, date_dict = date_totals(thing_entries, by)
dates = []
for index, day in enumerate(date_headers):
if isinstance(day, datetime.datetime):
day = day.date()
if hour_type:
total = date_dict.get(day, {}).get(hour_type, 0)
dates.append(total)
else:
billable = date_dict.get(day, {}).get('billable', 0)
nonbillable = date_dict.get(day, {}).get('non_billable', 0)
total = billable + nonbillable
dates.append({
'day': day,
'billable': billable,
'nonbillable': nonbillable,
'total': total
})
totals[index] += total
if total_column:
dates.append(sum(dates))
if overtime:
dates.append(find_overtime(dates))
dates = [date or '' for date in dates]
rows.append((name, thing_id, dates))
if total_column:
totals.append(sum(totals))
totals = [t or '' for t in totals]
yield (rows, totals)
def _create_storage_profile(self):
"""
Create the storage profile for the instance.
Image reference can be a custom image name or a published urn.
"""
if self.image_publisher:
storage_profile = {
'image_reference': {
'publisher': self.image_publisher,
'offer': self.image_offer,
'sku': self.image_sku,
'version': self.image_version
},
}
else:
for image in self.compute.images.list():
if image.name == self.image_id:
image_id = image.id
break
else:
raise AzureCloudException(
'Image with name {0} not found.'.format(self.image_id)
)
storage_profile = {
'image_reference': {
'id': image_id
}
}
return storage_profile
def inv_n(x):
'''given N matrices, return N inverses'''
#
# The inverse of a small matrix (e.g. 3x3) is
#
# 1
# ----- C(j,i)
# det(A)
#
# where C(j,i) is the cofactor of matrix A at position j,i
#
assert x.ndim == 3
assert x.shape[1] == x.shape[2]
c = np.array([ [cofactor_n(x, j, i) * (1 - ((i+j) % 2)*2)
for j in range(x.shape[1])]
for i in range(x.shape[1])]).transpose(2,0,1)
return c / det_n(x)[:, np.newaxis, np.newaxis]
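# A quick sanity check of the approach, assuming cofactor_n and det_n are defined
# alongside inv_n in the same module.
import numpy as np

x = np.random.rand(10, 3, 3)                      # ten random 3x3 matrices
assert np.allclose(inv_n(x), np.linalg.inv(x))    # should match NumPy's batched inverse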
def preprocess(self, x):
"""Load a single example using this field, tokenizing if necessary.
If the input is a Python 2 `str`, it will be converted to Unicode
first. If `sequential=True`, it will be tokenized. Then the input
will be optionally lowercased and passed to the user-provided
`preprocessing` Pipeline."""
if (six.PY2 and isinstance(x, six.string_types)
and not isinstance(x, six.text_type)):
x = Pipeline(lambda s: six.text_type(s, encoding='utf-8'))(x)
if self.sequential and isinstance(x, six.text_type):
x = self.tokenize(x.rstrip('\n'))
if self.lower:
x = Pipeline(six.text_type.lower)(x)
if self.sequential and self.use_vocab and self.stop_words is not None:
x = [w for w in x if w not in self.stop_words]
if self.preprocessing is not None:
return self.preprocessing(x)
else:
return x
def check_xml(code):
"""Yield errors."""
try:
xml.etree.ElementTree.fromstring(code)
except xml.etree.ElementTree.ParseError as exception:
message = '{}'.format(exception)
line_number = 0
found = re.search(r': line\s+([0-9]+)[^:]*$', message)
if found:
line_number = int(found.group(1))
yield (int(line_number), message)
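# A short usage sketch of the generator.
bad_xml = '<root><item></root>'                   # mismatched closing tag
for line_number, message in check_xml(bad_xml):
    print('line {}: {}'.format(line_number, message))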
def upcoming(
cls,
api_key=djstripe_settings.STRIPE_SECRET_KEY,
customer=None,
coupon=None,
subscription=None,
subscription_plan=None,
subscription_prorate=None,
subscription_proration_date=None,
subscription_quantity=None,
subscription_trial_end=None,
**kwargs
):
"""
Gets the upcoming preview invoice (singular) for a customer.
At any time, you can preview the upcoming
invoice for a customer. This will show you all the charges that are
pending, including subscription renewal charges, invoice item charges,
etc. It will also show you any discount that is applicable to the
customer. (Source: https://stripe.com/docs/api#upcoming_invoice)
.. important:: Note that when you are viewing an upcoming invoice, you are simply viewing a preview.
:param customer: The identifier of the customer whose upcoming invoice \
you'd like to retrieve.
:type customer: Customer or string (customer ID)
:param coupon: The code of the coupon to apply.
:type coupon: str
:param subscription: The identifier of the subscription to retrieve an \
invoice for.
:type subscription: Subscription or string (subscription ID)
:param subscription_plan: If set, the invoice returned will preview \
updating the subscription given to this plan, or creating a new \
subscription to this plan if no subscription is given.
:type subscription_plan: Plan or string (plan ID)
:param subscription_prorate: If previewing an update to a subscription, \
this decides whether the preview will show the result of applying \
prorations or not.
:type subscription_prorate: bool
:param subscription_proration_date: If previewing an update to a \
subscription, and doing proration, subscription_proration_date forces \
the proration to be calculated as though the update was done at the \
specified time.
:type subscription_proration_date: datetime
:param subscription_quantity: If provided, the invoice returned will \
preview updating or creating a subscription with that quantity.
:type subscription_quantity: int
:param subscription_trial_end: If provided, the invoice returned will \
preview updating or creating a subscription with that trial end.
:type subscription_trial_end: datetime
:returns: The upcoming preview invoice.
:rtype: UpcomingInvoice
"""
# Convert Customer to id
if customer is not None and isinstance(customer, StripeModel):
customer = customer.id
# Convert Subscription to id
if subscription is not None and isinstance(subscription, StripeModel):
subscription = subscription.id
# Convert Plan to id
if subscription_plan is not None and isinstance(subscription_plan, StripeModel):
subscription_plan = subscription_plan.id
try:
upcoming_stripe_invoice = cls.stripe_class.upcoming(
api_key=api_key,
customer=customer,
coupon=coupon,
subscription=subscription,
subscription_plan=subscription_plan,
subscription_prorate=subscription_prorate,
subscription_proration_date=subscription_proration_date,
subscription_quantity=subscription_quantity,
subscription_trial_end=subscription_trial_end,
**kwargs
)
except InvalidRequestError as exc:
if str(exc) != "Nothing to invoice for customer":
raise
return
# Workaround for "id" being missing (upcoming invoices don't persist).
upcoming_stripe_invoice["id"] = "upcoming"
return UpcomingInvoice._create_from_stripe_object(upcoming_stripe_invoice, save=False)
def convert_all(self):
'''Convert all links in URL table.'''
for url_record in self._url_table.get_all():
if url_record.status != Status.done:
continue
self.convert_by_record(url_record)
def upsert_event(self, calendar_id, event):
"""Inserts or updates an event for the specified calendar.
:param string calendar_id: ID of calendar to insert/update event into.
:param dict event: Dictionary of event data to send to cronofy.
"""
event['start'] = format_event_time(event['start'])
event['end'] = format_event_time(event['end'])
self.request_handler.post(
endpoint='calendars/%s/events' % calendar_id, data=event)
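# Usage sketch; `client` stands for an instance of the class this method belongs to,
# 'cal_123' is a placeholder calendar ID, and the keys other than 'start'/'end'
# follow the Cronofy events API, so treat them as assumptions.
import datetime

client.upsert_event('cal_123', {
    'event_id': 'team-standup',
    'summary': 'Team standup',
    'start': datetime.datetime(2020, 1, 6, 9, 0),
    'end': datetime.datetime(2020, 1, 6, 9, 15),
})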
def generate_passphrase(size=12):
"""Return a generate string `size` long based on lowercase, uppercase,
and digit chars
"""
chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
return str(''.join(random.choice(chars) for _ in range(size)))
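# Usage is straightforward; note that the module-level `random` generator is not
# cryptographically secure, so for real secrets the standard-library `secrets`
# module would be the safer choice.
print(generate_passphrase())          # 12 random alphanumeric characters
print(generate_passphrase(size=20))   # longer passphrase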
def bound(self, p1, p2=None):
"""Bound this point within the rect defined by (`p1`, `p2`)."""
r = Rect(p1, p2)
return Point(min(max(self.x, r.l), r.r), min(max(self.y, r.t), r.b))
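# Hypothetical usage sketch, assuming Point and Rect are this module's geometry
# classes with the attribute names used above.
p = Point(15, -3)
clamped = p.bound(Point(0, 0), Point(10, 10))   # x and y clamped into the rect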
def s_demand(self, bus):
""" Returns the total complex power demand.
"""
Svl = array([complex(g.p, g.q) for g in self.generators if
(g.bus == bus) and g.is_load], dtype=complex64)
Sd = complex(bus.p_demand, bus.q_demand)
return -sum(Svl) + Sd
def add_alt(self, entry):
"""Parse and store the alternative allele field"""
entry = entry[7:-1]
info = entry.split(',')
if len(info) < 2:
return False
for v in info:
key, value = v.split('=', 1)
if key == 'ID':
self.alt[value] = {}
id_ = value
elif key == 'Description':
self.alt[id_]['description'] = value
if len(info) > 4:
self.alt[id_]['description'] += '; '.join(info[4:])
break
return True
def _equal_values(self, val1, val2):
"""Matrices are equal if they hash to the same value."""
if self._is_supported_matrix(val1):
if self._is_supported_matrix(val2):
_, _, hash_tuple_1 = self._serialize_matrix(val1)
_, _, hash_tuple_2 = self._serialize_matrix(val2)
return hash(hash_tuple_1) == hash(hash_tuple_2)
else:
return False
else:
return super(SparseParameter, self)._equal_values(val1, val2)
def simxPackFloats(floatList):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
if sys.version_info[0] == 3:
s=bytes()
for i in range(len(floatList)):
s=s+struct.pack('<f',floatList[i])
s=bytearray(s)
else:
s=''
for i in range(len(floatList)):
s+=struct.pack('<f',floatList[i])
return s
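# The helper packs each value as a little-endian 32-bit float, so the result is
# four bytes per element.
packed = simxPackFloats([1.0, 2.5, -3.25])
assert len(packed) == 3 * 4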
def open_zip(cls, dbname, zipped, encoding=None, fieldnames_lower=True, case_sensitive=True):
"""Context manager. Allows opening a .dbf file from zip archive.
.. code-block::
with Dbf.open_zip('some.dbf', 'myarch.zip') as dbf:
...
:param str|unicode dbname: .dbf file name
:param str|unicode|file zipped: .zip file path or a file-like object.
:param str|unicode encoding: Encoding used by DB.
This will be used if there's no encoding information in the DB itself.
:param bool fieldnames_lower: Lowercase field names.
:param bool case_sensitive: Whether DB filename is case sensitive.
:rtype: Dbf
"""
with ZipFile(zipped, 'r') as zip_:
if not case_sensitive:
dbname = pick_name(dbname, zip_.namelist())
with zip_.open(dbname) as f:
yield cls(f, encoding=encoding, fieldnames_lower=fieldnames_lower)
def union(cls):
"""A class decorator which other classes can specify that they can resolve to with `UnionRule`.
Annotating a class with @union allows other classes to use a UnionRule() instance to indicate that
they can be resolved to this base union class. This class will never be instantiated, and should
have no members -- it is used as a tag only, and will be replaced with whatever object is passed
in as the subject of a `yield Get(...)`. See the following example:
@union
class UnionBase(object): pass
@rule(B, [X])
def get_some_union_type(x):
result = yield Get(ResultType, UnionBase, x.f())
# ...
If there exists a single path from (whatever type the expression `x.f()` returns) -> `ResultType`
in the rule graph, the engine will retrieve and execute that path to produce a `ResultType` from
`x.f()`. This requires also that whatever type `x.f()` returns was registered as a union member of
`UnionBase` with a `UnionRule`.
Unions allow @rule bodies to be written without knowledge of what types may eventually be provided
as input -- rather, they let the engine check that there is a valid path to the desired result.
"""
# TODO: Check that the union base type is used as a tag and nothing else (e.g. no attributes)!
assert isinstance(cls, type)
return type(cls.__name__, (cls,), {
'_is_union': True,
})
def do_find(self, arg):
"""
[~process] f <string> - find the string in the process memory
[~process] find <string> - find the string in the process memory
"""
if not arg:
raise CmdError("missing parameter: string")
process = self.get_process_from_prefix()
self.find_in_memory(arg, process)
def write(self, bytes_):
"""Write bytes to the file."""
string = bytes_.decode(self._encoding)
self._file.write(string)
def create_stack_user(self):
"""Create the stack user on the machine.
"""
self.run('adduser -m stack', success_status=(0, 9))
self.create_file('/etc/sudoers.d/stack', 'stack ALL=(root) NOPASSWD:ALL\n')
self.run('mkdir -p /home/stack/.ssh')
self.run('cp /root/.ssh/authorized_keys /home/stack/.ssh/authorized_keys')
self.run('chown -R stack:stack /home/stack/.ssh')
self.run('chmod 700 /home/stack/.ssh')
self.run('chmod 600 /home/stack/.ssh/authorized_keys')
self.ssh_pool.build_ssh_client(self.hostname, 'stack',
self._key_filename,
self.via_ip)
def __set_transaction_detail(self, *args, **kwargs):
"""
Checks kwargs for 'customer_transaction_id' and sets it if present.
"""
customer_transaction_id = kwargs.get('customer_transaction_id', None)
if customer_transaction_id:
transaction_detail = self.client.factory.create('TransactionDetail')
transaction_detail.CustomerTransactionId = customer_transaction_id
self.logger.debug(transaction_detail)
self.TransactionDetail = transaction_detail
def shift_or_mirror_into_invertible_domain(self, solution_genotype,
copy=False):
"""Details: input ``solution_genotype`` is changed. The domain is
[lb - al, ub + au] and in [lb - 2*al - (ub - lb) / 2, lb - al]
mirroring is applied.
"""
assert solution_genotype is not None
if copy:
y = [val for val in solution_genotype]
else:
y = solution_genotype
if isinstance(y, np.ndarray) and not isinstance(y[0], float):
y = array(y, dtype=float)
for i in rglen(y):
lb = self._lb[self._index(i)]
ub = self._ub[self._index(i)]
al = self._al[self._index(i)]
au = self._au[self._index(i)]
# x is far from the boundary, compared to ub - lb
if y[i] < lb - 2 * al - (ub - lb) / 2.0 or y[i] > ub + 2 * au + (ub - lb) / 2.0:
r = 2 * (ub - lb + al + au) # period
s = lb - 2 * al - (ub - lb) / 2.0 # start
y[i] -= r * ((y[i] - s) // r) # shift
if y[i] > ub + au:
y[i] -= 2 * (y[i] - ub - au)
if y[i] < lb - al:
y[i] += 2 * (lb - al - y[i])
return y
def d_step(self, true_frames, gen_frames):
"""Performs the discriminator step in computing the GAN loss.
Applies stop-gradient to the generated frames while computing the
discriminator loss to make sure that the gradients are not back-propagated
to the generator. This makes sure that only the discriminator is updated.
Args:
true_frames: True outputs
gen_frames: Generated frames.
Returns:
d_loss: Loss component due to the discriminator.
"""
hparam_to_disc_loss = {
"least_squares": gan_losses.least_squares_discriminator_loss,
"cross_entropy": gan_losses.modified_discriminator_loss,
"wasserstein": gan_losses.wasserstein_discriminator_loss}
# Concat across batch-axis.
_, batch_size, _, _, _ = common_layers.shape_list(true_frames)
all_frames = tf.concat(
[true_frames, tf.stop_gradient(gen_frames)], axis=1)
all_logits = self.discriminator(all_frames)
true_logits, fake_logits_stop = \
all_logits[:batch_size], all_logits[batch_size:]
mean_true_logits = tf.reduce_mean(true_logits)
tf.summary.scalar("mean_true_logits", mean_true_logits)
mean_fake_logits_stop = tf.reduce_mean(fake_logits_stop)
tf.summary.scalar("mean_fake_logits_stop", mean_fake_logits_stop)
discriminator_loss_func = hparam_to_disc_loss[self.hparams.gan_loss]
gan_d_loss = discriminator_loss_func(
discriminator_real_outputs=true_logits,
discriminator_gen_outputs=fake_logits_stop,
add_summaries=True)
return gan_d_loss, true_logits, fake_logits_stop
def list_documents(self, page_size=None):
"""List all subdocuments of the current collection.
Args:
page_size (Optional[int]): The maximum number of documents
in each page of results from this request. Non-positive values
are ignored. Defaults to a sensible value set by the API.
Returns:
Sequence[~.firestore_v1beta1.collection.DocumentReference]:
iterator of subdocuments of the current collection. If the
collection does not exist at the time of `snapshot`, the
iterator will be empty
"""
parent, _ = self._parent_info()
iterator = self._client._firestore_api.list_documents(
parent,
self.id,
page_size=page_size,
show_missing=True,
metadata=self._client._rpc_metadata,
)
iterator.collection = self
iterator.item_to_value = _item_to_document_ref
return iterator
def clear(self):
"""
Cleans up the manager. The manager can't be used after this method has
been called
"""
self.services.clear()
self._future_value.clear()
self.services = None
self._lock = None
self._ipopo_instance = None
self._context = None
self.requirement = None
self._key = None
self._allow_none = None
self._future_value = None
self._field = None
def is_readable(value, **kwargs):
"""Indicate whether ``value`` is a readable file.
.. caution::
**This validator is an anti-pattern and should be used with caution.**
Validating the readability of a file *before* attempting to read it
exposes your code to a bug called
`TOCTOU <https://en.wikipedia.org/wiki/Time_of_check_to_time_of_use>`_.
This particular class of bug can expose your code to **security vulnerabilities**
and so this validator should only be used if you are an advanced user.
A better pattern to use when reading from a file is to apply the principle of
EAFP ("easier to ask forgiveness than permission"), and simply attempt to
read from the file using a ``try ... except`` block:
.. code-block:: python
try:
with open('path/to/filename.txt', mode = 'r') as file_object:
# read from file here
except (OSError, IOError) as error:
# Handle an error if unable to read.
:param value: The value to evaluate.
:type value: Path-like object
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator
"""
try:
validators.readable(value, **kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True
def process_action(self, request, queryset):
"""
Deletes the object(s). Successful deletes are logged.
Returns a 'render redirect' to the result of the
`get_done_url` method.
If a ProtectedError is raised, the `render` method
is called with message explaining the error added
to the context as `protected`.
"""
count = 0
try:
with transaction.commit_on_success():
for obj in queryset:
self.log_action(obj, CMSLog.DELETE)
count += 1
obj.delete()
msg = "%s object%s deleted." % (count, ('' if count ==1 else 's'))
url = self.get_done_url()
return self.render(request, redirect_url=url, message = msg)
except ProtectedError as e:
protected = []
for x in e.protected_objects:
if hasattr(x, 'delete_blocked_message'):
protected.append(x.delete_blocked_message())
else:
protected.append(u"%s - %s" % (x._meta.verbose_name, x))
msg = "Cannot delete some objects because the following objects depend on them:"
return self.render(request, error_msg = msg, errors = protected)
def __set_bp(self, aProcess):
"""
Sets the target pages as guard pages.
@type aProcess: L{Process}
@param aProcess: Process object.
"""
lpAddress = self.get_address()
dwSize = self.get_size()
flNewProtect = aProcess.mquery(lpAddress).Protect
flNewProtect = flNewProtect | win32.PAGE_GUARD
aProcess.mprotect(lpAddress, dwSize, flNewProtect)
def _create_row_labels(self):
"""
Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label.
"""
# start with the original column names
labels = {}
for c in self._columns:
labels[c] = c
# replace column names with alternative names if provided
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
# append the label suffix
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels
def validate_object(obj, field_validators=None, non_field_validators=None,
schema=None, context=None):
"""
Takes a mapping and applies a mapping of validator functions to it
collecting and reraising any validation errors that occur.
"""
if schema is None:
schema = {}
if context is None:
context = {}
if field_validators is None:
field_validators = ValidationDict()
if non_field_validators is None:
non_field_validators = ValidationList()
from flex.validation.schema import (
construct_schema_validators,
)
schema_validators = construct_schema_validators(schema, context)
if '$ref' in schema_validators and hasattr(schema_validators['$ref'], 'validators'):
ref_ = field_validators.pop('$ref')
for k, v in ref_.validators.items():
if k not in schema_validators:
schema_validators.add_validator(k, v)
if 'discriminator' in schema:
schema_validators = add_polymorphism_requirements(obj, schema, context, schema_validators)
# delete resolved discriminator to avoid infinite recursion
del schema['discriminator']
schema_validators.update(field_validators)
schema_validators.validate_object(obj, context=context)
non_field_validators.validate_object(obj, context=context)
return obj
def _parse_mtllibs(self):
"""Load mtl files"""
for mtllib in self.meta.mtllibs:
try:
materials = self.material_parser_cls(
os.path.join(self.path, mtllib),
encoding=self.encoding,
strict=self.strict).materials
except IOError:
raise IOError("Failed to load mtl file:".format(os.path.join(self.path, mtllib)))
for name, material in materials.items():
self.wavefront.materials[name] = material
def dump(self, filename):
"""Dump counters to file"""
try:
with open(filename, 'wb') as fp:
cPickle.dump(self.counters, fp)
except Exception as e:
logging.warning("can't dump counter to file %s: %s", filename, e)
return False
return True
def imresize(self, data, new_wd, new_ht, method='bilinear'):
"""Scale an image in numpy array _data_ to the specified width and
height. A smooth scaling is preferred.
"""
old_ht, old_wd = data.shape[:2]
start_time = time.time()
if have_pilutil:
means = 'PIL'
zoom_x = float(new_wd) / float(old_wd)
zoom_y = float(new_ht) / float(old_ht)
if (old_wd >= new_wd) or (old_ht >= new_ht):
# data size is bigger, skip pixels
zoom = max(zoom_x, zoom_y)
else:
zoom = min(zoom_x, zoom_y)
newdata = imresize(data, zoom, interp=method)
else:
raise ImageError("No way to scale image smoothly")
end_time = time.time()
self.logger.debug("scaling (%s) time %.4f sec" % (
means, end_time - start_time))
return newdata
def _landsat_get_mtl(sceneid):
"""
Get Landsat-8 MTL metadata.
Parameters
----------
sceneid : str
Landsat sceneid. For scenes after May 2017,
sceneid have to be LANDSAT_PRODUCT_ID.
Returns
-------
out : dict
returns a JSON like object with the metadata.
"""
scene_params = _landsat_parse_scene_id(sceneid)
meta_file = "http://landsat-pds.s3.amazonaws.com/{}_MTL.txt".format(
scene_params["key"]
)
metadata = str(urlopen(meta_file).read().decode())
return toa_utils._parse_mtl_txt(metadata)
def hamming_emd(d1, d2):
"""Return the Earth Mover's Distance between two distributions (indexed
by state, one dimension per node) using the Hamming distance between states
as the transportation cost function.
Singleton dimensions are squeezed out.
"""
N = d1.squeeze().ndim
d1, d2 = flatten(d1), flatten(d2)
return emd(d1, d2, _hamming_matrix(N))
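# For intuition, a standalone sketch of what a Hamming cost matrix over N binary
# nodes looks like (an assumption about what _hamming_matrix provides, not its
# actual implementation).
import numpy as np
from itertools import product

def hamming_matrix_sketch(n):
    states = list(product((0, 1), repeat=n))
    return np.array([[sum(a != b for a, b in zip(s, t)) for t in states]
                     for s in states])

print(hamming_matrix_sketch(2))   # 4x4 matrix of pairwise Hamming distances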
def _on_library_path_changed(self, renderer, path, new_library_path):
"""Callback handling a change of a library path
:param Gtk.CellRenderer renderer: Cell renderer showing the library path
:param path: Path of library within the list store
:param str new_library_path: New library path
"""
library_name = self.library_list_store[int(path)][self.KEY_STORAGE_ID]
library_config = self.core_config_model.get_current_config_value("LIBRARY_PATHS", use_preliminary=True,
default={})
library_config[library_name] = new_library_path
self.core_config_model.set_preliminary_config_value("LIBRARY_PATHS", library_config)
self._select_row_by_column_value(self.view['library_tree_view'], self.library_list_store,
self.KEY_STORAGE_ID, library_name)
def get_message_actions(current):
"""
Returns applicable actions for current user for given message key
.. code-block:: python
# request:
{
'view':'_zops_get_message_actions',
'key': key,
}
# response:
{
'actions':[('name_string', 'cmd_string'),]
'status': string, # 'OK' for success
'code': int, # 200 for success
}
"""
current.output = {'status': 'OK',
'code': 200,
'actions': Message.objects.get(
current.input['key']).get_actions_for(current.user)}
def _get_optimizer(self):
"""Uses Adagrad to optimize the GloVe/Mittens objective,
as specified in the GloVe paper.
"""
optim = tf.train.AdagradOptimizer(self.learning_rate)
gradients = optim.compute_gradients(self.cost)
if self.log_dir:
for name, (g, v) in zip(['W', 'C', 'bw', 'bc'], gradients):
tf.summary.histogram("{}_grad".format(name), g)
tf.summary.histogram("{}_vals".format(name), v)
return optim.apply_gradients(gradients)
def ssn(self):
"""
Returns a 13-digit Swiss SSN, named AHV (German) or
AVS (French and Italian)
See: http://www.bsv.admin.ch/themen/ahv/00011/02185/
"""
def _checksum(digits):
evensum = sum(digits[:-1:2])
oddsum = sum(digits[1::2])
return (10 - ((evensum + oddsum * 3) % 10)) % 10
digits = [7, 5, 6]
# create an array of first 9 elements initialized randomly
digits += self.generator.random.sample(range(10), 9)
# determine the last digit to make it qualify the test
digits.append(_checksum(digits))
# repeat steps until it does qualify the test
digits = ''.join([str(d) for d in digits])
ssn = digits[:3] + '.' \
+ digits[3:7] + '.' \
+ digits[7:11] + '.' \
+ digits[11:]
return ssn
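# Assuming this provider is registered with Faker for a Swiss locale (an assumption
# about the surrounding package), usage would look like:
from faker import Faker

fake = Faker('de_CH')   # fr_CH / it_CH behave the same way
print(fake.ssn())       # 13 digits formatted as '756.####.####.##'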
def _validate_sample_rates(input_filepath_list, combine_type):
''' Check if files in input file list have the same sample rate
'''
sample_rates = [
file_info.sample_rate(f) for f in input_filepath_list
]
if not core.all_equal(sample_rates):
raise IOError(
"Input files do not have the same sample rate. The {} combine "
"type requires that all files have the same sample rate"
.format(combine_type)
)
def pca_plot(pca, dt, xlabs=None, mode='scatter', lognorm=True):
"""
Plot a fitted PCA, and all components.
"""
nc = pca.n_components
f = np.arange(pca.n_features_)
cs = list(itertools.combinations(range(nc), 2))
ind = ~np.apply_along_axis(any, 1, np.isnan(dt))
cylim = (pca.components_.min(), pca.components_.max())
yd = cylim[1] - cylim[0]
# Make figure
fig, axs = plt.subplots(nc, nc, figsize=[3 * nc, nc * 3], tight_layout=True)
for x, y in zip(*np.triu_indices(nc)):
if x == y:
tax = axs[x, y]
tax.bar(f, pca.components_[x], 0.8)
tax.set_xticks([])
tax.axhline(0, zorder=-1, c=(0,0,0,0.6))
# labels
tax.set_ylim(cylim[0] - 0.2 * yd,
cylim[1] + 0.2 * yd)
for xi, yi, lab in zip(f, pca.components_[x], xlabs):
if yi > 0:
yo = yd * 0.03
va = 'bottom'
else:
yo = yd * -0.02
va = 'top'
tax.text(xi, yi + yo, lab, ha='center', va=va, rotation=90, fontsize=8)
else:
xv = dt[ind, x]
yv = dt[ind, y]
if mode == 'scatter':
axs[x, y].scatter(xv, yv, alpha=0.2)
axs[y, x].scatter(yv, xv, alpha=0.2)
if mode == 'hist2d':
if lognorm:
norm = mpl.colors.LogNorm()
else:
norm = None
axs[x, y].hist2d(xv, yv, 50, cmap=plt.cm.Blues, norm=norm)
axs[y, x].hist2d(yv, xv, 50, cmap=plt.cm.Blues, norm=norm)
if x == 0:
axs[y, x].set_ylabel('PC{:.0f}'.format(y + 1))
if y == nc - 1:
axs[y, x].set_xlabel('PC{:.0f}'.format(x + 1))
return fig, axs, xv, yv
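# A usage sketch under the assumption that `pca` is a fitted scikit-learn PCA and
# `dt` holds the corresponding component scores (one column per component, one
# label per original feature).
import numpy as np
from sklearn.decomposition import PCA

data = np.random.rand(200, 4)
pca = PCA(n_components=3).fit(data)
fig, axs, xv, yv = pca_plot(pca, pca.transform(data), xlabs=['a', 'b', 'c', 'd'])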
def spill(self, src, dest):
"""
Spill a workspace, i.e. unpack an OCRD-ZIP and turn it into a workspace.
See https://ocr-d.github.com/ocrd_zip#unpacking-ocrd-zip-to-a-workspace
Arguments:
src (string): Path to OCRD-ZIP
dest (string): Path to directory to unpack data folder to
"""
# print(dest)
if exists(dest) and not isdir(dest):
raise Exception("Not a directory: %s" % dest)
# If dest is an existing directory, try to derive its name from src
if isdir(dest):
workspace_name = re.sub(r'(\.ocrd)?\.zip$', '', basename(src))
new_dest = join(dest, workspace_name)
if exists(new_dest):
raise Exception("Directory exists: %s" % new_dest)
dest = new_dest
log.info("Spilling %s to %s", src, dest)
bagdir = mkdtemp(prefix=TMP_BAGIT_PREFIX)
unzip_file_to_dir(src, bagdir)
datadir = join(bagdir, 'data')
for root, _, files in walk(datadir):
for f in files:
srcfile = join(root, f)
destdir = join(dest, relpath(root, datadir))
destfile = join(destdir, f)
if not exists(destdir):
makedirs(destdir)
log.debug("Copy %s -> %s", srcfile, destfile)
copyfile(srcfile, destfile)
# TODO copy allowed tag files if present
# TODO validate bagit
# Drop tempdir
rmtree(bagdir)
# Create workspace
workspace = Workspace(self.resolver, directory=dest)
# TODO validate workspace
return workspace
def create_jinja_env():
"""Create a Jinja2 `~jinja2.Environment`.
Returns
-------
env : `jinja2.Environment`
Jinja2 template rendering environment, configured to use templates in
``templates/``.
"""
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir),
autoescape=jinja2.select_autoescape(['html'])
)
env.filters['simple_date'] = filter_simple_date
env.filters['paragraphify'] = filter_paragraphify
return env
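# Usage sketch; the template name below is hypothetical and would have to exist in
# the package's templates/ directory.
env = create_jinja_env()
template = env.get_template('homepage.jinja')   # hypothetical template name
html = template.render(title='Example')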
def print(self, tag=None, name=None):
"""
Prints each tuple to stdout flushing after each tuple.
If `tag` is not `None` then each tuple has "tag: " prepended
to it before printing.
Args:
tag: A tag to prepend to each tuple.
name(str): Name of the resulting stream.
When `None` defaults to a generated name.
Returns:
streamsx.topology.topology.Sink: Stream termination.
.. versionadded:: 1.6.1 `tag`, `name` parameters.
.. versionchanged:: 1.7
Now returns a :py:class:`Sink` instance.
"""
_name = name
if _name is None:
_name = 'print'
fn = streamsx.topology.functions.print_flush
if tag is not None:
tag = str(tag) + ': '
fn = lambda v : streamsx.topology.functions.print_flush(tag + str(v))
sp = self.for_each(fn, name=_name)
sp._op().sl = _SourceLocation(_source_info(), 'print')
return sp
def get_tops(self):
'''
Gather the top files
'''
tops = DefaultOrderedDict(list)
include = DefaultOrderedDict(list)
done = DefaultOrderedDict(list)
found = 0 # did we find any contents in the top files?
# Gather initial top files
merging_strategy = self.opts['top_file_merging_strategy']
if merging_strategy == 'same' and not self.opts['saltenv']:
if not self.opts['default_top']:
raise SaltRenderError(
'top_file_merging_strategy set to \'same\', but no '
'default_top configuration option was set'
)
if self.opts['saltenv']:
contents = self.client.cache_file(
self.opts['state_top'],
self.opts['saltenv']
)
if contents:
found = 1
tops[self.opts['saltenv']] = [
compile_template(
contents,
self.state.rend,
self.state.opts['renderer'],
self.state.opts['renderer_blacklist'],
self.state.opts['renderer_whitelist'],
saltenv=self.opts['saltenv']
)
]
else:
tops[self.opts['saltenv']] = [{}]
else:
found = 0
state_top_saltenv = self.opts.get('state_top_saltenv', False)
if state_top_saltenv \
and not isinstance(state_top_saltenv, six.string_types):
state_top_saltenv = six.text_type(state_top_saltenv)
for saltenv in [state_top_saltenv] if state_top_saltenv \
else self._get_envs():
contents = self.client.cache_file(
self.opts['state_top'],
saltenv
)
if contents:
found = found + 1
tops[saltenv].append(
compile_template(
contents,
self.state.rend,
self.state.opts['renderer'],
self.state.opts['renderer_blacklist'],
self.state.opts['renderer_whitelist'],
saltenv=saltenv
)
)
else:
tops[saltenv].append({})
log.debug('No contents loaded for saltenv \'%s\'', saltenv)
if found > 1 and merging_strategy == 'merge' and not self.opts.get('env_order', None):
log.warning(
'top_file_merging_strategy is set to \'%s\' and '
'multiple top files were found. Merging order is not '
'deterministic, it may be desirable to either set '
'top_file_merging_strategy to \'same\' or use the '
'\'env_order\' configuration parameter to specify the '
'merging order.', merging_strategy
)
if found == 0:
log.debug(
'No contents found in top file. If this is not expected, '
'verify that the \'file_roots\' specified in \'etc/master\' '
'are accessible. The \'file_roots\' configuration is: %s',
repr(self.state.opts['file_roots'])
)
# Search initial top files for includes
for saltenv, ctops in six.iteritems(tops):
for ctop in ctops:
if 'include' not in ctop:
continue
for sls in ctop['include']:
include[saltenv].append(sls)
ctop.pop('include')
# Go through the includes and pull out the extra tops and add them
while include:
pops = []
for saltenv, states in six.iteritems(include):
pops.append(saltenv)
if not states:
continue
for sls_match in states:
for sls in fnmatch.filter(self.avail[saltenv], sls_match):
if sls in done[saltenv]:
continue
tops[saltenv].append(
compile_template(
self.client.get_state(
sls,
saltenv
).get('dest', False),
self.state.rend,
self.state.opts['renderer'],
self.state.opts['renderer_blacklist'],
self.state.opts['renderer_whitelist'],
saltenv
)
)
done[saltenv].append(sls)
for saltenv in pops:
if saltenv in include:
include.pop(saltenv)
return tops
def pretty_dumps(data):
"""Return json string in pretty format.
Converts the given dictionary into a formatted string.
"""
try:
return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)
except:
return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=True)
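# Example:
print(pretty_dumps({'b': 1, 'a': ['x', 'y']}))
# {
#     "a": [
#         "x",
#         "y"
#     ],
#     "b": 1
# }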
def calc_nested_probs(nest_coefs,
index_coefs,
design,
rows_to_obs,
rows_to_nests,
chosen_row_to_obs=None,
return_type="long_probs",
*args,
**kwargs):
"""
Parameters
----------
nest_coefs : 1D or 2D ndarray.
All elements should by ints, floats, or longs. If 1D, should have 1
element for each nesting coefficient being estimated. If 2D, should
have 1 column for each set of nesting coefficients being used to
predict the probabilities of each alternative being chosen. There
should be one row per nesting coefficient. Elements denote the inverse
of the scale coefficients for each of the lower level nests.
index_coefs : 1D or 2D ndarray.
All elements should by ints, floats, or longs. If 1D, should have 1
element for each utility coefficient being estimated (i.e.
num_features). If 2D, should have 1 column for each set of coefficients
being used to predict the probabilities of each alternative being
chosen. There should be one row per index coefficient.
design : 2D ndarray.
There should be one row per observation per available alternative.
There should be one column per utility coefficient being estimated. All
elements should be ints, floats, or longs.
rows_to_obs : 2D scipy sparse array.
There should be one row per observation per available alternative and
one column per observation. This matrix maps the rows of the design
matrix to the unique observations (on the columns).
rows_to_nests : 2D scipy sparse array.
There should with one row per observation per available alternative and
one column per nest. This matrix maps the rows of the design matrix to
the unique nests (on the columns).
chosen_row_to_obs : 2D scipy sparse array, or None, optional.
There should be one row per observation per available alternative and
one column per observation. This matrix indicates, for each observation
(on the columns), which rows of the design matrix were the realized
outcome. If an array is passed then an array of shape
(num_observations,) can be returned and each element will be the
probability of the realized outcome of the given observation.
Default == None.
return_type : str, optional.
Indicates what object(s) are to be returned from the function. Valid
values are: `['long_probs', 'chosen_probs', 'long_and_chosen_probs',
'all_prob_dict']`. If `long_probs`, the long format probabilities (a 1D
numpy array with one element per observation per available alternative)
will be returned. If `chosen_probs`, a 1D numpy array with one element
per observation will be returned, where the values are the
probabilities of the chosen alternative for the given observation. If
`long_and_chosen_probs`, a tuple of chosen_probs and long_probs will be
returned. If `all_prob_dict`, a dictionary will be returned. The values
will all be 1D numpy arrays of probabilities dictated by the value's
corresponding key. The keys will be `long_probs`, `nest_choice_probs`,
`prob_given_nest`, and `chosen_probs`. If chosen_row_to_obs is None,
then `chosen_probs` will be None. If `chosen_row_to_obs` is passed,
then `chosen_probs` will be a 1D array as described above.
`nest_choice_probs` is of the same shape as `rows_to_nests` and it
denotes the probability of each individual choosing each of the
possible nests. `prob_given_nest` is of the same shape as `long_probs`
and it denotes the probability of the individual associated with a
given row choosing the alternative associated with that row, given that
the individual chooses the nest that contains the given alternative.
Default == `long_probs`.
Returns
-------
See above for documentation of the `return_type` kwarg.
"""
# Check for 2D index coefficients or nesting coefficients
try:
assert len(index_coefs.shape) <= 2
assert (len(index_coefs.shape) == 1) or (index_coefs.shape[1] == 1)
assert len(nest_coefs.shape) <= 2
assert (len(nest_coefs.shape) == 1) or (nest_coefs.shape[1] == 1)
except AssertionError:
msg = "Support for 2D index_coefs or nest_coefs not yet implemented."
raise NotImplementedError(msg)
# Check for kwarg validity
valid_return_types = ['long_probs',
'chosen_probs',
'long_and_chosen_probs',
'all_prob_dict']
if return_type not in valid_return_types:
msg = "return_type must be one of the following values: "
raise ValueError(msg + str(valid_return_types))
chosen_probs_needed = ['chosen_probs', 'long_and_chosen_probs']
if chosen_row_to_obs is None and return_type in chosen_probs_needed:
msg = "chosen_row_to_obs is None AND return_type in {}."
raise ValueError(msg.format(chosen_probs_needed) +
"\nThis is invalid.")
# Calculate the index for each alternative for each individual, V = X*beta
index_vals = design.dot(index_coefs)
# Get the long format nest parameters for each row of the design matrix
long_nest_coefs = rows_to_nests.dot(nest_coefs)
# Calculate the scaled index values (index / nest_param = V / lambda)
scaled_index = index_vals / long_nest_coefs
# Guard against overflow
pos_inf_idx = np.isposinf(scaled_index)
neg_inf_idx = np.isneginf(scaled_index)
scaled_index[pos_inf_idx] = max_comp_value
scaled_index[neg_inf_idx] = -1 * max_comp_value
# Calculate the e^(scaled-index) = exp(V / lambda)
exp_scaled_index = np.exp(scaled_index)
# Guard against overflow
inf_idx = np.isposinf(exp_scaled_index)
exp_scaled_index[inf_idx] = max_comp_value
# Guard against underflow. Note that I'm not sure this is the best place or
# best way to perform such guarding. If all of an observations indices
# suffer underflow, then we'll have 0 / 0 when calculating the
# probabilities and I should use L'Hopital's rule to get the correct
# probability. However, replacing underflowed values here may result in
# incorrectly assigning probabilities of either zero for all alternatives
# or 1 / num_alternatives for all alternatives.
zero_idx = (exp_scaled_index == 0)
exp_scaled_index[zero_idx] = min_comp_value
# Calculate the log-sum for each nest, for each observation. Note that the
# "*" is used to compute the dot product between the mapping matrix which
# is a scipy.sparse matrix and the second term which is a scipy sparse
# matrix. Note the dimensions of ind_log_sums_per_nest are (obs, nests).
# Calculates sum _{j \in C_m} exp(V_{ij} / \lambda_m) for each nest m.
ind_exp_sums_per_nest = (rows_to_obs.T *
rows_to_nests.multiply(exp_scaled_index[:, None]))
# Ensure that ind_exp_sums_per_nest is an ndarray
if isinstance(ind_exp_sums_per_nest, np.matrixlib.defmatrix.matrix):
ind_exp_sums_per_nest = np.asarray(ind_exp_sums_per_nest)
elif issparse(ind_exp_sums_per_nest):
ind_exp_sums_per_nest = ind_exp_sums_per_nest.toarray()
# Guard against overflow
inf_idx = np.isposinf(ind_exp_sums_per_nest)
ind_exp_sums_per_nest[inf_idx] = max_comp_value
# Get the long-format representation of ind_log_sums_per_nest. Each row
# will have two columns, one for each nest. The entries of the matrix will
# be the log-sum for each nest, for the individual associated with the
# given row. The "*" is used to perform the dot product since rows_to_obs
# is a sparse matrix & ind_exp_sums_per_nest is a dense numpy matrix.
long_exp_sums_per_nest = rows_to_obs.dot(ind_exp_sums_per_nest)
if isinstance(long_exp_sums_per_nest, np.matrixlib.defmatrix.matrix):
long_exp_sums_per_nest = np.asarray(long_exp_sums_per_nest)
# Get the relevant log-sum for each row of the long-format data
# Note the .A converts the numpy matrix into a numpy array
# This is sum _{j \in C_m} exp(V_{ij} / \lambda_m) for the nest
# belonging to each row
long_exp_sums = (rows_to_nests.multiply(long_exp_sums_per_nest)
.sum(axis=1)
.A).ravel()
# Get the denominators for each individual
ind_denom = (np.power(ind_exp_sums_per_nest,
nest_coefs[None, :])
.sum(axis=1))
# Guard against overflow and underflow
inf_idx = np.isposinf(ind_denom)
ind_denom[inf_idx] = max_comp_value
zero_idx = (ind_denom == 0)
ind_denom[zero_idx] = min_comp_value
# Get the long format denominators.
long_denom = rows_to_obs.dot(ind_denom)
# Ensure that long_denom is 1D.
long_denom = long_denom.ravel()
# Get the long format numerators
long_numerators = (exp_scaled_index *
np.power(long_exp_sums,
(long_nest_coefs - 1)))
# Guard agains overflow and underflow
inf_idx = np.isposinf(long_numerators)
long_numerators[inf_idx] = max_comp_value
zero_idx = (long_numerators == 0)
long_numerators[zero_idx] = min_comp_value
# Calculate and return the long-format probabilities
long_probs = (long_numerators / long_denom).ravel()
# Guard against underflow
long_probs[np.where(long_probs == 0)] = min_comp_value
# If desired, isolate the probabilities of the chosen alternatives
if chosen_row_to_obs is None:
chosen_probs = None
else:
# chosen_probs will be of shape (num_observations,)
chosen_probs = (chosen_row_to_obs.transpose()
.dot(long_probs))
chosen_probs = np.asarray(chosen_probs).ravel()
# Return the long form and chosen probabilities if desired
if return_type == 'long_and_chosen_probs':
return chosen_probs, long_probs
# If working with predictions, return just the long form probabilities
elif return_type == 'long_probs':
return long_probs
# If estimating the model and storing fitted probabilities or testing the
# model on data for which we know the chosen alternative, just return the
# chosen probabilities.
elif return_type == 'chosen_probs':
return chosen_probs
# If we want all the factors of the probability (e.g. as when calculating
# the gradient)
elif return_type == 'all_prob_dict':
# Create the dictionary of the various probabilities to be returned
prob_dict = {}
prob_dict["long_probs"] = long_probs
prob_dict["chosen_probs"] = chosen_probs
# Calculate the 'prob_given_nest' array
prob_given_nest = exp_scaled_index / long_exp_sums
# Guard against underflow
zero_idx = (prob_given_nest == 0)
prob_given_nest[zero_idx] = min_comp_value
# Calculate the 'nest_choice_probs'. Note ind_denom is a matrix with
# shape (num_obs, 1) so no need to explicitly broadcast
nest_choice_probs = (np.power(ind_exp_sums_per_nest,
nest_coefs[None, :]) /
ind_denom[:, None])
# Guard against underflow
zero_idx = (nest_choice_probs == 0)
nest_choice_probs[zero_idx] = min_comp_value
# Return dictionary.
# Note the ".A" converts the numpy matrix into a numpy array
prob_dict["prob_given_nest"] = prob_given_nest
prob_dict["nest_choice_probs"] = nest_choice_probs
prob_dict["ind_sums_per_nest"] = ind_exp_sums_per_nest
return prob_dict | Parameters
----------
nest_coefs : 1D or 2D ndarray.
All elements should by ints, floats, or longs. If 1D, should have 1
element for each nesting coefficient being estimated. If 2D, should
have 1 column for each set of nesting coefficients being used to
predict the probabilities of each alternative being chosen. There
should be one row per nesting coefficient. Elements denote the inverse
of the scale coefficients for each of the lower level nests.
index_coefs : 1D or 2D ndarray.
All elements should by ints, floats, or longs. If 1D, should have 1
element for each utility coefficient being estimated (i.e.
num_features). If 2D, should have 1 column for each set of coefficients
being used to predict the probabilities of each alternative being
chosen. There should be one row per index coefficient.
design : 2D ndarray.
There should be one row per observation per available alternative.
There should be one column per utility coefficient being estimated. All
elements should be ints, floats, or longs.
rows_to_obs : 2D scipy sparse array.
There should be one row per observation per available alternative and
one column per observation. This matrix maps the rows of the design
matrix to the unique observations (on the columns).
rows_to_nests : 2D scipy sparse array.
There should be one row per observation per available alternative and
one column per nest. This matrix maps the rows of the design matrix to
the unique nests (on the columns).
chosen_row_to_obs : 2D scipy sparse array, or None, optional.
There should be one row per observation per available alternative and
one column per observation. This matrix indicates, for each observation
(on the columns), which rows of the design matrix were the realized
outcome. If an array is passed then an array of shape
(num_observations,) can be returned and each element will be the
probability of the realized outcome of the given observation.
Default == None.
return_type : str, optional.
Indicates what object(s) are to be returned from the function. Valid
values are: `['long_probs', 'chosen_probs', 'long_and_chosen_probs',
'all_prob_dict']`. If `long_probs`, the long format probabilities (a 1D
numpy array with one element per observation per available alternative)
will be returned. If `chosen_probs`, a 1D numpy array with one element
per observation will be returned, where the values are the
probabilities of the chosen alternative for the given observation. If
`long_and_chosen_probs`, a tuple of chosen_probs and long_probs will be
returned. If `all_prob_dict`, a dictionary will be returned. The values
will all be 1D numpy arrays of probabilities dictated by the value's
corresponding key. The keys will be `long_probs`, `nest_choice_probs`,
`prob_given_nest`, and `chosen_probs`. If chosen_row_to_obs is None,
then `chosen_probs` will be None. If `chosen_row_to_obs` is passed,
then `chosen_probs` will be a 1D array as described above.
`nest_choice_probs` is of the same shape as `rows_to_nests` and it
denotes the probability of each individual choosing each of the
possible nests. `prob_given_nest` is of the same shape as `long_probs`
and it denotes the probability of the individual associated with a
given row choosing the alternative associated with that row, given that
the individual chooses the nest that contains the given alternative.
Default == `long_probs`.
Returns
-------
See above for documentation of the `return_type` kwarg. |
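A minimal numeric sketch of the probability decomposition described above. All names and values here are hypothetical, chosen only to illustrate how the 'prob_given_nest' and 'nest_choice_probs' factors multiply into the long-format probabilities; this is not code from the library itself.

import numpy as np

# Toy setup: 1 observation, 3 alternatives; nest A = {0, 1}, nest B = {2}.
index = np.array([1.0, 0.5, 0.2])           # systematic utilities V_j
nest_coefs = {'A': 0.7, 'B': 1.0}           # nest coefficients (inverse scales)
nests = {'A': [0, 1], 'B': [2]}

# exp(V_j / mu_k) for each alternative, grouped by nest
exp_scaled = {k: np.exp(index[nests[k]] / nest_coefs[k]) for k in nests}
nest_sums = {k: v.sum() for k, v in exp_scaled.items()}

# Long-format P(j) = P(j | nest k) * P(nest k), i.e. the 'prob_given_nest'
# factor times the 'nest_choice_probs' factor described above.
denom = sum(nest_sums[k] ** nest_coefs[k] for k in nests)
long_probs = np.concatenate([
    (exp_scaled[k] / nest_sums[k]) * (nest_sums[k] ** nest_coefs[k] / denom)
    for k in nests])
print(long_probs, long_probs.sum())         # the probabilities sum to 1.0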
def find_usage(self):
"""
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
"""
logger.debug("Checking usage for service %s", self.service_name)
self.connect()
for lim in self.limits.values():
lim._reset_usage()
try:
self._find_delivery_streams()
except EndpointConnectionError as ex:
logger.warning(
'Caught exception when trying to use Firehose ('
'perhaps the Firehose service is not available in this '
'region?): %s', ex
)
self._have_usage = True
logger.debug("Done checking usage.") | Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`. |
def read(self, length):
"""
Read as many bytes from socket as specified in length.
Loop as long as every byte is read unless exception is raised.
"""
data = bytearray()
while len(data) != length:
chunk = self.sock.recv(length - len(data))
if not chunk:
    raise ConnectionError('Connection unexpectedly closed.')
data += chunk
return data | Read as many bytes from socket as specified in length.
Loop as long as every byte is read unless exception is raised. |
def get_index(self, index, type, alias=None, typed=None, read_only=True, kwargs=None):
"""
TESTS THAT THE INDEX EXISTS BEFORE RETURNING A HANDLE
"""
if kwargs.tjson != None:
Log.error("used `typed` parameter, not `tjson`")
if read_only:
# GET EXACT MATCH, OR ALIAS
aliases = wrap(self.get_aliases())
if index in aliases.index:
pass
elif index in aliases.alias:
match = [a for a in aliases if a.alias == index][0]
kwargs.alias = match.alias
kwargs.index = match.index
else:
Log.error("Can not find index {{index_name}}", index_name=kwargs.index)
return Index(kwargs=kwargs, cluster=self)
else:
# GET BEST MATCH, INCLUDING PROTOTYPE
best = self.get_best_matching_index(index, alias)
if not best:
Log.error("Can not find index {{index_name}}", index_name=kwargs.index)
if best.alias != None:
kwargs.alias = best.alias
kwargs.index = best.index
elif kwargs.alias == None:
kwargs.alias = kwargs.index
kwargs.index = best.index
return Index(kwargs=kwargs, cluster=self) | TESTS THAT THE INDEX EXISTS BEFORE RETURNING A HANDLE |
def check(self):
""" Check if data and third party tools are available
:raises: RuntimeError
"""
#for path in self.path.values():
# if not os.path.exists(path):
# raise RuntimeError("File '{}' is missing".format(path))
for tool in ('cd-hit', 'prank', 'hmmbuild', 'hmmpress', 'hmmscan', 'phmmer', 'mafft', 'meme'):
if not self.pathfinder.exists(tool):
raise RuntimeError("Dependency {} is missing".format(tool)) | Check if data and third party tools are available
:raises: RuntimeError |
def predictor(self, (i, j, A, alpha, Bb)):
"Add to chart any rules for B that could help extend this edge."
B = Bb[0]
if B in self.grammar.rules:
for rhs in self.grammar.rewrites_for(B):
self.add_edge([j, j, B, [], rhs]) | Add to chart any rules for B that could help extend this edge. |
def find(self, path, all=False):
"""
Looks for files in the extra locations
as defined in ``MEDIA_FIXTURES_FILES_DIRS``.
"""
matches = []
for prefix, root in self.locations:
if root not in searched_locations:
searched_locations.append(root)
matched_path = self.find_location(root, path, prefix)
if matched_path:
if not all:
return matched_path
matches.append(matched_path)
return matches | Looks for files in the extra locations
as defined in ``MEDIA_FIXTURES_FILES_DIRS``. |
def make_tempfile (self, want='handle', resolution='try_unlink', suffix='', **kwargs):
"""Get a context manager that creates and cleans up a uniquely-named temporary
file with a name similar to this path.
This function returns a context manager that creates a secure
temporary file with a path similar to *self*. In particular, if
``str(self)`` is something like ``foo/bar``, the path of the temporary
file will be something like ``foo/bar.ame8_2``.
The object returned by the context manager depends on the *want* argument:
``"handle"``
An open file-like object is returned. This is the object returned by
:class:`tempfile.NamedTemporaryFile`. Its name on the filesystem is
accessible as a string as its `name` attribute, or (a customization here)
as a :class:`Path` instance as its `path` attribute.
``"path"``
The temporary file is created as in ``"handle"``, but is then immediately
closed. A :class:`Path` instance pointing to the path of the temporary file is
instead returned.
If an exception occurs inside the context manager block, the temporary file is
left lying around. Otherwise, what happens to it upon exit from the context
manager depends on the *resolution* argument:
``"try_unlink"``
Call :meth:`try_unlink` on the temporary file — no exception is raised if
the file did not exist.
``"unlink"``
Call :meth:`unlink` on the temporary file — an exception is raised if
the file did not exist.
``"keep"``
The temporary file is left lying around.
``"overwrite"``
The temporary file is :meth:`rename`-d to overwrite *self*.
For instance, when rewriting important files, it’s typical to write
the new data to a temporary file, and only rename the temporary file
to the final destination at the end — that way, if a problem happens
while writing the new data, the original file is left unmodified;
otherwise you’d be stuck with a partially-written version of the file.
This pattern can be accomplished with::
p = Path ('path/to/important/file')
with p.make_tempfile (resolution='overwrite', mode='wt') as h:
print ('important stuff goes here', file=h)
The *suffix* argument is appended to the temporary file name after the
random portion. It defaults to the empty string. If you want it to
operate as a typical filename suffix, include a leading ``"."``.
Other **kwargs** are passed to :class:`tempfile.NamedTemporaryFile`.
"""
if want not in ('handle', 'path'):
raise ValueError ('unrecognized make_tempfile() "want" mode %r' % (want,))
if resolution not in ('unlink', 'try_unlink', 'keep', 'overwrite'):
raise ValueError ('unrecognized make_tempfile() "resolution" mode %r' % (resolution,))
return Path._PathTempfileContextManager (self, want, resolution, suffix, kwargs) | Get a context manager that creates and cleans up a uniquely-named temporary
file with a name similar to this path.
This function returns a context manager that creates a secure
temporary file with a path similar to *self*. In particular, if
``str(self)`` is something like ``foo/bar``, the path of the temporary
file will be something like ``foo/bar.ame8_2``.
The object returned by the context manager depends on the *want* argument:
``"handle"``
An open file-like object is returned. This is the object returned by
:class:`tempfile.NamedTemporaryFile`. Its name on the filesystem is
accessible as a string as its `name` attribute, or (a customization here)
as a :class:`Path` instance as its `path` attribute.
``"path"``
The temporary file is created as in ``"handle"``, but is then immediately
closed. A :class:`Path` instance pointing to the path of the temporary file is
instead returned.
If an exception occurs inside the context manager block, the temporary file is
left lying around. Otherwise, what happens to it upon exit from the context
manager depends on the *resolution* argument:
``"try_unlink"``
Call :meth:`try_unlink` on the temporary file — no exception is raised if
the file did not exist.
``"unlink"``
Call :meth:`unlink` on the temporary file — an exception is raised if
the file did not exist.
``"keep"``
The temporary file is left lying around.
``"overwrite"``
The temporary file is :meth:`rename`-d to overwrite *self*.
For instance, when rewriting important files, it’s typical to write
the new data to a temporary file, and only rename the temporary file
to the final destination at the end — that way, if a problem happens
while writing the new data, the original file is left unmodified;
otherwise you’d be stuck with a partially-written version of the file.
This pattern can be accomplished with::
p = Path ('path/to/important/file')
with p.make_tempfile (resolution='overwrite', mode='wt') as h:
print ('important stuff goes here', file=h)
The *suffix* argument is appended to the temporary file name after the
random portion. It defaults to the empty string. If you want it to
operate as a typical filename suffix, include a leading ``"."``.
Other **kwargs** are passed to :class:`tempfile.NamedTemporaryFile`. |
def prepare(self):
"""Prepare the ordered list of transformers and reset context state to initial."""
self.scope = 0
self.mapping = deque([0])
self._handler = [i() for i in sorted(self.handlers, key=lambda handler: handler.priority)] | Prepare the ordered list of transformers and reset context state to initial. |
def numpy_to_data_array(ary, *, var_name="data", coords=None, dims=None):
"""Convert a numpy array to an xarray.DataArray.
The first two dimensions will be (chain, draw), and any remaining
dimensions will be "shape".
If the numpy array is 1d, this dimension is interpreted as draw
If the numpy array is 2d, it is interpreted as (chain, draw)
If the numpy array is 3 or more dimensions, the last dimensions are kept as shapes.
Parameters
----------
ary : np.ndarray
A numpy array. If it has 2 or more dimensions, the first dimension should be
independent chains from a simulation. Use `np.expand_dims(ary, 0)` to add a
single dimension to the front if there is only 1 chain.
var_name : str
If there are no dims passed, this string is used to name dimensions
coords : dict[str, iterable]
A dictionary containing the values that are used as index. The key
is the name of the dimension, the values are the index values.
dims : List(str)
A list of coordinate names for the variable
Returns
-------
xr.DataArray
Will have the same data as passed, but with coordinates and dimensions
"""
# manage and transform copies
default_dims = ["chain", "draw"]
ary = np.atleast_2d(ary)
n_chains, n_samples, *shape = ary.shape
if n_chains > n_samples:
warnings.warn(
"More chains ({n_chains}) than draws ({n_samples}). "
"Passed array should have shape (chains, draws, *shape)".format(
n_chains=n_chains, n_samples=n_samples
),
SyntaxWarning,
)
dims, coords = generate_dims_coords(
shape, var_name, dims=dims, coords=coords, default_dims=default_dims
)
# reversed order for default dims: 'chain', 'draw'
if "draw" not in dims:
dims = ["draw"] + dims
if "chain" not in dims:
dims = ["chain"] + dims
if "chain" not in coords:
coords["chain"] = np.arange(n_chains)
if "draw" not in coords:
coords["draw"] = np.arange(n_samples)
# filter coords based on the dims
coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in dims}
return xr.DataArray(ary, coords=coords, dims=dims) | Convert a numpy array to an xarray.DataArray.
The first two dimensions will be (chain, draw), and any remaining
dimensions will be "shape".
If the numpy array is 1d, this dimension is interpreted as draw
If the numpy array is 2d, it is interpreted as (chain, draw)
If the numpy array is 3 or more dimensions, the last dimensions are kept as shapes.
Parameters
----------
ary : np.ndarray
A numpy array. If it has 2 or more dimensions, the first dimension should be
independent chains from a simulation. Use `np.expand_dims(ary, 0)` to add a
single dimension to the front if there is only 1 chain.
var_name : str
If there are no dims passed, this string is used to name dimensions
coords : dict[str, iterable]
A dictionary containing the values that are used as index. The key
is the name of the dimension, the values are the index values.
dims : List(str)
A list of coordinate names for the variable
Returns
-------
xr.DataArray
Will have the same data as passed, but with coordinates and dimensions |
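A hedged usage sketch for the converter above. It assumes numpy_to_data_array and its generate_dims_coords helper are importable from the same module; the variable and coordinate names are made up for illustration.

import numpy as np

# Hypothetical posterior draws: 4 chains, 500 draws, 8 "school" parameters.
ary = np.random.randn(4, 500, 8)
da = numpy_to_data_array(ary, var_name="theta",
                         dims=["school"],
                         coords={"school": np.arange(8)})
print(da.dims)   # expected: ('chain', 'draw', 'school')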
def venv_pth(self, dirs):
'''
Add the directories in `dirs` to the `sys.path`. A venv.pth file
will be written in the site-packages dir of this virtualenv to add
dirs to sys.path.
dirs: a list of directories.
'''
# Create venv.pth to add dirs to sys.path when using the virtualenv.
text = StringIO.StringIO()
text.write("# Autogenerated file. Do not modify.\n")
for path in dirs:
text.write('{}\n'.format(path))
put(text, os.path.join(self.site_packages_dir(), 'venv.pth'), mode=0664) | Add the directories in `dirs` to the `sys.path`. A venv.pth file
will be written in the site-packages dir of this virtualenv to add
dirs to sys.path.
dirs: a list of directories. |
def command_upgrade(self):
"""Recreate repositories package lists
"""
if len(self.args) == 1 and self.args[0] == "upgrade":
Initialization(False).upgrade(only="")
elif (len(self.args) == 2 and self.args[0] == "upgrade" and
self.args[1].startswith("--only=")):
repos = self.args[1].split("=")[-1].split(",")
for rp in repos:
if rp not in self.meta.repositories:
repos.remove(rp)
Initialization(False).upgrade(repos)
else:
usage("") | Recreate repositories package lists |
def def_links(mobj):
"""Define Sphinx requirements links."""
fdict = json_load(os.path.join("data", "requirements.json"))
sdeps = sorted(fdict.keys())
olines = []
for item in sdeps:
olines.append(
".. _{name}: {url}\n".format(
name=fdict[item]["name"], url=fdict[item]["url"]
)
)
ret = []
for line in olines:
wobj = textwrap.wrap(line, width=LINE_WIDTH, subsequent_indent=" ")
ret.append("\n".join([item for item in wobj]))
mobj.out("\n".join(ret)) | Define Sphinx requirements links. |
def export(self, name, columns, points):
"""Write the points in MQTT."""
WHITELIST = '_-' + string.ascii_letters + string.digits
SUBSTITUTE = '_'
def whitelisted(s,
whitelist=WHITELIST,
substitute=SUBSTITUTE):
return ''.join(c if c in whitelist else substitute for c in s)
for sensor, value in zip(columns, points):
try:
sensor = [whitelisted(name) for name in sensor.split('.')]
tobeexport = [self.topic, self.hostname, name]
tobeexport.extend(sensor)
topic = '/'.join(tobeexport)
self.client.publish(topic, value)
except Exception as e:
logger.error("Can not export stats to MQTT server (%s)" % e) | Write the points in MQTT. |
def get_minimum_size(self, data):
"""Returns the rotated minimum size."""
size = self.element.get_minimum_size(data)
if self.angle in (RotateLM.NORMAL, RotateLM.UPSIDE_DOWN):
return size
else:
return datatypes.Point(size.y, size.x) | Returns the rotated minimum size. |
def current(cls):
"""
Helper method for getting the current peer of whichever host we're
running on.
"""
name = socket.getfqdn()
ip = socket.gethostbyname(name)
return cls(name, ip) | Helper method for getting the current peer of whichever host we're
running on. |
def _auth_session(self, username, password):
"""
Creates session to Hetzner account, authenticates with given credentials and
returns the session, if authentication was successful. Otherwise raises error.
"""
api = self.api[self.account]['auth']
endpoint = api.get('endpoint', self.api[self.account]['endpoint'])
session = requests.Session()
session_retries = Retry(total=10, backoff_factor=0.5)
session_adapter = requests.adapters.HTTPAdapter(max_retries=session_retries)
session.mount('https://', session_adapter)
response = session.request('GET', endpoint + api['GET'].get('url', '/'))
dom = Provider._filter_dom(response.text, api['filter'])
data = Provider._extract_hidden_data(dom)
data[api['user']], data[api['pass']] = username, password
response = session.request('POST', endpoint + api['POST']['url'], data=data)
if Provider._filter_dom(response.text, api['filter']):
LOGGER.error('Hetzner => Unable to authenticate session with %s account \'%s\': '
'Invalid credentials',
self.account, username)
raise AssertionError
LOGGER.info('Hetzner => Authenticate session with %s account \'%s\'',
self.account, username)
return session | Creates session to Hetzner account, authenticates with given credentials and
returns the session, if authentication was successful. Otherwise raises error. |
def get_representative_cases(self):
"""
>>> armr = OldNorseNoun("armr", decl_utils.Gender.masculine)
>>> armr.set_representative_cases("armr", "arms", "armar")
>>> armr.get_representative_cases()
('armr', 'arms', 'armar')
:return: nominative singular, genitive singular, nominative plural
"""
return (self.get_declined(decl_utils.Case.nominative, decl_utils.Number.singular),
self.get_declined(decl_utils.Case.genitive, decl_utils.Number.singular),
self.get_declined(decl_utils.Case.nominative, decl_utils.Number.plural)) | >>> armr = OldNorseNoun("armr", decl_utils.Gender.masculine)
>>> armr.set_representative_cases("armr", "arms", "armar")
>>> armr.get_representative_cases()
('armr', 'arms', 'armar')
:return: nominative singular, genitive singular, nominative plural
def list_config(root_package = 'vlcp'):
'''
Walk through all the sub modules, find subclasses of vlcp.config.Configurable,
list their available configurations through _default_ prefix
'''
pkg = __import__(root_package, fromlist=['_'])
return_dict = OrderedDict()
for imp, module, _ in walk_packages(pkg.__path__, root_package + '.'):
m = __import__(module, fromlist = ['_'])
for name, v in vars(m).items():
if v is not None and isinstance(v, type) and issubclass(v, Configurable) \
and v is not Configurable \
and hasattr(v, '__dict__') and 'configkey' in v.__dict__ \
and v.__module__ == module:
configkey = v.__dict__['configkey']
if configkey not in return_dict:
configs = OrderedDict()
v2 = v
parents = [v2]
while True:
parent = None
for c in v2.__bases__:
if issubclass(c, Configurable):
parent = c
if parent is None or parent is Configurable:
break
if hasattr(parent, '__dict__') and 'configkey' not in parent.__dict__:
parents.append(parent)
v2 = parent
else:
break
for v2 in reversed(parents):
tmp_configs = {}
for k, default_value in v2.__dict__.items():
if k.startswith('_default_'):
config_attr = k[len('_default_'):]
if config_attr in v.__dict__:
continue
configname = configkey + '.' + config_attr
tmp_configs.setdefault(configname, OrderedDict())['default'] = \
pformat(default_value, width=10)
# Inspect the source lines to find remarks for these configurations
lines, _ = getsourcelines(v2)
last_remark = []
for l in lines:
l = l.strip()
if not l:
continue
if l.startswith('#'):
last_remark.append(l[1:])
else:
if l.startswith('_default_'):
key, sep, _ = l.partition('=')
if sep and key.startswith('_default_'):
configname = configkey + '.' + key[len('_default_'):].strip()
if configname in tmp_configs and configname not in configs:
configs[configname] = tmp_configs.pop(configname)
if configname in configs and last_remark:
configs[configname]['description'] = cleandoc('\n' + '\n'.join(last_remark))
del last_remark[:]
for key in tmp_configs:
if key not in configs:
configs[key] = tmp_configs[key]
if configs:
return_dict[configkey] = OrderedDict((('class', v.__module__ + '.' + name),
('classdescription', getdoc(v)),
('configs', configs)))
return return_dict | Walk through all the sub modules, find subclasses of vlcp.config.Configurable,
list their available configurations through _default_ prefix |
def _apply_role_tree(self, perm_tree, role):
"""In permission tree, sets `'checked': True` for the permissions that the role has."""
role_permissions = role.get_permissions()
for perm in role_permissions:
self._traverse_tree(perm_tree, perm)['checked'] = True
return perm_tree | In permission tree, sets `'checked': True` for the permissions that the role has. |
def smove(self, src, dst, value):
"""Emulate smove."""
src_set = self._get_set(src, 'SMOVE')
dst_set = self._get_set(dst, 'SMOVE')
value = self._encode(value)
if value not in src_set:
return False
src_set.discard(value)
dst_set.add(value)
self.redis[self._encode(src)], self.redis[self._encode(dst)] = src_set, dst_set
return True | Emulate smove. |
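A small behaviour sketch for the SMOVE emulation above. It assumes the surrounding mock client (called r here) also emulates sadd and smembers in the usual way; the key and member names are illustrative.

r.sadd('src', 'a', 'b')                      # src = {a, b}
r.sadd('dst', 'c')                           # dst = {c}
assert r.smove('src', 'dst', 'a')            # 'a' is moved -> True
assert not r.smove('src', 'dst', 'zzz')      # not a member -> False, no change
print(r.smembers('src'), r.smembers('dst'))  # {'b'} and {'a', 'c'} (encoded values)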
def _check_for_api_errors(geocoding_results):
"""
Raise any exceptions if there were problems reported
in the api response.
"""
status_result = geocoding_results.get("STATUS", {})
if "NO_RESULTS" in status_result.get("status", ""):
return
api_call_success = status_result.get("status", "") == "SUCCESS"
if not api_call_success:
access_error = status_result.get("access")
access_error_to_exception = {
'API_KEY_INVALID': GeocoderAuthenticationFailure,
'OVER_QUERY_LIMIT': GeocoderQuotaExceeded,
}
exception_cls = access_error_to_exception.get(
access_error, GeocoderServiceError
)
raise exception_cls(access_error) | Raise any exceptions if there were problems reported
in the api response. |
def check_list_type(objects, allowed_type, name, allow_none=True):
"""Verify that objects in list are of the allowed type or raise TypeError.
Args:
objects: The list of objects to check.
allowed_type: The allowed type of items in 'settings'.
name: Name of the list of objects, added to the exception.
allow_none: If set, None is also allowed.
Raises:
TypeError: if object is not of the allowed type.
Returns:
The list of objects, for convenient use in assignment.
"""
if objects is None:
if not allow_none:
raise TypeError('%s is None, which is not allowed.' % name)
return objects
if not isinstance(objects, (tuple, list)):
raise TypeError('%s is not a list.' % name)
if not all(isinstance(i, allowed_type) for i in objects):
type_list = sorted(list(set(type(obj) for obj in objects)))
raise TypeError('%s contains types that don\'t match %s: %s' %
(name, allowed_type.__name__, type_list))
return objects | Verify that objects in list are of the allowed type or raise TypeError.
Args:
objects: The list of objects to check.
allowed_type: The allowed type of items in 'settings'.
name: Name of the list of objects, added to the exception.
allow_none: If set, None is also allowed.
Raises:
TypeError: if object is not of the allowed type.
Returns:
The list of objects, for convenient use in assignment. |
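A short usage sketch for check_list_type as documented above; the argument values are illustrative.

names = check_list_type(['a', 'b'], str, 'names')     # returns the list unchanged
assert check_list_type(None, str, 'names') is None    # None allowed by default
try:
    check_list_type(['a', 1], str, 'names')           # mixed types -> TypeError
except TypeError as exc:
    print('rejected:', exc)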
def register(cls):
"""
Register a given model in the registry
"""
registry_entry = RegistryEntry(category = cls.category, namespace = cls.namespace, name = cls.name, cls=cls)
if registry_entry not in registry and not exists_in_registry(cls.category, cls.namespace, cls.name):
registry.append(registry_entry)
else:
log.warn("Class {0} already in registry".format(cls)) | Register a given model in the registry |
def addAsn1MibSource(self, *asn1Sources, **kwargs):
"""Adds path to a repository to search ASN.1 MIB files.
Parameters
----------
*asn1Sources :
one or more URL in form of :py:obj:`str` identifying local or
remote ASN.1 MIB repositories. Path must include the *@mib@*
component which will be replaced with MIB module name at the
time of search.
Returns
-------
: :py:class:`~pysnmp.smi.rfc1902.ObjectIdentity`
reference to itself
Notes
-----
Please refer to :py:class:`~pysmi.reader.localfile.FileReader`,
:py:class:`~pysmi.reader.httpclient.HttpReader` and
:py:class:`~pysmi.reader.ftpclient.FtpReader` classes for
in-depth information on ASN.1 MIB lookup.
Examples
--------
>>> ObjectIdentity('SNMPv2-MIB', 'sysDescr').addAsn1MibSource('http://mibs.snmplabs.com/asn1/@mib@')
ObjectIdentity('SNMPv2-MIB', 'sysDescr')
>>>
"""
if self._asn1SourcesToAdd is None:
self._asn1SourcesToAdd = asn1Sources
else:
self._asn1SourcesToAdd += asn1Sources
if self._asn1SourcesOptions:
self._asn1SourcesOptions.update(kwargs)
else:
self._asn1SourcesOptions = kwargs
return self | Adds path to a repository to search ASN.1 MIB files.
Parameters
----------
*asn1Sources :
one or more URL in form of :py:obj:`str` identifying local or
remote ASN.1 MIB repositories. Path must include the *@mib@*
component which will be replaced with MIB module name at the
time of search.
Returns
-------
: :py:class:`~pysnmp.smi.rfc1902.ObjectIdentity`
reference to itself
Notes
-----
Please refer to :py:class:`~pysmi.reader.localfile.FileReader`,
:py:class:`~pysmi.reader.httpclient.HttpReader` and
:py:class:`~pysmi.reader.ftpclient.FtpReader` classes for
in-depth information on ASN.1 MIB lookup.
Examples
--------
>>> ObjectIdentity('SNMPv2-MIB', 'sysDescr').addAsn1MibSource('http://mibs.snmplabs.com/asn1/@mib@')
ObjectIdentity('SNMPv2-MIB', 'sysDescr')
>>> |
def load(self):
""" Function load
Get the list of all objects
@return RETURN: A ForemanItem list
"""
cl_tmp = self.api.list(self.objName, limit=self.searchLimit).values()
cl = []
for i in cl_tmp:
cl.extend(i)
return {x[self.index]: ItemPuppetClass(self.api, x['id'],
self.objName, self.payloadObj,
x)
for x in cl} | Function load
Get the list of all objects
@return RETURN: A ForemanItem list |
def _generate_request_handler_proxy(handler_class, handler_args, name):
"""When a tornado.web.RequestHandler gets mounted we create a launcher function"""
@scope.inject
def request_handler_wrapper(app, handler, **kwargs):
handler = handler_class(app, handler.request, **handler_args)
handler._execute([], **kwargs)
request_handler_wrapper.__name__ = name
request_handler_wrapper.handler_class = handler_class
request_handler_wrapper.handler_args = handler_args
return request_handler_wrapper | When a tornado.web.RequestHandler gets mounted we create a launcher function |
def _prepare_sets(self, sets):
"""
The original "_prepare_sets" method simple return the list of sets in
_lazy_collection, know to be all keys of redis sets.
As the new "intersect" method can accept different types of "set", we
have to handle them because we must return only keys of redis sets.
"""
if self.stored_key and not self.stored_key_exists():
raise DoesNotExist('This collection is based on a previous one, '
'stored at a key that does not exist anymore.')
conn = self.cls.get_connection()
all_sets = set()
tmp_keys = set()
lists = []
def add_key(key, key_type=None, is_tmp=False):
if not key_type:
key_type = conn.type(key)
if key_type == 'set':
all_sets.add(key)
elif key_type == 'zset':
all_sets.add(key)
self._has_sortedsets = True
elif key_type == 'list':
# if only one list, and no sets, at the end we'll directly use the list
# else lists will be converted to sets
lists.append(key)
elif key_type == 'none':
# considered as an empty set
all_sets.add(key)
else:
raise ValueError('Cannot use redis key %s of type %s for filtering' % (
key, key_type
))
if is_tmp:
tmp_keys.add(key)
for set_ in sets:
if isinstance(set_, str):
add_key(set_)
elif isinstance(set_, ParsedFilter):
value = set_.value
# We have a RedisModel and we'll use its pk, or a RedisField
# (single value) and we'll use its value
if isinstance(value, RedisModel):
value = value.pk.get()
elif isinstance(value, SingleValueField):
value = value.proxy_get()
elif isinstance(value, RedisField):
raise ValueError(u'Invalid filter value for %s: %s' % (set_.index.field.name, value))
for index_key, key_type, is_tmp in set_.index.get_filtered_keys(
set_.suffix,
accepted_key_types=self._accepted_key_types,
*(set_.extra_field_parts + [value])
):
if key_type not in self._accepted_key_types:
raise ValueError('The index key returned by the index %s is not valid' % (
set_.index.__class__.__name__
))
add_key(index_key, key_type, is_tmp)
elif isinstance(set_, SetField):
# Use the set key. If we need to intersect, we'll use
# sunionstore, and if not, store accepts set
add_key(set_.key, 'set')
elif isinstance(set_, SortedSetField):
# Use the sorted set key. If we need to intersect, we'll use
# zinterstore, and if not, store accepts zset
add_key(set_.key, 'zset')
elif isinstance(set_, (ListField, _StoredCollection)):
add_key(set_.key, 'list')
elif isinstance(set_, tuple) and len(set_):
# if we got a list or set, create a redis set to hold its values
tmp_key = self._unique_key()
conn.sadd(tmp_key, *set_)
add_key(tmp_key, 'set', True)
else:
raise ValueError('Invalid filter type')
if lists:
if not len(all_sets) and len(lists) == 1:
# only one list, nothing else, we can return the list key
all_sets = {lists[0]}
else:
# we have many sets/lists, we need to convert them to sets
for list_key in lists:
# many sets, convert the list to a simple redis set
tmp_key = self._unique_key()
self._list_to_set(list_key, tmp_key)
add_key(tmp_key, 'set', True)
return all_sets, tmp_keys | The original "_prepare_sets" method simply returns the list of sets in
_lazy_collection, known to be all keys of redis sets.
As the new "intersect" method can accept different types of "set", we
have to handle them because we must return only keys of redis sets. |
def _lcs(x, y):
"""Computes the length of the LCS between two seqs.
The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs
"""
n, m = len(x), len(y)
table = {}
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table | Computes the length of the LCS between two seqs.
The implementation below uses a DP programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Table of dictionary of coord and len lcs |
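A worked example for the table returned above; the bottom-right entry is the LCS length of the two word sequences.

x = ['the', 'quick', 'brown', 'fox']
y = ['the', 'brown', 'dog']
table = _lcs(x, y)
print(table[len(x), len(y)])   # 2, for the common subsequence ['the', 'brown']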
def values(self, with_defaults=True):
""" Return the values dictionary, defaulting to default values """
return dict(((k, str(v)) for k, v in self._inputs.items() if not v.is_empty(with_defaults))) | Return the values dictionary, defaulting to default values |
def _generate(self, pset, min_, max_, condition, type_=None):
"""Generate a Tree as a list of lists.
The tree is built from the root to the leaves, and it stops growing when
the condition is fulfilled.
Parameters
----------
pset: PrimitiveSetTyped
Primitive set from which primitives are selected.
min_: int
Minimum height of the produced trees.
max_: int
Maximum Height of the produced trees.
condition: function
The condition is a function that takes two arguments,
the height of the tree to build and the current
depth in the tree.
type_: class
The type that should return the tree when called, when
:obj:None (default) no return type is enforced.
Returns
-------
individual: list
A grown tree with leaves at possibly different depths
depending on the condition function.
"""
if type_ is None:
type_ = pset.ret
expr = []
height = np.random.randint(min_, max_)
stack = [(0, type_)]
while len(stack) != 0:
depth, type_ = stack.pop()
# We've added a type_ parameter to the condition function
if condition(height, depth, type_):
try:
term = np.random.choice(pset.terminals[type_])
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError(
'The gp.generate function tried to add '
'a terminal of type {}, but there is'
'none available. {}'.format(type_, traceback)
)
if inspect.isclass(term):
term = term()
expr.append(term)
else:
try:
prim = np.random.choice(pset.primitives[type_])
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError(
'The gp.generate function tried to add '
'a primitive of type {}, but there is'
'none available. {}'.format(type_, traceback)
)
expr.append(prim)
for arg in reversed(prim.args):
stack.append((depth + 1, arg))
return expr | Generate a Tree as a list of lists.
The tree is built from the root to the leaves, and it stops growing when
the condition is fulfilled.
Parameters
----------
pset: PrimitiveSetTyped
Primitive set from which primitives are selected.
min_: int
Minimum height of the produced trees.
max_: int
Maximum Height of the produced trees.
condition: function
The condition is a function that takes two arguments,
the height of the tree to build and the current
depth in the tree.
type_: class
The type that should return the tree when called, when
:obj:None (default) no return type is enforced.
Returns
-------
individual: list
A grown tree with leaves at possibly different depths
depending on the condition function.
def magnitude(self):
"""Return the magnitude when treating the point as a vector."""
return math.sqrt( self.x * self.x + self.y * self.y ) | Return the magnitude when treating the point as a vector. |
def search_end_date(self, search_end_date):
"""
:type search_end_date: astropy.io.Time
:param search_end_date: search for frames taken after the given date.
"""
assert isinstance(search_end_date, Time)
self._search_end_date = search_end_date.replicate(format='iso')
self._search_end_date.out_subfmt = 'date' | :type search_end_date: astropy.io.Time
:param search_end_date: search for frames taken after the given date.
def parse_enum_value_definition(lexer: Lexer) -> EnumValueDefinitionNode:
"""EnumValueDefinition: Description? EnumValue Directives[Const]?"""
start = lexer.token
description = parse_description(lexer)
name = parse_name(lexer)
directives = parse_directives(lexer, True)
return EnumValueDefinitionNode(
description=description, name=name, directives=directives, loc=loc(lexer, start)
) | EnumValueDefinition: Description? EnumValue Directives[Const]? |
def calculate_size(name, thread_id):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += LONG_SIZE_IN_BYTES
return data_size | Calculates the request payload size |
def filter_files_extensions(files, extension_lists):
"""
Put the files in buckets according to extension_lists
files=[movie.avi, movie.srt], extension_lists=[[avi],[srt]] ==> [[movie.avi],[movie.srt]]
:param files: A list of files
:param extension_lists: A list of list of extensions
:return: The files filtered and sorted according to extension_lists
"""
log.debug('filter_files_extensions: files="{}"'.format(files))
result = [[] for _ in extension_lists]
for file in files:
ext = file.suffix[1:].lower()
for ext_i, ext_list in enumerate(extension_lists):
if ext in ext_list:
result[ext_i].append(file)
log.debug('filter_files_extensions result:{}'.format(result))
return result | Put the files in buckets according to extension_lists
files=[movie.avi, movie.srt], extension_lists=[[avi],[srt]] ==> [[movie.avi],[movie.srt]]
:param files: A list of files
:param extension_lists: A list of list of extensions
:return: The files filtered and sorted according to extension_lists |
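A usage sketch for the bucketing above. It assumes the module-level log used by the function is configured, and uses pathlib paths since the function reads file.suffix.

from pathlib import Path

files = [Path('movie.avi'), Path('movie.srt'), Path('notes.txt')]
buckets = filter_files_extensions(files, [['avi', 'mkv'], ['srt']])
print(buckets)   # movie.avi in the first bucket, movie.srt in the second; notes.txt matches no bucket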
def woodbury_vector(self):
"""
Woodbury vector in the gaussian likelihood case only is defined as
$$
(K_{xx} + \Sigma)^{-1}Y
\Sigma := \texttt{Likelihood.variance / Approximate likelihood covariance}
$$
"""
if self._woodbury_vector is None:
self._woodbury_vector, _ = dpotrs(self.K_chol, self.mean - self._prior_mean)
return self._woodbury_vector | Woodbury vector in the gaussian likelihood case only is defined as
$$
(K_{xx} + \Sigma)^{-1}Y
\Sigma := \texttt{Likelihood.variance / Approximate likelihood covariance}
$$ |
def create_table(self, table, fields):
""" Responsys.createTable call
Accepts:
InteractObject table
list fields
Returns True on success
"""
table = table.get_soap_object(self.client)
return self.call('createTable', table, fields) | Responsys.createTable call
Accepts:
InteractObject table
list fields
Returns True on success |
def _reducedProtToPeps(protToPeps, proteins):
"""Returns a new, reduced "protToPeps" dictionary that does not contain
entries present in "proteins".
:param protToPeps: dict, for each protein (=key) contains a set of
associated peptides (=value). For Example {protein: {peptide, ...}, ...}
:param proteins: a list of proteinSet
:returns: dict, protToPeps not containing entries from "proteins"
"""
return {k: v for k, v in viewitems(protToPeps) if k not in proteins} | Returns a new, reduced "protToPeps" dictionary that does not contain
entries present in "proteins".
:param protToPeps: dict, for each protein (=key) contains a set of
associated peptides (=value). For Example {protein: {peptide, ...}, ...}
:param proteins: a list of proteinSet
:returns: dict, protToPeps not containing entries from "proteins" |
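A tiny illustration of the reduction above; it assumes viewitems from the future/six compatibility layer imported by the module, and the protein and peptide names are made up.

protToPeps = {'P1': {'pepA', 'pepB'}, 'P2': {'pepB', 'pepC'}}
print(_reducedProtToPeps(protToPeps, ['P2']))   # {'P1': {'pepA', 'pepB'}}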
def emit(self, record):
"""Emit a formatted log record via DDP."""
if getattr(this, 'subs', {}).get(LOGS_NAME, False):
self.format(record)
this.send({
'msg': ADDED,
'collection': LOGS_NAME,
'id': meteor_random_id('/collection/%s' % LOGS_NAME),
'fields': {
attr: {
# typecasting methods for specific attributes
'args': lambda args: [repr(arg) for arg in args],
'created': datetime.datetime.fromtimestamp,
'exc_info': stacklines_or_none,
}.get(
attr,
lambda val: val # default typecasting method
)(getattr(record, attr, None))
for attr in (
'args',
'asctime',
'created',
'exc_info',
'filename',
'funcName',
'levelname',
'levelno',
'lineno',
'module',
'msecs',
'message',
'name',
'pathname',
'process',
'processName',
'relativeCreated',
'thread',
'threadName',
)
},
}) | Emit a formatted log record via DDP. |
def weighted_round_robin(iterable):
'''Takes an iterable of tuples of <item>, <weight> and cycles around them,
returning heavier (integer) weighted items more frequently.
'''
cyclable_list = []
assigned_weight = 0
still_to_process = [
(item, weight) for item, weight in
sorted(iterable, key=lambda tup: tup[1], reverse=True)]
while still_to_process:
for i, (item, weight) in enumerate(still_to_process):
if weight > assigned_weight:
cyclable_list.append(item)
else:
del still_to_process[i]
assigned_weight += 1
return cycle(cyclable_list) | Takes an iterable of tuples of <item>, <weight> and cycles around them,
returning heavier (integer) weighted items more frequently. |
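A quick usage sketch showing the weighting: one full cycle contains the heavier item three times and the lighter one once.

from itertools import islice

rr = weighted_round_robin([('a', 3), ('b', 1)])
print(list(islice(rr, 8)))   # ['a', 'b', 'a', 'a', 'a', 'b', 'a', 'a']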
def standings(self, league_table, league):
"""Store output of league standings to a CSV file"""
headers = ['Position', 'Team Name', 'Games Played', 'Goal For',
'Goals Against', 'Goal Difference', 'Points']
result = [headers]
result.extend([team['position'],
team['team']['name'],
team['playedGames'],
team['goalsFor'],
team['goalsAgainst'],
team['goalDifference'],
team['points']]
for team in league_table['standings'][0]['table'])
self.generate_output(result) | Store output of league standings to a CSV file |
def run_initial(self, events):
"""Runs the initial batch upload
:param events: an iterable containing events
"""
self_name = type(self).__name__
for i, batch in enumerate(grouper(events, self.INITIAL_BATCH_SIZE, skip_missing=True), 1):
self.logger.debug('%s processing initial batch %d', self_name, i)
for j, processed_batch in enumerate(grouper(
batch, self.BATCH_SIZE, skip_missing=True), 1):
self.logger.info('%s uploading initial chunk #%d (batch %d)', self_name, j, i)
self.upload_records(processed_batch, from_queue=False) | Runs the initial batch upload
:param events: an iterable containing events |
def set_pdb_trace(pm=False):
"""Start the Python debugger when robotframework is running.
This makes sure that pdb can use stdin/stdout even though
robotframework has redirected I/O.
"""
import sys
import pdb
for attr in ("stdin", "stdout", "stderr"):
setattr(sys, attr, getattr(sys, "__%s__" % attr))
if pm:
# Post-mortem debugging of an exception
pdb.post_mortem()
else:
pdb.set_trace() | Start the Python debugger when robotframework is running.
This makes sure that pdb can use stdin/stdout even though
robotframework has redirected I/O. |
def combineReads(filename, sequences, readClass=DNARead,
upperCase=False, idPrefix='command-line-read-'):
"""
Combine FASTA reads from a file and/or sequence strings.
@param filename: A C{str} file name containing FASTA reads.
@param sequences: A C{list} of C{str} sequences. If a sequence
contains spaces, the last field (after splitting on spaces) will be
used as the sequence and the first fields will be used as the sequence
id.
@param readClass: The class of the individual reads.
@param upperCase: If C{True}, reads will be converted to upper case.
@param idPrefix: The C{str} prefix that will be used for the id of the
sequences in C{sequences} that do not have an id specified. A trailing
sequence number will be appended to this prefix. Note that
'command-line-read-', the default id prefix, could collide with ids in
the FASTA file, if given. So output might be ambiguous. That's why we
allow the caller to specify a custom prefix.
@return: A C{FastaReads} instance.
"""
# Read sequences from a FASTA file, if given.
if filename:
reads = FastaReads(filename, readClass=readClass, upperCase=upperCase)
else:
reads = Reads()
# Add any individually specified subject sequences.
if sequences:
for count, sequence in enumerate(sequences, start=1):
# Try splitting the sequence on its last space and using the
# first part of the split as the read id. If there's no space,
# assign a generic id.
parts = sequence.rsplit(' ', 1)
if len(parts) == 2:
readId, sequence = parts
else:
readId = '%s%d' % (idPrefix, count)
if upperCase:
sequence = sequence.upper()
read = readClass(readId, sequence)
reads.add(read)
return reads | Combine FASTA reads from a file and/or sequence strings.
@param filename: A C{str} file name containing FASTA reads.
@param sequences: A C{list} of C{str} sequences. If a sequence
contains spaces, the last field (after splitting on spaces) will be
used as the sequence and the first fields will be used as the sequence
id.
@param readClass: The class of the individual reads.
@param upperCase: If C{True}, reads will be converted to upper case.
@param idPrefix: The C{str} prefix that will be used for the id of the
sequences in C{sequences} that do not have an id specified. A trailing
sequence number will be appended to this prefix. Note that
'command-line-read-', the default id prefix, could collide with ids in
the FASTA file, if given. So output might be ambiguous. That's why we
allow the caller to specify a custom prefix.
@return: A C{FastaReads} instance. |
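A hedged usage sketch; it assumes the Reads, FastaReads and read classes imported by the module are available, and the sequences below only illustrate the id-splitting and idPrefix behaviour.

reads = combineReads(None, ['id1 ACGT', 'ggtt'], upperCase=True)
for read in reads:
    print(read.id, read.sequence)
# id1 ACGT
# command-line-read-2 GGTT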
async def _play(self, ctx, *, query: str):
""" Searches and plays a song from a given query. """
player = self.bot.lavalink.players.get(ctx.guild.id)
query = query.strip('<>')
if not url_rx.match(query):
query = f'ytsearch:{query}'
tracks = await self.bot.lavalink.get_tracks(query)
if not tracks:
return await ctx.send('Nothing found!')
embed = discord.Embed(color=discord.Color.blurple())
if 'list' in query and 'ytsearch:' not in query:
for track in tracks:
player.add(requester=ctx.author.id, track=track)
embed.title = 'Playlist enqueued!'
embed.description = f'Imported {len(tracks)} tracks from the playlist!'
await ctx.send(embed=embed)
else:
track_title = tracks[0]["info"]["title"]
track_uri = tracks[0]["info"]["uri"]
embed.title = "Track enqueued!"
embed.description = f'[{track_title}]({track_uri})'
player.add(requester=ctx.author.id, track=tracks[0])
if not player.is_playing:
await player.play() | Searches and plays a song from a given query. |
def list_of_vars(arg_plot):
"""Construct list of variables per plot.
Args:
arg_plot (str): string with variable names separated with
``-`` (figures), ``.`` (subplots) and ``,`` (same subplot).
Returns:
three nested lists of str
- variables on the same subplot;
- subplots on the same figure;
- figures.
"""
lovs = [[[var for var in svars.split(',') if var]
for svars in pvars.split('.') if svars]
for pvars in arg_plot.split('-') if pvars]
lovs = [[slov for slov in lov if slov] for lov in lovs if lov]
return [lov for lov in lovs if lov] | Construct list of variables per plot.
Args:
arg_plot (str): string with variable names separated with
``-`` (figures), ``.`` (subplots) and ``,`` (same subplot).
Returns:
three nested lists of str
- variables on the same subplot;
- subplots on the same figure;
- figures. |
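A usage sketch of the parsing above: figures are separated by '-', subplots by '.', and same-subplot variables by ','.

print(list_of_vars('T,v.p-eta'))
# [[['T', 'v'], ['p']], [['eta']]]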
def generate_markdown(cls):
"""
Documents values in markdown
"""
lines = []
if cls.__doc__:
lines.extend(['# {}'.format(cls.__doc__), ''])
for k, v in cls._values.items():
lines.append('* **{}** '.format(k))
if v.required:
lines[-1] = lines[-1] + '_REQUIRED_ '
if v.help:
lines.append(' {} '.format(v.help))
lines.append(' type: `{}` '.format(v.cast_as.__name__))
if v.default is not None:
lines.append(' default: `{}` '.format(v.default))
return '\n'.join(lines) | Documents values in markdown |
def is_auto_partition_required(self, brain_or_object):
"""Returns whether the passed in object needs to be partitioned
"""
obj = api.get_object(brain_or_object)
if not IAnalysisRequest.providedBy(obj):
return False
template = obj.getTemplate()
return template and template.getAutoPartition() | Returns whether the passed in object needs to be partitioned |
def AddWeight(self, path_segment_index, weight):
"""Adds a weight for a specific path segment index.
Args:
path_segment_index: an integer containing the path segment index.
weight: an integer containing the weight.
Raises:
ValueError: if the path segment weights do not contain
the path segment index.
"""
if path_segment_index not in self._weight_per_index:
raise ValueError('Path segment index not set.')
self._weight_per_index[path_segment_index] += weight
if weight not in self._indexes_per_weight:
self._indexes_per_weight[weight] = []
self._indexes_per_weight[weight].append(path_segment_index) | Adds a weight for a specific path segment index.
Args:
path_segment_index: an integer containing the path segment index.
weight: an integer containing the weight.
Raises:
ValueError: if the path segment weights do not contain
the path segment index. |
def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, **kwargs):
'''
Create a snapshot from the given instance
.. versionadded:: 2016.3.0
'''
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs):
ret['comment'] = 'Failed to create new AMI {ami_name}'.format(ami_name=ami_name)
ret['result'] = False
return ret
ret['comment'] = 'Created new AMI {ami_name}'.format(ami_name=ami_name)
ret['changes']['new'] = {ami_name: ami_name}
if not wait_until_available:
return ret
starttime = time()
while True:
images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs)
if images and images[0].state == 'available':
break
if time() - starttime > wait_timeout_seconds:
if images:
ret['comment'] = 'AMI still in state {state} after timeout'.format(state=images[0].state)
else:
ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name)
ret['result'] = False
return ret
sleep(5)
return ret | Create a snapshot from the given instance
.. versionadded:: 2016.3.0 |
def get_arg_parser(cls, settings = None, option_prefix = u'--',
add_help = False):
"""Make a command-line option parser.
The returned parser may be used as a parent parser for application
argument parser.
:Parameters:
- `settings`: list of PyXMPP2 settings to consider. By default
all 'basic' settings are provided.
- `option_prefix`: custom prefix for PyXMPP2 options. E.g.
``'--xmpp'`` to differentiate them from not xmpp-related
application options.
- `add_help`: when `True` a '--help' option will be included
(probably already added in the application parser object)
:Types:
- `settings`: list of `unicode`
- `option_prefix`: `str`
- `add_help`:
:return: an argument parser object.
:returntype: :std:`argparse.ArgumentParser`
"""
# pylint: disable-msg=R0914,R0912
parser = argparse.ArgumentParser(add_help = add_help,
prefix_chars = option_prefix[0])
if settings is None:
settings = cls.list_all(basic = True)
if sys.version_info.major < 3:
# pylint: disable-msg=W0404
from locale import getpreferredencoding
encoding = getpreferredencoding()
def decode_string_option(value):
"""Decode a string option."""
return value.decode(encoding)
for name in settings:
if name not in cls._defs:
logger.debug("get_arg_parser: ignoring unknown option {0}"
.format(name))
continue
setting = cls._defs[name]
if not setting.cmdline_help:
logger.debug("get_arg_parser: option {0} has no cmdline"
.format(name))
continue
if sys.version_info.major < 3:
name = name.encode(encoding, "replace")
option = option_prefix + name.replace("_", "-")
dest = "pyxmpp2_" + name
if setting.validator:
opt_type = setting.validator
elif setting.type is unicode and sys.version_info.major < 3:
opt_type = decode_string_option
else:
opt_type = setting.type
if setting.default_d:
default_s = setting.default_d
if sys.version_info.major < 3:
default_s = default_s.encode(encoding, "replace")
elif setting.default is not None:
default_s = repr(setting.default)
else:
default_s = None
opt_help = setting.cmdline_help
if sys.version_info.major < 3:
opt_help = opt_help.encode(encoding, "replace")
if default_s:
opt_help += " (Default: {0})".format(default_s)
if opt_type is bool:
opt_action = _YesNoAction
else:
opt_action = "store"
parser.add_argument(option,
action = opt_action,
default = setting.default,
type = opt_type,
help = opt_help,
metavar = name.upper(),
dest = dest)
return parser | Make a command-line option parser.
The returned parser may be used as a parent parser for application
argument parser.
:Parameters:
- `settings`: list of PyXMPP2 settings to consider. By default
all 'basic' settings are provided.
- `option_prefix`: custom prefix for PyXMPP2 options. E.g.
``'--xmpp'`` to differentiate them from not xmpp-related
application options.
- `add_help`: when `True` a '--help' option will be included
(probably already added in the application parser object)
:Types:
- `settings`: list of `unicode`
- `option_prefix`: `str`
- `add_help`:
:return: an argument parser object.
:returntype: :std:`argparse.ArgumentParser` |
def recall_score(y_true, y_pred, average='micro', suffix=False):
"""Compute the recall.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
Returns:
score : float.
Example:
>>> from seqeval.metrics import recall_score
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> recall_score(y_true, y_pred)
0.50
"""
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
nb_correct = len(true_entities & pred_entities)
nb_true = len(true_entities)
score = nb_correct / nb_true if nb_true > 0 else 0
return score | Compute the recall.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
Returns:
score : float.
Example:
>>> from seqeval.metrics import recall_score
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> recall_score(y_true, y_pred)
0.50 |