Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k) |
---|---|---|
3,700 | def simple_db_engine(reader=None, srnos=None):
if reader is None:
reader = dbreader.Reader()
logger.debug("No reader provided. Creating one myself.")
info_dict = dict()
info_dict["filenames"] = [reader.get_cell_name(srno) for srno in srnos]
info_dict["masses"] = [reader.get_mass(srno) for srno in srnos]
info_dict["total_masses"] = [reader.get_total_mass(srno) for srno in srnos]
info_dict["loadings"] = [reader.get_loading(srno) for srno in srnos]
info_dict["fixed"] = [reader.inspect_hd5f_fixed(srno) for srno in srnos]
info_dict["labels"] = [reader.get_label(srno) for srno in srnos]
info_dict["cell_type"] = [reader.get_cell_type(srno) for srno in srnos]
info_dict["raw_file_names"] = []
info_dict["cellpy_file_names"] = []
logger.debug("created info-dict")
for key in list(info_dict.keys()):
logger.debug("%s: %s" % (key, str(info_dict[key])))
_groups = [reader.get_group(srno) for srno in srnos]
logger.debug(">\ngroups: %s" % str(_groups))
groups = helper.fix_groups(_groups)
info_dict["groups"] = groups
my_timer_start = time.time()
filename_cache = []
info_dict = helper.find_files(info_dict, filename_cache)
my_timer_end = time.time()
if (my_timer_end - my_timer_start) > 5.0:
logger.info(
"The function _find_files was very slow. "
"Save your info_df so you don't have to run it again!"
)
info_df = pd.DataFrame(info_dict)
info_df = info_df.sort_values(["groups", "filenames"])
info_df = helper.make_unique_groups(info_df)
info_df["labels"] = info_df["filenames"].apply(helper.create_labels)
info_df.set_index("filenames", inplace=True)
return info_df | Engine that gets values from the simple excel 'db'. |
3,701 | def djfrontend_jquery_formset(version=None):
if version is None:
version = getattr(settings, 'DJFRONTEND_JQUERY_FORMSET', DJFRONTEND_JQUERY_FORMSET_DEFAULT)  # setting name assumed
if getattr(settings, 'TEMPLATE_DEBUG', False):
template = '<script src="{static}djfrontend/js/jquery/jquery.formset/{v}/jquery.formset.js"></script>'
else:
template = (
'<script src="{static}djfrontend/js/jquery/jquery.formset/{v}/jquery.formset.min.js"></script>')
return format_html(template, static=_static_url, v=version) | Returns the jQuery Dynamic Formset plugin file according to version number.
TEMPLATE_DEBUG returns full file, otherwise returns minified file. |
3,702 | def get_project_totals(entries, date_headers, hour_type=None, overtime=False,
total_column=False, by='user'):
totals = [0 for date in date_headers]
rows = []
for thing, thing_entries in groupby(entries, lambda x: x[by]):
name, thing_id, date_dict = date_totals(thing_entries, by)
dates = []
for index, day in enumerate(date_headers):
if isinstance(day, datetime.datetime):
day = day.date()
if hour_type:
total = date_dict.get(day, {}).get(hour_type, 0)
dates.append(total)
else:
billable = date_dict.get(day, {}).get('billable', 0)
nonbillable = date_dict.get(day, {}).get('nonbillable', 0)
total = billable + nonbillable
dates.append({
'day': day,
'billable': billable,
'nonbillable': nonbillable,
'total': total
})
totals[index] += total
if total_column:
dates.append(sum(dates))
if overtime:
dates.append(find_overtime(dates))
dates = [date or '' for date in dates]
rows.append((name, thing_id, dates))
if total_column:
totals.append(sum(totals))
totals = [t or '' for t in totals]
yield (rows, totals) | Yield hour totals grouped by user and date. Optionally including overtime. |
3,703 | def _create_storage_profile(self):
if self.image_publisher:
storage_profile = {
'image_reference': {
'publisher': self.image_publisher,
'offer': self.image_offer,
'sku': self.image_sku,
'version': self.image_version
},
}
else:
for image in self.compute.images.list():
if image.name == self.image_id:
image_id = image.id
break
else:
raise AzureCloudException(
'Image with name {0} not found.'.format(self.image_id)  # message text assumed
)
storage_profile = {
'image_reference': {
'id': image_id
}
}
return storage_profile | Create the storage profile for the instance.
Image reference can be a custom image name or a published urn. |
3,704 | def inv_n(x):
assert x.ndim == 3
assert x.shape[1] == x.shape[2]
c = np.array([ [cofactor_n(x, j, i) * (1 - ((i+j) % 2)*2)
for j in range(x.shape[1])]
for i in range(x.shape[1])]).transpose(2,0,1)
return c / det_n(x)[:, np.newaxis, np.newaxis] | given N matrices, return N inverses |
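For context, a minimal sketch of what `inv_n` computes, using NumPy's built-in batched inverse as a cross-check; `cofactor_n` and `det_n` are assumed helpers from the same module, so they are not called here.

```python
import numpy as np

# A batch of N square matrices, shape (N, 3, 3).
rng = np.random.default_rng(0)
x = rng.normal(size=(4, 3, 3))

# np.linalg.inv already broadcasts over the leading axis; this is the
# behaviour inv_n reimplements via the cofactor expansion above.
invs = np.linalg.inv(x)

# Every product x[i] @ invs[i] should be (numerically) the identity.
assert np.allclose(np.einsum('nij,njk->nik', x, invs), np.eye(3))
```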
3,705 | def preprocess(self, x):
if (six.PY2 and isinstance(x, six.string_types)
and not isinstance(x, six.text_type)):
x = Pipeline(lambda s: six.text_type(s, encoding='utf-8'))(x)
if self.sequential and isinstance(x, six.text_type):
x = self.tokenize(x.rstrip())
if self.lower:
x = Pipeline(six.text_type.lower)(x)
if self.sequential and self.use_vocab and self.stop_words is not None:
x = [w for w in x if w not in self.stop_words]
if self.preprocessing is not None:
return self.preprocessing(x)
else:
return x | Load a single example using this field, tokenizing if necessary.
If the input is a Python 2 `str`, it will be converted to Unicode
first. If `sequential=True`, it will be tokenized. Then the input
will be optionally lowercased and passed to the user-provided
`preprocessing` Pipeline. |
3,706 | def check_xml(code):
try:
xml.etree.ElementTree.fromstring(code)
except xml.etree.ElementTree.ParseError as exception:
message = '{}'.format(exception)
line_number = 0
found = re.search(r'line (\d+)', message)  # pattern assumed from typical ParseError messages
if found:
line_number = int(found.group(1))
yield (int(line_number), message) | Yield errors. |
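A small, self-contained sketch of how a checker like the one above can be driven; the `line (\d+)` pattern mirrors the reconstruction above and is an assumption, not a confirmed detail of the original tool.

```python
import re
import xml.etree.ElementTree

bad_xml = "<root><unclosed></root>"
try:
    xml.etree.ElementTree.fromstring(bad_xml)
except xml.etree.ElementTree.ParseError as exception:
    message = "{}".format(exception)           # e.g. "mismatched tag: line 1, column 22"
    found = re.search(r"line (\d+)", message)  # assumed pattern
    line_number = int(found.group(1)) if found else 0
    print(line_number, message)
```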
3,707 | def upcoming(
cls,
api_key=djstripe_settings.STRIPE_SECRET_KEY,
customer=None,
coupon=None,
subscription=None,
subscription_plan=None,
subscription_prorate=None,
subscription_proration_date=None,
subscription_quantity=None,
subscription_trial_end=None,
**kwargs
):
if customer is not None and isinstance(customer, StripeModel):
customer = customer.id
if subscription is not None and isinstance(subscription, StripeModel):
subscription = subscription.id
if subscription_plan is not None and isinstance(subscription_plan, StripeModel):
subscription_plan = subscription_plan.id
try:
upcoming_stripe_invoice = cls.stripe_class.upcoming(
api_key=api_key,
customer=customer,
coupon=coupon,
subscription=subscription,
subscription_plan=subscription_plan,
subscription_prorate=subscription_prorate,
subscription_proration_date=subscription_proration_date,
subscription_quantity=subscription_quantity,
subscription_trial_end=subscription_trial_end,
**kwargs
)
except InvalidRequestError as exc:
if str(exc) != "Nothing to invoice for customer":
raise
return
upcoming_stripe_invoice["id"] = "upcoming"
return UpcomingInvoice._create_from_stripe_object(upcoming_stripe_invoice, save=False) | Gets the upcoming preview invoice (singular) for a customer.
At any time, you can preview the upcoming
invoice for a customer. This will show you all the charges that are
pending, including subscription renewal charges, invoice item charges,
etc. It will also show you any discount that is applicable to the
customer. (Source: https://stripe.com/docs/api#upcoming_invoice)
.. important:: Note that when you are viewing an upcoming invoice, you are simply viewing a preview.
:param customer: The identifier of the customer whose upcoming invoice \
you'd like to retrieve.
:type customer: Customer or string (customer ID)
:param coupon: The code of the coupon to apply.
:type coupon: str
:param subscription: The identifier of the subscription to retrieve an \
invoice for.
:type subscription: Subscription or string (subscription ID)
:param subscription_plan: If set, the invoice returned will preview \
updating the subscription given to this plan, or creating a new \
subscription to this plan if no subscription is given.
:type subscription_plan: Plan or string (plan ID)
:param subscription_prorate: If previewing an update to a subscription, \
this decides whether the preview will show the result of applying \
prorations or not.
:type subscription_prorate: bool
:param subscription_proration_date: If previewing an update to a \
subscription, and doing proration, subscription_proration_date forces \
the proration to be calculated as though the update was done at the \
specified time.
:type subscription_proration_date: datetime
:param subscription_quantity: If provided, the invoice returned will \
preview updating or creating a subscription with that quantity.
:type subscription_quantity: int
:param subscription_trial_end: If provided, the invoice returned will \
preview updating or creating a subscription with that trial end.
:type subscription_trial_end: datetime
:returns: The upcoming preview invoice.
:rtype: UpcomingInvoice |
3,708 | def convert_all(self):
for url_record in self._url_table.get_all():
if url_record.status != Status.done:
continue
self.convert_by_record(url_record) | Convert all links in URL table. |
3,709 | def upsert_event(self, calendar_id, event):
event['start'] = format_event_time(event['start'])
event['end'] = format_event_time(event['end'])
self.request_handler.post(
endpoint='calendars/%s/events' % calendar_id, data=event) | Inserts or updates an event for the specified calendar.
:param string calendar_id: ID of calendar to insert/update event into.
:param dict event: Dictionary of event data to send to cronofy. |
3,710 | def generate_passphrase(size=12):
chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
return str(''.join(random.choice(chars) for _ in range(size))) | Return a generated string `size` long based on lowercase, uppercase,
and digit chars |
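A standalone version of the snippet above for illustration; for security-sensitive passphrases, `secrets.choice` would be the usual replacement for `random.choice`.

```python
import random
import string

def generate_passphrase(size=12):
    chars = string.ascii_lowercase + string.ascii_uppercase + string.digits
    return "".join(random.choice(chars) for _ in range(size))

print(generate_passphrase())         # e.g. 'aB9kQz31LmPw'
print(len(generate_passphrase(20)))  # 20
```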
3,711 | def bound(self, p1, p2=None):
r = Rect(p1, p2)
return Point(min(max(self.x, r.l), r.r), min(max(self.y, r.t), r.b)) | Bound this point within the rect defined by (`p1`, `p2`). |
3,712 | def s_demand(self, bus):
Svl = array([complex(g.p, g.q) for g in self.generators if
(g.bus == bus) and g.is_load], dtype=complex64)
Sd = complex(bus.p_demand, bus.q_demand)
return -sum(Svl) + Sd | Returns the total complex power demand. |
3,713 | def add_alt(self, entry):
entry = entry[7:-1]
info = entry.split(',')
if len(info) < 2:
return False
for v in info:
key, value = v.split('=', 1)
if key == 'ID':
self.alt[value] = {}
id_ = value
elif key == 'Description':
self.alt[id_]['Description'] = value
if len(info) > 4:
self.alt[id_]['Description'] += ','.join(info[4:])
break
return True | Parse and store the alternative allele field |
3,714 | def _equal_values(self, val1, val2):
if self._is_supported_matrix(val1):
if self._is_supported_matrix(val2):
_, _, hash_tuple_1 = self._serialize_matrix(val1)
_, _, hash_tuple_2 = self._serialize_matrix(val2)
return hash(hash_tuple_1) == hash(hash_tuple_2)
else:
return False
else:
return super(SparseParameter, self)._equal_values(val1, val2) | Matrices are equal if they hash to the same value. |
3,715 | def simxPackFloats(floatList):
if sys.version_info[0] == 3:
s=bytes()
for i in range(len(floatList)):
s=s+struct.pack('<f',floatList[i])
s=bytearray(s)
else:
s=''
for i in range(len(floatList)):
s+=struct.pack('<f',floatList[i])
return s | Please have a look at the function description/documentation in the V-REP user manual |
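A minimal sketch of the packing the helper performs, assuming the little-endian 32-bit float format (`'<f'`) filled in above.

```python
import struct

float_list = [1.0, 2.5, -3.25]

# Pack each value as a little-endian 32-bit float and concatenate.
packed = b"".join(struct.pack('<f', f) for f in float_list)
assert len(packed) == 4 * len(float_list)

# Round-trip to confirm the layout.
unpacked = [struct.unpack('<f', packed[i:i + 4])[0]
            for i in range(0, len(packed), 4)]
assert unpacked == [1.0, 2.5, -3.25]
```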
3,716 | def open_zip(cls, dbname, zipped, encoding=None, fieldnames_lower=True, case_sensitive=True):
with ZipFile(zipped, 'r') as zip_:
if not case_sensitive:
dbname = pick_name(dbname, zip_.namelist())
with zip_.open(dbname) as f:
yield cls(f, encoding=encoding, fieldnames_lower=fieldnames_lower) | Context manager. Allows opening a .dbf file from zip archive.
.. code-block::
with Dbf.open_zip('some.dbf', 'myarch.zip') as dbf:
...
:param str|unicode dbname: .dbf file name
:param str|unicode|file zipped: .zip file path or a file-like object.
:param str|unicode encoding: Encoding used by DB.
This will be used if there's no encoding information in the DB itself.
:param bool fieldnames_lower: Lowercase field names.
:param bool case_sensitive: Whether DB filename is case sensitive.
:rtype: Dbf |
3,717 | def union(cls):
assert isinstance(cls, type)
return type(cls.__name__, (cls,), {
'_is_union': True,  # attribute name assumed
}) | A class decorator which other classes can specify that they can resolve to with `UnionRule`.
Annotating a class with @union allows other classes to use a UnionRule() instance to indicate that
they can be resolved to this base union class. This class will never be instantiated, and should
have no members -- it is used as a tag only, and will be replaced with whatever object is passed
in as the subject of a `yield Get(...)`. See the following example:
@union
class UnionBase(object): pass
@rule(B, [X])
def get_some_union_type(x):
result = yield Get(ResultType, UnionBase, x.f())
# ...
If there exists a single path from (whatever type the expression `x.f()` returns) -> `ResultType`
in the rule graph, the engine will retrieve and execute that path to produce a `ResultType` from
`x.f()`. This requires also that whatever type `x.f()` returns was registered as a union member of
`UnionBase` with a `UnionRule`.
Unions allow @rule bodies to be written without knowledge of what types may eventually be provided
as input -- rather, they let the engine check that there is a valid path to the desired result. |
3,718 | def do_find(self, arg):
if not arg:
raise CmdError("missing parameter: string")
process = self.get_process_from_prefix()
self.find_in_memory(arg, process) | [~process] f <string> - find the string in the process memory
[~process] find <string> - find the string in the process memory |
3,719 | def write(self, bytes_):
string = bytes_.decode(self._encoding)
self._file.write(string) | Write bytes to the file. |
3,720 | def create_stack_user(self):
self.run(, success_status=(0, 9))
self.create_file(, )
self.run()
self.run()
self.run()
self.run()
self.run()
self.ssh_pool.build_ssh_client(self.hostname, ,
self._key_filename,
self.via_ip) | Create the stack user on the machine. |
3,721 | def __set_transaction_detail(self, *args, **kwargs):
customer_transaction_id = kwargs.get('customer_transaction_id', None)
if customer_transaction_id:
transaction_detail = self.client.factory.create()
transaction_detail.CustomerTransactionId = customer_transaction_id
self.logger.debug(transaction_detail)
self.TransactionDetail = transaction_detail | Checks kwargs for 'customer_transaction_id' and sets it if present. |
3,722 | def shift_or_mirror_into_invertible_domain(self, solution_genotype,
copy=False):
assert solution_genotype is not None
if copy:
y = [val for val in solution_genotype]
else:
y = solution_genotype
if isinstance(y, np.ndarray) and not isinstance(y[0], float):
y = array(y, dtype=float)
for i in rglen(y):
lb = self._lb[self._index(i)]
ub = self._ub[self._index(i)]
al = self._al[self._index(i)]
au = self._au[self._index(i)]
if y[i] < lb - 2 * al - (ub - lb) / 2.0 or y[i] > ub + 2 * au + (ub - lb) / 2.0:
r = 2 * (ub - lb + al + au)
s = lb - 2 * al - (ub - lb) / 2.0
y[i] -= r * ((y[i] - s) // r)
if y[i] > ub + au:
y[i] -= 2 * (y[i] - ub - au)
if y[i] < lb - al:
y[i] += 2 * (lb - al - y[i])
return y | Details: input ``solution_genotype`` is changed. The domain is
[lb - al, ub + au] and in [lb - 2*al - (ub - lb) / 2, lb - al]
mirroring is applied. |
3,723 | def d_step(self, true_frames, gen_frames):
hparam_to_disc_loss = {
"least_squares": gan_losses.least_squares_discriminator_loss,
"cross_entropy": gan_losses.modified_discriminator_loss,
"wasserstein": gan_losses.wasserstein_discriminator_loss}
_, batch_size, _, _, _ = common_layers.shape_list(true_frames)
all_frames = tf.concat(
[true_frames, tf.stop_gradient(gen_frames)], axis=1)
all_logits = self.discriminator(all_frames)
true_logits, fake_logits_stop = \
all_logits[:batch_size], all_logits[batch_size:]
mean_true_logits = tf.reduce_mean(true_logits)
tf.summary.scalar("mean_true_logits", mean_true_logits)
mean_fake_logits_stop = tf.reduce_mean(fake_logits_stop)
tf.summary.scalar("mean_fake_logits_stop", mean_fake_logits_stop)
discriminator_loss_func = hparam_to_disc_loss[self.hparams.gan_loss]
gan_d_loss = discriminator_loss_func(
discriminator_real_outputs=true_logits,
discriminator_gen_outputs=fake_logits_stop,
add_summaries=True)
return gan_d_loss, true_logits, fake_logits_stop | Performs the discriminator step in computing the GAN loss.
Applies stop-gradient to the generated frames while computing the
discriminator loss to make sure that the gradients are not back-propagated
to the generator. This makes sure that only the discriminator is updated.
Args:
true_frames: True outputs
gen_frames: Generated frames.
Returns:
d_loss: Loss component due to the discriminator. |
3,724 | def list_documents(self, page_size=None):
parent, _ = self._parent_info()
iterator = self._client._firestore_api.list_documents(
parent,
self.id,
page_size=page_size,
show_missing=True,
metadata=self._client._rpc_metadata,
)
iterator.collection = self
iterator.item_to_value = _item_to_document_ref
return iterator | List all subdocuments of the current collection.
Args:
page_size (Optional[int]]): The maximum number of documents
in each page of results from this request. Non-positive values
are ignored. Defaults to a sensible value set by the API.
Returns:
Sequence[~.firestore_v1beta1.collection.DocumentReference]:
iterator of subdocuments of the current collection. If the
collection does not exist at the time of `snapshot`, the
iterator will be empty |
3,725 | def clear(self):
self.services.clear()
self._future_value.clear()
self.services = None
self._lock = None
self._ipopo_instance = None
self._context = None
self.requirement = None
self._key = None
self._allow_none = None
self._future_value = None
self._field = None | Cleans up the manager. The manager can't be used after this method has
been called |
3,726 | def is_readable(value, **kwargs):
try:
validators.readable(value, **kwargs)
except SyntaxError as error:
raise error
except Exception:
return False
return True | Indicate whether ``value`` is a readable file.
.. caution::
**Use of this validator is an anti-pattern and should be used with caution.**
Validating the readability of a file *before* attempting to read it
exposes your code to a bug called
`TOCTOU <https://en.wikipedia.org/wiki/Time_of_check_to_time_of_use>`_.
This particular class of bug can expose your code to **security vulnerabilities**
and so this validator should only be used if you are an advanced user.
A better pattern to use when reading from a file is to apply the principle of
EAFP ("easier to ask forgiveness than permission"), and simply attempt to
read from the file using a ``try ... except`` block:
.. code-block:: python
try:
with open('path/to/filename.txt', mode = 'r') as file_object:
# read from file here
except (OSError, IOError) as error:
# Handle an error if unable to read.
:param value: The value to evaluate.
:type value: Path-like object
:returns: ``True`` if ``value`` is valid, ``False`` if it is not.
:rtype: :class:`bool <python:bool>`
:raises SyntaxError: if ``kwargs`` contains duplicate keyword parameters or duplicates
keyword parameters passed to the underlying validator |
3,727 | def process_action(self, request, queryset):
count = 0
try:
with transaction.commit_on_success():
for obj in queryset:
self.log_action(obj, CMSLog.DELETE)
count += 1
obj.delete()
msg = "%s object%s deleted." % (count, ( if count ==1 else ))
url = self.get_done_url()
return self.render(request, redirect_url=url, message = msg)
except ProtectedError, e:
protected = []
for x in e.protected_objects:
if hasattr(x, 'delete_blocked_message'):
protected.append(x.delete_blocked_message())
else:
protected.append(u"%s - %s" % (x._meta.verbose_name, x))
msg = "Cannot delete some objects because the following objects depend on them:"
return self.render(request, error_msg = msg, errors = protected) | Deletes the object(s). Successful deletes are logged.
Returns a 'render redirect' to the result of the
`get_done_url` method.
If a ProtectedError is raised, the `render` method
is called with message explaining the error added
to the context as `protected`. |
3,728 | def __set_bp(self, aProcess):
lpAddress = self.get_address()
dwSize = self.get_size()
flNewProtect = aProcess.mquery(lpAddress).Protect
flNewProtect = flNewProtect | win32.PAGE_GUARD
aProcess.mprotect(lpAddress, dwSize, flNewProtect) | Sets the target pages as guard pages.
@type aProcess: L{Process}
@param aProcess: Process object. |
3,729 | def _create_row_labels(self):
labels = {}
for c in self._columns:
labels[c] = c
if self._alt_labels:
for k in self._alt_labels.keys():
labels[k] = self._alt_labels[k]
if self._label_suffix:
for k in labels.keys():
if k in self._nonnormal:
labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]")
elif k in self._categorical:
labels[k] = "{}, {}".format(labels[k],"n (%)")
else:
labels[k] = "{}, {}".format(labels[k],"mean (SD)")
return labels | Take the original labels for rows. Rename if alternative labels are
provided. Append label suffix if label_suffix is True.
Returns
----------
labels : dictionary
Dictionary, keys are original column name, values are final label. |
3,730 | def validate_object(obj, field_validators=None, non_field_validators=None,
schema=None, context=None):
if schema is None:
schema = {}
if context is None:
context = {}
if field_validators is None:
field_validators = ValidationDict()
if non_field_validators is None:
non_field_validators = ValidationList()
from flex.validation.schema import (
construct_schema_validators,
)
schema_validators = construct_schema_validators(schema, context)
if '$ref' in schema_validators and hasattr(schema_validators['$ref'], 'validators'):
ref_ = field_validators.pop('$ref')
for k, v in ref_.validators.items():
if k not in schema_validators:
schema_validators.add_validator(k, v)
if 'discriminator' in schema:
schema_validators = add_polymorphism_requirements(obj, schema, context, schema_validators)
del schema['discriminator']
schema_validators.update(field_validators)
schema_validators.validate_object(obj, context=context)
non_field_validators.validate_object(obj, context=context)
return obj | Takes a mapping and applies a mapping of validator functions to it
collecting and reraising any validation errors that occur. |
3,731 | def _parse_mtllibs(self):
for mtllib in self.meta.mtllibs:
try:
materials = self.material_parser_cls(
os.path.join(self.path, mtllib),
encoding=self.encoding,
strict=self.strict).materials
except IOError:
raise IOError("Failed to load mtl file:".format(os.path.join(self.path, mtllib)))
for name, material in materials.items():
self.wavefront.materials[name] = material | Load mtl files |
3,732 | def dump(self, filename):
try:
with open(filename, 'wb') as fp:
cPickle.dump(self.counters, fp)
except Exception as e:
logging.warning("can't dump counter to file %s: %s", filename, e)
return False
return True | Dump counters to file |
3,733 | def imresize(self, data, new_wd, new_ht, method='bilinear'):
old_ht, old_wd = data.shape[:2]
start_time = time.time()
if have_pilutil:
means = 'PIL'  # label assumed
zoom_x = float(new_wd) / float(old_wd)
zoom_y = float(new_ht) / float(old_ht)
if (old_wd >= new_wd) or (old_ht >= new_ht):
zoom = max(zoom_x, zoom_y)
else:
zoom = min(zoom_x, zoom_y)
newdata = imresize(data, zoom, interp=method)
else:
raise ImageError("No way to scale image smoothly")
end_time = time.time()
self.logger.debug("scaling (%s) time %.4f sec" % (
means, end_time - start_time))
return newdata | Scale an image in numpy array _data_ to the specified width and
height. A smooth scaling is preferred. |
3,734 | def _landsat_get_mtl(sceneid):
scene_params = _landsat_parse_scene_id(sceneid)
meta_file = "http://landsat-pds.s3.amazonaws.com/{}_MTL.txt".format(
scene_params["key"]
)
metadata = str(urlopen(meta_file).read().decode())
return toa_utils._parse_mtl_txt(metadata) | Get Landsat-8 MTL metadata.
Attributes
----------
sceneid : str
Landsat sceneid. For scenes after May 2017,
sceneid have to be LANDSAT_PRODUCT_ID.
Returns
-------
out : dict
returns a JSON like object with the metadata. |
3,735 | def hamming_emd(d1, d2):
N = d1.squeeze().ndim
d1, d2 = flatten(d1), flatten(d2)
return emd(d1, d2, _hamming_matrix(N)) | Return the Earth Mover's Distance between two distributions (indexed
by state, one dimension per node) using the Hamming distance between states
as the transportation cost function.
Singleton dimensions are squeezed out. |
3,736 | def _on_library_path_changed(self, renderer, path, new_library_path):
library_name = self.library_list_store[int(path)][self.KEY_STORAGE_ID]
library_config = self.core_config_model.get_current_config_value("LIBRARY_PATHS", use_preliminary=True,
default={})
library_config[library_name] = new_library_path
self.core_config_model.set_preliminary_config_value("LIBRARY_PATHS", library_config)
self._select_row_by_column_value(self.view[], self.library_list_store,
self.KEY_STORAGE_ID, library_name) | Callback handling a change of a library path
:param Gtk.CellRenderer renderer: Cell renderer showing the library path
:param path: Path of library within the list store
:param str new_library_path: New library path |
3,737 | def get_message_actions(current):
current.output = {'status': 'OK',
'code': 200,
'actions': Message.objects.get(
current.input['key']).get_actions_for(current.user)} | Returns applicable actions for current user for given message key
.. code-block:: python
# request:
{
'view':'_zops_get_message_actions',
'key': key,
}
# response:
{
'actions':[('name_string', 'cmd_string'),]
'status': string, # 'OK' for success
'code': int, # 200 for success
} |
3,738 | def _get_optimizer(self):
optim = tf.train.AdagradOptimizer(self.learning_rate)
gradients = optim.compute_gradients(self.cost)
if self.log_dir:
for name, (g, v) in zip(['W', 'C', 'bw', 'bc'], gradients):  # variable names assumed
tf.summary.histogram("{}_grad".format(name), g)
tf.summary.histogram("{}_vals".format(name), v)
return optim.apply_gradients(gradients) | Uses Adagrad to optimize the GloVe/Mittens objective,
as specified in the GloVe paper. |
3,739 | def ssn(self):
def _checksum(digits):
evensum = sum(digits[:-1:2])
oddsum = sum(digits[1::2])
return (10 - ((evensum + oddsum * 3) % 10)) % 10
digits = [7, 5, 6]
digits += self.generator.random.sample(range(10), 9)
digits.append(_checksum(digits))
digits = ''.join([str(d) for d in digits])
ssn = digits[:3] + '.' \
+ digits[3:7] + '.' \
+ digits[7:11] + '.' \
+ digits[11:]
return ssn | Returns a 13 digits Swiss SSN named AHV (German) or
AVS (French and Italian)
See: http://www.bsv.admin.ch/themen/ahv/00011/02185/ |
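A worked example of the checksum logic reused from the row above (the helper is copied verbatim); the resulting number is purely illustrative.

```python
def _checksum(digits):
    evensum = sum(digits[:-1:2])
    oddsum = sum(digits[1::2])
    return (10 - ((evensum + oddsum * 3) % 10)) % 10

digits = [7, 5, 6] + [1, 2, 3, 4, 5, 6, 7, 8, 9]   # 756 prefix + 9 chosen digits
digits.append(_checksum(digits))                   # appends 7 for this sequence
ssn = "".join(str(d) for d in digits)
formatted = ssn[:3] + "." + ssn[3:7] + "." + ssn[7:11] + "." + ssn[11:]
print(formatted)   # 756.1234.5678.97
```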
3,740 | def _validate_sample_rates(input_filepath_list, combine_type):
sample_rates = [
file_info.sample_rate(f) for f in input_filepath_list
]
if not core.all_equal(sample_rates):
raise IOError(
"Input files do not have the same sample rate. The {} combine "
"type requires that all files have the same sample rate"
.format(combine_type)
) | Check if files in input file list have the same sample rate |
3,741 | def pca_plot(pca, dt, xlabs=None, mode='scatter', lognorm=True):
nc = pca.n_components
f = np.arange(pca.n_features_)
cs = list(itertools.combinations(range(nc), 2))
ind = ~np.apply_along_axis(any, 1, np.isnan(dt))
cylim = (pca.components_.min(), pca.components_.max())
yd = cylim[1] - cylim[0]
fig, axs = plt.subplots(nc, nc, figsize=[3 * nc, nc * 3], tight_layout=True)
for x, y in zip(*np.triu_indices(nc)):
if x == y:
tax = axs[x, y]
tax.bar(f, pca.components_[x], 0.8)
tax.set_xticks([])
tax.axhline(0, zorder=-1, c=(0,0,0,0.6))
tax.set_ylim(cylim[0] - 0.2 * yd,
cylim[1] + 0.2 * yd)
for xi, yi, lab in zip(f, pca.components_[x], xlabs):
if yi > 0:
yo = yd * 0.03
va = 'bottom'
else:
yo = yd * -0.02
va = 'top'
tax.text(xi, yi + yo, lab, ha='center', va=va, rotation=90, fontsize=8)
else:
xv = dt[ind, x]
yv = dt[ind, y]
if mode == 'scatter':
axs[x, y].scatter(xv, yv, alpha=0.2)
axs[y, x].scatter(yv, xv, alpha=0.2)
if mode == 'hist2d':
if lognorm:
norm = mpl.colors.LogNorm()
else:
norm = None
axs[x, y].hist2d(xv, yv, 50, cmap=plt.cm.Blues, norm=norm)
axs[y, x].hist2d(yv, xv, 50, cmap=plt.cm.Blues, norm=norm)
if x == 0:
axs[y, x].set_ylabel('PC{}'.format(y + 1))
if y == nc - 1:
axs[y, x].set_xlabel('PC{}'.format(x + 1))
return fig, axs, xv, yv | Plot a fitted PCA, and all components. |
3,742 | def spill(self, src, dest):
if exists(dest) and not isdir(dest):
raise Exception("Not a directory: %s" % dest)
if isdir(dest):
workspace_name = re.sub(r'(\.ocrd)?\.zip$', '', basename(src))  # suffix pattern assumed
new_dest = join(dest, workspace_name)
if exists(new_dest):
raise Exception("Directory exists: %s" % new_dest)
dest = new_dest
log.info("Spilling %s to %s", src, dest)
bagdir = mkdtemp(prefix=TMP_BAGIT_PREFIX)
unzip_file_to_dir(src, bagdir)
datadir = join(bagdir, 'data')
for root, _, files in walk(datadir):
for f in files:
srcfile = join(root, f)
destdir = join(dest, relpath(root, datadir))
destfile = join(destdir, f)
if not exists(destdir):
makedirs(destdir)
log.debug("Copy %s -> %s", srcfile, destfile)
copyfile(srcfile, destfile)
rmtree(bagdir)
workspace = Workspace(self.resolver, directory=dest)
return workspace | Spill a workspace, i.e. unpack it and turn it into a workspace.
See https://ocr-d.github.com/ocrd_zip#unpacking-ocrd-zip-to-a-workspace
Arguments:
src (string): Path to OCRD-ZIP
dest (string): Path to directory to unpack data folder to |
3,743 | def create_jinja_env():
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(template_dir),
autoescape=jinja2.select_autoescape(['html'])
)
env.filters['simple_date'] = filter_simple_date
env.filters['paragraphify'] = filter_paragraphify
return env | Create a Jinja2 `~jinja2.Environment`.
Returns
-------
env : `jinja2.Environment`
Jinja2 template rendering environment, configured to use templates in
``templates/``. |
3,744 | def print(self, tag=None, name=None):
_name = name
if _name is None:
_name = 'print'
fn = streamsx.topology.functions.print_flush
if tag is not None:
tag = str(tag) + ': '
fn = lambda v : streamsx.topology.functions.print_flush(tag + str(v))
sp = self.for_each(fn, name=_name)
sp._op().sl = _SourceLocation(_source_info(), 'print')
return sp | Prints each tuple to stdout flushing after each tuple.
If `tag` is not `None` then each tuple has "tag: " prepended
to it before printing.
Args:
tag: A tag to prepend to each tuple.
name(str): Name of the resulting stream.
When `None` defaults to a generated name.
Returns:
streamsx.topology.topology.Sink: Stream termination.
.. versionadded:: 1.6.1 `tag`, `name` parameters.
.. versionchanged:: 1.7
Now returns a :py:class:`Sink` instance. |
3,745 | def get_tops(self):
tops = DefaultOrderedDict(list)
include = DefaultOrderedDict(list)
done = DefaultOrderedDict(list)
found = 0
merging_strategy = self.opts['top_file_merging_strategy']
if merging_strategy == and not self.opts[]:
if not self.opts[]:
raise SaltRenderError(
same\
)
if self.opts[]:
contents = self.client.cache_file(
self.opts[],
self.opts[]
)
if contents:
found = 1
tops[self.opts[]] = [
compile_template(
contents,
self.state.rend,
self.state.opts[],
self.state.opts[],
self.state.opts[],
saltenv=self.opts[]
)
]
else:
tops[self.opts[]] = [{}]
else:
found = 0
state_top_saltenv = self.opts.get(, False)
if state_top_saltenv \
and not isinstance(state_top_saltenv, six.string_types):
state_top_saltenv = six.text_type(state_top_saltenv)
for saltenv in [state_top_saltenv] if state_top_saltenv \
else self._get_envs():
contents = self.client.cache_file(
self.opts[],
saltenv
)
if contents:
found = found + 1
tops[saltenv].append(
compile_template(
contents,
self.state.rend,
self.state.opts[],
self.state.opts[],
self.state.opts[],
saltenv=saltenv
)
)
else:
tops[saltenv].append({})
log.debug(%s\, saltenv)
if found > 1 and merging_strategy == and not self.opts.get(, None):
log.warning(
%s\
same\
env_order\
, merging_strategy
)
if found == 0:
log.debug(
file_roots\etc/master\
file_roots\,
repr(self.state.opts[])
)
for saltenv, ctops in six.iteritems(tops):
for ctop in ctops:
if 'include' not in ctop:
continue
for sls in ctop['include']:
include[saltenv].append(sls)
ctop.pop('include')
while include:
pops = []
for saltenv, states in six.iteritems(include):
pops.append(saltenv)
if not states:
continue
for sls_match in states:
for sls in fnmatch.filter(self.avail[saltenv], sls_match):
if sls in done[saltenv]:
continue
tops[saltenv].append(
compile_template(
self.client.get_state(
sls,
saltenv
).get(, False),
self.state.rend,
self.state.opts[],
self.state.opts[],
self.state.opts[],
saltenv
)
)
done[saltenv].append(sls)
for saltenv in pops:
if saltenv in include:
include.pop(saltenv)
return tops | Gather the top files |
3,746 | def pretty_dumps(data):
try:
return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False)
except:
return json.dumps(data, sort_keys=True, indent=4, ensure_ascii=True) | Return json string in pretty format.
**Chinese documentation**
Converts the dictionary into a formatted (pretty-printed) string. |
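A quick illustration of the two branches: `ensure_ascii=False` keeps non-ASCII characters readable, while the fallback escapes them.

```python
import json

data = {"name": "König", "id": 42}
print(json.dumps(data, sort_keys=True, indent=4, ensure_ascii=False))
# {
#     "id": 42,
#     "name": "König"
# }
print(json.dumps(data, sort_keys=True, indent=4, ensure_ascii=True))
# "name": "K\u00f6nig" in the escaped variant
```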
3,747 | def calc_nested_probs(nest_coefs,
index_coefs,
design,
rows_to_obs,
rows_to_nests,
chosen_row_to_obs=None,
return_type="long_probs",
*args,
**kwargs):
try:
assert len(index_coefs.shape) <= 2
assert (len(index_coefs.shape) == 1) or (index_coefs.shape[1] == 1)
assert len(nest_coefs.shape) <= 2
assert (len(nest_coefs.shape) == 1) or (nest_coefs.shape[1] == 1)
except AssertionError:
msg = "Support for 2D index_coefs or nest_coefs not yet implemented."
raise NotImplementedError(msg)
valid_return_types = ['long_probs',
'chosen_probs',
'long_and_chosen_probs',
'all_prob_dict']
if return_type not in valid_return_types:
msg = "return_type must be one of the following values: "
raise ValueError(msg + str(valid_return_types))
chosen_probs_needed = ['chosen_probs', 'long_and_chosen_probs']
if chosen_row_to_obs is None and return_type in chosen_probs_needed:
msg = "chosen_row_to_obs is None AND return_type in {}."
raise ValueError(msg.format(chosen_probs_needed) +
"\nThis is invalid.")
index_vals = design.dot(index_coefs)
long_nest_coefs = rows_to_nests.dot(nest_coefs)
scaled_index = index_vals / long_nest_coefs
pos_inf_idx = np.isposinf(scaled_index)
neg_inf_idx = np.isneginf(scaled_index)
scaled_index[pos_inf_idx] = max_comp_value
scaled_index[neg_inf_idx] = -1 * max_comp_value
exp_scaled_index = np.exp(scaled_index)
inf_idx = np.isposinf(exp_scaled_index)
exp_scaled_index[inf_idx] = max_comp_value
zero_idx = (exp_scaled_index == 0)
exp_scaled_index[zero_idx] = min_comp_value
ind_exp_sums_per_nest = (rows_to_obs.T *
rows_to_nests.multiply(exp_scaled_index[:, None]))
if isinstance(ind_exp_sums_per_nest, np.matrixlib.defmatrix.matrix):
ind_exp_sums_per_nest = np.asarray(ind_exp_sums_per_nest)
elif issparse(ind_exp_sums_per_nest):
ind_exp_sums_per_nest = ind_exp_sums_per_nest.toarray()
inf_idx = np.isposinf(ind_exp_sums_per_nest)
ind_exp_sums_per_nest[inf_idx] = max_comp_value
long_exp_sums_per_nest = rows_to_obs.dot(ind_exp_sums_per_nest)
if isinstance(long_exp_sums_per_nest, np.matrixlib.defmatrix.matrix):
long_exp_sums_per_nest = np.asarray(long_exp_sums_per_nest)
long_exp_sums = (rows_to_nests.multiply(long_exp_sums_per_nest)
.sum(axis=1)
.A).ravel()
ind_denom = (np.power(ind_exp_sums_per_nest,
nest_coefs[None, :])
.sum(axis=1))
inf_idx = np.isposinf(ind_denom)
ind_denom[inf_idx] = max_comp_value
zero_idx = (ind_denom == 0)
ind_denom[zero_idx] = min_comp_value
long_denom = rows_to_obs.dot(ind_denom)
long_denom.ravel()
long_numerators = (exp_scaled_index *
np.power(long_exp_sums,
(long_nest_coefs - 1)))
inf_idx = np.isposinf(long_numerators)
long_numerators[inf_idx] = max_comp_value
zero_idx = (long_numerators == 0)
long_numerators[zero_idx] = min_comp_value
long_probs = (long_numerators / long_denom).ravel()
long_probs[np.where(long_probs == 0)] = min_comp_value
if chosen_row_to_obs is None:
chosen_probs = None
else:
chosen_probs = (chosen_row_to_obs.transpose()
.dot(long_probs))
chosen_probs = np.asarray(chosen_probs).ravel()
if return_type == 'long_and_chosen_probs':
return chosen_probs, long_probs
elif return_type == 'long_probs':
return long_probs
elif return_type == 'chosen_probs':
return chosen_probs
elif return_type == 'all_prob_dict':
prob_dict = {}
prob_dict["long_probs"] = long_probs
prob_dict["chosen_probs"] = chosen_probs
prob_given_nest = exp_scaled_index / long_exp_sums
zero_idx = (prob_given_nest == 0)
prob_given_nest[zero_idx] = min_comp_value
nest_choice_probs = (np.power(ind_exp_sums_per_nest,
nest_coefs[None, :]) /
ind_denom[:, None])
zero_idx = (nest_choice_probs == 0)
nest_choice_probs[zero_idx] = min_comp_value
prob_dict["prob_given_nest"] = prob_given_nest
prob_dict["nest_choice_probs"] = nest_choice_probs
prob_dict["ind_sums_per_nest"] = ind_exp_sums_per_nest
return prob_dict | Parameters
----------
nest_coefs : 1D or 2D ndarray.
All elements should by ints, floats, or longs. If 1D, should have 1
element for each nesting coefficient being estimated. If 2D, should
have 1 column for each set of nesting coefficients being used to
predict the probabilities of each alternative being chosen. There
should be one row per nesting coefficient. Elements denote the inverse
of the scale coefficients for each of the lower level nests.
index_coefs : 1D or 2D ndarray.
All elements should by ints, floats, or longs. If 1D, should have 1
element for each utility coefficient being estimated (i.e.
num_features). If 2D, should have 1 column for each set of coefficients
being used to predict the probabilities of each alternative being
chosen. There should be one row per index coefficient.
design : 2D ndarray.
There should be one row per observation per available alternative.
There should be one column per utility coefficient being estimated. All
elements should be ints, floats, or longs.
rows_to_obs : 2D scipy sparse array.
There should be one row per observation per available alternative and
one column per observation. This matrix maps the rows of the design
matrix to the unique observations (on the columns).
rows_to_nests : 2D scipy sparse array.
There should with one row per observation per available alternative and
one column per nest. This matrix maps the rows of the design matrix to
the unique nests (on the columns).
chosen_row_to_obs : 2D scipy sparse array, or None, optional.
There should be one row per observation per available alternative and
one column per observation. This matrix indicates, for each observation
(on the columns), which rows of the design matrix were the realized
outcome. If an array is passed then an array of shape
(num_observations,) can be returned and each element will be the
probability of the realized outcome of the given observation.
Default == None.
return_type : str, optional.
Indicates what object(s) are to be returned from the function. Valid
values are: `['long_probs', 'chosen_probs', 'long_and_chosen_probs',
'all_prob_dict']`. If `long_probs`, the long format probabilities (a 1D
numpy array with one element per observation per available alternative)
will be returned. If `chosen_probs`, a 1D numpy array with one element
per observation will be returned, where the values are the
probabilities of the chosen alternative for the given observation. If
`long_and_chosen_probs`, a tuple of chosen_probs and long_probs will be
returned. If `all_prob_dict`, a dictionary will be returned. The values
will all be 1D numpy arrays of probabilities dictated by the value's
corresponding key. The keys will be `long_probs`, `nest_choice_probs`,
`prob_given_nest`, and `chosen_probs`. If chosen_row_to_obs is None,
then `chosen_probs` will be None. If `chosen_row_to_obs` is passed,
then `chosen_probs` will be a 1D array as described above.
`nest_choice_probs` is of the same shape as `rows_to_nests` and it
denotes the probability of each individual choosing each of the
possible nests. `prob_given_nest` is of the same shape as `long_probs`
and it denotes the probability of the individual associated with a
given row choosing the alternative associated with that row, given that
the individual chooses the nest that contains the given alternative.
Default == `long_probs`.
Returns
-------
See above for documentation of the `return_type` kwarg. |
3,748 | def find_usage(self):
logger.debug("Checking usage for service %s", self.service_name)
self.connect()
for lim in self.limits.values():
lim._reset_usage()
try:
self._find_delivery_streams()
except EndpointConnectionError as ex:
logger.warning(
, ex
)
self._have_usage = True
logger.debug("Done checking usage.") | Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`. |
3,749 | def read(self, length):
data = bytearray()
while len(data) != length:
chunk = self.sock.recv(length - len(data))
if not chunk:
raise ConnectionError()
data += chunk
return data | Read as many bytes from socket as specified in length.
Loop as long as every byte is read unless exception is raised. |
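A self-contained sketch of the same exact-length read loop over a local socket pair; names here are illustrative only.

```python
import socket

def read_exact(sock, length):
    data = bytearray()
    while len(data) != length:
        chunk = sock.recv(length - len(data))
        if not chunk:  # peer closed before enough bytes arrived
            raise ConnectionError("socket closed after %d of %d bytes" % (len(data), length))
        data += chunk
    return bytes(data)

a, b = socket.socketpair()
a.sendall(b"hello world")
print(read_exact(b, 5))   # b'hello'
print(read_exact(b, 6))   # b' world'
a.close(); b.close()
```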
3,750 | def get_index(self, index, type, alias=None, typed=None, read_only=True, kwargs=None):
if kwargs.tjson != None:
Log.error("used `typed` parameter, not `tjson`")
if read_only:
aliases = wrap(self.get_aliases())
if index in aliases.index:
pass
elif index in aliases.alias:
match = [a for a in aliases if a.alias == index][0]
kwargs.alias = match.alias
kwargs.index = match.index
else:
Log.error("Can not find index {{index_name}}", index_name=kwargs.index)
return Index(kwargs=kwargs, cluster=self)
else:
best = self.get_best_matching_index(index, alias)
if not best:
Log.error("Can not find index {{index_name}}", index_name=kwargs.index)
if best.alias != None:
kwargs.alias = best.alias
kwargs.index = best.index
elif kwargs.alias == None:
kwargs.alias = kwargs.index
kwargs.index = best.index
return Index(kwargs=kwargs, cluster=self) | TESTS THAT THE INDEX EXISTS BEFORE RETURNING A HANDLE |
3,751 | def check(self):
for tool in (, , , , , , , ):
if not self.pathfinder.exists(tool):
raise RuntimeError("Dependency {} is missing".format(tool)) | Check if data and third party tools are available
:raises: RuntimeError |
3,752 | def predictor(self, (i, j, A, alpha, Bb)):
"Add to chart any rules for B that could help extend this edge."
B = Bb[0]
if B in self.grammar.rules:
for rhs in self.grammar.rewrites_for(B):
self.add_edge([j, j, B, [], rhs]) | Add to chart any rules for B that could help extend this edge. |
3,753 | def find(self, path, all=False):
matches = []
for prefix, root in self.locations:
if root not in searched_locations:
searched_locations.append(root)
matched_path = self.find_location(root, path, prefix)
if matched_path:
if not all:
return matched_path
matches.append(matched_path)
return matches | Looks for files in the extra locations
as defined in ``MEDIA_FIXTURES_FILES_DIRS``. |
3,754 | def make_tempfile (self, want='handle', resolution='try_unlink', suffix='', **kwargs):
if want not in ('handle', 'path'):
raise ValueError ('unrecognized want %r' % (want,))  # message text assumed
if resolution not in ('try_unlink', 'unlink', 'keep', 'overwrite'):
raise ValueError ('unrecognized resolution %r' % (resolution,))  # message text assumed
return Path._PathTempfileContextManager (self, want, resolution, suffix, kwargs) | Get a context manager that creates and cleans up a uniquely-named temporary
file with a name similar to this path.
This function returns a context manager that creates a secure
temporary file with a path similar to *self*. In particular, if
``str(self)`` is something like ``foo/bar``, the path of the temporary
file will be something like ``foo/bar.ame8_2``.
The object returned by the context manager depends on the *want* argument:
``"handle"``
An open file-like object is returned. This is the object returned by
:class:`tempfile.NamedTemporaryFile`. Its name on the filesystem is
accessible as a string as its `name` attribute, or (a customization here)
as a :class:`Path` instance as its `path` attribute.
``"path"``
The temporary file is created as in ``"handle"``, but is then immediately
closed. A :class:`Path` instance pointing to the path of the temporary file is
instead returned.
If an exception occurs inside the context manager block, the temporary file is
left lying around. Otherwise, what happens to it upon exit from the context
manager depends on the *resolution* argument:
``"try_unlink"``
Call :meth:`try_unlink` on the temporary file — no exception is raised if
the file did not exist.
``"unlink"``
Call :meth:`unlink` on the temporary file — an exception is raised if
the file did not exist.
``"keep"``
The temporary file is left lying around.
``"overwrite"``
The temporary file is :meth:`rename`-d to overwrite *self*.
For instance, when rewriting important files, it’s typical to write
the new data to a temporary file, and only rename the temporary file
to the final destination at the end — that way, if a problem happens
while writing the new data, the original file is left unmodified;
otherwise you’d be stuck with a partially-written version of the file.
This pattern can be accomplished with::
p = Path ('path/to/important/file')
with p.make_tempfile (resolution='overwrite', mode='wt') as h:
print ('important stuff goes here', file=h)
The *suffix* argument is appended to the temporary file name after the
random portion. It defaults to the empty string. If you want it to
operate as a typical filename suffix, include a leading ``"."``.
Other **kwargs** are passed to :class:`tempfile.NamedTemporaryFile`. |
3,755 | def prepare(self):
self.scope = 0
self.mapping = deque([0])
self._handler = [i() for i in sorted(self.handlers, key=lambda handler: handler.priority)] | Prepare the ordered list of transformers and reset context state to initial. |
3,756 | def numpy_to_data_array(ary, *, var_name="data", coords=None, dims=None):
default_dims = ["chain", "draw"]
ary = np.atleast_2d(ary)
n_chains, n_samples, *shape = ary.shape
if n_chains > n_samples:
warnings.warn(
"More chains ({n_chains}) than draws ({n_samples}). "
"Passed array should have shape (chains, draws, *shape)".format(
n_chains=n_chains, n_samples=n_samples
),
SyntaxWarning,
)
dims, coords = generate_dims_coords(
shape, var_name, dims=dims, coords=coords, default_dims=default_dims
)
if "draw" not in dims:
dims = ["draw"] + dims
if "chain" not in dims:
dims = ["chain"] + dims
if "chain" not in coords:
coords["chain"] = np.arange(n_chains)
if "draw" not in coords:
coords["draw"] = np.arange(n_samples)
coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in dims}
return xr.DataArray(ary, coords=coords, dims=dims) | Convert a numpy array to an xarray.DataArray.
The first two dimensions will be (chain, draw), and any remaining
dimensions will be "shape".
If the numpy array is 1d, this dimension is interpreted as draw
If the numpy array is 2d, it is interpreted as (chain, draw)
If the numpy array is 3 or more dimensions, the last dimensions are kept as shapes.
Parameters
----------
ary : np.ndarray
A numpy array. If it has 2 or more dimensions, the first dimension should be
independent chains from a simulation. Use `np.expand_dims(ary, 0)` to add a
single dimension to the front if there is only 1 chain.
var_name : str
If there are no dims passed, this string is used to name dimensions
coords : dict[str, iterable]
A dictionary containing the values that are used as index. The key
is the name of the dimension, the values are the index values.
dims : List(str)
A list of coordinate names for the variable
Returns
-------
xr.DataArray
Will have the same data as passed, but with coordinates and dimensions |
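A sketch of the same idea with plain xarray (assuming xarray and NumPy are available): a `(chain, draw, *shape)` array gets labelled dimensions and integer coordinates.

```python
import numpy as np
import xarray as xr

ary = np.random.randn(4, 100, 3)   # 4 chains, 100 draws, 3-vector parameter
coords = {
    "chain": np.arange(4),
    "draw": np.arange(100),
    "theta_dim_0": np.arange(3),
}
da = xr.DataArray(ary, coords=coords, dims=["chain", "draw", "theta_dim_0"])
print(da.dims)                 # ('chain', 'draw', 'theta_dim_0')
print(da.sel(chain=0).shape)   # (100, 3)
```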
3,757 | def venv_pth(self, dirs):
text = StringIO.StringIO()
text.write("
for path in dirs:
text.write(.format(path))
put(text, os.path.join(self.site_packages_dir(), ), mode=0664) | Add the directories in `dirs` to the `sys.path`. A venv.pth file
will be written in the site-packages dir of this virtualenv to add
dirs to sys.path.
dirs: a list of directories. |
3,758 | def command_upgrade(self):
if len(self.args) == 1 and self.args[0] == "upgrade":
Initialization(False).upgrade(only="")
elif (len(self.args) == 2 and self.args[0] == "upgrade" and
self.args[1].startswith("--only=")):
repos = self.args[1].split("=")[-1].split(",")
for rp in repos:
if rp not in self.meta.repositories:
repos.remove(rp)
Initialization(False).upgrade(repos)
else:
usage("") | Recreate repositories package lists |
3,759 | def def_links(mobj):
fdict = json_load(os.path.join("data", "requirements.json"))
sdeps = sorted(fdict.keys())
olines = []
for item in sdeps:
olines.append(
".. _{name}: {url}\n".format(
name=fdict[item]["name"], url=fdict[item]["url"]
)
)
ret = []
for line in olines:
wobj = textwrap.wrap(line, width=LINE_WIDTH, subsequent_indent=" ")
ret.append("\n".join([item for item in wobj]))
mobj.out("\n".join(ret)) | Define Sphinx requirements links. |
3,760 | def export(self, name, columns, points):
WHITELIST = '_-' + string.ascii_letters + string.digits
SUBSTITUTE = '_'
def whitelisted(s,
whitelist=WHITELIST,
substitute=SUBSTITUTE):
return ''.join(c if c in whitelist else substitute for c in s)
for sensor, value in zip(columns, points):
try:
sensor = [whitelisted(name) for name in sensor.split('.')]
tobeexport = [self.topic, self.hostname, name]
tobeexport.extend(sensor)
topic = '/'.join(tobeexport)
self.client.publish(topic, value)
except Exception as e:
logger.error("Can not export stats to MQTT server (%s)" % e) | Write the points in MQTT. |
3,761 | def get_minimum_size(self, data):
size = self.element.get_minimum_size(data)
if self.angle in (RotateLM.NORMAL, RotateLM.UPSIDE_DOWN):
return size
else:
return datatypes.Point(size.y, size.x) | Returns the rotated minimum size. |
3,762 | def current(cls):
name = socket.getfqdn()
ip = socket.gethostbyname(name)
return cls(name, ip) | Helper method for getting the current peer of whichever host we're
running on. |
3,763 | def _auth_session(self, username, password):
api = self.api[self.account][]
endpoint = api.get(, self.api[self.account][])
session = requests.Session()
session_retries = Retry(total=10, backoff_factor=0.5)
session_adapter = requests.adapters.HTTPAdapter(max_retries=session_retries)
session.mount(, session_adapter)
response = session.request(, endpoint + api[].get(, ))
dom = Provider._filter_dom(response.text, api[])
data = Provider._extract_hidden_data(dom)
data[api[]], data[api[]] = username, password
response = session.request(, endpoint + api[][], data=data)
if Provider._filter_dom(response.text, api[]):
LOGGER.error(%s\
,
self.account, username)
raise AssertionError
LOGGER.info(%s\,
self.account, username)
return session | Creates session to Hetzner account, authenticates with given credentials and
returns the session, if authentication was successful. Otherwise raises error. |
3,764 | def get_representative_cases(self):
return (self.get_declined(decl_utils.Case.nominative, decl_utils.Number.singular),
self.get_declined(decl_utils.Case.genitive, decl_utils.Number.singular),
self.get_declined(decl_utils.Case.nominative, decl_utils.Number.plural)) | >>> armr = OldNorseNoun("armr", decl_utils.Gender.masculine)
>>> armr.set_representative_cases("armr", "arms", "armar")
>>> armr.get_representative_cases()
('armr', 'arms', 'armar')
:return: nominative singular, genetive singular, nominative plural |
3,765 | def list_config(root_package = 'vlcp'):
pkg = __import__(root_package, fromlist=[])
return_dict = OrderedDict()
for imp, module, _ in walk_packages(pkg.__path__, root_package + '.'):
m = __import__(module, fromlist = [])
for name, v in vars(m).items():
if v is not None and isinstance(v, type) and issubclass(v, Configurable) \
and v is not Configurable \
and hasattr(v, ) and in v.__dict__ \
and v.__module__ == module:
configkey = v.__dict__[]
if configkey not in return_dict:
configs = OrderedDict()
v2 = v
parents = [v2]
while True:
parent = None
for c in v2.__bases__:
if issubclass(c, Configurable):
parent = c
if parent is None or parent is Configurable:
break
if hasattr(parent, ) and not in parent.__dict__:
parents.append(parent)
v2 = parent
else:
break
for v2 in reversed(parents):
tmp_configs = {}
for k, default_value in v2.__dict__.items():
if k.startswith('_default_'):
config_attr = k[len('_default_'):]
if config_attr in v.__dict__:
continue
configname = configkey + '.' + config_attr
tmp_configs.setdefault(configname, OrderedDict())[] = \
pformat(default_value, width=10)
lines, _ = getsourcelines(v2)
last_remark = []
for l in lines:
l = l.strip()
if not l:
continue
if l.startswith('#'):
last_remark.append(l[1:])
else:
if l.startswith('_default_'):
key, sep, _ = l.partition('=')
if sep and key.startswith('_default_'):
configname = configkey + '.' + key[len('_default_'):].strip()
if configname in tmp_configs and configname not in configs:
configs[configname] = tmp_configs.pop(configname)
if configname in configs and last_remark:
configs[configname][] = cleandoc( + .join(last_remark))
del last_remark[:]
for key in tmp_configs:
if key not in configs:
configs[key] = tmp_configs[key]
if configs:
return_dict[configkey] = OrderedDict(((, v.__module__ + + name),
(, getdoc(v)),
(, configs)))
return return_dict | Walk through all the sub modules, find subclasses of vlcp.config.Configurable,
list their available configurations through _default_ prefix |
3,766 | def _apply_role_tree(self, perm_tree, role):
role_permissions = role.get_permissions()
for perm in role_permissions:
self._traverse_tree(perm_tree, perm)['checked'] = True
return perm_tree | In permission tree, sets `'checked': True` for the permissions that the role has. |
3,767 | def smove(self, src, dst, value):
src_set = self._get_set(src, 'SMOVE')
dst_set = self._get_set(dst, 'SMOVE')
value = self._encode(value)
if value not in src_set:
return False
src_set.discard(value)
dst_set.add(value)
self.redis[self._encode(src)], self.redis[self._encode(dst)] = src_set, dst_set
return True | Emulate smove. |
3,768 | def _check_for_api_errors(geocoding_results):
status_result = geocoding_results.get("STATUS", {})
if "NO_RESULTS" in status_result.get("status", ""):
return
api_call_success = status_result.get("status", "") == "SUCCESS"
if not api_call_success:
access_error = status_result.get("access")
access_error_to_exception = {
: GeocoderAuthenticationFailure,
: GeocoderQuotaExceeded,
}
exception_cls = access_error_to_exception.get(
access_error, GeocoderServiceError
)
raise exception_cls(access_error) | Raise any exceptions if there were problems reported
in the api response. |
3,769 | def check_list_type(objects, allowed_type, name, allow_none=True):
if objects is None:
if not allow_none:
raise TypeError('%s is None, which is not allowed' % name)
return objects
if not isinstance(objects, (tuple, list)):
raise TypeError('%s is not a list' % name)
if not all(isinstance(i, allowed_type) for i in objects):
type_list = sorted(list(set(type(obj) for obj in objects)))
raise TypeError("%s contains types that don't match %s: %s" %
(name, allowed_type.__name__, type_list))
return objects | Verify that objects in list are of the allowed type or raise TypeError.
Args:
objects: The list of objects to check.
allowed_type: The allowed type of items in 'settings'.
name: Name of the list of objects, added to the exception.
allow_none: If set, None is also allowed.
Raises:
TypeError: if object is not of the allowed type.
Returns:
The list of objects, for convenient use in assignment. |
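Usage sketch, assuming the validator above is in scope; the exact wording of the error messages is reconstructed, so treat the printed text as illustrative.

```python
labels = check_list_type(["a", "b"], str, "labels")   # returned unchanged
print(labels)                                         # ['a', 'b']

try:
    check_list_type(["a", "b", 3], str, "labels")
except TypeError as error:
    print(error)   # complains that an int slipped into a list of str

print(check_list_type(None, str, "labels"))           # None is allowed by default
```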
3,770 | def register(cls):
registry_entry = RegistryEntry(category = cls.category, namespace = cls.namespace, name = cls.name, cls=cls)
if registry_entry not in registry and not exists_in_registry(cls.category, cls.namespace, cls.name):
registry.append(registry_entry)
else:
log.warn("Class {0} already in registry".format(cls)) | Register a given model in the registry |
3,771 | def addAsn1MibSource(self, *asn1Sources, **kwargs):
if self._asn1SourcesToAdd is None:
self._asn1SourcesToAdd = asn1Sources
else:
self._asn1SourcesToAdd += asn1Sources
if self._asn1SourcesOptions:
self._asn1SourcesOptions.update(kwargs)
else:
self._asn1SourcesOptions = kwargs
return self | Adds path to a repository to search ASN.1 MIB files.
Parameters
----------
*asn1Sources :
one or more URL in form of :py:obj:`str` identifying local or
remote ASN.1 MIB repositories. Path must include the *@mib@*
component which will be replaced with MIB module name at the
time of search.
Returns
-------
: :py:class:`~pysnmp.smi.rfc1902.ObjectIdentity`
reference to itself
Notes
-----
Please refer to :py:class:`~pysmi.reader.localfile.FileReader`,
:py:class:`~pysmi.reader.httpclient.HttpReader` and
:py:class:`~pysmi.reader.ftpclient.FtpReader` classes for
in-depth information on ASN.1 MIB lookup.
Examples
--------
>>> ObjectIdentity('SNMPv2-MIB', 'sysDescr').addAsn1Source('http://mibs.snmplabs.com/asn1/@mib@')
ObjectIdentity('SNMPv2-MIB', 'sysDescr')
>>> |
3,772 | def load(self):
cl_tmp = self.api.list(self.objName, limit=self.searchLimit).values()
cl = []
for i in cl_tmp:
cl.extend(i)
return {x[self.index]: ItemPuppetClass(self.api, x[],
self.objName, self.payloadObj,
x)
for x in cl} | Function load
Get the list of all objects
@return RETURN: A ForemanItem list |
3,773 | def _generate_request_handler_proxy(handler_class, handler_args, name):
@scope.inject
def request_handler_wrapper(app, handler, **kwargs):
handler = handler_class(app, handler.request, **handler_args)
handler._execute([], **kwargs)
request_handler_wrapper.__name__ = name
request_handler_wrapper.handler_class = handler_class
request_handler_wrapper.handler_args = handler_args
return request_handler_wrapper | When a tornado.web.RequestHandler gets mounted we create a launcher function |
3,774 | def _prepare_sets(self, sets):
if self.stored_key and not self.stored_key_exists():
raise DoesNotExist(
)
conn = self.cls.get_connection()
all_sets = set()
tmp_keys = set()
lists = []
def add_key(key, key_type=None, is_tmp=False):
if not key_type:
key_type = conn.type(key)
if key_type == :
all_sets.add(key)
elif key_type == :
all_sets.add(key)
self._has_sortedsets = True
elif key_type == :
add_key(set_.key, )
elif isinstance(set_, SortedSetField):
return all_sets, tmp_keys | The original "_prepare_sets" method simple return the list of sets in
_lazy_collection, know to be all keys of redis sets.
As the new "intersect" method can accept different types of "set", we
have to handle them because we must return only keys of redis sets. |
3,775 | def _lcs(x, y):
n, m = len(x), len(y)
table = {}
for i in range(n + 1):
for j in range(m + 1):
if i == 0 or j == 0:
table[i, j] = 0
elif x[i - 1] == y[j - 1]:
table[i, j] = table[i - 1, j - 1] + 1
else:
table[i, j] = max(table[i - 1, j], table[i, j - 1])
return table | Computes the length of the LCS between two seqs.
The implementation below uses a dynamic programming algorithm and runs
in O(nm) time where n = len(x) and m = len(y).
Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence
Args:
x: collection of words
y: collection of words
Returns:
Dictionary mapping (i, j) coordinates to the LCS length of the corresponding prefixes |
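A quick way to check the DP table above is to read the LCS length from its bottom-right cell. The snippet below is a small standalone sketch; the helper name lcs_length and the example word lists are made up for illustration:

def lcs_length(x, y):
    # Same recurrence as _lcs above: table[i, j] is the LCS length of x[:i] and y[:j].
    table = {}
    for i in range(len(x) + 1):
        for j in range(len(y) + 1):
            if i == 0 or j == 0:
                table[i, j] = 0
            elif x[i - 1] == y[j - 1]:
                table[i, j] = table[i - 1, j - 1] + 1
            else:
                table[i, j] = max(table[i - 1, j], table[i, j - 1])
    return table[len(x), len(y)]

print(lcs_length('the cat sat'.split(), 'the dog sat'.split()))  # -> 2 ('the', 'sat')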
3,776 | def values(self, with_defaults=True):
return dict(((k, str(v)) for k, v in self._inputs.items() if not v.is_empty(with_defaults))) | Return the values dictionary, defaulting to default values |
3,777 | def _generate(self, pset, min_, max_, condition, type_=None):
if type_ is None:
type_ = pset.ret
expr = []
height = np.random.randint(min_, max_)
stack = [(0, type_)]
    while len(stack) != 0:
        depth, type_ = stack.pop()
        if condition(height, depth):
            # deep enough: add a terminal of the required type
            term = np.random.choice(pset.terminals[type_])
            expr.append(term)
        else:
            # otherwise add a primitive and queue its argument types
            prim = np.random.choice(pset.primitives[type_])
            expr.append(prim)
            for arg in reversed(prim.args):
                stack.append((depth + 1, arg))
return expr | Generate a Tree as a list of lists.
The tree is built from the root to the leaves, and it stops growing when
the condition is fulfilled.
Parameters
----------
pset: PrimitiveSetTyped
Primitive set from which primitives are selected.
min_: int
Minimum height of the produced trees.
max_: int
Maximum Height of the produced trees.
condition: function
The condition is a function that takes two arguments,
the height of the tree to build and the current
depth in the tree.
type_: class
The type that should return the tree when called, when
:obj:None (default) no return type is enforced.
Returns
-------
individual: list
A grown tree with leaves at possibly different depths
depending on the condition function. |
3,778 | def magnitude(self):
return math.sqrt( self.x * self.x + self.y * self.y ) | Return the magnitude when treating the point as a vector. |
3,779 | def search_end_date(self, search_end_date):
assert isinstance(search_end_date, Time)
self._search_end_date = search_end_date.replicate(format='iso')
self._search_end_date.out_subfmt = 'date' | :type search_end_date: astropy.io.Time
:param search_end_date: search for frames taken after the given date. |
3,780 | def parse_enum_value_definition(lexer: Lexer) -> EnumValueDefinitionNode:
start = lexer.token
description = parse_description(lexer)
name = parse_name(lexer)
directives = parse_directives(lexer, True)
return EnumValueDefinitionNode(
description=description, name=name, directives=directives, loc=loc(lexer, start)
) | EnumValueDefinition: Description? EnumValue Directives[Const]? |
3,781 | def calculate_size(name, thread_id):
data_size = 0
data_size += calculate_size_str(name)
data_size += LONG_SIZE_IN_BYTES
return data_size | Calculates the request payload size |
3,782 | def filter_files_extensions(files, extension_lists):
log.debug('filter_files_extensions: files={}'.format(files))
result = [[] for _ in extension_lists]
for file in files:
ext = file.suffix[1:].lower()
for ext_i, ext_list in enumerate(extension_lists):
if ext in ext_list:
result[ext_i].append(file)
log.debug('filter_files_extensions: result={}'.format(result))
return result | Put the files in buckets according to extension_lists
files=[movie.avi, movie.srt], extension_lists=[[avi],[srt]] ==> [[movie.avi],[movie.srt]]
:param files: A list of files
:param extension_lists: A list of list of extensions
:return: The files filtered and sorted according to extension_lists |
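A self-contained sketch of the same bucketing idea; the pathlib-based file objects and the bucket_by_extension name are assumptions, but the logic mirrors the entry above (minus the logging):

from pathlib import Path

def bucket_by_extension(files, extension_lists):
    # One output bucket per extension list, in the same order.
    result = [[] for _ in extension_lists]
    for f in files:
        ext = f.suffix[1:].lower()  # 'movie.AVI' -> 'avi'
        for i, ext_list in enumerate(extension_lists):
            if ext in ext_list:
                result[i].append(f)
    return result

files = [Path('movie.avi'), Path('movie.srt')]
print(bucket_by_extension(files, [['avi'], ['srt']]))
# e.g. [[PosixPath('movie.avi')], [PosixPath('movie.srt')]]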
3,783 | def woodbury_vector(self):
if self._woodbury_vector is None:
self._woodbury_vector, _ = dpotrs(self.K_chol, self.mean - self._prior_mean)
return self._woodbury_vector | Woodbury vector in the gaussian likelihood case only is defined as
$$
(K_{xx} + \Sigma)^{-1}Y
\Sigma := \texttt{Likelihood.variance / Approximate likelihood covariance}
$$ |
3,784 | def create_table(self, table, fields):
table = table.get_soap_object(self.client)
return self.call('createTable', table, fields) | Responsys.createTable call
Accepts:
InteractObject table
list fields
Returns True on success |
3,785 | def _reducedProtToPeps(protToPeps, proteins):
return {k: v for k, v in viewitems(protToPeps) if k not in proteins} | Returns a new, reduced "protToPeps" dictionary that does not contain
entries present in "proteins".
:param protToPeps: dict, for each protein (=key) contains a set of
associated peptides (=value). For Example {protein: {peptide, ...}, ...}
:param proteins: a list of proteinSet
:returns: dict, protToPeps not containing entries from "proteins" |
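A minimal usage sketch with a made-up two-protein mapping (plain dict.items() is used instead of viewitems, which only matters on Python 2):

def reduced_prot_to_peps(prot_to_peps, proteins):
    # Keep only the proteins that are not being removed.
    return {k: v for k, v in prot_to_peps.items() if k not in proteins}

mapping = {'protA': {'pep1', 'pep2'}, 'protB': {'pep2', 'pep3'}}
print(reduced_prot_to_peps(mapping, ['protA']))  # {'protB': {'pep2', 'pep3'}}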
3,786 | def emit(self, record):
    if getattr(this, 'subs', {}).get(LOGS_NAME, False):
        self.format(record)
        this.send({
            'msg': ADDED,
            'collection': LOGS_NAME,
            'id': meteor_random_id('/collection/%s' % LOGS_NAME),
            'fields': {
                attr: {
                    'args': lambda args: [repr(arg) for arg in args],
                    'created': datetime.datetime.fromtimestamp,
                    'exc_info': stacklines_or_none,
                }.get(
                    attr,
                    lambda val: val
                )(getattr(record, attr, None))
                # Standard logging.LogRecord attributes forwarded as DDP fields.
                for attr in (
                    'args', 'created', 'exc_info', 'filename', 'funcName',
                    'levelname', 'levelno', 'lineno', 'module', 'msecs',
                    'msg', 'name', 'pathname', 'process', 'processName',
                    'relativeCreated', 'thread', 'threadName',
                )
            },
        }) | Emit a formatted log record via DDP.
3,787 | def weighted_round_robin(iterable):
cyclable_list = []
assigned_weight = 0
still_to_process = [
(item, weight) for item, weight in
sorted(iterable, key=lambda tup: tup[1], reverse=True)]
while still_to_process:
for i, (item, weight) in enumerate(still_to_process):
if weight > assigned_weight:
cyclable_list.append(item)
else:
del still_to_process[i]
assigned_weight += 1
return cycle(cyclable_list) | Takes an iterable of tuples of <item>, <weight> and cycles around them,
returning heavier (integer) weighted items more frequently. |
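Because the function returns an endless itertools.cycle, callers usually take a bounded slice. A sketch with made-up weights, assuming the weighted_round_robin above is in scope:

from itertools import islice

servers = [('a', 3), ('b', 1), ('c', 2)]
print(list(islice(weighted_round_robin(servers), 12)))
# one full cycle is ['a', 'c', 'b', 'a', 'c', 'a']: 'a' appears 3x, 'c' 2x, 'b' once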
3,788 | def standings(self, league_table, league):
headers = ['Position', 'Team Name', 'Games Played', 'Goals For',
           'Goals Against', 'Goal Difference', 'Points']
result = [headers]
result.extend([team['position'],
               team['team']['name'],
               team['playedGames'],
               team['goalsFor'],
               team['goalsAgainst'],
               team['goalDifference'],
               team['points']]
              for team in league_table['standings'][0]['table'])
self.generate_output(result) | Store output of league standings to a CSV file
3,789 | def run_initial(self, events):
self_name = type(self).__name__
for i, batch in enumerate(grouper(events, self.INITIAL_BATCH_SIZE, skip_missing=True), 1):
self.logger.debug('%s: processing initial batch #%d', self_name, i)
for j, processed_batch in enumerate(grouper(
batch, self.BATCH_SIZE, skip_missing=True), 1):
self.logger.info('%s: uploading chunk #%d of initial batch #%d', self_name, j, i)
self.upload_records(processed_batch, from_queue=False) | Runs the initial batch upload
:param events: an iterable containing events |
3,790 | def set_pdb_trace(pm=False):
import sys
import pdb
for attr in ("stdin", "stdout", "stderr"):
setattr(sys, attr, getattr(sys, "__%s__" % attr))
if pm:
pdb.post_mortem()
else:
pdb.set_trace() | Start the Python debugger when robotframework is running.
This makes sure that pdb can use stdin/stdout even though
robotframework has redirected I/O. |
3,791 | def combineReads(filename, sequences, readClass=DNARead,
upperCase=False, idPrefix='command-line-read-'):
if filename:
reads = FastaReads(filename, readClass=readClass, upperCase=upperCase)
else:
reads = Reads()
if sequences:
for count, sequence in enumerate(sequences, start=1):
    if ' ' in sequence:
        # Last space-separated field is the sequence, the rest is its id.
        fields = sequence.split()
        sequence = fields.pop()
        readId = ' '.join(fields)
    else:
        readId = '%s%d' % (idPrefix, count)
    if upperCase:
        sequence = sequence.upper()
    read = readClass(readId, sequence)
reads.add(read)
return reads | Combine FASTA reads from a file and/or sequence strings.
@param filename: A C{str} file name containing FASTA reads.
@param sequences: A C{list} of C{str} sequences. If a sequence
contains spaces, the last field (after splitting on spaces) will be
used as the sequence and the first fields will be used as the sequence
id.
@param readClass: The class of the individual reads.
@param upperCase: If C{True}, reads will be converted to upper case.
@param idPrefix: The C{str} prefix that will be used for the id of the
sequences in C{sequences} that do not have an id specified. A trailing
sequence number will be appended to this prefix. Note that
'command-line-read-', the default id prefix, could collide with ids in
the FASTA file, if given. So output might be ambiguous. That's why we
allow the caller to specify a custom prefix.
@return: A C{FastaReads} instance. |
3,792 | async def _play(self, ctx, *, query: str):
player = self.bot.lavalink.players.get(ctx.guild.id)
query = query.strip()
if not url_rx.match(query):
query = f'ytsearch:{query}'
tracks = await self.bot.lavalink.get_tracks(query)
if not tracks:
return await ctx.send('Nothing found!')
embed = discord.Embed(color=discord.Color.blurple())
if 'list' in query and not 'ytsearch' in query:
for track in tracks:
player.add(requester=ctx.author.id, track=track)
embed.title = 'Playlist enqueued!'
embed.description = f'Imported {len(tracks)} tracks from the playlist!'
await ctx.send(embed=embed)
else:
track_title = tracks[0]["info"]["title"]
track_uri = tracks[0]["info"]["uri"]
embed.title = "Track enqueued!"
embed.description = f'[{track_title}]({track_uri})'
player.add(requester=ctx.author.id, track=tracks[0])
await ctx.send(embed=embed)
if not player.is_playing:
await player.play() | Searches and plays a song from a given query. |
3,793 | def list_of_vars(arg_plot):
lovs = [[[var for var in svars.split(',') if var]
for svars in pvars.split('.') if svars]
for pvars in arg_plot.split('_') if pvars]
lovs = [[slov for slov in lov if slov] for lov in lovs if lov]
return [lov for lov in lovs if lov] | Construct list of variables per plot.
Args:
arg_plot (str): string with variable names separated with
``_`` (figures), ``.`` (subplots) and ``,`` (same subplot).
Returns:
three nested lists of str
- variables on the same subplot;
- subplots on the same figure;
- figures. |
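Assuming the corrected function above is in scope, a quick check of the separators ('_' figures, '.' subplots, ',' same subplot) on a made-up argument string:

print(list_of_vars('T,v.eta_rho'))
# [[['T', 'v'], ['eta']], [['rho']]]  -> two figures; the first has two subplots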
3,794 | def generate_markdown(cls):
lines = []
if cls.__doc__:
lines.extend(['{}'.format(cls.__doc__), ''])
for k, v in cls._values.items():
lines.append('## {}'.format(k))
if v.required:
lines[-1] = lines[-1] + ' (required)'
if v.help:
lines.append('{}'.format(v.help))
lines.append('*type*: {}'.format(v.cast_as.__name__))
if v.default is not None:
lines.append('*default*: {}'.format(v.default))
return '\n'.join(lines) | Documents values in markdown
3,795 | def is_auto_partition_required(self, brain_or_object):
obj = api.get_object(brain_or_object)
if not IAnalysisRequest.providedBy(obj):
return False
template = obj.getTemplate()
return template and template.getAutoPartition() | Returns whether the passed in object needs to be partitioned |
3,796 | def AddWeight(self, path_segment_index, weight):
if path_segment_index not in self._weight_per_index:
raise ValueError('Path segment index not set.')
self._weight_per_index[path_segment_index] += weight
if weight not in self._indexes_per_weight:
self._indexes_per_weight[weight] = []
self._indexes_per_weight[weight].append(path_segment_index) | Adds a weight for a specific path segment index.
Args:
path_segment_index: an integer containing the path segment index.
weight: an integer containing the weight.
Raises:
ValueError: if the path segment weights do not contain
the path segment index. |
3,797 | def snapshot_created(name, ami_name, instance_name, wait_until_available=True, wait_timeout_seconds=300, **kwargs):
ret = {'name': name,
'result': True,
'comment': '',
'changes': {}
}
if not __salt__['boto_ec2.create_image'](ami_name=ami_name, instance_name=instance_name, **kwargs):
ret['comment'] = 'Failed to create new AMI {ami_name}.'.format(ami_name=ami_name)
ret['result'] = False
return ret
ret['comment'] = 'Created new AMI {ami_name}.'.format(ami_name=ami_name)
ret['changes']['new'] = {ami_name: ami_name}
if not wait_until_available:
return ret
starttime = time()
while True:
images = __salt__['boto_ec2.find_images'](ami_name=ami_name, return_objs=True, **kwargs)
if images and images[0].state == 'available':
break
if time() - starttime > wait_timeout_seconds:
if images:
ret['comment'] = 'AMI still in state {state} after timeout.'.format(state=images[0].state)
else:
ret['comment'] = 'AMI with name {ami_name} not found after timeout.'.format(ami_name=ami_name)
ret['result'] = False
return ret
sleep(5)
return ret | Create a snapshot from the given instance
.. versionadded:: 2016.3.0 |
3,798 | def get_arg_parser(cls, settings = None, option_prefix = u'--',
add_help = False):
parser = argparse.ArgumentParser(add_help = add_help,
prefix_chars = option_prefix[0])
if settings is None:
settings = cls.list_all(basic = True)
if sys.version_info.major < 3:
from locale import getpreferredencoding
encoding = getpreferredencoding()
def decode_string_option(value):
return value.decode(encoding)
for name in settings:
if name not in cls._defs:
logger.debug("get_arg_parser: ignoring unknown option {0}"
.format(name))
continue
setting = cls._defs[name]
if not setting.cmdline_help:
logger.debug("get_arg_parser: option {0} has no cmdline"
.format(name))
continue
if sys.version_info.major < 3:
name = name.encode(encoding, "replace")
option = option_prefix + name.replace("_", "-")
dest = "pyxmpp2_" + name
if setting.validator:
opt_type = setting.validator
elif setting.type is unicode and sys.version_info.major < 3:
opt_type = decode_string_option
else:
opt_type = setting.type
if setting.default_d:
default_s = setting.default_d
if sys.version_info.major < 3:
default_s = default_s.encode(encoding, "replace")
elif setting.default is not None:
default_s = repr(setting.default)
else:
default_s = None
opt_help = setting.cmdline_help
if sys.version_info.major < 3:
opt_help = opt_help.encode(encoding, "replace")
if default_s:
opt_help += " (Default: {0})".format(default_s)
if opt_type is bool:
opt_action = _YesNoAction
else:
opt_action = "store"
parser.add_argument(option,
action = opt_action,
default = setting.default,
type = opt_type,
help = opt_help,
metavar = name.upper(),
dest = dest)
return parser | Make a command-line option parser.
The returned parser may be used as a parent parser for application
argument parser.
:Parameters:
- `settings`: list of PyXMPP2 settings to consider. By default
all 'basic' settings are provided.
- `option_prefix`: custom prefix for PyXMPP2 options. E.g.
``'--xmpp'`` to differentiate them from not xmpp-related
application options.
- `add_help`: when `True` a '--help' option will be included
(probably already added in the application parser object)
:Types:
- `settings`: list of `unicode`
- `option_prefix`: `str`
- `add_help`:
:return: an argument parser object.
:returntype: :std:`argparse.ArgumentParser` |
3,799 | def recall_score(y_true, y_pred, average=, suffix=False):
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
nb_correct = len(true_entities & pred_entities)
nb_true = len(true_entities)
score = nb_correct / nb_true if nb_true > 0 else 0
return score | Compute the recall.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
Returns:
score : float.
Example:
>>> from seqeval.metrics import recall_score
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> recall_score(y_true, y_pred)
0.50 |