Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
385,700 | def is_cached(self):
try:
for item in self.rule.output_files:
log.info(item)
self.cachemgr.in_cache(item, self._metahash())
except cache.CacheMiss:
log.info(, self.address)
return False
else:
log.info(, self.address)
return True | Returns true if this rule is already cached. |
385,701 | def write_header(self):
for properties in self.header.values():
value = properties[]
offset_bytes = int(properties[])
self.file.seek(offset_bytes)
value.tofile(self.file) | Write `header` to `file`.
See Also
--------
write_data |
385,702 | def cancelAllPendingResults( self ):
jobs = self.pendingResults()
if len(jobs) > 0:
self._abortJobs(jobs)
self.notebook().cancelAllPendingResults() | Cancel all pending results. |
385,703 | def from_meta(cls, meta, meta_all=None):
if len(meta.args) == 2:
name = meta.args[1]
meta_type = None
for x in meta_all:
if x.args[1] == name and x.args[0] == 'type':
meta_type = x.description
break
return cls(args=meta.args, description=meta.description, type=meta_type)
else:
return cls(args=meta.args, description=meta.description) | Copy DocstringMeta from another instance. |
385,704 | def issue_comments(self, issue_id_or_key, extra_query_params={}):
return self.do("GET", "issues/{issue_id_or_key}/comments",
url_params={"issue_id_or_key": issue_id_or_key},
query_params=extra_query_params
) | client = BacklogClient("your_space_name", "your_api_key")
client.issue_comments("YOUR_PROJECT-999") |
385,705 | def to_sigproc_keyword(keyword, value=None):
keyword = bytes(keyword)
if value is None:
return np.int32(len(keyword)).tostring() + keyword
else:
dtype = header_keyword_types[keyword]
dtype_to_type = {b'<l' : np.int32,
b'str' : str,
b'<d' : np.float64,
b'angle' : to_sigproc_angle}
value_dtype = dtype_to_type[dtype]
if value_dtype is str:
return np.int32(len(keyword)).tostring() + keyword + np.int32(len(value)).tostring() + value
else:
return np.int32(len(keyword)).tostring() + keyword + value_dtype(value).tostring() | Generate a serialized string for a sigproc keyword:value pair
If value=None, just the keyword will be written with no payload.
Data type is inferred by keyword name (via a lookup table)
Args:
keyword (str): Keyword to write
value (None, float, str, double or angle): value to write to file
Returns:
value_str (str): serialized string to write to file. |
385,706 | def print_report(label, user, system, real):
print("{:<12s} {:12f} {:12f} ( {:12f} )".format(label,
user,
system,
real)) | Prints the report of one step of a benchmark. |
385,707 | def encode(self, b64=False, always_bytes=True):
if self.binary and not b64:
encoded_packet = six.int2byte(self.packet_type)
else:
encoded_packet = six.text_type(self.packet_type)
if self.binary and b64:
encoded_packet = 'b' + encoded_packet
if self.binary:
if b64:
encoded_packet += base64.b64encode(self.data).decode()
else:
encoded_packet += self.data
elif isinstance(self.data, six.string_types):
encoded_packet += self.data
elif isinstance(self.data, dict) or isinstance(self.data, list):
encoded_packet += self.json.dumps(self.data,
separators=(',', ':'))
elif self.data is not None:
encoded_packet += str(self.data)
if always_bytes and not isinstance(encoded_packet, binary_types):
encoded_packet = encoded_packet.encode()
return encoded_packet | Encode the packet for transmission. |
385,708 | def start(self, daemon=True):
if self._run_lock.acquire(False):
try:
if self._run_thread is None:
self._run_thread = threading.Thread(target=self._run_in_thread)
self._run_thread.daemon = daemon
self._run_thread.start()
finally:
self._run_lock.release() | Start driving the chain asynchronously, return immediately
:param daemon: ungracefully kill the driver when the program terminates
:type daemon: bool |
385,709 | def softmax_to_unary(sm, GT_PROB=1):
warning("pydensecrf.softmax_to_unary is deprecated, use unary_from_softmax instead.")
scale = None if GT_PROB == 1 else GT_PROB
return unary_from_softmax(sm, scale, clip=None) | Deprecated, use `unary_from_softmax` instead. |
385,710 | def teardown(self):
if self.controller:
self.controller.teardown()
for monitor in self.monitors:
monitor.teardown() | Clean up the target once all tests are completed |
385,711 | def notify_slaves(self):
if self.disable_slave_notify is not None:
LOGGER.debug()
return False
if self.zone_data()['kind'] == 'Master':
response_code = self._put( + self.domain + ).status_code
if response_code == 200:
LOGGER.debug()
return True
LOGGER.debug(, response_code)
else:
LOGGER.debug(Master\)
return False | Checks to see if slaves should be notified, and notifies them if needed |
385,712 | def to_api_data(self):
data = {}
if self.__interval and isinstance(self.__interval, int):
recurrence_pattern = data[self._cc()] = {}
recurrence_pattern[self._cc()] =
recurrence_pattern[self._cc()] = self.__interval
if self.__days_of_week and isinstance(self.__days_of_week,
(list, tuple, set)):
recurrence_pattern[self._cc()] =
recurrence_pattern[self._cc()] = list(
self.__days_of_week)
if self.__first_day_of_week:
recurrence_pattern[self._cc()] =
recurrence_pattern[
self._cc()] = self.__first_day_of_week
elif self.__month and isinstance(self.__month, int):
recurrence_pattern[self._cc()] =
recurrence_pattern[self._cc()] = self.__month
if self.__index:
recurrence_pattern[self._cc()] = self.__index
else:
if self.__index:
recurrence_pattern[self._cc()] = self.__index
elif self.__day_of_month and isinstance(self.__day_of_month, int):
recurrence_pattern[self._cc()] =
recurrence_pattern[self._cc()] = self.__day_of_month
if self.__month and isinstance(self.__month, int):
recurrence_pattern[self._cc()] =
recurrence_pattern[self._cc()] = self.__month
if self.__start_date:
recurrence_range = data[self._cc()] = {}
recurrence_range[self._cc()] =
recurrence_range[
self._cc()] = self.__start_date.isoformat()
recurrence_range[
self._cc()] = self.__recurrence_time_zone
if self.__end_date:
recurrence_range[self._cc()] =
recurrence_range[
self._cc()] = self.__end_date.isoformat()
elif self.__occurrences is not None and isinstance(
self.__occurrences,
int):
recurrence_range[self._cc()] =
recurrence_range[
self._cc()] = self.__occurrences
return data | Returns a dict to communicate with the server
:rtype: dict |
385,713 | def get_gradebook_hierarchy_session(self, proxy):
if not self.supports_gradebook_hierarchy():
raise errors.Unimplemented()
return sessions.GradebookHierarchySession(proxy=proxy, runtime=self._runtime) | Gets the session traversing gradebook hierarchies.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookHierarchySession) - a
``GradebookHierarchySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_gradebook_hierarchy() is
false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_hierarchy()`` is true.* |
385,714 | def create(self, num):
self.log.record_process(, + str(num) + + self.name) | Creates the environment
in your subclassed create function include the line below
super().build(arg1, arg2, arg2, ...) |
385,715 | def get_moments(metricParams, vary_fmax=False, vary_density=None):
psd_amp = metricParams.psd.data
psd_f = numpy.arange(len(psd_amp), dtype=float) * metricParams.deltaF
new_f, new_amp = interpolate_psd(psd_f, psd_amp, metricParams.deltaF)
funct = lambda x,f0: 1
I7 = calculate_moment(new_f, new_amp, metricParams.fLow, \
metricParams.fUpper, metricParams.f0, funct,\
vary_fmax=vary_fmax, vary_density=vary_density)
moments = {}
moments['I7'] = I7
for i in range(-7,18):
funct = lambda x,f0: x**((-i+7)/3.)
moments['J%d' %(i)] = calculate_moment(new_f, new_amp, \
metricParams.fLow, metricParams.fUpper, \
metricParams.f0, funct, norm=I7, \
vary_fmax=vary_fmax, vary_density=vary_density)
for i in range(-1,18):
funct = lambda x,f0: (numpy.log((x*f0)**(1./3.))) * x**((-i+7)/3.)
moments['log%d' %(i)] = calculate_moment(new_f, new_amp, \
metricParams.fLow, metricParams.fUpper, \
metricParams.f0, funct, norm=I7, \
vary_fmax=vary_fmax, vary_density=vary_density)
for i in range(-1,18):
funct = lambda x,f0: (numpy.log((x*f0)**(1./3.)))**2 * x**((-i+7)/3.)
moments['loglog%d' %(i)] = calculate_moment(new_f, new_amp, \
metricParams.fLow, metricParams.fUpper, \
metricParams.f0, funct, norm=I7, \
vary_fmax=vary_fmax, vary_density=vary_density)
for i in range(-1,18):
funct = lambda x,f0: (numpy.log((x*f0)**(1./3.)))**3 * x**((-i+7)/3.)
moments['logloglog%d' %(i)] = calculate_moment(new_f, new_amp, \
metricParams.fLow, metricParams.fUpper, \
metricParams.f0, funct, norm=I7, \
vary_fmax=vary_fmax, vary_density=vary_density)
for i in range(-1,18):
funct = lambda x,f0: (numpy.log((x*f0)**(1./3.)))**4 * x**((-i+7)/3.)
moments['loglogloglog%d' %(i)] = calculate_moment(new_f, new_amp, \
metricParams.fLow, metricParams.fUpper, \
metricParams.f0, funct, norm=I7, \
vary_fmax=vary_fmax, vary_density=vary_density)
metricParams.moments = moments | This function will calculate the various integrals (moments) that are
needed to compute the metric used in template bank placement and
coincidence.
Parameters
-----------
metricParams : metricParameters instance
Structure holding all the options for construction of the metric.
vary_fmax : boolean, optional (default False)
If set to False the metric and rotations are calculated once, for the
full range of frequency [f_low,f_upper).
If set to True the metric and rotations are calculated multiple times,
for frequency ranges [f_low,f_low + i*vary_density), where i starts at
1 and runs up until f_low + (i+1)*vary_density > f_upper.
Thus values greater than f_upper are *not* computed.
The calculation for the full range [f_low,f_upper) is also done.
vary_density : float, optional
If vary_fmax is True, this will be used in computing the frequency
ranges as described for vary_fmax.
Returns
--------
None : None
**THIS FUNCTION RETURNS NOTHING**
The following will be **added** to the metricParams structure
metricParams.moments : Moments structure
This contains the result of all the integrals used in computing the
metrics above. It can be used for the ethinca components calculation,
or other similar calculations. This is composed of two compound
dictionaries. The first entry indicates which moment is being
calculated and the second entry indicates the upper frequency cutoff
that was used.
In all cases x = f/f0.
For the first entries the options are:
moments['J%d' %(i)][f_cutoff]
This stores the integral of
x**((-i)/3.) * delta X / PSD(x)
moments['log%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.))) x**((-i)/3.) * delta X / PSD(x)
moments['loglog%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.)))**2 x**((-i)/3.) * delta X / PSD(x)
moments['logloglog%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.)))**3 x**((-i)/3.) * delta X / PSD(x)
moments['loglogloglog%d' %(i)][f_cutoff]
This stores the integral of
(numpy.log(x**(1./3.)))**4 x**((-i)/3.) * delta X / PSD(x)
The second entry stores the frequency cutoff used when computing
the integral. See description of the vary_fmax option above.
All of these values are normalized by a factor of
x**((-7)/3.) * delta X / PSD(x)
The normalization factor can be obtained in
moments['I7'][f_cutoff] |
385,716 | def find_first_version(self):
for version in self.versions_to_try:
remote_url = self.get_download_url(version=version)
if http_url_exists(remote_url):
return version, remote_url
return None, None | Finds the first version of igraph that exists in the nightly build
repo from the version numbers provided in ``self.versions_to_try``. |
385,717 | def file_is_present(self, file_path):
p = self.p(file_path)
if not os.path.exists(p):
return False
if not os.path.isfile(p):
raise IOError("%s is not a file" % file_path)
return True | check if file 'file_path' is present, raises IOError if file_path
is not a file
:param file_path: str, path to the file
:return: True if file exists, False if file does not exist |
385,718 | def update_members(self, list_id, data):
self.list_id = list_id
if 'members' not in data:
raise KeyError()
else:
if not len(data['members']) <= 500:
raise ValueError()
for member in data['members']:
if 'email_address' not in member:
raise KeyError()
check_email(member['email_address'])
if 'status' not in member and 'status_if_new' not in member:
raise KeyError()
valid_statuses = ['subscribed', 'unsubscribed', 'cleaned', 'pending']
if 'status' in member and member['status'] not in valid_statuses:
raise ValueError(
)
if 'status_if_new' in member and member['status_if_new'] not in valid_statuses:
raise ValueError(
)
if 'update_existing' not in data:
data['update_existing'] = False
return self._mc_client._post(url=self._build_path(list_id), data=data) | Batch subscribe or unsubscribe list members.
Only the members array is required in the request body parameters.
Within the members array, each member requires an email_address
and either a status or status_if_new. The update_existing parameter
will also be considered required to help prevent accidental updates
to existing members and will default to false if not present.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"members": array*
[
{
"email_address": string*,
"status": string* (Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'pending'),
"status_if_new": string* (Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'pending')
}
],
"update_existing": boolean*
} |
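For reference, a minimal request body that satisfies the constraints described above might look like the sketch below; the list id, the email addresses, and the `client.lists.update_members` attribute path are illustrative assumptions, not taken from this row.
data = {
    "members": [
        {"email_address": "user@example.com", "status_if_new": "subscribed"},
        {"email_address": "other@example.com", "status": "unsubscribed"},
    ],
    "update_existing": True,  # explicit, so existing members may be updated
}
# assuming a mailchimp3-style client object that exposes this wrapper
client.lists.update_members("9e67587f52", data)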
385,719 | def show(self):
print("APKs in Session: {}".format(len(self.analyzed_apk)))
for d, a in self.analyzed_apk.items():
print("\t{}: {}".format(d, a))
print("DEXs in Session: {}".format(len(self.analyzed_dex)))
for d, dex in self.analyzed_dex.items():
print("\t{}: {}".format(d, dex))
print("Analysis in Session: {}".format(len(self.analyzed_vms)))
for d, a in self.analyzed_vms.items():
print("\t{}: {}".format(d, a)) | Print information to stdout about the current session.
Gets all APKs, all DEX files and all Analysis objects. |
385,720 | def predict_fixation_duration(
durations, angles, length_diffs, dataset=None, params=None):
if dataset is None:
dataset = np.ones(durations.shape)
corrected_durations = np.nan * np.ones(durations.shape)
for i, ds in enumerate(np.unique(dataset)):
e = lambda v, x, y, z: (leastsq_dual_model(x, z, *v) - y)
v0 = [120, 220.0, -.1, 0.5, .1, .1]
id_ds = dataset == ds
idnan = (
~np.isnan(angles)) & (
~np.isnan(durations)) & (
~np.isnan(length_diffs))
v, s = leastsq(
e, v0, args=(
angles[
idnan & id_ds], durations[
idnan & id_ds], length_diffs[
idnan & id_ds]), maxfev=10000)
corrected_durations[id_ds] = (durations[id_ds] -
(leastsq_dual_model(angles[id_ds], length_diffs[id_ds], *v)))
if params is not None:
params[ + str(i)] = v
params[ + str(i)] = s
return corrected_durations | Fits a non-linear piecewise regression to fixation durations for a fixmat.
Returns corrected fixation durations. |
385,721 | def diff_tree(candidate_config=None,
candidate_path=None,
running_config=None,
running_path=None,
saltenv='base'):
candidate_tree = tree(config=candidate_config,
path=candidate_path,
saltenv=saltenv)
running_tree = tree(config=running_config,
path=running_path,
saltenv=saltenv)
return salt.utils.dictdiffer.deep_diff(running_tree, candidate_tree) | Return the diff, as Python dictionary, between the candidate and the running
configuration.
candidate_config
The candidate configuration sent as text. This argument is ignored when
``candidate_path`` is set.
candidate_path
Absolute or remote path from where to load the candidate configuration
text. This argument allows any URI supported by
:py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
``https://``, ``s3://``, ``ftp:/``, etc.
running_config
The running configuration sent as text. This argument is ignored when
``running_path`` is set.
running_path
Absolute or remote path from where to load the running configuration
text. This argument allows any URI supported by
:py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
``https://``, ``s3://``, ``ftp:/``, etc.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file.
Ignored if ``candidate_path`` or ``running_path`` is not a
``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' iosconfig.diff_tree candidate_path=salt://path/to/candidate.cfg running_path=salt://path/to/running.cfg |
385,722 | def ensure_unique(qs, field_name, value, exclude_id=None):
orig = value
if not value:
value = "None"
for x in itertools.count(1):
if not qs.exclude(id=exclude_id).filter(**{field_name: value}).exists():
break
if orig:
value = % (orig, x)
else:
value = % x
return value | Makes sure that `value` is unique on model.fieldname. And nonempty. |
385,723 | def check(self, obj, condition) -> "WriteTransaction":
self._extend([TxItem.new("check", obj, condition)])
return self | Add a condition which must be met for the transaction to commit.
While the condition is checked against the provided object, that object will not be modified. It is only
used to provide the hash and range key to apply the condition to.
At most 10 items can be checked, saved, or deleted in the same transaction. The same idempotency token will
be used for a single prepared transaction, which allows you to safely call commit on the PreparedCommit object
multiple times.
:param obj: The object to use for the transaction condition. This object will not be modified.
:param condition: A condition on an object which must hold for the transaction to commit.
:return: this transaction for chaining |
385,724 | def access_zipped_assets(cls, static_module_name, static_path, dir_location=None):
path = os.path.join(static_path, asset_target)
file_data = resource_string(static_module_name, path)
fp.write(file_data)
if dir_location is None:
temp_dir = safe_mkdtemp()
else:
temp_dir = dir_location
walk_zipped_assets(static_module_name, static_path, static_path, temp_dir)
return temp_dir | Create a copy of static resource files as we can't serve them from within the pex file.
:param static_module_name: Module name containing module to cache in a tempdir
:type static_module_name: string, for example 'twitter.common.zookeeper' or similar
:param static_path: Module name, for example 'serverset'
:param dir_location: create a new temporary directory inside, or None to have one created
:returns temp_dir: Temporary directory with the zipped assets inside
:rtype: str |
385,725 | def create_object(self, filename, img_properties=None):
prop_name = os.path.basename(os.path.normpath(filename))
prop_mime = None
pos = prop_name.rfind('.')
if pos >= 0:
suffix = prop_name[pos:].lower()
if suffix in VALID_IMGFILE_SUFFIXES:
prop_mime = VALID_IMGFILE_SUFFIXES[suffix]
if not prop_mime:
raise ValueError( + prop_name)
identifier = str(uuid.uuid4()).replace('-', '')
image_dir = self.get_directory(identifier)
shutil.copyfile(filename, os.path.join(image_dir, prop_name))
obj = ImageHandle(identifier, properties, image_dir)
self.insert_object(obj)
return obj | Create an image object on local disk from the given file. The file
is copied to a new local directory that is created for the image object.
The optional list of image properties will be associated with the new
object together with the set of default properties for images.
Parameters
----------
filename : string
Path to file on disk
img_properties : Dictionary, optional
Set of image properties.
Returns
-------
ImageHandle
Handle for created image object |
385,726 | def column(self):
from .column import Column
if 'column' in self.soup:
url = Column_Url + + self.soup[][]
name = self.soup['column']['name']
return Column(url, name, session=self._session)
else:
return None | Get the column this article belongs to.
:return: the column containing this article
:rtype: Column |
385,727 | def pretokenized_tfrecord_dataset(filenames,
text2self,
eos_included,
repeat,
batch_size,
sequence_length):
dataset = tf.data.TFRecordDataset(filenames, buffer_size=64 * 1024 * 1024)
if repeat:
dataset = dataset.repeat()
keys = ["targets"] if text2self else ["inputs", "targets"]
def decode_example(serialized_example):
data_fields = {}
data_items_to_decoders = {}
for k in keys:
data_fields[k] = tf.VarLenFeature(tf.int64)
data_items_to_decoders[k] = tf.contrib.slim.tfexample_decoder.Tensor(k)
decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder(
data_fields, data_items_to_decoders)
decode_items = list(sorted(data_items_to_decoders))
decoded = decoder.decode(serialized_example, items=decode_items)
if not eos_included:
decoded = [tf.concat([v, [1]], 0) for v in decoded]
return dict(zip(decode_items, decoded))
dataset = dataset.map(decode_example,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return pack_and_batch(dataset, batch_size, sequence_length) | Reads tensor2tensor-style data files.
The dataset is defined by sets of TFRecord files of TFExample protos.
There should be a "targets" feature (a 1d tensor of integers)
If not text2self, there should also be an "inputs" feature.
Other features get ignored.
eos_included specifies whether the inputs and targets were written with an
EOS token, as in tensor2tensor
Args:
filenames: a list of strings
text2self: a boolean
eos_included: a boolean
repeat: a boolean
batch_size: an integer
sequence_length: an integer
Returns:
A tf.data.Dataset of batches |
385,728 | def flatten(inputs, scope=None):
if len(inputs.get_shape()) < 2:
raise ValueError()
dims = inputs.get_shape()[1:]
k = dims.num_elements()
with tf.name_scope(scope, 'Flatten', [inputs]):
return tf.reshape(inputs, [-1, k]) | Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
scope: Optional scope for name_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong. |
385,729 | def _set_police_priority_map(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",police_priority_map.police_priority_map, yang_name="police-priority-map", rest_name="police-priority-map", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: u, u: None, u: None, u: u, u: u}}), is_container=, yang_name="police-priority-map", rest_name="police-priority-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: None, u: None, u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "list",
: ,
})
self.__police_priority_map = t
if hasattr(self, ):
self._set() | Setter method for police_priority_map, mapped from YANG variable /police_priority_map (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_police_priority_map is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_police_priority_map() directly. |
385,730 | def set(self, time_sec, callback_fn, *args, **kwdargs):
timer = self.timer()
timer.set_callback('expired', callback_fn, *args, **kwdargs)
timer.set(time_sec)
return timer | Convenience function to create and set a timer.
Equivalent to:
timer = timer_factory.timer()
timer.set_callback('expired', callback_fn, *args, **kwdargs)
timer.set(time_sec) |
385,731 | def default_from_address(self):
if self._coinbase_cache_til is not None:
if time.time() - self._coinbase_cache_til > 30:
self._coinbase_cache_til = None
self._coinbase_cache = None
if self._coinbase_cache is None:
self._coinbase_cache = self.get_coinbase()
return self._coinbase_cache | Cache the coinbase address so that we don't make two requests for every
single transaction. |
385,732 | def reset( self ):
self.setValue(, XPaletteColorSet())
self.setValue(, QApplication.font())
self.setValue(, QApplication.font().pointSize()) | Resets the values to the current application information. |
385,733 | def setText(self, sequence):
self.setToolTip(sequence)
super(ShortcutLineEdit, self).setText(sequence) | Qt method extension. |
385,734 | def set_starting_ratio(self, ratio):
from samplerate.lowlevel import src_set_ratio
if self._state is None:
self._create()
src_set_ratio(self._state, ratio)
self.ratio = ratio | Set the starting conversion ratio for the next `read` call. |
385,735 | def connect(self):
sock = socket.create_connection((self.host, self.port),
self.timeout, self.source_address)
if getattr(self, '_tunnel_host', None):
self.sock = sock
self._tunnel()
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
ssl_version=ssl.PROTOCOL_TLSv1_2) | Overrides HTTPSConnection.connect to specify TLS version |
385,736 | def get_by_ip(cls, ip):
ret = cls.hosts_by_ip.get(ip)
if ret is None:
ret = cls.hosts_by_ip[ip] = [Host(ip)]
return ret | Returns Host instance for the given ip address. |
385,737 | def multiply(self, a, b):
if a is None or b is None: return None
m, n = len(a), len(a[0])
if len(b) != n:
raise Exception("A's column number must be equal to B's row number.")
l = len(b[0])
table_a, table_b = {}, {}
for i, row in enumerate(a):
for j, ele in enumerate(row):
if ele:
if i not in table_a: table_a[i] = {}
table_a[i][j] = ele
for i, row in enumerate(b):
for j, ele in enumerate(row):
if ele:
if i not in table_b: table_b[i] = {}
table_b[i][j] = ele
c = [[0 for j in range(l)] for i in range(m)]
for i in table_a:
for k in table_a[i]:
if k not in table_b: continue
for j in table_b[k]:
c[i][j] += table_a[i][k] * table_b[k][j]
return c | :type A: List[List[int]]
:type B: List[List[int]]
:rtype: List[List[int]] |
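A small worked example of the sparse multiplication above, assuming the method is defined on a class named `Solution` (the class name is an assumption for illustration):
a = [[1, 0, 0],
     [-1, 0, 3]]
b = [[7, 0, 0],
     [0, 0, 0],
     [0, 0, 1]]
print(Solution().multiply(a, b))  # [[7, 0, 0], [-7, 0, 3]]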
385,738 | def download(self, url, path):
qurl = QUrl(url)
url = to_text_string(qurl.toEncoded(), encoding='utf-8')
logger.debug(str((url, path)))
if url in self._workers:
while not self._workers[url].finished:
return self._workers[url]
worker = DownloadWorker(url, path)
folder = os.path.dirname(os.path.abspath(path))
if not os.path.isdir(folder):
os.makedirs(folder)
request = QNetworkRequest(qurl)
self._head_requests[url] = request
self._paths[url] = path
self._workers[url] = worker
self._manager.head(request)
self._timer.start()
return worker | Download url and save data to path. |
385,739 | def is_unit(q):
if isinstance(q, six.string_types):
try: return hasattr(units, q)
except: return False
else:
cls = type(q)
return cls.__module__.startswith('pint') and cls.__name__ == 'Unit' | is_unit(q) yields True if q is a pint unit or a string that names a pint unit and False
otherwise. |
385,740 | def _expand_host(self, host):
if isinstance(host, basestring):
return (host, self.default_port)
return tuple(host) | Used internally to add the default port to hosts not including
portnames. |
385,741 | def write(filename, mesh, write_binary=True):
if mesh.points.shape[1] == 2:
logging.warning(
"msh2 requires 3D points, but 2D points given. "
"Appending 0 third component."
)
mesh.points = numpy.column_stack(
[mesh.points[:, 0], mesh.points[:, 1], numpy.zeros(mesh.points.shape[0])]
)
if write_binary:
for key, value in mesh.cells.items():
if value.dtype != c_int:
logging.warning(
"Binary Gmsh needs 32-bit integers (got %s). Converting.",
value.dtype,
)
mesh.cells[key] = numpy.array(value, dtype=c_int)
cells = mesh.cells.copy()
if "tetra10" in cells:
cells["tetra10"] = cells["tetra10"][:, [0, 1, 2, 3, 4, 5, 6, 7, 9, 8]]
if "hexahedron20" in cells:
cells["hexahedron20"] = cells["hexahedron20"][
:, [0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 13, 9, 16, 18, 19, 17, 10, 12, 14, 15]
]
with open(filename, "wb") as fh:
mode_idx = 1 if write_binary else 0
size_of_double = 8
fh.write(
("$MeshFormat\n2.2 {} {}\n".format(mode_idx, size_of_double)).encode(
"utf-8"
)
)
if write_binary:
fh.write(struct.pack("i", 1))
fh.write("\n".encode("utf-8"))
fh.write("$EndMeshFormat\n".encode("utf-8"))
if mesh.field_data:
_write_physical_names(fh, mesh.field_data)
tag_data = {}
other_data = {}
for cell_type, a in mesh.cell_data.items():
tag_data[cell_type] = {}
other_data[cell_type] = {}
for key, data in a.items():
if key in ["gmsh:physical", "gmsh:geometrical"]:
tag_data[cell_type][key] = data.astype(c_int)
else:
other_data[cell_type][key] = data
_write_nodes(fh, mesh.points, write_binary)
_write_elements(fh, cells, tag_data, write_binary)
if mesh.gmsh_periodic is not None:
_write_periodic(fh, mesh.gmsh_periodic)
for name, dat in mesh.point_data.items():
_write_data(fh, "NodeData", name, dat, write_binary)
cell_data_raw = raw_from_cell_data(other_data)
for name, dat in cell_data_raw.items():
_write_data(fh, "ElementData", name, dat, write_binary)
return | Writes msh files, cf.
<http://gmsh.info//doc/texinfo/gmsh.html#MSH-ASCII-file-format>. |
385,742 | def _set_pw_profile(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("pw_profile_name",pw_profile.pw_profile, yang_name="pw-profile", rest_name="pw-profile", parent=self, is_container=, user_ordered=False, path_helper=self._path_helper, yang_keys=, extensions={u: {u: u, u: u, u: u}}), is_container=, yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: u}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "list",
: ,
})
self.__pw_profile = t
if hasattr(self, ):
self._set() | Setter method for pw_profile, mapped from YANG variable /pw_profile (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_pw_profile is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pw_profile() directly. |
385,743 | def when_connected(self):
if self._client and not self._client.is_closed:
return defer.succeed(self._client)
else:
return self._client_deferred | Retrieve the currently-connected Protocol, or the next one to connect.
Returns:
defer.Deferred: A Deferred that fires with a connected
:class:`FedoraMessagingProtocolV2` instance. This is similar to
the whenConnected method from the Twisted endpoints APIs, which
sadly isn't available before 16.1.0 and therefore isn't available
in EL7.
385,744 | def view(self, buffer_time = 10.0, sample_size = 10000, name=None, description=None, start=False):
if name is None:
name = .join(random.choice() for x in range(16))
if self.oport.schema == streamsx.topology.schema.CommonSchema.Python:
if self._json_stream:
view_stream = self._json_stream
else:
self._json_stream = self.as_json(force_object=False)._layout(hidden=True)
view_stream = self._json_stream
if self._placeable:
self._colocate(view_stream, )
else:
view_stream = self
port = view_stream.oport.name
view_config = {
: name,
: port,
: description,
: buffer_time,
: sample_size}
if start:
view_config[] =
view_stream.oport.operator.addViewConfig(view_config)
_view = View(name)
self.topology.graph._views.append(_view)
return _view | Defines a view on a stream.
A view is a continually updated sampled buffer of a streams's tuples.
Views allow visibility into a stream from external clients such
as Jupyter Notebooks, the Streams console,
`Microsoft Excel <https://www.ibm.com/support/knowledgecenter/SSCRJU_4.2.0/com.ibm.streams.excel.doc/doc/excel_overview.html>`_ or REST clients.
The view created by this method can be used by external clients
and through the returned :py:class:`~streamsx.topology.topology.View` object after the topology is submitted. For example a Jupyter Notebook can
declare and submit an application with views, and then
use the resultant `View` objects to visualize live data within the streams.
When the stream contains Python objects then they are converted
to JSON.
Args:
buffer_time: Specifies the buffer size to use measured in seconds.
sample_size: Specifies the number of tuples to sample per second.
name(str): Name of the view. Name must be unique within the topology. Defaults to a generated name.
description: Description of the view.
start(bool): Start buffering data when the job is submitted.
If `False` then the view starts buffering data when the first
remote client accesses it to retrieve data.
Returns:
streamsx.topology.topology.View: View object which can be used to access the data when the
topology is submitted.
.. note:: Views are only supported when submitting to distributed
contexts including Streaming Analytics service. |
385,745 | def chempot_vs_gamma_plot_one(self, plt, entry, ref_delu, chempot_range,
delu_dict={}, delu_default=0, label='', JPERM2=False):
chempot_range = sorted(chempot_range)
ucell_comp = self.ucell_entry.composition.reduced_composition
if entry.adsorbates:
s = entry.cleaned_up_slab
clean_comp = s.composition.reduced_composition
else:
clean_comp = entry.composition.reduced_composition
mark = '--' if ucell_comp != clean_comp else '-'
delu_dict = self.set_all_variables(delu_dict, delu_default)
delu_dict[ref_delu] = chempot_range[0]
gamma_min = self.as_coeffs_dict[entry]
gamma_min = gamma_min if type(gamma_min).__name__ == \
"float" else sub_chempots(gamma_min, delu_dict)
delu_dict[ref_delu] = chempot_range[1]
gamma_max = self.as_coeffs_dict[entry]
gamma_max = gamma_max if type(gamma_max).__name__ == \
"float" else sub_chempots(gamma_max, delu_dict)
gamma_range = [gamma_min, gamma_max]
se_range = np.array(gamma_range) * EV_PER_ANG2_TO_JOULES_PER_M2 \
if JPERM2 else gamma_range
mark = entry.mark if entry.mark else mark
c = entry.color if entry.color else self.color_dict[entry]
plt.plot(chempot_range, se_range, mark, color=c, label=label)
return plt | Helper function to help plot the surface energy of a
single SlabEntry as a function of chemical potential.
Args:
plt (Plot): A plot.
entry (SlabEntry): Entry of the slab whose surface energy we want
to plot
ref_delu (sympy Symbol): The range stability of each slab is based
on the chempot range of this chempot. Should be a sympy Symbol
object of the format: Symbol("delu_el") where el is the name of
the element
chempot_range ([max_chempot, min_chempot]): Range to consider the
stability of the slabs.
delu_dict (Dict): Dictionary of the chemical potentials to be set as
constant. Note the key should be a sympy Symbol object of the
format: Symbol("delu_el") where el is the name of the element.
delu_default (float): Default value for all unset chemical potentials
label (str): Label of the slab for the legend.
JPERM2 (bool): Whether to plot surface energy in /m^2 (True) or
eV/A^2 (False)
Returns:
(Plot): Plot of surface energy vs chemical potential for one entry. |
385,746 | def add_device_notification(self, data_name, attr, callback, user_handle=None):
if self._port is not None:
notification_handle, user_handle = adsSyncAddDeviceNotificationReqEx(
self._port, self._adr, data_name, attr, callback, user_handle
)
return notification_handle, user_handle
return None | Add a device notification.
:param str data_name: PLC storage address
:param pyads.structs.NotificationAttrib attr: object that contains
all the attributes for the definition of a notification
:param callback: callback function that gets executed on in the event
of a notification
:rtype: (int, int)
:returns: notification handle, user handle
Save the notification handle and the user handle on creating a
notification if you want to be able to remove the notification
later in your code.
**Usage**:
>>> import pyads
>>> from ctypes import size_of
>>>
>>> # Connect to the local TwinCAT PLC
>>> plc = pyads.Connection('127.0.0.1.1.1', 851)
>>>
>>> # Create callback function that prints the value
>>> def mycallback(adr, notification, user):
>>> contents = notification.contents
>>> value = next(
>>> map(int,
>>> bytearray(contents.data)[0:contents.cbSampleSize])
>>> )
>>> print(value)
>>>
>>> with plc:
>>> # Add notification with default settings
>>> attr = pyads.NotificationAttrib(size_of(pyads.PLCTYPE_INT))
>>>
>>> hnotification, huser = plc.add_device_notification(
>>> adr, attr, mycallback)
>>>
>>> # Remove notification
>>> plc.del_device_notification(hnotification, huser) |
385,747 | def all(self, fields=None, include_fields=True, page=None, per_page=None, extra_params=None):
params = extra_params or {}
params['fields'] = fields and ','.join(fields) or None
params['include_fields'] = str(include_fields).lower()
params['page'] = page
params['per_page'] = per_page
return self.client.get(self._url(), params=params) | Retrieves a list of all the applications.
Important: The client_secret and encryption_key attributes can only be
retrieved with the read:client_keys scope.
Args:
fields (list of str, optional): A list of fields to include or
exclude from the result (depending on include_fields). Empty to
retrieve all fields.
include_fields (bool, optional): True if the fields specified are
to be included in the result, False otherwise.
page (int): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
extra_params (dictionary, optional): The extra parameters to add to
the request. The fields, include_fields, page and per_page values
specified as parameters take precedence over the ones defined here.
See: https://auth0.com/docs/api/management/v2#!/Clients/get_clients |
385,748 | def connect(host=None,
port=rethinkdb.DEFAULT_PORT,
timeout=20,
verify=True,
**kwargs):
if not host:
host = DEFAULT_HOSTS.get(check_stage_env())
connection = None
tries = 0
time_quit = time() + timeout
while not connection and time() <= time_quit:
tries += 1
connection = _attempt_connect(host, port, timeout/3, verify, **kwargs)
if not connection:
sleep(0.5)
if not connection:
raise BrainNotReady(
"Tried ({}:{}) {} times at {} second max timeout".format(host,
port,
tries,
timeout))
return connection | RethinkDB semantic connection wrapper
raises <brain.connection.BrainNotReady> if connection verification fails
:param verify: <bool> (default True) whether to run POST
:param timeout: <int> max time (s) to wait for connection
:param kwargs: <dict> passthrough rethinkdb arguments
:return: |
385,749 | def p_function_declaration(self, p):
if len(p) == 8:
p[0] = ast.FuncDecl(
identifier=p[2], parameters=None, elements=p[6])
else:
p[0] = ast.FuncDecl(
identifier=p[2], parameters=p[4], elements=p[7]) | function_declaration \
: FUNCTION identifier LPAREN RPAREN LBRACE function_body RBRACE
| FUNCTION identifier LPAREN formal_parameter_list RPAREN LBRACE \
function_body RBRACE |
385,750 | def filter_cat(self, axis, cat_index, cat_name):
run_filter.filter_cat(self, axis, cat_index, cat_name) | Filter the matrix based on their category. cat_index is the index of the category, the first category has index=1. |
def shewhart(self, data: ['SASdata', str] = None,
boxchart: str = None,
cchart: str = None,
irchart: str = None,
mchart: str = None,
mrchart: str = None,
npchart: str = None,
pchart: str = None,
rchart: str = None,
schart: str = None,
uchart: str = None,
xrchart: str = None,
xschart: str = None,
procopts: str = None,
stmtpassthrough: str = None,
**kwargs: dict) -> 'SASresults':
| Python method to call the SHEWHART procedure
Documentation link:
https://go.documentation.sas.com/?cdcId=pgmsascdc&cdcVersion=9.4_3.4&docsetId=qcug&docsetTarget=qcug_shewhart_toc.htm&locale=en
:param data: SASdata object or string. This parameter is required.
:parm boxchart: The boxchart variable can only be a string type.
:parm cchart: The cchart variable can only be a string type.
:parm irchart: The irchart variable can only be a string type.
:parm mchart: The mchart variable can only be a string type.
:parm mrchart: The mrchart variable can only be a string type.
:parm npchart: The npchart variable can only be a string type.
:parm pchart: The pchart variable can only be a string type.
:parm rchart: The rchart variable can only be a string type.
:parm schart: The schart variable can only be a string type.
:parm uchart: The uchart variable can only be a string type.
:parm xrchart: The xrchart variable can only be a string type.
:parm xschart: The xschart variable can only be a string type.
:parm procopts: The procopts variable is a generic option available for advanced use. It can only be a string type.
:parm stmtpassthrough: The stmtpassthrough variable is a generic option available for advanced use. It can only be a string type.
:return: SAS Result Object |
385,752 | def save_config(self, cmd="save config", confirm=False, confirm_response=""):
return super(ExtremeVspSSH, self).save_config(
cmd=cmd, confirm=confirm, confirm_response=confirm_response
) | Save Config |
385,753 | def generate_n_vectors(N_max, dx=1, dy=1, dz=1, half_lattice=True):
vecs = np.meshgrid(np.arange(-N_max, N_max+1, dx),
np.arange(-N_max, N_max+1, dy),
np.arange(-N_max, N_max+1, dz))
vecs = np.vstack(map(np.ravel, vecs)).T
vecs = vecs[np.linalg.norm(vecs, axis=1) <= N_max]
if half_lattice:
ix = ((vecs[:, 2] > 0) |
((vecs[:, 2] == 0) &
(vecs[:, 1] > 0)) |
((vecs[:, 2] == 0) &
(vecs[:, 1] == 0) &
(vecs[:, 0] > 0)))
vecs = vecs[ix]
vecs = np.array(sorted(vecs, key=lambda x: (x[0], x[1], x[2])))
return vecs | r"""
Generate integer vectors, :math:`\boldsymbol{n}`, with
:math:`|\boldsymbol{n}| < N_{\rm max}`.
If ``half_lattice=True``, only return half of the three-dimensional
lattice. If the set N = {(i,j,k)} defines the lattice, we restrict to
the cases such that ``(k > 0)``, ``(k = 0, j > 0)``, and
``(k = 0, j = 0, i > 0)``.
.. todo::
Return shape should be (3,N) to be consistent.
Parameters
----------
N_max : int
Maximum norm of the integer vector.
dx : int
Step size in x direction. Set to 1 for odd and even terms, set
to 2 for just even terms.
dy : int
Step size in y direction. Set to 1 for odd and even terms, set
to 2 for just even terms.
dz : int
Step size in z direction. Set to 1 for odd and even terms, set
to 2 for just even terms.
half_lattice : bool (optional)
Only return half of the 3D lattice.
Returns
-------
vecs : :class:`numpy.ndarray`
A 2D array of integers with :math:`|\boldsymbol{n}| < N_{\rm max}`
with shape (N,3). |
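As a concrete illustration of the half-lattice filter above, with ``N_max=1`` and the default step sizes only three unit vectors survive the norm and half-lattice cuts (a sketch of the expected output, assuming the function is in scope):
generate_n_vectors(N_max=1)
# array([[0, 0, 1],
#        [0, 1, 0],
#        [1, 0, 0]])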
385,754 | def set_voltage(self, value, channel=1):
cmd = "V%d %f" % (channel, value)
self.write(cmd) | channel: 1=OP1, 2=OP2, AUX is not supported |
385,755 | def ToRequest(self):
param = {}
if self.email:
param[] = self.email
if self.user_id:
param[] = self.user_id
if self.name:
param[] = self.name
if self.photo_url:
param[] = self.photo_url
if self.email_verified is not None:
param[] = self.email_verified
if self.password_hash:
param[] = base64.urlsafe_b64encode(self.password_hash)
if self.salt:
param[] = base64.urlsafe_b64encode(self.salt)
if self.provider_info:
param[] = self.provider_info
return param | Converts to gitkit api request parameter dict.
Returns:
Dict, containing non-empty user attributes. |
385,756 | def partial_derivative(self, X, y=0):
self.check_fit()
U, V = self.split_matrix(X)
if self.theta == 1:
return V
else:
t1 = np.power(-np.log(U), self.theta)
t2 = np.power(-np.log(V), self.theta)
p1 = self.cumulative_distribution(X)
p2 = np.power(t1 + t2, -1 + 1.0 / self.theta)
p3 = np.power(-np.log(V), self.theta - 1)
return np.divide(np.multiply(np.multiply(p1, p2), p3), V) - y | Compute partial derivative :math:`C(u|v)` of cumulative density.
Args:
X: `np.ndarray`
y: `float`
Returns: |
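Written out, the quantity computed above (before subtracting ``y``) is :math:`\partial C(u,v)/\partial v = C(u,v)\,\big((-\ln u)^{\theta} + (-\ln v)^{\theta}\big)^{1/\theta - 1}\,(-\ln v)^{\theta - 1}/v`, i.e. the conditional distribution :math:`C(u \mid v)` of the Gumbel copula, which follows from differentiating :math:`C(u,v) = \exp\{-[(-\ln u)^{\theta} + (-\ln v)^{\theta}]^{1/\theta}\}` with respect to :math:`v`.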
385,757 | def wmean_and_var_str_array(W, x):
m = np.empty(shape=x.shape[1:], dtype=x.dtype)
v = np.empty_like(m)
for p in x.dtype.names:
m[p], v[p] = wmean_and_var(W, x[p]).values()
return {'mean': m, 'var': v} | Weighted mean and variance of each component of a structured array.
Parameters
----------
W: (N,) ndarray
normalised weights (must be >=0 and sum to one).
x: (N,) structured array
data
Returns
-------
dictionary
{'mean':weighted_means, 'var':weighted_variances} |
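To make the return shape concrete, here is a tiny hedged example; it assumes ``wmean_and_var`` from the same module is available, and the numbers are illustrative only.
import numpy as np
x = np.array([(1.0,), (3.0,)], dtype=[('a', 'f8')])  # structured array with one field
W = np.array([0.5, 0.5])                             # normalised weights
wmean_and_var_str_array(W, x)
# -> {'mean': m, 'var': v} with m['a'] == 2.0 and v['a'] == 1.0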
385,758 | def is_quota_exceeded(self) -> bool:
if self.quota and self._url_table is not None:
return self.size >= self.quota and \
self._url_table.get_root_url_todo_count() == 0 | Return whether the quota is exceeded. |
385,759 | def wanmen_download_by_course_topic_part(json_api_content, tIndex, pIndex, output_dir=, merge=True, info_only=False, **kwargs):
html = json_api_content
title = _wanmen_get_title_by_json_topic_part(html,
tIndex,
pIndex)
bokeccID = _wanmen_get_boke_id_by_json_topic_part(html,
tIndex,
pIndex)
bokecc_download_by_id(vid = bokeccID, title = title, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs) | int, int, int->None
Download ONE PART of the course. |
385,760 | def filler(self):
if not self.filled:
raise SlotNotFilledError(
% (self.name, self.key))
return self._filler_pipeline_key.name() | Returns the pipeline ID that filled this slot's value.
Returns:
A string that is the pipeline ID.
Raises:
SlotNotFilledError if the value hasn't been filled yet. |
385,761 | def _get_span(self, m):
return (m.sentence.id, m.char_start, m.char_end) | Gets a tuple that identifies a span for the specific mention class
that m belongs to. |
385,762 | async def stepper_config(self, steps_per_revolution, stepper_pins):
data = [PrivateConstants.STEPPER_CONFIGURE, steps_per_revolution & 0x7f,
(steps_per_revolution >> 7) & 0x7f]
for pin in range(len(stepper_pins)):
data.append(stepper_pins[pin])
await self._send_sysex(PrivateConstants.STEPPER_DATA, data) | Configure stepper motor prior to operation.
This is a FirmataPlus feature.
:param steps_per_revolution: number of steps per motor revolution
:param stepper_pins: a list of control pin numbers - either 4 or 2
:returns: No return value. |
385,763 | def _mod_spec(self):
mod_spec={}
for mod, sizes in self.int_len_mod.items():
for conv in self.int_sign['signed']:
mod_spec[mod + conv] = sizes[0]
for conv in self.int_sign['unsigned']:
mod_spec[mod + conv] = sizes[1]
return mod_spec | Modified length specifiers: mapping between length modifiers and conversion specifiers. This generates all the
possibilities, i.e. hhd, etc. |
385,764 | def _check_max_running(self, func, data, opts, now):
if not data[]:
return data
if not in data or data[]:
jobcount = 0
if self.opts[] == :
current_jobs = salt.utils.master.get_running_jobs(self.opts)
else:
current_jobs = salt.utils.minion.running(self.opts)
for job in current_jobs:
if in job:
log.debug(
, func, job
)
if data[] == job[] \
and salt.utils.process.os_is_running(job[]):
jobcount += 1
log.debug(
,
jobcount, data[]
)
if jobcount >= data[]:
log.debug(
,
data[], data[]
)
data[] =
data[] = True
data[] = now
data[] = False
return data
return data | Return the schedule data structure |
385,765 | def make_app(global_conf, full_stack=True, **app_conf):
app = make_base_app(global_conf, full_stack=True, **app_conf)
from depot.manager import DepotManager
app = DepotManager.make_middleware(app)
return app | Set depotexample up with the settings found in the PasteDeploy configuration
file used.
:param global_conf: The global settings for depotexample (those
defined under the ``[DEFAULT]`` section).
:type global_conf: dict
:param full_stack: Should the whole TG2 stack be set up?
:type full_stack: str or bool
:return: The depotexample application with all the relevant middleware
loaded.
This is the PasteDeploy factory for the depotexample application.
``app_conf`` contains all the application-specific settings (those defined
under ``[app:main]``). |
385,766 | def get_trans(self, out_vec=None):
if out_vec:
return out_vec.set(*self.data[3][:3])
return Vec3(*self.data[3][:3]) | Return the translation portion of the matrix as a vector.
If out_vec is provided, store in out_vec instead of creating a new Vec3. |
385,767 | def set_hot_pluggable_for_device(self, name, controller_port, device, hot_pluggable):
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
if not isinstance(controller_port, baseinteger):
raise TypeError("controller_port can only be an instance of type baseinteger")
if not isinstance(device, baseinteger):
raise TypeError("device can only be an instance of type baseinteger")
if not isinstance(hot_pluggable, bool):
raise TypeError("hot_pluggable can only be an instance of type bool")
self._call("setHotPluggableForDevice",
in_p=[name, controller_port, device, hot_pluggable]) | Sets a flag in the device information which indicates that the attached
device is hot pluggable or not. This may or may not be supported by a
particular controller and/or drive, and is silently ignored in the
latter case. Changing the setting while the VM is running is forbidden.
The device must already exist; see :py:func:`IMachine.attach_device`
for how to attach a new device.
The @a controllerPort and @a device parameters specify the device slot and
have the same meaning as with :py:func:`IMachine.attach_device`.
in name of type str
Name of the storage controller.
in controller_port of type int
Storage controller port.
in device of type int
Device slot in the given port.
in hot_pluggable of type bool
New value for the hot-pluggable device flag.
raises :class:`OleErrorInvalidarg`
SATA device, SATA port, IDE port or IDE slot out of range.
raises :class:`VBoxErrorInvalidObjectState`
Attempt to modify an unregistered virtual machine.
raises :class:`VBoxErrorInvalidVmState`
Invalid machine state.
raises :class:`VBoxErrorNotSupported`
Controller doesn't support hot plugging. |
385,768 | def format_args(self, args):
def format_arg(a):
if isinstance(a, dbus.Boolean):
return str(bool(a))
if isinstance(a, dbus.Byte):
return str(int(a))
if isinstance(a, int) or isinstance(a, long):
return str(a)
if isinstance(a, str):
return '"' + str(a) + '"'
if isinstance(a, unicode):
return '"' + repr(a.encode())[1:-1] + '"'
if isinstance(a, list):
return '[' + ', '.join([format_arg(x) for x in a]) + ']'
if isinstance(a, dict):
fmta = '{'
first = True
for k, v in a.items():
if first:
first = False
else:
fmta += ', '
fmta += format_arg(k) + ': ' + format_arg(v)
return fmta + '}'
return repr(a)
s = ''
for a in args:
if s:
s += ' '
s += format_arg(a)
if s:
s = ' ' + s
return s
return s | Format a D-Bus argument tuple into an appropriate logging string. |
385,769 | def create_empty_dataset(self, dataset_id="", project_id="",
dataset_reference=None):
if dataset_reference:
_validate_value('dataset_reference', dataset_reference, dict)
else:
dataset_reference = {}
if "datasetReference" not in dataset_reference:
dataset_reference["datasetReference"] = {}
if not dataset_reference["datasetReference"].get("datasetId") and not dataset_id:
raise ValueError(
"{} not provided datasetId. Impossible to create dataset")
dataset_required_params = [(dataset_id, "datasetId", ""),
(project_id, "projectId", self.project_id)]
for param_tuple in dataset_required_params:
param, param_name, param_default = param_tuple
if param_name not in dataset_reference['datasetReference']:
if param_default and not param:
self.log.info(
"%s was not specified. Will be used default value %s.",
param_name, param_default
)
param = param_default
dataset_reference['datasetReference'].update(
{param_name: param})
elif param:
_api_resource_configs_duplication_check(
param_name, param,
dataset_reference['datasetReference'], 'dataset_reference')
dataset_id = dataset_reference.get("datasetReference").get("datasetId")
dataset_project_id = dataset_reference.get("datasetReference").get(
"projectId")
self.log.info(, dataset_id,
dataset_project_id)
try:
self.service.datasets().insert(
projectId=dataset_project_id,
body=dataset_reference).execute(num_retries=self.num_retries)
self.log.info(
, dataset_project_id, dataset_id)
except HttpError as err:
raise AirflowException(
.format(err.content)
) | Create a new empty dataset:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
:param project_id: The name of the project where we want to create
an empty dataset. Don't need to provide, if projectId in dataset_reference.
:type project_id: str
:param dataset_id: The id of dataset. Don't need to provide,
if datasetId in dataset_reference.
:type dataset_id: str
:param dataset_reference: Dataset reference that could be provided
with request body. More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:type dataset_reference: dict |
385,770 | def add_time_step(self, **create_time_step_kwargs):
ts = time_step.TimeStep.create_time_step(**create_time_step_kwargs)
assert isinstance(ts, time_step.TimeStep)
self._time_steps.append(ts) | Creates a time-step and appends it to the list.
Args:
**create_time_step_kwargs: Forwarded to
time_step.TimeStep.create_time_step. |
385,771 | def absent(name,
**kwargs):
ret = {'name': name,
'result': True,
'changes': {},
'comment': []}
if __salt__['jenkins.job_exists'](name):
try:
__salt__['jenkins.delete_job'](name)
except CommandExecutionError as exc:
return _fail(ret, exc.strerror)
else:
ret['comment'] = 'Job \'{0}\' deleted.'.format(name)
else:
ret['comment'] = 'Job \'{0}\' already absent.'.format(name)
return ret | Ensure the job is absent from the Jenkins configured jobs
name
The name of the Jenkins job to remove |
385,772 | def status(name, init_system, verbose):
try:
status = Serv(init_system, verbose=verbose).status(name)
except ServError as ex:
sys.exit(ex)
click.echo(json.dumps(status, indent=4, sort_keys=True)) | WIP! Try at your own expense |
385,773 | def from_schema(cls, schema, handlers={}, **kwargs):
return cls(
schema.get(, schema.get(, )) if isinstance(schema, dict) else ,
schema,
handlers=handlers,
**kwargs
) | Construct a resolver from a JSON schema object. |
385,774 | def get_argument_parser():
file_mv = cli.file_mv
desc =
parser = cli.get_argument_parser(desc)
parser.add_argument(
, , type=str, required=True, metavar=file_mv,
help=
)
parser.add_argument(
, , type=str, required=True, metavar=file_mv,
help=
)
parser.add_argument(
, action=,
help=
)
parser.add_argument(
, , type=str, required=True, metavar=file_mv,
help=
)
cli.add_reporting_args(parser)
return parser | Function to obtain the argument parser.
Returns
-------
A fully configured `argparse.ArgumentParser` object.
Notes
-----
This function is used by the `sphinx-argparse` extension for sphinx. |
385,775 | def _compress(self, data, operation):
original_output_size = int(
math.ceil(len(data) + (len(data) >> 2) + 10240)
)
available_out = ffi.new("size_t *")
available_out[0] = original_output_size
output_buffer = ffi.new("uint8_t []", available_out[0])
ptr_to_output_buffer = ffi.new("uint8_t **", output_buffer)
input_size = ffi.new("size_t *", len(data))
input_buffer = ffi.new("uint8_t []", data)
ptr_to_input_buffer = ffi.new("uint8_t **", input_buffer)
rc = lib.BrotliEncoderCompressStream(
self._encoder,
operation,
input_size,
ptr_to_input_buffer,
available_out,
ptr_to_output_buffer,
ffi.NULL
)
if rc != lib.BROTLI_TRUE:
raise Error("Error encountered compressing data.")
assert not input_size[0]
size_of_output = original_output_size - available_out[0]
return ffi.buffer(output_buffer, size_of_output)[:] | This private method compresses some data in a given mode. This is used
because almost all of the code uses the exact same setup. It wouldn't
have to, but it doesn't hurt at all. |
385,776 | def plot_surface(x, y, z, color=default_color, wrapx=False, wrapy=False):
return plot_mesh(x, y, z, color=color, wrapx=wrapx, wrapy=wrapy, wireframe=False) | Draws a 2d surface in 3d, defined by the 2d ordered arrays x,y,z.
:param x: {x2d}
:param y: {y2d}
:param z: {z2d}
:param color: {color2d}
:param bool wrapx: when True, the x direction is assumed to wrap, and polygons are drawn between the end end begin points
:param bool wrapy: simular for the y coordinate
:return: :any:`Mesh` |
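A short usage sketch of the surface plot above; the grid and the sin function are illustrative, and the example assumes the ipyvolume-style API that the docstring placeholders suggest:
import numpy as np
X, Y = np.meshgrid(np.linspace(-2, 2, 50), np.linspace(-2, 2, 50))
Z = np.sin(X * Y)
mesh = plot_surface(X, Y, Z)  # draws the 2D grid as a 3D surface and returns a Mesh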
385,777 | def from_url(cls, reactor, url, key, alg=RS256, jws_client=None):
action = LOG_ACME_CONSUME_DIRECTORY(
url=url, key_type=key.typ, alg=alg.name)
with action.context():
check_directory_url_type(url)
jws_client = _default_client(jws_client, reactor, key, alg)
return (
DeferredContext(jws_client.get(url.asText()))
.addCallback(json_content)
.addCallback(messages.Directory.from_json)
.addCallback(
tap(lambda d: action.add_success_fields(directory=d)))
.addCallback(cls, reactor, key, jws_client)
.addActionFinish()) | Construct a client from an ACME directory at a given URL.
:param url: The ``twisted.python.url.URL`` to fetch the directory from.
See `txacme.urls` for constants for various well-known public
directories.
:param reactor: The Twisted reactor to use.
:param ~josepy.jwk.JWK key: The client key to use.
:param alg: The signing algorithm to use. Needs to be compatible with
the type of key used.
:param JWSClient jws_client: The underlying client to use, or ``None``
to construct one.
:return: The constructed client.
:rtype: Deferred[`Client`] |
385,778 | def open(self, inf, psw):
if inf.file_redir:
if inf.file_redir[0] in (RAR5_XREDIR_FILE_COPY, RAR5_XREDIR_HARD_LINK):
inf = self.getinfo(inf.file_redir[2])
if not inf:
raise BadRarFile()
if inf.flags & RAR_FILE_SPLIT_BEFORE:
raise NeedFirstVolume("Partial file, please start from first volume: " + inf.filename)
use_hack = 1
if not self._main:
use_hack = 0
elif self._main._must_disable_hack():
use_hack = 0
elif inf._must_disable_hack():
use_hack = 0
elif is_filelike(self._rarfile):
pass
elif inf.file_size > HACK_SIZE_LIMIT:
use_hack = 0
elif not USE_EXTRACT_HACK:
use_hack = 0
if inf.compress_type == RAR_M0 and (inf.flags & RAR_FILE_PASSWORD) == 0 and inf.file_redir is None:
return self._open_clear(inf)
elif use_hack:
return self._open_hack(inf, psw)
elif is_filelike(self._rarfile):
return self._open_unrar_membuf(self._rarfile, inf, psw)
else:
return self._open_unrar(self._rarfile, inf, psw) | Return stream object for file data. |
385,779 | def get_node_attribute(self, node, attribute_name):
if not self.has_node(node):
raise ValueError("No such node exists.")
elif attribute_name not in self._node_attributes[node]:
raise ValueError("No such attribute exists.")
else:
return copy.\
copy(self._node_attributes[node][attribute_name]) | Given a node and the name of an attribute, get a copy
of that node's attribute.
:param node: reference to the node to retrieve the attribute of.
:param attribute_name: name of the attribute to retrieve.
:returns: attribute value of the attribute_name key for the
specified node.
:raises: ValueError -- No such node exists.
:raises: ValueError -- No such attribute exists. |
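A hedged usage sketch for the accessor above; `Hypergraph`, `add_node`, and `add_node_attribute` are assumed names for the owning class and its mutators, not confirmed by the source.
g = Hypergraph()                           # assumed owning class
g.add_node('a')                            # assumed mutator
g.add_node_attribute('a', 'weight', 3)     # assumed mutator
w = g.get_node_attribute('a', 'weight')    # returns a copy of the stored value, not a reference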
385,780 | def list_projects(self):
data = self._run(
url_path="projects/list"
)
projects = data[].get(, [])
return [self._project_formatter(item) for item in projects] | Returns the list of projects owned by user. |
385,781 | def reportResourceUsage(imageObjectList, outwcs, num_cores,
interactive=False):
from . import imageObject
if outwcs is None:
output_mem = 0
else:
if isinstance(outwcs,imageObject.WCSObject):
owcs = outwcs.final_wcs
else:
owcs = outwcs
output_mem = np.prod(owcs.pixel_shape) * 4 * 3
img1 = imageObjectList[0]
numchips = 0
input_mem = 0
for img in imageObjectList:
numchips += img._nmembers
pool_size = util.get_pool_size(num_cores, None)
pool_size = pool_size if (numchips >= pool_size) else numchips
inimg = 0
chip_mem = 0
for img in imageObjectList:
for chip in range(1,img._numchips+1):
cmem = img[chip].shape[0]*img[chip].shape[1]*4
inimg += 1
if inimg < pool_size:
input_mem += cmem*2
if chip_mem == 0:
chip_mem = cmem
max_mem = (input_mem + output_mem*pool_size + chip_mem*2)//(1024*1024)
# message literals below were stripped in the source; reconstructed approximately
print('*' * 80)
print('*')
print('*  Estimated memory usage:  up to %d Mb.' % (max_mem))
print('*  Output image size:       {:d} X {:d} pixels. '.format(*owcs.pixel_shape))
print('*  Output image file:       ~ %d Mb. ' % (output_mem // (1024 * 1024)))
print('*  Cores available:         %d' % (pool_size))
print('*')
print('*' * 80)
if interactive:
print()
while True:
if sys.version_info[0] >= 3:
k = input("(y)es or (n)o").strip()[0].lower()
else:
k = raw_input("(y)es or (n)o").strip()[0].lower()
if k not in ['y', 'n']:    # literals restored: the prompt accepts (y)es or (n)o
continue
if k == 'n':
raise KeyboardInterrupt("Execution aborted") | Provide some information to the user on the estimated resource
usage (primarily memory) for this run. |
385,782 | def _gen_vol_xml(vmname,
diskname,
disktype,
size,
pool):
size = int(size) * 1024
# key names and literals below were stripped in the source; reconstructed from the
# surrounding values and salt's libvirt volume template (an assumption, not verified)
context = {
'name': vmname,
'filename': '{0}.{1}'.format(diskname, disktype),
'volname': diskname,
'disktype': disktype,
'size': six.text_type(size),
'pool': pool,
}
fn_ = 'libvirt_volume.jinja'
try:
template = JINJA.get_template(fn_)
except jinja2.exceptions.TemplateNotFound:
log.error('Could not load template %s', fn_)
return
return template.render(**context) | Generate the XML string to define a libvirt storage volume |
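A hedged call sketch for the helper above; the argument values are illustrative only, and the size is scaled by 1024 inside the helper before rendering.
vol_xml = _gen_vol_xml('vm01', 'system', 'qcow2', 8, 'default')   # volume for VM 'vm01' in pool 'default'
print(vol_xml)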
385,783 | def deleteByteArray(self, context, page, returnError):
returnError.contents.value = self.IllegalStateError
raise NotImplementedError("You must override this method.") | please override |
385,784 | def unwrap(s, node_indent):
def get_indent():
if line_str.startswith():
return node_indent
return len(re.match(r"^( *)", line_str).group(1))
def finish_block():
if block_list:
unwrap_list.append(
(block_indent, (" ".join([v.strip() for v in block_list])).strip())
)
block_list.clear()
unwrap_list = []
block_indent = None
block_list = []
for line_str in s.splitlines():
line_str = line_str.rstrip()
line_indent = get_indent()
if not block_list:
block_indent = line_indent
if line_str == "":
finish_block()
if line_indent < block_indent:
finish_block()
elif line_str.strip().startswith(("- ", "* ")):
finish_block()
elif line_str.strip().endswith(":"):
finish_block()
block_list.append(line_str)
if not block_list[0].strip().startswith(("- ", "* ")):
finish_block()
finish_block()
return unwrap_list | Group lines of a docstring to blocks.
For now, only groups markdown list sections.
A block designates a list of consecutive lines that all start at the same
indentation level.
The lines of the docstring are iterated top to bottom. Each line is added to
`block_list` until a line is encountered that breaks sufficiently with the previous
line to be deemed to be the start of a new block. At that point, all lines
currently in `block_list` are stripped and joined to a single line, which is added to
`unwrap_list`.
Some of the block breaks are easy to determine. E.g., a line that starts with "- "
is the start of a new markdown style list item, so is always the start of a new
block. But then there are things like this, which is a single block:
- An example list with a second line
And this, which is 3 single line blocks (due to the different indentation levels):
Args:
jwt_bu64: bytes
JWT, encoded using a URL safe flavor of Base64. |
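A small usage sketch for unwrap(); the sample docstring is illustrative, and it assumes the original (unstripped) version of the function, since the startswith() argument inside get_indent() is missing in the code as shown.
sample = (
    "Args:\n"
    "  jwt_bu64: bytes\n"
    "    JWT, encoded using a URL safe flavor of Base64.\n"
    "  - first bullet\n"
    "    continued on a second line\n"
)
for indent, text in unwrap(sample, node_indent=0):
    print(indent, repr(text))    # each item is (block_indent, joined_block_text)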
385,785 | def writeto(fpath, to_write, aslines=False, verbose=None):
r
if verbose:
print('Writing to: %r' % (fpath,))    # message literal stripped in the source; reconstructed
with open(fpath, 'wb') as file:       # binary mode, since the payload is encoded to bytes below
if aslines:
to_write = map(_ensure_bytes, to_write)
file.writelines(to_write)
else:
bytes = _ensure_bytes(to_write)
file.write(bytes) | r"""
Writes (utf8) text to a file.
Args:
fpath (PathLike): file path
to_write (str): text to write (must be unicode text)
aslines (bool): if True to_write is assumed to be a list of lines
verbose (bool): verbosity flag
CommandLine:
python -m ubelt.util_io writeto --verbose
Example:
>>> import ubelt as ub
>>> dpath = ub.ensure_app_cache_dir('ubelt')
>>> fpath = dpath + '/' + 'testwrite.txt'
>>> if exists(fpath):
>>> os.remove(fpath)
>>> to_write = 'utf-8 symbols Δ, Й, ק, م, ๗, あ, 叶, 葉, and 말.'
>>> writeto(fpath, to_write)
>>> read_ = ub.readfrom(fpath)
>>> print('read_ = ' + read_)
>>> print('to_write = ' + to_write)
>>> assert read_ == to_write
Example:
>>> import ubelt as ub
>>> dpath = ub.ensure_app_cache_dir('ubelt')
>>> fpath = dpath + '/' + 'testwrite2.txt'
>>> if exists(fpath):
>>> os.remove(fpath)
>>> to_write = ['a\n', 'b\n', 'c\n', 'd\n']
>>> writeto(fpath, to_write, aslines=True)
>>> read_ = ub.readfrom(fpath, aslines=True)
>>> print('read_ = {}'.format(read_))
>>> print('to_write = {}'.format(to_write))
>>> assert read_ == to_write |
385,786 | def compute_hkdf(ikm, salt):
prk = hmac.new(salt, ikm, hashlib.sha256).digest()
info_bits_update = info_bits + bytearray(chr(1), 'utf-8')
hmac_hash = hmac.new(prk, info_bits_update, hashlib.sha256).digest()
return hmac_hash[:16] | Standard hkdf algorithm
:param {Buffer} ikm Input key material.
:param {Buffer} salt Salt value.
:return {Buffer} Strong key material.
@private |
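A hedged check of the key-derivation helper; it assumes compute_hkdf and the module-level constant info_bits live in the current namespace, and the constant's value shown here is the usual AWS SRP label (an assumption, not confirmed by the source).
import hashlib
import hmac

info_bits = bytearray('Caldera Derived Key', 'utf-8')   # assumed value of the module constant

ikm = b'\x01' * 32    # e.g. padded SRP shared-secret bytes
salt = b'\x02' * 16
key = compute_hkdf(ikm, salt)
assert len(key) == 16   # the helper truncates the HMAC-SHA256 output to 16 bytes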
385,787 | def fromJavascript(cls, javascript_datetime):
title =
jsDTpattern = re.compile()
jsGMTpattern = re.compile()
if not jsGMTpattern.findall(javascript_datetime):
raise Exception( % title)
adj_input = jsDTpattern.sub(, javascript_datetime)
if in adj_input:
adj_input = adj_input.replace(,)
elif in adj_input:
adj_input = adj_input.replace(,)
python_datetime = dTparser.parse(adj_input)
dT = python_datetime.astimezone(pytz.utc)
dt_kwargs = {
'year': dT.year,
'month': dT.month,
'day': dT.day,
'hour': dT.hour,
'minute': dT.minute,
'second': dT.second,
'microsecond': dT.microsecond,
'tzinfo': dT.tzinfo
}
return labDT(**dt_kwargs) | a method to construct labDT from a javascript datetime string
:param javascript_datetime: string with datetime info in javascript formatting
:return: labDT object |
385,788 | def find_win32_generator():
ver = int(m.group(1))
return ver >= 14
generators = list(filter(drop_old_vs, generators))
generators.append('Visual Studio 14 2015' + (' Win64' if is_64bit else ''))  # literals stripped in the source; reconstructed (generator name is an assumption)
for generator in generators:
build_dir = tempfile.mkdtemp()
print("Trying generator %r" % (generator,))
try:
try_cmake(cmake_dir, build_dir, generator)
except subprocess.CalledProcessError:
continue
else:
return generator
finally:
shutil.rmtree(build_dir)
raise RuntimeError("No compatible cmake generator installed on this machine") | Find a suitable cmake "generator" under Windows. |
385,789 | def walker(self, path=None, base_folder=None):
path = path or self.path or ''
base_folder = base_folder or self.base_folder
path = os.path.normpath(upath(path))
if base_folder:
base_folder = os.path.normpath(upath(base_folder))
print("The directory structure will be imported in %s" % (base_folder,))
if self.verbosity >= 1:
print("Import the folders and files in %s" % (path,))
root_folder_name = os.path.basename(path)
for root, dirs, files in os.walk(path):
rel_folders = root.partition(path)[2].strip(os.path.sep).split(os.path.sep)
while '' in rel_folders:
rel_folders.remove('')
if base_folder:
folder_names = base_folder.split('/') + [root_folder_name] + rel_folders
else:
folder_names = [root_folder_name] + rel_folders
folder = self.get_or_create_folder(folder_names)
for file_obj in files:
dj_file = DjangoFile(open(os.path.join(root, file_obj), mode='rb'),
name=file_obj)
self.import_file(file_obj=dj_file, folder=folder)
if self.verbosity >= 1:
print(('folder_created #%s / file_created #%s / ' + 'image_created #%s') % (self.folder_created, self.file_created, self.image_created)) | This method walks a directory structure and creates the
Folders and Files as they appear. |
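A hedged sketch of driving the walker; `FileImporter` is a hypothetical name for the owning importer class, which must also provide get_or_create_folder() and import_file() and the counters printed at the end.
importer = FileImporter()              # hypothetical owner class
importer.verbosity = 1
importer.path = '/srv/incoming'
importer.base_folder = 'imports/2024'
importer.walker()                      # imports /srv/incoming under imports/2024/incoming/...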
385,790 | def list_sinks(self, project, page_size=0, page_token=None):
path = "projects/%s" % (project,)
page_iter = self._gapic_api.list_sinks(path, page_size=page_size)
page_iter.client = self._client
page_iter.next_page_token = page_token
page_iter.item_to_value = _item_to_sink
return page_iter | List sinks for the project associated with this client.
:type project: str
:param project: ID of the project whose sinks are to be listed.
:type page_size: int
:param page_size: maximum number of sinks to return, If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of sinks. If not
passed, the API will return the first page of
sinks.
:rtype: tuple, (list, str)
:returns: list of mappings, plus a "next page token" string:
if not None, indicates that more sinks can be retrieved
with another call (pass that value as ``page_token``). |
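A hedged sketch; `sinks_api` stands for whatever wrapper object exposes list_sinks() (not a verified public attribute), and the project ID is illustrative.
iterator = sinks_api.list_sinks('my-project', page_size=50)
for sink in iterator:                 # yields sink mappings, per the docstring
    print(sink)
token = iterator.next_page_token      # pass back as page_token to fetch the next page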
385,791 | def plot(self, data):
import IPython
if not isinstance(data, dict) or not all(isinstance(v, pd.DataFrame) for v in data.values()):
raise ValueError()
gfsg = GenericFeatureStatisticsGenerator()
data = [{'name': k, 'table': self._remove_nonascii(v)} for k, v in six.iteritems(data)]  # key names restored to the 'name'/'table' pair expected by ProtoFromDataFrames
data_proto = gfsg.ProtoFromDataFrames(data)
protostr = base64.b64encode(data_proto.SerializeToString()).decode("utf-8")
html_id = + datalab.utils.commands.Html.next_id()
HTML_TEMPLATE =
html = HTML_TEMPLATE.format(html_id=html_id, protostr=protostr)
return IPython.core.display.HTML(html) | Plots an overview of a dictionary of dataframes
Args:
data: a dictionary with key the name, and value the dataframe. |
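A hedged usage sketch; `viewer` stands for the object that owns plot() (a facets-overview style plotter), and the frames are illustrative.
import pandas as pd

train_df = pd.DataFrame({'age': [22, 35, 58], 'income': [30, 60, 90]})
eval_df = pd.DataFrame({'age': [40, 19], 'income': [55, 25]})
viewer.plot({'train': train_df, 'eval': eval_df})   # renders one overview per named DataFrame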
385,792 | def branches(self):
if self._branches is None:
cmd = 'git branch --contains {}'.format(self.sha1)   # command literal stripped in the source; reconstructed
out = shell.run(
cmd,
capture=True,
never_pretend=True
).stdout.strip()
self._branches = [x.strip() for x in out.splitlines()]
return self._branches | List of all branches this commit is a part of. |
385,793 | def _spawn_background_rendering(self, rate=5.0):
self.render_trigger.connect(self.ren_win.Render)
twait = rate**-1
def render():
while self.active:
time.sleep(twait)
self._render()
self.render_thread = Thread(target=render)
self.render_thread.start() | Spawns a thread that updates the render window.
Sometimes directly modifying object data doesn't trigger
Modified() and upstream objects won't be updated. This
ensures the render window stays updated without consuming too
many resources. |
385,794 | def delete(self, user, commit=True):
events.user_delete_event.send(user)
return super().delete(user, commit) | Delete a user |
385,795 | def sde(self):
variance = float(self.variance.values)
lengthscale = float(self.lengthscale.values)
foo = np.sqrt(3.)/lengthscale
F = np.array([[0, 1], [-foo**2, -2*foo]])
L = np.array([[0], [1]])
Qc = np.array([[12.*np.sqrt(3) / lengthscale**3 * variance]])
H = np.array([[1, 0]])
Pinf = np.array([[variance, 0],
[0, 3.*variance/(lengthscale**2)]])
dF = np.empty([F.shape[0],F.shape[1],2])
dQc = np.empty([Qc.shape[0],Qc.shape[1],2])
dPinf = np.empty([Pinf.shape[0],Pinf.shape[1],2])
dFvariance = np.zeros([2,2])
dFlengthscale = np.array([[0,0],
[6./lengthscale**3,2*np.sqrt(3)/lengthscale**2]])
dQcvariance = np.array([12.*np.sqrt(3)/lengthscale**3])
dQclengthscale = np.array([-3*12*np.sqrt(3)/lengthscale**4*variance])
dPinfvariance = np.array([[1,0],[0,3./lengthscale**2]])
dPinflengthscale = np.array([[0,0],
[0,-6*variance/lengthscale**3]])
dF[:,:,0] = dFvariance
dF[:,:,1] = dFlengthscale
dQc[:,:,0] = dQcvariance
dQc[:,:,1] = dQclengthscale
dPinf[:,:,0] = dPinfvariance
dPinf[:,:,1] = dPinflengthscale
return (F, L, Qc, H, Pinf, dF, dQc, dPinf) | Return the state space representation of the covariance. |
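The returned matrices define a Matern-3/2 state-space model; a quick sanity check is that the stationary covariance Pinf solves the Lyapunov equation F Pinf + Pinf F^T + L Qc L^T = 0. A standalone numpy check with illustrative parameter values:
import numpy as np

variance, lengthscale = 2.0, 0.5
lam = np.sqrt(3.0) / lengthscale
F = np.array([[0.0, 1.0], [-lam ** 2, -2.0 * lam]])
L = np.array([[0.0], [1.0]])
Qc = np.array([[12.0 * np.sqrt(3.0) / lengthscale ** 3 * variance]])
Pinf = np.array([[variance, 0.0], [0.0, 3.0 * variance / lengthscale ** 2]])

residual = F @ Pinf + Pinf @ F.T + L @ Qc @ L.T
assert np.allclose(residual, 0.0)    # stationary covariance satisfies the Lyapunov equation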
385,796 | def train(cls, rdd, k, maxIterations=100, initMode="random"):
r
model = callMLlibFunc("trainPowerIterationClusteringModel",
rdd.map(_convert_to_vector), int(k), int(maxIterations), initMode)
return PowerIterationClusteringModel(model) | r"""
:param rdd:
An RDD of (i, j, s\ :sub:`ij`\) tuples representing the
affinity matrix, which is the matrix A in the PIC paper. The
similarity s\ :sub:`ij`\ must be nonnegative. This is a symmetric
matrix and hence s\ :sub:`ij`\ = s\ :sub:`ji`\ For any (i, j) with
nonzero similarity, there should be either (i, j, s\ :sub:`ij`\) or
(j, i, s\ :sub:`ji`\) in the input. Tuples with i = j are ignored,
because it is assumed s\ :sub:`ij`\ = 0.0.
:param k:
Number of clusters.
:param maxIterations:
Maximum number of iterations of the PIC algorithm.
(default: 100)
:param initMode:
Initialization mode. This can be either "random" to use
a random vector as vertex properties, or "degree" to use
normalized sum similarities.
(default: "random") |
385,797 | def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
# command literals restored (stripped in the source): ceph osd pool set-quota
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
if max_bytes:
cmd = cmd + ['max_bytes', str(max_bytes)]
if max_objects:
cmd = cmd + ['max_objects', str(max_objects)]
check_call(cmd) | :param service: The Ceph user name to run the command under
:type service: str
:param pool_name: Name of pool
:type pool_name: str
:param max_bytes: Maximum bytes quota to apply
:type max_bytes: int
:param max_objects: Maximum objects quota to apply
:type max_objects: int
:raises: subprocess.CalledProcessError |
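A hedged call sketch; pool and cephx user names are illustrative only.
# cap the pool at ~10 GiB and 100k objects, running as the 'admin' cephx user
set_pool_quota(service='admin', pool_name='glance', max_bytes=10 * 1024 ** 3)
set_pool_quota(service='admin', pool_name='glance', max_objects=100000)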
385,798 | def idle_task(self):
if self.threat_timeout_timer.trigger():
self.check_threat_timeout()
if self.threat_detection_timer.trigger():
self.perform_threat_detection() | called on idle |
385,799 | def props_to_image(regionprops, shape, prop):
r
im = sp.zeros(shape=shape)
for r in regionprops:
if prop == :
mask = r.convex_image
else:
mask = r.image
temp = mask * r[prop]
s = bbox_to_slices(r.bbox)
im[s] += temp
return im | r"""
Creates an image with each region colored according the specified ``prop``,
as obtained by ``regionprops_3d``.
Parameters
----------
regionprops : list
This is a list of properties for each region that is computed
by PoreSpy's ``regionprops_3D`` or Skimage's ``regionsprops``.
shape : array_like
The shape of the original image for which ``regionprops`` was obtained.
prop : string
The region property of interest. Can be a scalar item such as 'volume'
in which case the the regions will be colored by their respective
volumes, or can be an image-type property such as 'border' or
'convex_image', which will return an image composed of the sub-images.
Returns
-------
image : ND-array
An ND-image the same size as the original image, with each region
represented by the values specified in ``prop``.
See Also
--------
props_to_DataFrame
regionprops_3d |
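A hedged usage sketch; it assumes skimage-style regionprops objects (which support both attribute and item access) and that the helper relies on a bbox_to_slices utility available in its own module.
import numpy as np
from skimage.measure import label, regionprops

im = np.random.rand(64, 64) > 0.5
regions = regionprops(label(im))
area_map = props_to_image(regions, shape=im.shape, prop='area')   # each region colored by its area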