code / docstring pairs (code: 75 to 104k chars; docstring: 1 to 46.9k chars)
def add_final_state(self, f):
"""
:param f: int, the state qi to be added to F; epsilon is
conventionally defined as the last node (q_|S|)
"""
if f not in self.Q:
LOG.error("The specified value is invalid, f must be a member of Q")
raise InputError("The specified value is invalid, f must be a member of Q")
self.F.add(f)
def zipper(root_dir="", name="", path_name_ext=""):
"""
Zips up directory back to the original location
:param str root_dir: Root directory of the archive
:param str name: <datasetname>.lpd
:param str path_name_ext: /path/to/filename.lpd
"""
logger_zips.info("re_zip: name: {}, dir_tmp: {}".format(path_name_ext, root_dir))
# creates a zip archive in current directory. "somefile.lpd.zip"
shutil.make_archive(path_name_ext, format='zip', root_dir=root_dir, base_dir=name)
# drop the .zip extension. only keep .lpd
os.rename("{}.zip".format(path_name_ext), path_name_ext)
return
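A hedged usage sketch may make the expected arguments clearer; the dataset name and paths below are hypothetical, and the round trip assumes the archive was originally produced with the same base_dir layout.
import shutil
import tempfile

dir_tmp = tempfile.mkdtemp()
path_lpd = "/path/to/mydataset.lpd"          # hypothetical archive (zip data under the hood)
shutil.unpack_archive(path_lpd, dir_tmp, format="zip")
# ... edit files under dir_tmp/mydataset.lpd ...
zipper(root_dir=dir_tmp, name="mydataset.lpd", path_name_ext=path_lpd)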
def _get_dependencies_of(name, location=None):
'''
Returns list of first level dependencies of the given installed dap
or dap from Dapi if not installed
If a location is specified, this only checks for dap installed in that path
and return [] if the dap is not located there
'''
if not location:
detailed_dap_list = get_installed_daps_detailed()
if name not in detailed_dap_list:
return _get_api_dependencies_of(name)
location = detailed_dap_list[name][0]['location']
meta = '{d}/meta/{dap}.yaml'.format(d=location, dap=name)
try:
data = yaml.load(open(meta), Loader=Loader)
except IOError:
return []
return data.get('dependencies', [])
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'element_pair') and self.element_pair is not None:
_dict['element_pair'] = [x._to_dict() for x in self.element_pair]
if hasattr(self, 'identical_text') and self.identical_text is not None:
_dict['identical_text'] = self.identical_text
if hasattr(self, 'provenance_ids') and self.provenance_ids is not None:
_dict['provenance_ids'] = self.provenance_ids
if hasattr(self, 'significant_elements'
) and self.significant_elements is not None:
_dict['significant_elements'] = self.significant_elements
return _dict
def table_schema_call(self, target, cls):
"""Perform a table schema call.
We call the callable target with the args and keywords needed for the
table defined by cls. This is how we centralize the Table.create and
Table ctor calls.
"""
index_defs = []
for name in cls.index_names() or []:
index_defs.append(GlobalIncludeIndex(
gsi_name(name),
parts=[HashKey(name)],
includes=['value']
))
return target(
cls.get_table_name(),
connection=get_conn(),
schema=[HashKey('id')],
global_indexes=index_defs or None
)
def find_state_op_colocation_error(graph, reported_tags=None):
"""Returns error message for colocation of state ops, or None if ok."""
state_op_types = list_registered_stateful_ops_without_inputs()
state_op_map = {op.name: op for op in graph.get_operations()
if op.type in state_op_types}
for op in state_op_map.values():
for colocation_group in op.colocation_groups():
if not (colocation_group.startswith(tf.compat.as_bytes("loc:@")) and
tf.compat.as_str_any(colocation_group[5:]) in state_op_map):
tags_prefix = ("" if reported_tags is None else
"in the graph for tags %s, " % reported_tags)
return (
"A state-holding node x of a module's graph (e.g., a Variable op) "
"must not be subject to a tf.colocate_with(y) constraint "
"unless y is also a state-holding node.\n"
"Details: %snode '%s' has op '%s', which counts as state-holding, "
"but Operation.colocation_groups() == %s. " %
(tags_prefix, op.name, op.type, op.colocation_groups()))
return None
def extract_geometry(dataset):
"""Extract the outer surface of a volume or structured grid dataset as
PolyData. This will extract all 0D, 1D, and 2D cells producing the
boundary faces of the dataset.
"""
alg = vtk.vtkGeometryFilter()
alg.SetInputDataObject(dataset)
alg.Update()
return _get_output(alg)
def put(
self,
id,
name,
description,
private,
runs_executable_tasks,
runs_docker_container_tasks,
runs_singularity_container_tasks,
active,
whitelists,
):
"""Updates a task queue on the saltant server.
Args:
id (int): The ID of the task queue.
name (str): The name of the task queue.
description (str): The description of the task queue.
private (bool): A Boolean signalling whether the queue can
only be used by its associated user.
runs_executable_tasks (bool): A Boolean specifying whether
the queue runs executable tasks.
runs_docker_container_tasks (bool): A Boolean specifying
whether the queue runs container tasks that run in
Docker containers.
runs_singularity_container_tasks (bool): A Boolean
specifying whether the queue runs container tasks that
run in Singularity containers.
active (bool): A Boolean signalling whether the queue is
active.
whitelists (list): A list of task whitelist IDs.
Returns:
:class:`saltant.models.task_queue.TaskQueue`:
A task queue model instance representing the task queue
just updated.
"""
# Update the object
request_url = self._client.base_api_url + self.detail_url.format(id=id)
data_to_put = {
"name": name,
"description": description,
"private": private,
"runs_executable_tasks": runs_executable_tasks,
"runs_docker_container_tasks": runs_docker_container_tasks,
"runs_singularity_container_tasks": runs_singularity_container_tasks,
"active": active,
"whitelists": whitelists,
}
response = self._client.session.put(request_url, data=data_to_put)
# Validate that the request was successful
self.validate_request_success(
response_text=response.text,
request_url=request_url,
status_code=response.status_code,
expected_status_code=HTTP_200_OK,
)
# Return a model instance representing the task instance
return self.response_data_to_model_instance(response.json())
def registered(self, driver, executorInfo, frameworkInfo, agentInfo):
"""
Invoked once the executor driver has been able to successfully connect with Mesos.
"""
# Get the ID we have been assigned, if we have it
self.id = executorInfo.executor_id.get('value', None)
log.debug("Registered executor %s with framework", self.id)
self.address = socket.gethostbyname(agentInfo.hostname)
nodeInfoThread = threading.Thread(target=self._sendFrameworkMessage, args=[driver])
nodeInfoThread.daemon = True
nodeInfoThread.start()
def argument(self, argument_dest, arg_type=None, **kwargs):
""" Register an argument for the given command scope using a knack.arguments.CLIArgumentType
:param argument_dest: The destination argument to add this argument type to
:type argument_dest: str
:param arg_type: Predefined CLIArgumentType definition to register, as modified by any provided kwargs.
:type arg_type: knack.arguments.CLIArgumentType
:param kwargs: Possible values: `options_list`, `validator`, `completer`, `nargs`, `action`, `const`, `default`,
`type`, `choices`, `required`, `help`, `metavar`. See /docs/arguments.md.
"""
self._check_stale()
if not self._applicable():
return
deprecate_action = self._handle_deprecations(argument_dest, **kwargs)
if deprecate_action:
kwargs['action'] = deprecate_action
self.command_loader.argument_registry.register_cli_argument(self.command_scope,
argument_dest,
arg_type,
**kwargs)
def compute_metrics_cv(self, X, y, **kwargs):
'''Compute cross-validated metrics.
Trains this model on data X with labels y.
Returns a list of dict with keys name, scoring_name, value.
Args:
X (Union[np.array, pd.DataFrame]): data
y (Union[np.array, pd.DataFrame, pd.Series]): labels
'''
# compute scores
results = self.cv_score_mean(X, y)
return results
def update(self, infos):
"""Process received infos."""
for info in infos:
if isinstance(info, LearningGene):
self.replicate(info)
def make_mecard(name, reading=None, email=None, phone=None, videophone=None,
memo=None, nickname=None, birthday=None, url=None, pobox=None,
roomno=None, houseno=None, city=None, prefecture=None,
zipcode=None, country=None):
"""\
Returns a QR Code which encodes a `MeCard <https://en.wikipedia.org/wiki/MeCard>`_
:param str name: Name. If it contains a comma, the first part
is treated as lastname and the second part is treated as forename.
:param str|None reading: Designates a text string to be set as the
kana name in the phonebook
:param str|iterable email: E-mail address. Multiple values are
allowed.
:param str|iterable phone: Phone number. Multiple values are
allowed.
:param str|iterable videophone: Phone number for video calls.
Multiple values are allowed.
:param str memo: A notice for the contact.
:param str nickname: Nickname.
:param str|int|date birthday: Birthday. If a string is provided,
it should encode the date as YYYYMMDD value.
:param str|iterable url: Homepage. Multiple values are allowed.
:param str|None pobox: P.O. box (address information).
:param str|None roomno: Room number (address information).
:param str|None houseno: House number (address information).
:param str|None city: City (address information).
:param str|None prefecture: Prefecture (address information).
:param str|None zipcode: Zip code (address information).
:param str|None country: Country (address information).
:rtype: segno.QRCode
"""
return segno.make_qr(make_mecard_data(name=name, reading=reading,
email=email, phone=phone,
videophone=videophone, memo=memo,
nickname=nickname, birthday=birthday,
url=url, pobox=pobox, roomno=roomno,
houseno=houseno, city=city,
prefecture=prefecture, zipcode=zipcode,
country=country))
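For reference, a hedged usage sketch; the contact details are made up, and it assumes the segno package this helper builds on is installed.
qrcode = make_mecard(name="Doe,John",
email="jdoe@example.org",
phone=("+1-555-0100", "+1-555-0101"),
url="https://example.org",
birthday="19700101")
qrcode.save("john-doe-mecard.png", scale=4)   # segno picks the output format from the suffix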
def setResult(self, value):
"""Validate and set a value into the Result field, taking into
account the Detection Limits.
:param value: is expected to be a string.
"""
# Always update ResultCapture date when this field is modified
self.setResultCaptureDate(DateTime())
# Ensure result integrity regards to None, empty and 0 values
val = str("" if not value and value != 0 else value).strip()
# UDL/LDL directly entered in the results field
if val and val[0] in [LDL, UDL]:
# Result prefixed with LDL/UDL
oper = val[0]
# Strip off LDL/UDL from the result
val = val.replace(oper, "", 1)
# Check if the value is indeterminate / non-floatable
try:
val = float(val)
except (ValueError, TypeError):
val = value
# We dismiss the operand and the selector visibility unless the user
# is allowed to manually set the detection limit or the DL selector
# is visible.
allow_manual = self.getAllowManualDetectionLimit()
selector = self.getDetectionLimitSelector()
if allow_manual or selector:
# Ensure visibility of the detection limit selector
self.setDetectionLimitSelector(True)
# Set the detection limit operand
self.setDetectionLimitOperand(oper)
if not allow_manual:
# Override value by default DL
if oper == LDL:
val = self.getLowerDetectionLimit()
else:
val = self.getUpperDetectionLimit()
# Set the result field
self.getField("Result").set(self, val) | Validate and set a value into the Result field, taking into
account the Detection Limits.
:param value: is expected to be a string. |
def _build_payload(data):
"""
Returns the full payload as a dict.
"""
for k, v in iteritems(data):
data[k] = _transform(v, key=(k,))
payload = {
'access_token': SETTINGS['access_token'],
'data': data
}
return payload
def asizeof(self, *objs, **opts):
'''Return the combined size of the given objects
(with modified options, see method **set**).
'''
if opts:
self.set(**opts)
s, _ = self._sizes(objs, None)
return s
def main():
"""Provide the program's entry point when directly executed."""
authenticator = prawcore.TrustedAuthenticator(
prawcore.Requestor("prawcore_script_auth_example"),
os.environ["PRAWCORE_CLIENT_ID"],
os.environ["PRAWCORE_CLIENT_SECRET"],
)
authorizer = prawcore.ScriptAuthorizer(
authenticator,
os.environ["PRAWCORE_USERNAME"],
os.environ["PRAWCORE_PASSWORD"],
)
authorizer.refresh()
with prawcore.session(authorizer) as session:
data = session.request("GET", "/api/v1/me/friends")
for friend in data["data"]["children"]:
print(friend["name"])
return 0
def Pgas(rho,T,mu):
'''
P = R/mu * rho * T
Parameters
----------
mu : float
Mean molecular weight
rho : float
Density [cgs]
T : float
Temperature [K]
'''
R = old_div(boltzmann_constant, atomic_mass_unit)
return (old_div(R,mu)) * rho * T
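A quick numerical check of the formula; the cgs constants below are standard values and are assumed to match the module-level boltzmann_constant and atomic_mass_unit.
boltzmann_constant = 1.380649e-16    # erg/K
atomic_mass_unit = 1.660539e-24      # g
R = boltzmann_constant / atomic_mass_unit    # ~8.31e7 erg g^-1 K^-1

def pgas_plain(rho, T, mu):
    return (R / mu) * rho * T

# Ionized hydrogen (mu ~ 0.5) at rho = 1e-7 g/cm^3 and T = 1e6 K:
print(pgas_plain(rho=1e-7, T=1e6, mu=0.5))   # ~1.7e7 dyn/cm^2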
def periodic_send(self, content, interval, title=''):
"""
Send a periodic message.
:param content: (required|str) - content of the message to send
:param interval: (required|int|datetime.timedelta) - interval between messages, given as a datetime.timedelta or as an integer number of seconds
:param title: (optional|str) - title of the message to send
:return: * status: send status, True if the message was sent, False if sending failed
* message: details of the failure
"""
url = '{0}periodic_message'.format(self.remote)
if isinstance(interval, datetime.timedelta):
interval = int(interval.total_seconds())
if not isinstance(interval, int):
raise ValueError
data = self._wrap_post_data(title=title, content=content, interval=interval)
res = requests.post(url, data, timeout=self.timeout)
if res.status_code == requests.codes.ok:
res_data = json.loads(self._convert_bytes(res.content))
if res_data.get('status') == STATUS_SUCCESS:
return True, res_data.get('message')
return False, res_data.get('message')
res.raise_for_status()
return False, 'Request or Response Error'
def add(self, pat, fun):
r"""Add a pattern and replacement.
The pattern must not contain capturing groups.
The replacement might be either a string template in which \& will be
replaced with the match, or a function that will get the matching text
as argument. It does not get match object, because capturing is
forbidden anyway.
"""
self._pat = None
self._pats.append(pat)
self._funs.append(fun)
def publish(dataset_uri):
"""Return access URL to HTTP enabled (published) dataset.
Exits with error code 1 if the dataset_uri is not a dataset.
Exits with error code 2 if the dataset cannot be HTTP enabled.
"""
try:
dataset = dtoolcore.DataSet.from_uri(dataset_uri)
except dtoolcore.DtoolCoreTypeError:
print("Not a dataset: {}".format(dataset_uri))
sys.exit(1)
try:
access_uri = dataset._storage_broker.http_enable()
except AttributeError:
print(
"Datasets of type '{}' cannot be published using HTTP".format(
dataset._storage_broker.key)
)
sys.exit(2)
return access_uri
def loggable(obj):
"""Return "True" if the obj implements the minimum Logger API
required by the 'trace' decorator.
"""
if isinstance(obj, logging.Logger):
return True
else:
return (inspect.isclass(obj)
and inspect.ismethod(getattr(obj, 'debug', None))
and inspect.ismethod(getattr(obj, 'isEnabledFor', None))
and inspect.ismethod(getattr(obj, 'setLevel', None)))
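A small illustration of the two branches, assuming the standard library logging and inspect modules are available alongside the function above.
import logging

print(loggable(logging.getLogger("demo")))   # True: a real logging.Logger instance
print(loggable("not a logger"))              # False: neither a Logger instance nor a logger-like class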
def set_resolved_name(self, ref: dict, type_name2solve: TypeName,
type_name_ref: TypeName):
"""
Warning!!! Need to rethink it when global poly type
"""
if self.resolution[type_name2solve.value] is None:
self.resolution[type_name2solve.value] = ref[type_name_ref.value]
def nl_nlmsg_flags2str(flags, buf, _=None):
"""Netlink Message Flags Translations.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L664
Positional arguments:
flags -- integer.
buf -- bytearray().
Keyword arguments:
_ -- unused.
Returns:
Reference to `buf`.
"""
del buf[:]
all_flags = (
('REQUEST', libnl.linux_private.netlink.NLM_F_REQUEST),
('MULTI', libnl.linux_private.netlink.NLM_F_MULTI),
('ACK', libnl.linux_private.netlink.NLM_F_ACK),
('ECHO', libnl.linux_private.netlink.NLM_F_ECHO),
('ROOT', libnl.linux_private.netlink.NLM_F_ROOT),
('MATCH', libnl.linux_private.netlink.NLM_F_MATCH),
('ATOMIC', libnl.linux_private.netlink.NLM_F_ATOMIC),
('REPLACE', libnl.linux_private.netlink.NLM_F_REPLACE),
('EXCL', libnl.linux_private.netlink.NLM_F_EXCL),
('CREATE', libnl.linux_private.netlink.NLM_F_CREATE),
('APPEND', libnl.linux_private.netlink.NLM_F_APPEND),
)
print_flags = []
for k, v in all_flags:
if not flags & v:
continue
flags &= ~v
print_flags.append(k)
if flags:
print_flags.append('0x{0:x}'.format(flags))
buf.extend(','.join(print_flags).encode('ascii'))
return buf
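A dependency-free sketch of the same flags-to-string technique; the constants below use the conventional NLM_F_* bit values but are redefined locally rather than taken from libnl.
NLM_F_REQUEST, NLM_F_MULTI, NLM_F_ACK = 0x1, 0x2, 0x4

def flags2str(flags):
    names = (('REQUEST', NLM_F_REQUEST), ('MULTI', NLM_F_MULTI), ('ACK', NLM_F_ACK))
    parts = []
    for name, bit in names:
        if flags & bit:
            flags &= ~bit
            parts.append(name)
    if flags:                                  # any unknown bits are kept as hex
        parts.append('0x{0:x}'.format(flags))
    return ','.join(parts)

print(flags2str(NLM_F_REQUEST | NLM_F_ACK))    # REQUEST,ACK
print(flags2str(NLM_F_REQUEST | 0x100))        # REQUEST,0x100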
def parse(self, *args, **kwargs): # pylint: disable=unused-argument
"""
Parse response.
:param args: List. 2 first items used as parser name and response to parse
:param kwargs: dict, not used
:return: dictionary or return value of called callable from parser.
"""
# pylint: disable=W0703
cmd = args[0]
resp = args[1]
if cmd in self.parsers:
try:
return self.parsers[cmd](resp)
except Exception as err:
print(err)
return {}
def initializeSessionAsAlice(sessionState, sessionVersion, parameters):
"""
:type sessionState: SessionState
:type sessionVersion: int
:type parameters: AliceAxolotlParameters
"""
sessionState.setSessionVersion(sessionVersion)
sessionState.setRemoteIdentityKey(parameters.getTheirIdentityKey())
sessionState.setLocalIdentityKey(parameters.getOurIdentityKey().getPublicKey())
sendingRatchetKey = Curve.generateKeyPair()
secrets = bytearray()
if sessionVersion >= 3:
secrets.extend(RatchetingSession.getDiscontinuityBytes())
secrets.extend(Curve.calculateAgreement(parameters.getTheirSignedPreKey(),
parameters.getOurIdentityKey().getPrivateKey()))
secrets.extend(Curve.calculateAgreement(parameters.getTheirIdentityKey().getPublicKey(),
parameters.getOurBaseKey().getPrivateKey()))
secrets.extend(Curve.calculateAgreement(parameters.getTheirSignedPreKey(),
parameters.getOurBaseKey().getPrivateKey()))
if sessionVersion >= 3 and parameters.getTheirOneTimePreKey() is not None:
secrets.extend(Curve.calculateAgreement(parameters.getTheirOneTimePreKey(),
parameters.getOurBaseKey().getPrivateKey()))
derivedKeys = RatchetingSession.calculateDerivedKeys(sessionVersion, secrets)
sendingChain = derivedKeys.getRootKey().createChain(parameters.getTheirRatchetKey(), sendingRatchetKey)
sessionState.addReceiverChain(parameters.getTheirRatchetKey(), derivedKeys.getChainKey())
sessionState.setSenderChain(sendingRatchetKey, sendingChain[1])
sessionState.setRootKey(sendingChain[0])
def _rem_id_from_keys(self, pk, conn=None):
'''
_rem_id_from_keys - Remove primary key from table
internal
'''
if conn is None:
conn = self._get_connection()
conn.srem(self._get_ids_key(), pk)
def overlay_gateway_site_bfd_enable(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
overlay_gateway = ET.SubElement(config, "overlay-gateway", xmlns="urn:brocade.com:mgmt:brocade-tunnels")
name_key = ET.SubElement(overlay_gateway, "name")
name_key.text = kwargs.pop('name')
site = ET.SubElement(overlay_gateway, "site")
name_key = ET.SubElement(site, "name")
name_key.text = kwargs.pop('name')
bfd_enable = ET.SubElement(site, "bfd-enable")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def update(self, widget, widget_tree):
""" for the selected widget are listed the relative signals
for each signal there is a dropdown containing all the widgets
the user will select the widget that have to listen a specific event
"""
self.listeners_list = []
self.build_widget_list_from_tree(widget_tree)
self.label.set_text('Signal connections: ' + widget.attributes['editor_varname'])
#del self.container
self.container = gui.VBox(width='100%', height='90%')
self.container.style['justify-content'] = 'flex-start'
self.container.style['overflow-y'] = 'scroll'
self.append(self.container, 'container')
##for all the events of this widget
#isclass instead of ismethod because event methods are replaced with ClassEventConnector
for (setOnEventListenerFuncname,setOnEventListenerFunc) in inspect.getmembers(widget):
#if the member is decorated by decorate_set_on_listener and the function is referred to this event
if hasattr(setOnEventListenerFunc, '_event_info'):
self.container.append( SignalConnection(widget,
self.listeners_list,
setOnEventListenerFuncname,
setOnEventListenerFunc,
width='100%') )
def delete_char(self, e): # (C-d)
u"""Delete the character at point. If point is at the beginning of
the line, there are no characters in the line, and the last
character typed was not bound to delete-char, then return EOF."""
self.l_buffer.delete_char(self.argument_reset)
self.finalize() | u"""Delete the character at point. If point is at the beginning of
the line, there are no characters in the line, and the last
character typed was not bound to delete-char, then return EOF. |
def _get_cpu_info_from_sysctl():
'''
Returns the CPU info gathered from sysctl.
Returns {} if sysctl is not found.
'''
try:
# Just return {} if there is no sysctl
if not DataSource.has_sysctl():
return {}
# If sysctl fails return {}
returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
if output == None or returncode != 0:
return {}
# Various fields
vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
model = _get_field(False, output, int, 0, 'machdep.cpu.model')
family = _get_field(False, output, int, 0, 'machdep.cpu.family')
# Flags
flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
hz_actual = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = {k: v for k, v in info.items() if v}
return info
except:
return {}
def createFileLink(self, resourceno):
"""Make a link of file
If you don't know ``resourceno``, you'd better use ``getFileLink``.
:param resourceno: Resource number of a file to create link
:return: ``Shared url`` or ``False`` when failed to share a file
"""
data = {'_callback': 'window.__jindo_callback._8920',
'resourceno': resourceno,
'userid': self.user_id,
'useridx': self.useridx,
}
s, metadata = self.GET('createFileLink', data)
if s:
print "URL: %s" % (metadata['short_url'])
return metadata['short_url']
else:
print "Error createFileLink: %s" % (metadata)
return False
def generate(size, output, schema):
"""Generate fake PII data for testing"""
pii_data = randomnames.NameList(size)
if schema is not None:
raise NotImplementedError
randomnames.save_csv(
pii_data.names,
[f.identifier for f in pii_data.SCHEMA.fields],
output)
def _build_metrics(func_name, namespace):
"""
Builds metrics dict from function args
It assumes that function arguments is from airflow.bin.cli module's function
and has Namespace instance where it optionally contains "dag_id", "task_id",
and "execution_date".
:param func_name: name of function
:param namespace: Namespace instance from argparse
:return: dict with metrics
"""
metrics = {'sub_command': func_name, 'start_datetime': datetime.utcnow(),
'full_command': '{}'.format(list(sys.argv)), 'user': getpass.getuser()}
assert isinstance(namespace, Namespace)
tmp_dic = vars(namespace)
metrics['dag_id'] = tmp_dic.get('dag_id')
metrics['task_id'] = tmp_dic.get('task_id')
metrics['execution_date'] = tmp_dic.get('execution_date')
metrics['host_name'] = socket.gethostname()
extra = json.dumps(dict((k, metrics[k]) for k in ('host_name', 'full_command')))
log = Log(
event='cli_{}'.format(func_name),
task_instance=None,
owner=metrics['user'],
extra=extra,
task_id=metrics.get('task_id'),
dag_id=metrics.get('dag_id'),
execution_date=metrics.get('execution_date'))
metrics['log'] = log
return metrics
def forcemerge(self, index=None, params=None):
"""
The force merge API allows to force merging of one or more indices
through an API. The merge relates to the number of segments a Lucene
index holds within each shard. The force merge operation allows to
reduce the number of segments by merging them.
This call will block until the merge is complete. If the http
connection is lost, the request will continue in the background, and
any new requests will block until the previous force merge is complete.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-forcemerge.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string to perform the operation on all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg flush: Specify whether the index should be flushed after performing
the operation (default: true)
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg max_num_segments: The number of segments the index should be merged
into (default: dynamic)
:arg only_expunge_deletes: Specify whether the operation should only
expunge deleted documents
:arg operation_threading: TODO: ?
"""
return self.transport.perform_request(
"POST", _make_path(index, "_forcemerge"), params=params
)
def start(self):
""" Set up handler and start loop
:return: None
"""
timeout = self.timeout()
if timeout is not None and timeout > 0:
self.__loop.add_timeout(timedelta(0, timeout), self.stop)
self.handler().setup_handler(self.loop())
self.loop().start()
self.handler().loop_stopped()
def actually_flatten(iterable):
"""Flatten iterables
This is super ugly. There must be a cleaner py2/3 way
of handling this."""
remainder = iter(iterable)
while True:
first = next(remainder) # pylint: disable=R1708
# Python 2/3 compat
is_iter = isinstance(first, collections.Iterable)
try:
basestring
except NameError:
basestring = str # pylint: disable=W0622
if is_py3() and is_iter and not_a_string(first):
remainder = IT.chain(first, remainder)
elif (not is_py3()) and is_iter and not isinstance(first, basestring):
remainder = IT.chain(first, remainder)
else:
yield polite_string(first)
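For comparison, a Python-3-only sketch of the same idea without the compatibility shims; the key trick is re-chaining the remainder iterator whenever a nested iterable is encountered.
import itertools
from collections.abc import Iterable

def flatten(iterable):
    remainder = iter(iterable)
    while True:
        try:
            first = next(remainder)
        except StopIteration:
            return
        if isinstance(first, Iterable) and not isinstance(first, (str, bytes)):
            remainder = itertools.chain(first, remainder)
        else:
            yield first

print(list(flatten([1, [2, [3, ["a", "b"]], 4]])))   # [1, 2, 3, 'a', 'b', 4]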
def get(self, pk, **kwargs):
"""Get item from Model
---
get:
parameters:
- in: path
schema:
type: integer
name: pk
- $ref: '#/components/parameters/get_item_schema'
responses:
200:
description: Item from Model
content:
application/json:
schema:
type: object
properties:
label_columns:
type: object
show_columns:
type: array
items:
type: string
description_columns:
type: object
show_title:
type: string
id:
type: string
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.get'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
item = self.datamodel.get(pk, self._base_filters)
if not item:
return self.response_404()
_response = dict()
_args = kwargs.get("rison", {})
select_cols = _args.get(API_SELECT_COLUMNS_RIS_KEY, [])
_pruned_select_cols = [col for col in select_cols if col in self.show_columns]
self.set_response_key_mappings(
_response,
self.get,
_args,
**{API_SELECT_COLUMNS_RIS_KEY: _pruned_select_cols}
)
if _pruned_select_cols:
_show_model_schema = self.model2schemaconverter.convert(_pruned_select_cols)
else:
_show_model_schema = self.show_model_schema
_response["id"] = pk
_response[API_RESULT_RES_KEY] = _show_model_schema.dump(item, many=False).data
self.pre_get(_response)
return self.response(200, **_response)
def ansi_split(text, _re=re.compile(u"(\x1b\\[(\\d*;?)*\\S)")):
"""Yields (is_ansi, text)"""
for part in _re.split(text):
if part:
yield (bool(_re.match(part)), part)
def _encode(self):
"""Encode the message and return a bytestring."""
data = ByteBuffer()
if not hasattr(self, '__fields__'):
return data.tostring()
for field in self.__fields__:
field.encode(self, data)
return data.tostring()
def configure_settings():
"""
Configures settings for manage.py and for run_tests.py.
"""
if not settings.configured:
db_config = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'django_kittens_db.sqlite3',
}
settings.configure(
TEST_RUNNER='django_nose.NoseTestSuiteRunner',
NOSE_ARGS=['--nocapture', '--nologcapture', '--verbosity=1'],
DATABASES={
'default': db_config,
},
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.admin',
'django_kittens',
'django_kittens.tests',
),
ROOT_URLCONF='django_kittens.urls',
DEBUG=True,
MIDDLEWARE_CLASSES=(),
)
def addFull(self, vehID, routeID, typeID="DEFAULT_VEHTYPE", depart=None,
departLane="first", departPos="base", departSpeed="0",
arrivalLane="current", arrivalPos="max", arrivalSpeed="current",
fromTaz="", toTaz="", line="", personCapacity=0, personNumber=0):
"""
Add a new vehicle (new style with all possible parameters)
"""
messageString = struct.pack("!Bi", tc.TYPE_COMPOUND, 14)
if depart is None:
depart = str(self._connection.simulation.getCurrentTime() / 1000.)
for val in (routeID, typeID, depart, departLane, departPos, departSpeed,
arrivalLane, arrivalPos, arrivalSpeed, fromTaz, toTaz, line):
messageString += struct.pack("!Bi",
tc.TYPE_STRING, len(val)) + str(val).encode("latin1")
messageString += struct.pack("!Bi", tc.TYPE_INTEGER, personCapacity)
messageString += struct.pack("!Bi", tc.TYPE_INTEGER, personNumber)
self._connection._beginMessage(
tc.CMD_SET_VEHICLE_VARIABLE, tc.ADD_FULL, vehID, len(messageString))
self._connection._string += messageString
self._connection._sendExact()
def b58encode_int(i, default_one=True):
'''Encode an integer using Base58'''
if not i and default_one:
return alphabet[0]
string = ""
while i:
i, idx = divmod(i, 58)
string = alphabet[idx] + string
return string
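A companion decoder sketch, assuming the module-level alphabet is the usual 58-character Base58 alphabet shown below; it simply inverts the base conversion.
alphabet = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def b58decode_int(string):
    i = 0
    for char in string:
        i = i * 58 + alphabet.index(char)
    return i

print(b58decode_int('BukQL'))        # 123456789, if the same alphabet is used for encoding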
def column_mask(self):
"""ndarray, True where column margin <= min_base_size, same shape as slice."""
margin = compress_pruned(
self._slice.margin(
axis=0,
weighted=False,
include_transforms_for_dims=self._hs_dims,
prune=self._prune,
)
)
mask = margin < self._size
if margin.shape == self._shape:
# If margin shape is the same as slice's (such as in a col margin for
# MR x CAT), don't broadcast the mask to the array shape, since
# they're already the same.
return mask
# If the margin is a row vector, broadcast its mask to the array shape
return np.logical_or(np.zeros(self._shape, dtype=bool), mask)
def weld_str_replace(array, pat, rep):
"""Replace first occurrence of pat with rep.
Parameters
----------
array : numpy.ndarray or WeldObject
Input data.
pat : str
To find.
rep : str
To replace with.
Returns
-------
WeldObject
Representation of this computation.
"""
obj_id, weld_obj = create_weld_object(array)
pat_id = get_weld_obj_id(weld_obj, pat)
rep_id = get_weld_obj_id(weld_obj, rep)
weld_template = """let lenPat = len({pat});
map({array},
|e: vec[i8]|
let lenString = len(e);
if(lenPat > lenString,
e,
# start by assuming sub is not found, until proven it is
let words_iter_res = iterate({{0L, false}},
|p|
let e_i = p.$0;
let pat_i = 0L;
# start by assuming the substring and sub are the same, until proven otherwise
let word_check_res = iterate({{e_i, pat_i, true}},
|q|
let found = lookup(e, q.$0) == lookup({pat}, q.$1);
{{
{{q.$0 + 1L, q.$1 + 1L, found}},
q.$1 + 1L < lenPat &&
found == true
}}
).$2;
{{
{{p.$0 + 1L, word_check_res}},
p.$0 + lenPat < lenString &&
word_check_res == false
}}
);
if(words_iter_res.$1 == true,
let rep_from = words_iter_res.$0 - 1L;
let rep_to = rep_from + lenPat;
let res = appender[i8];
let res = for(slice(e, 0L, rep_from),
res,
|c: appender[i8], j: i64, f: i8|
merge(c, f)
);
let res = for({rep},
res,
|c: appender[i8], j: i64, f: i8|
merge(c, f)
);
let res = for(slice(e, rep_to, lenString),
res,
|c: appender[i8], j: i64, f: i8|
merge(c, f)
);
result(res),
e
)
)
)"""
weld_obj.weld_code = weld_template.format(array=obj_id,
pat=pat_id,
rep=rep_id)
return weld_obj
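As a reading aid for the template above, this is the equivalent per-element computation in plain Python: replace only the first occurrence of pat.
def str_replace_first(e, pat, rep):
    if len(pat) > len(e):                     # mirrors the early exit in the Weld template
        return e
    i = e.find(pat)
    if i == -1:
        return e
    return e[:i] + rep + e[i + len(pat):]

print(str_replace_first("banana", "na", "NA"))   # baNAna, same as "banana".replace("na", "NA", 1)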
def webui_schematics_assets_asset_asset_type_image_base_64_image(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
webui = ET.SubElement(config, "webui", xmlns="http://tail-f.com/ns/webui")
schematics = ET.SubElement(webui, "schematics")
assets = ET.SubElement(schematics, "assets")
asset = ET.SubElement(assets, "asset")
name_key = ET.SubElement(asset, "name")
name_key.text = kwargs.pop('name')
asset_type = ET.SubElement(asset, "asset-type")
image = ET.SubElement(asset_type, "image")
base_64_image = ET.SubElement(image, "base-64-image")
base_64_image.text = kwargs.pop('base_64_image')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def classe(self, name):
"""return a class by its name, raise KeyError if not found
"""
for klass in self.classes():
if klass.node.name == name:
return klass
raise KeyError(name)
def _encode_binary(message, on=1, off=0):
"""
>>> message = "SOS"
>>> _encode_binary(message)
[1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1]
>>> _encode_binary(message, on='1', off='0')
['1', '0', '1', '0', '1', '0', '0', '0', '1', '1', '1', '0', '1', '1', '1', '0', '1', '1', '1', '0', '0', '0', '1', '0', '1', '0', '1']
"""
l = _encode_morse(message)
s = ' '.join(l)
l = list(s)
bin_conv = {'.': [on], '-': [on] * 3, ' ': [off]}
l = map(lambda symb: [off] + bin_conv[symb], l)
lst = [item for sublist in l for item in sublist] # flatten list
return lst[1:]
def position_input(obj, visible=False):
"""Template tag to return an input field for the position of the object."""
if not obj.generic_position.all():
ObjectPosition.objects.create(content_object=obj)
return {'obj': obj, 'visible': visible,
'object_position': obj.generic_position.all()[0]}
def render_html(html_str):
"""
makes a temporary html rendering
"""
import utool as ut
from os.path import abspath
import webbrowser
try:
html_str = html_str.decode('utf8')
except Exception:
pass
html_dpath = ut.ensure_app_resource_dir('utool', 'temp_html')
fpath = abspath(ut.unixjoin(html_dpath, 'temp.html'))
url = 'file://' + fpath
ut.writeto(fpath, html_str)
webbrowser.open(url)
def asdict(self, rawkey=False):
r"""Convert Result to dict.
Parameters:
rawkey(bool):
* True: dict key is Descriptor instance
* False: dict key is str
Returns:
dict
"""
if rawkey:
return dict(self.items())
else:
return {
str(k): v
for k, v in self.items()
} | r"""Convert Result to dict.
Parameters:
rawkey(bool):
* True: dict key is Descriptor instance
* False: dict key is str
Returns:
dict |
def _get_interfaces(self):
"""Get a list of interfaces on this hosting device.
:return: List of the interfaces
"""
ios_cfg = self._get_running_config()
parse = HTParser(ios_cfg)
itfcs_raw = parse.find_lines("^interface GigabitEthernet")
itfcs = [raw_if.strip().split(' ')[1] for raw_if in itfcs_raw]
LOG.debug("Interfaces on hosting device: %s", itfcs)
return itfcs
def _resolveAddress(address):
"""
Resolves the host in the given string. The input is of the form host[:port]. This method
is idempotent, i.e. the host may already be a dotted IP address.
>>> # noinspection PyProtectedMember
>>> f=MesosBatchSystem._resolveAddress
>>> f('localhost')
'127.0.0.1'
>>> f('127.0.0.1')
'127.0.0.1'
>>> f('localhost:123')
'127.0.0.1:123'
>>> f('127.0.0.1:123')
'127.0.0.1:123'
"""
address = address.split(':')
assert len(address) in (1, 2)
address[0] = socket.gethostbyname(address[0])
return ':'.join(address)
def UpdateSNMPObjsAsync():
""" Starts UpdateSNMPObjs() in a separate thread. """
# UpdateSNMPObjs() will be executed in a separate thread so that the main
# thread can continue looping and processing SNMP requests while the data
# update is still in progress. However we'll make sure only one update
# thread is run at any time, even if the data update interval has been set
# too low.
if threading.active_count() == 1:
LogMsg("Creating thread for UpdateSNMPObjs().")
t = threading.Thread(target=UpdateSNMPObjs, name="UpdateSNMPObjsThread")
t.daemon = True
t.start()
else:
LogMsg("Data update still active, data update interval too low?") | Starts UpdateSNMPObjs() in a separate thread. |
def excepthook(type, value, tb):
"""Report an exception."""
if (issubclass(type, Error) or issubclass(type, lib50.Error)) and str(value):
for line in str(value).split("\n"):
cprint(str(line), "yellow")
else:
cprint(_("Sorry, something's wrong! Let [email protected] know!"), "yellow")
if excepthook.verbose:
traceback.print_exception(type, value, tb)
cprint(_("Submission cancelled."), "red") | Report an exception. |
def get_binary_path(executable, logging_level='INFO'):
"""Gets the software name and returns the path of the binary."""
if sys.platform == 'win32':
if executable == 'start':
return executable
executable = executable + '.exe'
if executable in os.listdir('.'):
binary = os.path.join(os.getcwd(), executable)
else:
binary = next((os.path.join(path, executable)
for path in os.environ['PATH'].split(os.pathsep)
if os.path.isfile(os.path.join(path, executable))), None)
else:
venv_parent = get_venv_parent_path()
venv_bin_path = os.path.join(venv_parent, '.venv', 'bin')
if not venv_bin_path in os.environ.get('PATH'):
if logging_level == 'DEBUG':
print(f'Adding path {venv_bin_path} to environment PATH variable')
os.environ['PATH'] = os.pathsep.join([os.environ['PATH'], venv_bin_path])
binary = shutil.which(executable)
return binary if binary else None
def transform_vector_coorb_to_inertial(vec_coorb, orbPhase, quat_copr):
"""Given a vector (of size 3) in coorbital frame, orbital phase in
coprecessing frame and a minimal rotation frame quat, transforms
the vector from the coorbital to the inertial frame.
"""
# Transform to coprecessing frame
vec_copr = rotate_in_plane(vec_coorb, -orbPhase)
# Transform to inertial frame
vec = transformTimeDependentVector(np.array([quat_copr]).T,
np.array([vec_copr]).T).T[0]
return np.array(vec)
def __kullback_leibler(h1, h2): # 36.3 us
"""
The actual KL implementation. @see kullback_leibler() for details.
Expects the histograms to be of type scipy.ndarray.
"""
result = h1.astype(scipy.float_)
mask = h1 != 0
result[mask] = scipy.multiply(h1[mask], scipy.log(h1[mask] / h2[mask]))
return scipy.sum(result)
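The same computation with NumPy only (scipy.float_, scipy.log and scipy.sum are thin aliases of the NumPy equivalents); histograms are assumed to be normalized and h2 non-zero wherever h1 is non-zero.
import numpy as np

def kl_numpy(h1, h2):
    h1 = np.asarray(h1, dtype=float)
    h2 = np.asarray(h2, dtype=float)
    result = h1.copy()
    mask = h1 != 0
    result[mask] = h1[mask] * np.log(h1[mask] / h2[mask])
    return result.sum()

print(kl_numpy([0.5, 0.5, 0.0], [0.25, 0.25, 0.5]))   # ~0.693, i.e. ln(2)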
def post_event(event,
channel=None,
username=None,
api_url=None,
hook=None):
'''
Send an event to a Mattermost channel.
:param channel: The channel name, either will work.
:param username: The username of the poster.
:param event: The event to send to the Mattermost channel.
:param api_url: The Mattermost api url, if not specified in the configuration.
:param hook: The Mattermost hook, if not specified in the configuration.
:return: Boolean if message was sent successfully.
'''
if not api_url:
api_url = _get_api_url()
if not hook:
hook = _get_hook()
if not username:
username = _get_username()
if not channel:
channel = _get_channel()
if not event:
log.error('message is a required option.')
log.debug('Event: %s', event)
log.debug('Event data: %s', event['data'])
message = 'tag: {0}\r\n'.format(event['tag'])
for key, value in six.iteritems(event['data']):
message += '{0}: {1}\r\n'.format(key, value)
result = post_message(channel,
username,
message,
api_url,
hook)
return bool(result)
def backprop(self, input_data, df_output, cache=None):
""" Backpropagate through the hidden layer
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
df_output : ``GPUArray``
Gradients with respect to the activations of this layer
(received from the layer above).
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : tuple of ``GPUArray``
Gradients with respect to the weights and biases in the
form ``(df_weights, df_biases)``.
df_input : ``GPUArray``
Gradients with respect to the input.
"""
# Get cache if it wasn't provided
if cache is None:
cache = self.feed_forward(input_data,
prediction=False)
if len(cache) == 2:
activations, dropout_mask = cache
else:
activations = cache[0]
# Multiply the binary mask with the incoming gradients
if self.dropout > 0 and dropout_mask is not None:
apply_dropout_mask(df_output, dropout_mask)
# Get gradient wrt activation function
df_activations = self.df(activations)
delta = mult_matrix(df_activations, df_output)
# Gradient wrt weights
df_W = linalg.dot(input_data, delta, transa='T')
# Gradient wrt bias
df_b = matrix_sum_out_axis(delta, 0)
# Gradient wrt inputs
df_input = linalg.dot(delta, self.W, transb='T')
# L1 weight decay
if self.l1_penalty_weight:
df_W += self.l1_penalty_weight * sign(self.W)
# L2 weight decay
if self.l2_penalty_weight:
df_W += self.l2_penalty_weight * self.W
return (df_W, df_b), df_input
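A NumPy sketch of the same gradient algebra (no dropout, no weight decay, plain CPU arrays); W is (n_in, n_out), and df is the derivative of the activation expressed in terms of the activations themselves, matching how self.df is used above.
import numpy as np

def backprop_dense(input_data, activations, df_output, W, df):
    delta = df(activations) * df_output      # gradient wrt the layer's pre-nonlinearity output
    df_W = input_data.T @ delta              # gradient wrt weights
    df_b = delta.sum(axis=0)                 # gradient wrt biases
    df_input = delta @ W.T                   # gradient wrt the layer input
    return (df_W, df_b), df_input

# Smoke test with a tanh layer, for which f'(x) = 1 - tanh(x)**2 = 1 - activations**2.
rng = np.random.default_rng(0)
X, W, b = rng.normal(size=(4, 3)), rng.normal(size=(3, 2)), np.zeros(2)
activations = np.tanh(X @ W + b)
(gW, gb), gX = backprop_dense(X, activations, np.ones_like(activations), W,
                              lambda a: 1.0 - a ** 2)
print(gW.shape, gb.shape, gX.shape)          # (3, 2) (2,) (4, 3)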
def has_symbol(self, symbol, as_of=None):
"""
Return True if the 'symbol' exists in this library AND the symbol
isn't deleted in the specified as_of.
It's possible for a deleted symbol to exist in older snapshots.
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or int or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
"""
try:
# Always use the primary for has_symbol, it's safer
self._read_metadata(symbol, as_of=as_of, read_preference=ReadPreference.PRIMARY)
return True
except NoDataFoundException:
return False
def _create_object(self, data, request):
""" Create a python object from the given data.
This will use ``self.factory`` object's ``create()`` function to
create the data.
If no factory is defined, this will simply return the same data
that was given.
"""
if request.method.upper() == 'POST' and self.post_factory:
fac_func = self.post_factory.create
else:
fac_func = self.factory.create
if isinstance(data, (list, tuple)):
return map(fac_func, data)
else:
return fac_func(data)
def mark_seen(self):
"""
Mark the selected message or comment as seen.
"""
data = self.get_selected_item()
if data['is_new']:
with self.term.loader('Marking as read'):
data['object'].mark_as_read()
if not self.term.loader.exception:
data['is_new'] = False
else:
with self.term.loader('Marking as unread'):
data['object'].mark_as_unread()
if not self.term.loader.exception:
data['is_new'] = True
def getSlaveStatus(self):
"""Returns status of replication slaves.
@return: Dictionary of status items.
"""
info_dict = {}
if self.checkVersion('9.1'):
cols = ['procpid', 'usename', 'application_name',
'client_addr', 'client_port', 'backend_start', 'state',
'sent_location', 'write_location', 'flush_location',
'replay_location', 'sync_priority', 'sync_state',]
cur = self._conn.cursor()
cur.execute("""SELECT %s FROM pg_stat_replication;"""
% ','.join(cols))
rows = cur.fetchall()
for row in rows:
info_dict[row[0]] = dict(zip(cols[1:], row[1:]))
else:
return None
return info_dict
def addition_circuit(
addend0: Qubits,
addend1: Qubits,
carry: Qubits) -> Circuit:
"""Returns a quantum circuit for ripple-carry addition. [Cuccaro2004]_
Requires two carry qubit (input and output). The result is returned in
addend1.
.. [Cuccaro2004]
A new quantum ripple-carry addition circuit, Steven A. Cuccaro,
Thomas G. Draper, Samuel A. Kutin, David Petrie Moulton
arXiv:quant-ph/0410184 (2004)
"""
if len(addend0) != len(addend1):
raise ValueError('Number of addend qubits must be equal')
if len(carry) != 2:
raise ValueError('Expected 2 carry qubits')
def _maj(qubits: Qubits) -> Circuit:
q0, q1, q2 = qubits
circ = Circuit()
circ += CNOT(q2, q1)
circ += CNOT(q2, q0)
circ += CCNOT(q0, q1, q2)
return circ
def _uma(qubits: Qubits) -> Circuit:
q0, q1, q2 = qubits
circ = Circuit()
circ += CCNOT(q0, q1, q2)
circ += CNOT(q2, q0)
circ += CNOT(q0, q1)
return circ
qubits = [carry[0]] + list(chain.from_iterable(
zip(reversed(addend1), reversed(addend0)))) + [carry[1]]
circ = Circuit()
for n in range(0, len(qubits)-3, 2):
circ += _maj(qubits[n:n+3])
circ += CNOT(qubits[-2], qubits[-1])
for n in reversed(range(0, len(qubits)-3, 2)):
circ += _uma(qubits[n:n+3])
return circ | Returns a quantum circuit for ripple-carry addition. [Cuccaro2004]_
Requires two carry qubit (input and output). The result is returned in
addend1.
.. [Cuccaro2004]
A new quantum ripple-carry addition circuit, Steven A. Cuccaro,
Thomas G. Draper, Samuel A. Kutin, David Petrie Moulton
arXiv:quant-ph/0410184 (2004) |
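A minimal usage sketch (hedged: the register sizes and integer qubit labels below are illustrative assumptions):
# Add two 3-qubit registers; the sum appears on the addend1 wires
circ = addition_circuit(addend0=[0, 1, 2], addend1=[3, 4, 5], carry=[6, 7])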
def get_text_field_mask(text_field_tensors: Dict[str, torch.Tensor],
num_wrapping_dims: int = 0) -> torch.LongTensor:
"""
Takes the dictionary of tensors produced by a ``TextField`` and returns a mask
with 0 where the tokens are padding, and 1 otherwise. We also handle ``TextFields``
wrapped by an arbitrary number of ``ListFields``, where the number of wrapping ``ListFields``
is given by ``num_wrapping_dims``.
If ``num_wrapping_dims == 0``, the returned mask has shape ``(batch_size, num_tokens)``.
If ``num_wrapping_dims > 0`` then the returned mask has ``num_wrapping_dims`` extra
dimensions, so the shape will be ``(batch_size, ..., num_tokens)``.
There could be several entries in the tensor dictionary with different shapes (e.g., one for
word ids, one for character ids). In order to get a token mask, we use the tensor in
the dictionary with the lowest number of dimensions. After subtracting ``num_wrapping_dims``,
if this tensor has two dimensions we assume it has shape ``(batch_size, ..., num_tokens)``,
and use it for the mask. If instead it has three dimensions, we assume it has shape
``(batch_size, ..., num_tokens, num_features)``, and sum over the last dimension to produce
the mask. Most frequently this will be a character id tensor, but it could also be a
featurized representation of each token, etc.
If the input ``text_field_tensors`` contains the "mask" key, this is returned instead of inferring the mask.
TODO(joelgrus): can we change this?
NOTE: Our functions for generating masks create torch.LongTensors, because using
torch.ByteTensors makes it easy to run into overflow errors
when doing mask manipulation, such as summing to get the lengths of sequences - see below.
>>> mask = torch.ones([260]).byte()
>>> mask.sum() # equals 260.
>>> var_mask = torch.autograd.V(mask)
>>> var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows.
"""
if "mask" in text_field_tensors:
return text_field_tensors["mask"]
tensor_dims = [(tensor.dim(), tensor) for tensor in text_field_tensors.values()]
tensor_dims.sort(key=lambda x: x[0])
smallest_dim = tensor_dims[0][0] - num_wrapping_dims
if smallest_dim == 2:
token_tensor = tensor_dims[0][1]
return (token_tensor != 0).long()
elif smallest_dim == 3:
character_tensor = tensor_dims[0][1]
return ((character_tensor > 0).long().sum(dim=-1) > 0).long()
else:
raise ValueError("Expected a tensor with dimension 2 or 3, found {}".format(smallest_dim)) | Takes the dictionary of tensors produced by a ``TextField`` and returns a mask
with 0 where the tokens are padding, and 1 otherwise. We also handle ``TextFields``
wrapped by an arbitrary number of ``ListFields``, where the number of wrapping ``ListFields``
is given by ``num_wrapping_dims``.
If ``num_wrapping_dims == 0``, the returned mask has shape ``(batch_size, num_tokens)``.
If ``num_wrapping_dims > 0`` then the returned mask has ``num_wrapping_dims`` extra
dimensions, so the shape will be ``(batch_size, ..., num_tokens)``.
There could be several entries in the tensor dictionary with different shapes (e.g., one for
word ids, one for character ids). In order to get a token mask, we use the tensor in
the dictionary with the lowest number of dimensions. After subtracting ``num_wrapping_dims``,
if this tensor has two dimensions we assume it has shape ``(batch_size, ..., num_tokens)``,
and use it for the mask. If instead it has three dimensions, we assume it has shape
``(batch_size, ..., num_tokens, num_features)``, and sum over the last dimension to produce
the mask. Most frequently this will be a character id tensor, but it could also be a
featurized representation of each token, etc.
If the input ``text_field_tensors`` contains the "mask" key, this is returned instead of inferring the mask.
TODO(joelgrus): can we change this?
NOTE: Our functions for generating masks create torch.LongTensors, because using
torch.ByteTensors makes it easy to run into overflow errors
when doing mask manipulation, such as summing to get the lengths of sequences - see below.
>>> mask = torch.ones([260]).byte()
>>> mask.sum() # equals 260.
>>> var_mask = torch.autograd.V(mask)
>>> var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows. |
def _setup_watch(self, alias, path, flags):
"""Actual rule setup."""
assert alias not in self.descriptors, "Registering alias %s twice!" % alias
wd = LibC.inotify_add_watch(self._fd, path, flags)
if wd < 0:
raise IOError("Error setting up watch on %s with flags %s: wd=%s" % (
path, flags, wd))
self.descriptors[alias] = wd
self.aliases[wd] = alias | Actual rule setup. |
def _search(self, searchfilter, attrs, basedn):
"""Generic search"""
if attrs == NO_ATTR:
attrlist = []
elif attrs == DISPLAYED_ATTRS:
# fix me later (too many attributes)
attrlist = self.attrlist
elif attrs == LISTED_ATTRS:
attrlist = self.attrlist
elif attrs == ALL_ATTRS:
attrlist = None
else:
attrlist = None
self._logger(
severity=logging.DEBUG,
msg="%(backend)s: executing search "
"with filter '%(filter)s' in DN '%(dn)s'" % {
'backend': self.backend_name,
'dn': basedn,
'filter': self._uni(searchfilter)
}
)
# bind and search the ldap
ldap_client = self._bind()
try:
r = ldap_client.search_s(
basedn,
ldap.SCOPE_SUBTREE,
searchfilter,
attrlist=attrlist
)
except Exception as e:
ldap_client.unbind_s()
self._exception_handler(e)
ldap_client.unbind_s()
# python-ldap doesn't know utf-8,
# it treats everything as bytes.
# So it's necessary to reencode
# its output in utf-8.
ret = []
for entry in r:
uni_dn = self._uni(entry[0])
uni_attrs = {}
for attr in entry[1]:
if type(entry[1][attr]) is list:
tmp = []
for value in entry[1][attr]:
tmp.append(self._uni(value))
else:
tmp = self._uni(entry[1][attr])
uni_attrs[self._uni(attr)] = tmp
ret.append((uni_dn, uni_attrs))
return ret | Generic search |
def dumplist(args):
"""Dumps lists of files based on your criteria"""
from .query import Database
db = Database()
r = db.objects(
protocol=args.protocol,
purposes=args.purpose,
model_ids=(args.client,),
groups=args.group,
classes=args.sclass
)
output = sys.stdout
if args.selftest:
from bob.db.utils import null
output = null()
for f in r:
output.write('%s\n' % (f.make_path(args.directory, args.extension),))
return 0 | Dumps lists of files based on your criteria |
def add_property(self, c_property_tuple, sync=True):
"""
Add property to this container. If this container has no id then it's like sync=False.
:param c_property_tuple: property tuple defined like this :
=> property name = c_property_tuple[0]
=> property value = c_property_tuple[1]
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the property tuple object on list to be added on next save().
:return:
"""
LOGGER.debug("Container.add_property")
if c_property_tuple[1] is None:
LOGGER.debug("Property " + c_property_tuple[0] + " has None value. Ignore.")
return
if not sync or self.id is None:
self.properties_2_add.append(c_property_tuple)
else:
property_param = DriverTools.property_params(c_property_tuple[0], c_property_tuple[1])
params = SessionService.complete_transactional_req({'ID': self.id})
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params['OPERATION'] = 'addContainerProperty'
params['propertyField'] = json.dumps(property_param)
args = {'properties': params}
else:
params['propertyName'] = property_param['propertyName']
params['propertyValue'] = property_param['propertyValue']
if 'propertyType' in property_param:
params['propertyType'] = property_param['propertyType']
args = {'http_operation': 'GET', 'operation_path': 'update/properties/add', 'parameters': params}
response = ContainerService.requester.call(args)
if MappingService.driver_type != DriverFactory.DRIVER_REST:
response = response.get()
if response.rc != 0:
LOGGER.warning(
'Container.add_property - Problem while updating container ' + self.name +
'.Reason: ' + str(response.response_content) + ' - ' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
raise ArianeMappingOverloadError("Container.add_property", ArianeMappingOverloadError.ERROR_MSG)
# traceback.print_stack()
else:
self.sync() | Add property to this container. If this container has no id then it's like sync=False.
:param c_property_tuple: property tuple defined like this :
=> property name = c_property_tuple[0]
=> property value = c_property_tuple[1]
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the property tuple object on list to be added on next save().
:return: |
def serialize(self, value):
"""See base class."""
if isinstance(value, list):
return self.list_sep.join(_helpers.str_or_unicode(x.name) for x in value)
else:
return _helpers.str_or_unicode(value.name) | See base class. |
def appendBitPadding(str, blocksize=AES_blocksize):
'''Bit padding a.k.a. One and Zeroes Padding
A single set ('1') bit is added to the message and then as many reset ('0') bits as required (possibly none) are added.
Input: (str) str - String to be padded
(int) blocksize - block size of the algorithm
Return: Padded string according to the ANSI X.923 standard
Used when padding bit strings.
0x80 in binary is 10000000
0x00 in binary is 00000000
Defined in ANSI X.923 (based on NIST Special Publication 800-38A) and ISO/IEC 9797-1 as Padding Method 2.
Used in hash functions MD5 and SHA, described in RFC 1321 step 3.1.
'''
pad_len = paddingLength(len(str), blocksize) - 1
padding = chr(0x80)+'\0'*pad_len
return str + padding | Bit padding a.k.a. One and Zeroes Padding
A single set ('1') bit is added to the message and then as many reset ('0') bits as required (possibly none) are added.
Input: (str) str - String to be padded
(int) blocksize - block size of the algorithm
Return: Padded string according to the ANSI X.923 standard
Used when padding bit strings.
0x80 in binary is 10000000
0x00 in binary is 00000000
Defined in ANSI X.923 (based on NIST Special Publication 800-38A) and ISO/IEC 9797-1 as Padding Method 2.
Used in hash functions MD5 and SHA, described in RFC 1321 step 3.1. |
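A short illustration (hedged: assumes paddingLength returns the number of bytes needed to reach the next block boundary):
padded = appendBitPadding('YELLOW', blocksize=16)
# 6 input bytes -> one 0x80 byte plus nine 0x00 bytes, giving a 16-byte padded string
assert len(padded) == 16 and padded[6] == chr(0x80)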
def filter_factory(global_conf, **local_conf):
"""Returns a WSGI filter app for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
def blacklist(app):
return BlacklistFilter(app, conf)
return blacklist | Returns a WSGI filter app for use with paste.deploy. |
def end_prov_graph(self):
"""
Finalize prov recording with end time
"""
endTime = Literal(datetime.now())
self.prov_g.add((self.entity_d, self.prov.generatedAtTime, endTime))
self.prov_g.add((self.activity, self.prov.endedAtTime, endTime)) | Finalize prov recording with end time |
def subspace_detector_plot(detector, stachans, size, **kwargs):
"""
Plotting for the subspace detector class.
Plot the output basis vectors for the detector at the given dimension.
Corresponds to the first n horizontal vectors of the V matrix.
:type detector: :class:`eqcorrscan.core.subspace.Detector`
:type stachans: list
:param stachans: list of tuples of station, channel pairs to plot.
:type stachans: list
:param stachans: List of tuples of (station, channel) to use. Can set\
to 'all' to use all the station-channel pairs available. If \
detector is multiplexed, will just plot that.
:type size: tuple
:param size: Figure size.
:returns: Figure
:rtype: matplotlib.pyplot.Figure
.. rubric:: Example
>>> from eqcorrscan.core import subspace
>>> import os
>>> detector = subspace.Detector()
>>> detector.read(os.path.join(
... os.path.abspath(os.path.dirname(__file__)),
... '..', 'tests', 'test_data', 'subspace',
... 'stat_test_detector.h5'))
Detector: Tester
>>> subspace_detector_plot(detector=detector, stachans='all', size=(10, 7),
... show=True) # doctest: +SKIP
.. plot::
from eqcorrscan.core import subspace
from eqcorrscan.utils.plotting import subspace_detector_plot
import os
print('running subspace plot')
detector = subspace.Detector()
detector.read(os.path.join('..', '..', '..', 'tests', 'test_data',
'subspace', 'stat_test_detector.h5'))
subspace_detector_plot(detector=detector, stachans='all', size=(10, 7),
show=True)
"""
import matplotlib.pyplot as plt
if stachans == 'all' and not detector.multiplex:
stachans = detector.stachans
elif detector.multiplex:
stachans = [('multi', ' ')]
if np.isinf(detector.dimension):
msg = ' '.join(['Infinite subspace dimension. Only plotting as many',
'dimensions as events in design set'])
warnings.warn(msg)
nrows = detector.v[0].shape[1]
else:
nrows = detector.dimension
fig, axes = plt.subplots(nrows=nrows, ncols=len(stachans),
sharex=True, sharey=True, figsize=size)
x = np.arange(len(detector.u[0]), dtype=np.float32)
if detector.multiplex:
x /= len(detector.stachans) * detector.sampling_rate
else:
x /= detector.sampling_rate
for column, stachan in enumerate(stachans):
channel = detector.u[column]
for row, vector in enumerate(channel.T[0:nrows]):
if len(stachans) == 1:
if nrows == 1:
axis = axes
else:
axis = axes[row]
else:
axis = axes[row, column]
if row == 0:
axis.set_title('.'.join(stachan))
axis.plot(x, vector, 'k', linewidth=1.1)
if column == 0:
axis.set_ylabel('Basis %s' % (row + 1), rotation=0)
if row == nrows - 1:
axis.set_xlabel('Time (s)')
axis.set_yticks([])
plt.subplots_adjust(hspace=0.05)
plt.subplots_adjust(wspace=0.05)
fig = _finalise_figure(fig=fig, **kwargs) # pragma: no cover
return fig | Plotting for the subspace detector class.
Plot the output basis vectors for the detector at the given dimension.
Corresponds to the first n horizontal vectors of the V matrix.
:type detector: :class:`eqcorrscan.core.subspace.Detector`
:type stachans: list
:param stachans: list of tuples of station, channel pairs to plot.
:type stachans: list
:param stachans: List of tuples of (station, channel) to use. Can set\
to 'all' to use all the station-channel pairs available. If \
detector is multiplexed, will just plot that.
:type size: tuple
:param size: Figure size.
:returns: Figure
:rtype: matplotlib.pyplot.Figure
.. rubric:: Example
>>> from eqcorrscan.core import subspace
>>> import os
>>> detector = subspace.Detector()
>>> detector.read(os.path.join(
... os.path.abspath(os.path.dirname(__file__)),
... '..', 'tests', 'test_data', 'subspace',
... 'stat_test_detector.h5'))
Detector: Tester
>>> subspace_detector_plot(detector=detector, stachans='all', size=(10, 7),
... show=True) # doctest: +SKIP
.. plot::
from eqcorrscan.core import subspace
from eqcorrscan.utils.plotting import subspace_detector_plot
import os
print('running subspace plot')
detector = subspace.Detector()
detector.read(os.path.join('..', '..', '..', 'tests', 'test_data',
'subspace', 'stat_test_detector.h5'))
subspace_detector_plot(detector=detector, stachans='all', size=(10, 7),
show=True) |
def screenshot(self, png_filename=None, format='raw'):
"""
Screenshot with PNG format
Args:
png_filename(string): optional, save file name
format(string): return format, pillow or raw(default)
Returns:
raw data or PIL.Image
Raises:
WDAError
"""
value = self.http.get('screenshot').value
raw_value = base64.b64decode(value)
png_header = b"\x89PNG\r\n\x1a\n"
if not raw_value.startswith(png_header) and png_filename:
raise WDAError(-1, "screenshot png format error")
if png_filename:
with open(png_filename, 'wb') as f:
f.write(raw_value)
if format == 'raw':
return raw_value
elif format == 'pillow':
from PIL import Image
buff = io.BytesIO(raw_value)
return Image.open(buff)
else:
raise ValueError("unknown format") | Screenshot with PNG format
Args:
png_filename(string): optional, save file name
format(string): return format, pillow or raw(default)
Returns:
raw data or PIL.Image
Raises:
WDAError |
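A hedged usage sketch, assuming an already-connected session object s that exposes this method:
raw = s.screenshot('shot.png')        # writes shot.png and returns the raw PNG bytes
img = s.screenshot(format='pillow')   # returns a PIL.Image instead of raw bytes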
def getNextSample(self, V):
"""
Generate the next sample by randomly flipping two adjacent candidates.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the current sample.
"""
# Select a random alternative in V to switch with its adjacent alternatives.
randPos = random.randint(0, len(V)-2)
W = copy.deepcopy(V)
d = V[randPos]
c = V[randPos+1]
W[randPos] = c
W[randPos+1] = d
# Check whether we should change to the new ranking.
prMW = 1
prMV = 1
prob = min(1.0,(prMW/prMV)*pow(self.phi, self.wmg[d][c]))/2
if random.random() <= prob:
V = W
return V | Generate the next sample by randomly flipping two adjacent candidates.
:ivar list<int> V: Contains integer representations of each candidate in order of their
ranking in a vote, from first to last. This is the current sample. |
def control_group(action, action_space, control_group_act, control_group_id):
"""Act on a control group, selecting, setting, etc."""
del action_space
select = action.action_ui.control_group
select.action = control_group_act
select.control_group_index = control_group_id | Act on a control group, selecting, setting, etc. |
async def set_power(
self, power_type: str,
power_parameters: typing.Mapping[str, typing.Any] = {}):
"""Set the power type and power parameters for this node."""
data = await self._handler.update(
system_id=self.system_id, power_type=power_type,
power_parameters=power_parameters)
self.power_type = data['power_type'] | Set the power type and power parameters for this node. |
def get_selected_files(self, pipeline='pipeline', forfile=None, quiet=0, allowedfileformats='default'):
"""
Parameters
----------
pipeline : string
can be \'pipeline\' (main analysis pipeline, set in tnet.set_pipeline) or \'confound\' (where confound files are, set in tnet.set_confound_pipeline()),
\'functionalconnectivity\'
quiet: int
If 0, prints results. If 1, no results printed.
forfile : str or dict
A filename or dictionary of file tags. If this is set, only files that match those tags are returned.
allowedfileformats : list
list of file formats that are acceptable. Default list is: ['.tsv', '.nii.gz']
Returns
-------
found_files : list
The files which are currently selected using the set pipeline, pipeline_subdir, space, parcellation, tasks, runs, subjects etc. These are the files that will generally be used when calling a make_ function.
"""
# This could be made better
file_dict = dict(self.bids_tags)
if allowedfileformats == 'default':
allowedfileformats = ['.tsv', '.nii.gz']
if forfile:
if isinstance(forfile, str):
forfile = get_bids_tag(forfile, 'all')
for n in forfile.keys():
file_dict[n] = [forfile[n]]
non_entries = []
for n in file_dict:
if not file_dict[n]:
non_entries.append(n)
for n in non_entries:
file_dict.pop(n)
# Only keep non-empty elements
file_components = []
for k in ['sub', 'ses', 'task', 'run']:
if k in file_dict:
file_components.append([k + '-' + t for t in file_dict[k]])
file_list = list(itertools.product(*file_components))
# Specify main directory
if pipeline == 'pipeline':
mdir = self.BIDS_dir + '/derivatives/' + self.pipeline
elif pipeline == 'confound' and self.confound_pipeline:
mdir = self.BIDS_dir + '/derivatives/' + self.confound_pipeline
elif pipeline == 'confound':
mdir = self.BIDS_dir + '/derivatives/' + self.pipeline
elif pipeline == 'functionalconnectivity':
mdir = self.BIDS_dir + '/derivatives/teneto_' + teneto.__version__
else:
raise ValueError('unknown request')
found_files = []
for f in file_list:
wdir = str(mdir)
sub = [t for t in f if t.startswith('sub')]
ses = [t for t in f if t.startswith('ses')]
wdir += '/' + sub[0] + '/'
if ses:
wdir += '/' + ses[0] + '/'
wdir += '/func/'
if pipeline == 'pipeline':
wdir += '/' + self.pipeline_subdir + '/'
fileending = [self.bids_suffix +
f for f in allowedfileformats]
elif pipeline == 'functionalconnectivity':
wdir += '/fc/'
fileending = ['conn' + f for f in allowedfileformats]
elif pipeline == 'confound':
fileending = ['confounds' + f for f in allowedfileformats]
if os.path.exists(wdir):
# make filenames
found = []
# Check that the tags are in the specified bids tags
for ff in os.listdir(wdir):
ftags = get_bids_tag(ff, 'all')
t = [t for t in ftags if t in file_dict and ftags[t]
in file_dict[t]]
if len(t) == len(file_dict):
found.append(ff)
found = [f for f in found for e in fileending if f.endswith(e)]
# Include only if all analysis step tags are present
# Exclude if confounds tag is present
if pipeline == 'confound':
found = [i for i in found if '_confounds' in i]
else:
found = [i for i in found if '_confounds' not in i]
# Make full paths
found = list(
map(str.__add__, [re.sub('/+', '/', wdir)]*len(found), found))
# Remove any files in bad files (could add json subcar reading here)
found = [i for i in found if not any(
[bf in i for bf in self.bad_files])]
if found:
found_files += found
if quiet == -1:
print(wdir)
found_files = list(set(found_files))
if quiet == 0:
print(found_files)
return found_files | Parameters
----------
pipeline : string
can be \'pipeline\' (main analysis pipeline, set in tnet.set_pipeline) or \'confound\' (where confound files are, set in tnet.set_confound_pipeline()),
\'functionalconnectivity\'
quiet: int
If 0, prints results. If 1, no results printed.
forfile : str or dict
A filename or dictionary of file tags. If this is set, only files that match those tags are returned.
allowedfileformats : list
list of file formats that are acceptable. Default list is: ['.tsv', '.nii.gz']
Returns
-------
found_files : list
The files which are currently selected using the set pipeline, pipeline_subdir, space, parcellation, tasks, runs, subjects etc. These are the files that will generally be used when calling a make_ function.
def get_curent_module_classes(module):
"""
Get all classes defined in the given module.
:param module: the module to inspect
:return: list of classes
"""
classes = []
for name, obj in inspect.getmembers(module):
if inspect.isclass(obj):
classes.append(obj)
return classes | Get all classes defined in the given module.
:param module: the module to inspect
:return: list of classes
def has_image(self, name: str) -> bool:
"""
Determines whether the server has a Docker image with a given name.
"""
path = "docker/images/{}".format(name)
r = self.__api.head(path)
if r.status_code == 204:
return True
elif r.status_code == 404:
return False
self.__api.handle_erroneous_response(r) | Determines whether the server has a Docker image with a given name. |
def effectiv_num_data_points(self, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps):
"""
returns the effective number of data points considered in the X2 estimation to compute the reduced X2 value
"""
num_linear = 0
if self._image_likelihood is True:
num_linear = self.image_likelihood.num_param_linear(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps)
num_param, _ = self.param.num_param()
return self.num_data - num_param - num_linear | returns the effective number of data points considered in the X2 estimation to compute the reduced X2 value |
def get(self, request, template_id, view_type):
"""
Render the given template with the stock data.
"""
template = get_object_or_404(EnrollmentNotificationEmailTemplate, pk=template_id)
if view_type not in self.view_type_contexts:
return HttpResponse(status=404)
base_context = self.view_type_contexts[view_type].copy()
base_context.update({'user_name': self.get_user_name(request)})
return HttpResponse(template.render_html_template(base_context), content_type='text/html') | Render the given template with the stock data. |
def ngrams(path, elem, ignore_hash=True):
"""
Yields N-grams from a JSTOR DfR dataset.
Parameters
----------
path : string
Path to unzipped JSTOR DfR folder containing N-grams.
elem : string
Name of subdirectory containing N-grams. (e.g. 'bigrams').
ignore_hash : bool
If True, will exclude all N-grams that contain the hash '#' character.
Returns
-------
ngrams : :class:`.FeatureSet`
"""
grams = GramGenerator(path, elem, ignore_hash=ignore_hash)
return FeatureSet({k: Feature(f) for k, f in grams}) | Yields N-grams from a JSTOR DfR dataset.
Parameters
----------
path : string
Path to unzipped JSTOR DfR folder containing N-grams.
elem : string
Name of subdirectory containing N-grams. (e.g. 'bigrams').
ignore_hash : bool
If True, will exclude all N-grams that contain the hash '#' character.
Returns
-------
ngrams : :class:`.FeatureSet` |
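A hedged usage sketch; the path and subdirectory name below are placeholders, and the keying by document is an assumption about GramGenerator:
unigrams = ngrams('/path/to/unzipped-dfr', 'wordcounts', ignore_hash=True)
# unigrams maps each document identifier to a Feature of n-gram counts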
def _validate_cidr(self, rule):
"""Validate the cidr block in a rule.
Returns:
True: Upon successful completion.
Raises:
SpinnakerSecurityGroupCreationFailed: CIDR definition is invalid or
the network range is too wide.
"""
try:
network = ipaddress.IPv4Network(rule['app'])
except (ipaddress.NetmaskValueError, ValueError) as error:
raise SpinnakerSecurityGroupCreationFailed(error)
self.log.debug('Validating CIDR: %s', network.exploded)
return True | Validate the cidr block in a rule.
Returns:
True: Upon successful completion.
Raises:
SpinnakerSecurityGroupCreationFailed: CIDR definition is invalid or
the network range is too wide. |
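A hedged illustration, assuming sg is an instance of the surrounding security-group class:
sg._validate_cidr({'app': '10.0.0.0/24'})   # parses cleanly -> True
sg._validate_cidr({'app': 'not-a-cidr'})    # raises SpinnakerSecurityGroupCreationFailed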
def _write_ctrl_meas(self):
"""
Write the values to the ctrl_meas and ctrl_hum registers in the device
ctrl_meas sets the pressure and temperature data acquisition options
ctrl_hum sets the humidity oversampling and must be written to first
"""
self._write_register_byte(_BME280_REGISTER_CTRL_HUM, self.overscan_humidity)
self._write_register_byte(_BME280_REGISTER_CTRL_MEAS, self._ctrl_meas) | Write the values to the ctrl_meas and ctrl_hum registers in the device
ctrl_meas sets the pressure and temperature data acquisition options
ctrl_hum sets the humidity oversampling and must be written to first
def __initialize(self, sample):
"""!
@brief Initializes internal states and resets clustering results in line with input sample.
"""
self.__processed = [False] * len(sample)
self.__optics_objects = [optics_descriptor(i) for i in range(len(sample))] # List of OPTICS objects that corresponds to objects from input sample.
self.__ordered_database = [] # List of OPTICS objects in traverse order.
self.__clusters = None # Result of clustering (list of clusters where each cluster contains indexes of objects from input data).
self.__noise = None | !
@brief Initializes internal states and resets clustering results in line with input sample. |
def emit(self, record):
"""
Emit a record.
Always check time
"""
try:
if self.check_base_filename(record):
self.build_base_filename()
FileHandler.emit(self, record)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record) | Emit a record.
Always check time |
def _isbn_cleanse(isbn, checksum=True):
"""Check ISBN is a string, and passes basic sanity checks.
Args:
isbn (str): SBN, ISBN-10 or ISBN-13
checksum (bool): ``True`` if ``isbn`` includes checksum character
Returns:
``str``: ISBN with hyphenation removed, including when called with a
SBN
Raises:
TypeError: ``isbn`` is not a ``str`` type
IsbnError: Incorrect length for ``isbn``
IsbnError: Incorrect SBN or ISBN formatting
"""
if not isinstance(isbn, string_types):
raise TypeError('ISBN must be a string, received %r' % isbn)
if PY2 and isinstance(isbn, str): # pragma: Python 2
isbn = unicode(isbn)
uni_input = False
else: # pragma: Python 3
uni_input = True
for dash in DASHES:
isbn = isbn.replace(dash, unicode())
if checksum:
if not isbn[:-1].isdigit():
raise IsbnError('non-digit parts')
if len(isbn) == 9:
isbn = '0' + isbn
if len(isbn) == 10:
if not (isbn[-1].isdigit() or isbn[-1] in 'Xx'):
raise IsbnError('non-digit or X checksum')
elif len(isbn) == 13:
if not isbn[-1].isdigit():
raise IsbnError('non-digit checksum')
if not isbn.startswith(('978', '979')):
raise IsbnError('invalid Bookland region')
else:
raise IsbnError('ISBN must be either 10 or 13 characters long')
else:
if len(isbn) == 8:
isbn = '0' + isbn
elif len(isbn) == 12 and not isbn[:3].startswith(('978', '979')):
raise IsbnError('invalid Bookland region')
if not isbn.isdigit():
raise IsbnError('non-digit parts')
if not len(isbn) in (9, 12):
raise IsbnError('ISBN must be either 9 or 12 characters long '
'without checksum')
if PY2 and not uni_input: # pragma: Python 2
# Sadly, type ping-pong is required to maintain backwards compatibility
# with previous pyisbn releases for Python 2 users.
return str(isbn)
else: # pragma: Python 3
return isbn | Check ISBN is a string, and passes basic sanity checks.
Args:
isbn (str): SBN, ISBN-10 or ISBN-13
checksum (bool): ``True`` if ``isbn`` includes checksum character
Returns:
``str``: ISBN with hyphenation removed, including when called with a
SBN
Raises:
TypeError: ``isbn`` is not a ``str`` type
IsbnError: Incorrect length for ``isbn``
IsbnError: Incorrect SBN or ISBN formatting |
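Two examples of accepted inputs, derived from the checks above:
_isbn_cleanse('978-3-16-148410-0')   # ISBN-13: dashes removed -> '9783161484100'
_isbn_cleanse('0-14-020652-3')       # ISBN-10 with checksum -> '0140206523'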
def get_graph_by_most_recent(self, name: str) -> Optional[BELGraph]:
"""Get the most recently created network with the given name as a :class:`pybel.BELGraph`."""
network = self.get_most_recent_network_by_name(name)
if network is None:
return
return network.as_bel() | Get the most recently created network with the given name as a :class:`pybel.BELGraph`. |
def arccalibration(wv_master,
xpos_arc,
naxis1_arc,
crpix1,
wv_ini_search,
wv_end_search,
wvmin_useful,
wvmax_useful,
error_xpos_arc,
times_sigma_r,
frac_triplets_for_sum,
times_sigma_theil_sen,
poly_degree_wfit,
times_sigma_polfilt,
times_sigma_cook,
times_sigma_inclusion,
geometry=None,
debugplot=0):
"""Performs arc line identification for arc calibration.
This function is a wrapper of two functions, which are responsible
of computing all the relevant information concerning the triplets
generated from the master table and the actual identification
procedure of the arc lines, respectively.
The separation of those computations in two different functions
helps to avoid the repetition of calls to the first function when
calibrating several arcs using the same master table.
Parameters
----------
wv_master : 1d numpy array, float
Array with wavelengths corresponding to the master table
(Angstroms).
xpos_arc : 1d numpy array, float
Location of arc lines (pixels).
naxis1_arc : int
NAXIS1 for arc spectrum.
crpix1 : float
CRPIX1 value to be employed in the wavelength calibration.
wv_ini_search : float
Minimum expected wavelength in spectrum.
wv_end_search : float
Maximum expected wavelength in spectrum.
wvmin_useful : float
If not None, this value is used to clip detected lines below it.
wvmax_useful : float
If not None, this value is used to clip detected lines above it.
error_xpos_arc : float
Error in arc line position (pixels).
times_sigma_r : float
Times sigma to search for valid line position ratios.
frac_triplets_for_sum : float
Fraction of distances to different triplets to sum when
computing the cost function.
times_sigma_theil_sen : float
Number of times the (robust) standard deviation around the
linear fit (using the Theil-Sen method) to reject points.
poly_degree_wfit : int
Degree for polynomial fit to wavelength calibration.
times_sigma_polfilt : float
Number of times the (robust) standard deviation around the
polynomial fit to reject points.
times_sigma_cook : float
Number of times the standard deviation of Cook's distances
to detect outliers. If zero, this method of outlier detection
is ignored.
times_sigma_inclusion : float
Number of times the (robust) standard deviation around the
polynomial fit to include a new line in the set of identified
lines.
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the window geometry.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
list_of_wvfeatures : list (of WavecalFeature instances)
A list of size equal to the number of identified lines, which
elements are instances of the class WavecalFeature, containing
all the relevant information concerning the line
identification.
"""
ntriplets_master, ratios_master_sorted, triplets_master_sorted_list = \
gen_triplets_master(wv_master=wv_master, geometry=geometry,
debugplot=debugplot)
list_of_wvfeatures = arccalibration_direct(
wv_master=wv_master,
ntriplets_master=ntriplets_master,
ratios_master_sorted=ratios_master_sorted,
triplets_master_sorted_list=triplets_master_sorted_list,
xpos_arc=xpos_arc,
naxis1_arc=naxis1_arc,
crpix1=crpix1,
wv_ini_search=wv_ini_search,
wv_end_search=wv_end_search,
wvmin_useful=wvmin_useful,
wvmax_useful=wvmax_useful,
error_xpos_arc=error_xpos_arc,
times_sigma_r=times_sigma_r,
frac_triplets_for_sum=frac_triplets_for_sum,
times_sigma_theil_sen=times_sigma_theil_sen,
poly_degree_wfit=poly_degree_wfit,
times_sigma_polfilt=times_sigma_polfilt,
times_sigma_cook=times_sigma_cook,
times_sigma_inclusion=times_sigma_inclusion,
geometry=geometry,
debugplot=debugplot)
return list_of_wvfeatures | Performs arc line identification for arc calibration.
This function is a wrapper of two functions, which are responsible
of computing all the relevant information concerning the triplets
generated from the master table and the actual identification
procedure of the arc lines, respectively.
The separation of those computations in two different functions
helps to avoid the repetition of calls to the first function when
calibrating several arcs using the same master table.
Parameters
----------
wv_master : 1d numpy array, float
Array with wavelengths corresponding to the master table
(Angstroms).
xpos_arc : 1d numpy array, float
Location of arc lines (pixels).
naxis1_arc : int
NAXIS1 for arc spectrum.
crpix1 : float
CRPIX1 value to be employed in the wavelength calibration.
wv_ini_search : float
Minimum expected wavelength in spectrum.
wv_end_search : float
Maximum expected wavelength in spectrum.
wvmin_useful : float
If not None, this value is used to clip detected lines below it.
wvmax_useful : float
If not None, this value is used to clip detected lines above it.
error_xpos_arc : float
Error in arc line position (pixels).
times_sigma_r : float
Times sigma to search for valid line position ratios.
frac_triplets_for_sum : float
Fraction of distances to different triplets to sum when
computing the cost function.
times_sigma_theil_sen : float
Number of times the (robust) standard deviation around the
linear fit (using the Theil-Sen method) to reject points.
poly_degree_wfit : int
Degree for polynomial fit to wavelength calibration.
times_sigma_polfilt : float
Number of times the (robust) standard deviation around the
polynomial fit to reject points.
times_sigma_cook : float
Number of times the standard deviation of Cook's distances
to detect outliers. If zero, this method of outlier detection
is ignored.
times_sigma_inclusion : float
Number of times the (robust) standard deviation around the
polynomial fit to include a new line in the set of identified
lines.
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the window geometry.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
list_of_wvfeatures : list (of WavecalFeature instances)
A list of size equal to the number of identified lines, which
elements are instances of the class WavecalFeature, containing
all the relevant information concerning the line
identification. |
def _invertMapping(mapping):
"""Converts a protein to peptide or peptide to protein mapping.
:param mapping: dict, for each key contains a set of entries
:returns: an inverted mapping that each entry of the values points to a set
of initial keys.
"""
invertedMapping = ddict(set)
for key, values in viewitems(mapping):
for value in values:
invertedMapping[value].add(key)
return invertedMapping | Inverts a protein to peptide or peptide to protein mapping.
:param mapping: dict, for each key contains a set of entries
:returns: an inverted mapping in which each value entry points to the set
of initial keys.
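A small, self-contained sketch of the inversion:
protein_to_peptides = {'protA': {'pep1', 'pep2'}, 'protB': {'pep2'}}
peptide_to_proteins = _invertMapping(protein_to_peptides)
# peptide_to_proteins == {'pep1': {'protA'}, 'pep2': {'protA', 'protB'}}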
def _doc(from_func):
'''copy doc from one function to another
use as a decorator eg::
@_doc(file.tell)
def tell(..):
...
'''
def decorator(to_func):
to_func.__doc__ = from_func.__doc__
return to_func
return decorator | copy doc from one function to another
use as a decorator eg::
@_doc(file.tell)
def tell(..):
... |
def configure():
'''
Configure the transfer environment and store the project configuration
'''
completer = Completer()
readline.set_completer_delims('\t')
readline.parse_and_bind('tab: complete')
readline.set_completer(completer.path_completer)
home = os.path.expanduser('~')
if os.path.isfile(os.path.join(home, '.transfer', 'config.yaml')):
with open(os.path.join(home, '.transfer', 'config.yaml'), 'r') as fp:
config = yaml.load(fp.read())
else:
config = []
project_name = input('Name your project: ')
existing_project = None
for project in config:
if project_name == project['name']:
existing_project = project_name
if existing_project is not None:
print(colored('Project ' + project_name + ' already exists', 'red'))
overwrite = str_input('Would you like to overwrite this project? (yes or no) ', ['yes', 'no'])
if overwrite == 'no':
return
else:
config = [project for project in config if project_name != project['name']]
image_path = os.path.expanduser(input('Select parent directory for your images: '))
path_unset = True
while path_unset:
project_path = os.path.expanduser(input('Select destination for your project: '))
if (project_path.find(image_path) == 0):
print('Project destination should not be same or within image directory!')
else:
path_unset = False
print('Select architecture:')
print('[0] resnet50')
print('[1] xception')
print('[2] inception_v3')
architecture = int_input('choice', 0, 2, show_range = False)
if architecture == 0:
arch = 'resnet50'
img_dim = 224
conv_dim = 7
final_cutoff = 80
elif architecture == 1:
arch = 'xception'
img_dim = 299
conv_dim = 10
final_cutoff = 80
else:
arch = 'inception_v3'
img_dim = 299
conv_dim = 8
final_cutoff = 80
api_port = int_input('port for local prediction API (suggested: 5000)', 1024, 49151)
kfold = int_input('number of folds to use (suggested: 5)', 3, 10)
kfold_every = bool_input('Fit a model for every fold? (if false, just fit one)')
print('Warning: if working on a remote computer, you may not be able to plot!')
plot_cm = bool_input('Plot a confusion matrix after training?')
batch_size = int_input('batch size (suggested: 8)', 1, 64)
learning_rate = float_input('learning rate (suggested: 0.001)', 0, 1)
learning_rate_decay = float_input('learning decay rate (suggested: 0.000001)', 0, 1)
cycle = int_input('number of cycles before resetting the learning rate (suggested: 3)', 1, 10)
num_rounds = int_input('number of rounds (suggested: 3)', 1, 100)
print('Select image resolution:')
print('[0] low (' + str(img_dim) + ' px)')
print('[1] mid (' + str(img_dim * 2) + ' px)')
print('[2] high (' + str(img_dim * 4) + ' px)')
img_resolution_index = int_input('choice', 0, 2, show_range = False)
if img_resolution_index == 0:
img_size = 1
elif img_resolution_index == 1:
img_size = 2
else:
img_size = 4
use_augmentation = str_input('Would you like to add image augmentation? (yes or no) ', ['yes', 'no'])
if use_augmentation == 'yes':
augmentations = select_augmentations()
else:
augmentations = None
project = {'name': project_name,
'img_path': image_path,
'path': project_path,
'plot': plot_cm,
'api_port': api_port,
'kfold': kfold,
'kfold_every': kfold_every,
'cycle': cycle,
'seed': np.random.randint(9999),
'batch_size': batch_size,
'learning_rate': learning_rate,
'learning_rate_decay': learning_rate_decay,
'final_cutoff': final_cutoff,
'rounds': num_rounds,
'img_size': img_size,
'augmentations': augmentations,
'architecture': arch,
'img_dim': img_dim,
'conv_dim': conv_dim,
'is_split': False,
'is_array': False,
'is_augmented': False,
'is_pre_model': False,
'is_final': False,
'model_round': 0,
'server_weights': None,
'last_weights': None,
'best_weights': None}
config.append(project)
store_config(config)
print('')
print(colored('Project configure saved!', 'cyan'))
print('')
print('To run project:')
print('')
print(colored(' transfer --run --project ' + project_name, 'green'))
print('or')
print(colored(' transfer -r -p ' + project_name, 'green')) | Configure the transfer environment and store the project configuration
def create_payment_transaction(cls, payment_transaction, **kwargs):
"""Create PaymentTransaction
Create a new PaymentTransaction
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_payment_transaction(payment_transaction, async=True)
>>> result = thread.get()
:param async bool
:param PaymentTransaction payment_transaction: Attributes of paymentTransaction to create (required)
:return: PaymentTransaction
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_payment_transaction_with_http_info(payment_transaction, **kwargs)
else:
(data) = cls._create_payment_transaction_with_http_info(payment_transaction, **kwargs)
return data | Create PaymentTransaction
Create a new PaymentTransaction
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_payment_transaction(payment_transaction, async=True)
>>> result = thread.get()
:param async bool
:param PaymentTransaction payment_transaction: Attributes of paymentTransaction to create (required)
:return: PaymentTransaction
If the method is called asynchronously,
returns the request thread. |
def _cnx_is_empty(in_file):
"""Check if cnr or cns files are empty (only have a header)
"""
with open(in_file) as in_handle:
for i, line in enumerate(in_handle):
if i > 0:
return False
return True | Check if cnr or cns files are empty (only have a header) |
def detect_languages(self, texts):
"""
Params:
::texts = Array of texts for which to detect languages
Returns:
Returns the languages present in the array of texts.
"""
text_list = TextUtils.format_list_to_send(texts)
infos_translate = TextDetectLanguageModel(text_list).to_dict()
texts_for_detect = TextUtils.change_key(infos_translate, "text",
"texts", infos_translate["text"])
mode_translate = TranslatorMode.DetectArray.value
return self._get_content(texts_for_detect, mode_translate) | Params:
::texts = Array of texts for which to detect languages
Returns:
Returns the languages present in the array of texts.
def py_module_preamble(ctx: GeneratorContext,) -> GeneratedPyAST:
"""Bootstrap a new module with imports and other boilerplate."""
preamble: List[ast.AST] = []
preamble.extend(_module_imports(ctx))
preamble.append(_from_module_import())
preamble.append(_ns_var())
return GeneratedPyAST(node=ast.NameConstant(None), dependencies=preamble) | Bootstrap a new module with imports and other boilerplate. |
def get_model_file(name, root=os.path.join(base.data_dir(), 'models')):
r"""Return location for the pretrained on local file system.
This function will download from online model zoo when model cannot be found or has mismatch.
The root directory will be created if it doesn't exist.
Parameters
----------
name : str
Name of the model.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
Returns
-------
file_path
Path to the requested pretrained model file.
"""
file_name = '{name}-{short_hash}'.format(name=name,
short_hash=short_hash(name))
root = os.path.expanduser(root)
file_path = os.path.join(root, file_name+'.params')
sha1_hash = _model_sha1[name]
if os.path.exists(file_path):
if check_sha1(file_path, sha1_hash):
return file_path
else:
logging.warning('Mismatch in the content of model file detected. Downloading again.')
else:
logging.info('Model file not found. Downloading to %s.', file_path)
util.makedirs(root)
zip_file_path = os.path.join(root, file_name+'.zip')
repo_url = os.environ.get('MXNET_GLUON_REPO', apache_repo_url)
if repo_url[-1] != '/':
repo_url = repo_url + '/'
download(_url_format.format(repo_url=repo_url, file_name=file_name),
path=zip_file_path,
overwrite=True)
with zipfile.ZipFile(zip_file_path) as zf:
zf.extractall(root)
os.remove(zip_file_path)
if check_sha1(file_path, sha1_hash):
return file_path
else:
raise ValueError('Downloaded file has different hash. Please try again.') | r"""Return location for the pretrained model on the local file system.
This function will download from online model zoo when model cannot be found or has mismatch.
The root directory will be created if it doesn't exist.
Parameters
----------
name : str
Name of the model.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
Returns
-------
file_path
Path to the requested pretrained model file. |
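A hedged usage sketch; the model name is a placeholder that must exist in the _model_sha1 table:
params_path = get_model_file('resnet18_v1')   # downloads and caches on first use
# the returned path can then be loaded into a gluon block, e.g. net.load_parameters(params_path)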