| max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
virtual/lib/python3.8/site-packages/dns/zonefile.py | Lenus254/personal_blog | 1 | 6200 | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS Zones."""
import re
import sys
import dns.exception
import dns.name
import dns.node
import dns.rdataclass
import dns.rdatatype
import dns.rdata
import dns.rdtypes.ANY.SOA
import dns.rrset
import dns.tokenizer
import dns.transaction
import dns.ttl
import dns.grange
class UnknownOrigin(dns.exception.DNSException):
"""Unknown origin"""
class CNAMEAndOtherData(dns.exception.DNSException):
"""A node has a CNAME and other data"""
def _check_cname_and_other_data(txn, name, rdataset):
rdataset_kind = dns.node.NodeKind.classify_rdataset(rdataset)
node = txn.get_node(name)
if node is None:
# empty nodes are neutral.
return
node_kind = node.classify()
if node_kind == dns.node.NodeKind.CNAME and \
rdataset_kind == dns.node.NodeKind.REGULAR:
raise CNAMEAndOtherData('rdataset type is not compatible with a '
'CNAME node')
elif node_kind == dns.node.NodeKind.REGULAR and \
rdataset_kind == dns.node.NodeKind.CNAME:
raise CNAMEAndOtherData('CNAME rdataset is not compatible with a '
'regular data node')
# Otherwise at least one of the node and the rdataset is neutral, so
# adding the rdataset is ok
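# Illustrative example (added; not part of the original module): the check above
# would reject a zone that carries both of the following records for the same
# owner name, because a CNAME cannot coexist with other "regular" data:
#
#   www  IN  CNAME  host.example.
#   www  IN  A      10.0.0.1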
class Reader:
"""Read a DNS zone file into a transaction."""
def __init__(self, tok, rdclass, txn, allow_include=False,
allow_directives=True, force_name=None,
force_ttl=None, force_rdclass=None, force_rdtype=None,
default_ttl=None):
self.tok = tok
(self.zone_origin, self.relativize, _) = \
txn.manager.origin_information()
self.current_origin = self.zone_origin
self.last_ttl = 0
self.last_ttl_known = False
if force_ttl is not None:
default_ttl = force_ttl
if default_ttl is None:
self.default_ttl = 0
self.default_ttl_known = False
else:
self.default_ttl = default_ttl
self.default_ttl_known = True
self.last_name = self.current_origin
self.zone_rdclass = rdclass
self.txn = txn
self.saved_state = []
self.current_file = None
self.allow_include = allow_include
self.allow_directives = allow_directives
self.force_name = force_name
self.force_ttl = force_ttl
self.force_rdclass = force_rdclass
self.force_rdtype = force_rdtype
self.txn.check_put_rdataset(_check_cname_and_other_data)
def _eat_line(self):
while 1:
token = self.tok.get()
if token.is_eol_or_eof():
break
def _get_identifier(self):
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
return token
def _rr_line(self):
"""Process one line from a DNS zone file."""
token = None
# Name
if self.force_name is not None:
name = self.force_name
else:
if self.current_origin is None:
raise UnknownOrigin
token = self.tok.get(want_leading=True)
if not token.is_whitespace():
self.last_name = self.tok.as_name(token, self.current_origin)
else:
token = self.tok.get()
if token.is_eol_or_eof():
# treat leading WS followed by EOL/EOF as if they were EOL/EOF.
return
self.tok.unget(token)
name = self.last_name
if not name.is_subdomain(self.zone_origin):
self._eat_line()
return
if self.relativize:
name = name.relativize(self.zone_origin)
# TTL
if self.force_ttl is not None:
ttl = self.force_ttl
self.last_ttl = ttl
self.last_ttl_known = True
else:
token = self._get_identifier()
ttl = None
try:
ttl = dns.ttl.from_text(token.value)
self.last_ttl = ttl
self.last_ttl_known = True
token = None
except dns.ttl.BadTTL:
if self.default_ttl_known:
ttl = self.default_ttl
elif self.last_ttl_known:
ttl = self.last_ttl
self.tok.unget(token)
# Class
if self.force_rdclass is not None:
rdclass = self.force_rdclass
else:
token = self._get_identifier()
try:
rdclass = dns.rdataclass.from_text(token.value)
except dns.exception.SyntaxError:
raise
except Exception:
rdclass = self.zone_rdclass
self.tok.unget(token)
if rdclass != self.zone_rdclass:
raise dns.exception.SyntaxError("RR class is not zone's class")
# Type
if self.force_rdtype is not None:
rdtype = self.force_rdtype
else:
token = self._get_identifier()
try:
rdtype = dns.rdatatype.from_text(token.value)
except Exception:
raise dns.exception.SyntaxError(
"unknown rdatatype '%s'" % token.value)
try:
rd = dns.rdata.from_text(rdclass, rdtype, self.tok,
self.current_origin, self.relativize,
self.zone_origin)
except dns.exception.SyntaxError:
# Catch and reraise.
raise
except Exception:
# All exceptions that occur in the processing of rdata
# are treated as syntax errors. This is not strictly
# correct, but it is correct almost all of the time.
# We convert them to syntax errors so that we can emit
# helpful filename:line info.
(ty, va) = sys.exc_info()[:2]
raise dns.exception.SyntaxError(
"caught exception {}: {}".format(str(ty), str(va)))
if not self.default_ttl_known and rdtype == dns.rdatatype.SOA:
# The pre-RFC2308 and pre-BIND9 behavior inherits the zone default
# TTL from the SOA minttl if no $TTL statement is present before the
# SOA is parsed.
self.default_ttl = rd.minimum
self.default_ttl_known = True
if ttl is None:
# if we didn't have a TTL on the SOA, set it!
ttl = rd.minimum
# TTL check. We had to wait until now to do this as the SOA RR's
# own TTL can be inferred from its minimum.
if ttl is None:
raise dns.exception.SyntaxError("Missing default TTL value")
self.txn.add(name, ttl, rd)
def _parse_modify(self, side):
# Here we catch everything in '{' '}' in a group so we can replace it
# with ''.
is_generate1 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+),(.)}).*$")
is_generate2 = re.compile(r"^.*\$({(\+|-?)(\d+)}).*$")
is_generate3 = re.compile(r"^.*\$({(\+|-?)(\d+),(\d+)}).*$")
# Sometimes there are modifiers in the hostname. These come after
# the dollar sign. They are in the form: ${offset[,width[,base]]}.
# Make names
g1 = is_generate1.match(side)
if g1:
mod, sign, offset, width, base = g1.groups()
if sign == '':
sign = '+'
g2 = is_generate2.match(side)
if g2:
mod, sign, offset = g2.groups()
if sign == '':
sign = '+'
width = 0
base = 'd'
g3 = is_generate3.match(side)
if g3:
mod, sign, offset, width = g3.groups()
if sign == '':
sign = '+'
base = 'd'
if not (g1 or g2 or g3):
mod = ''
sign = '+'
offset = 0
width = 0
base = 'd'
if base != 'd':
raise NotImplementedError()
return mod, sign, offset, width, base
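    # Illustrative example (added; not in the original source): in a $GENERATE
    # line such as
    #
    #   $GENERATE 1-4 host-${0,3,d} A 10.0.0.$
    #
    # the left-hand-side modifier {0,3,d} parsed above means offset 0, width 3
    # and decimal base, so the generated owner names are host-001 ... host-004.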
def _generate_line(self):
# range lhs [ttl] [class] type rhs [ comment ]
"""Process one line containing the GENERATE statement from a DNS
zone file."""
if self.current_origin is None:
raise UnknownOrigin
token = self.tok.get()
# Range (required)
try:
start, stop, step = dns.grange.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except Exception:
raise dns.exception.SyntaxError
# lhs (required)
try:
lhs = token.value
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except Exception:
raise dns.exception.SyntaxError
# TTL
try:
ttl = dns.ttl.from_text(token.value)
self.last_ttl = ttl
self.last_ttl_known = True
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.ttl.BadTTL:
if not (self.last_ttl_known or self.default_ttl_known):
raise dns.exception.SyntaxError("Missing default TTL value")
if self.default_ttl_known:
ttl = self.default_ttl
elif self.last_ttl_known:
ttl = self.last_ttl
# Class
try:
rdclass = dns.rdataclass.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except dns.exception.SyntaxError:
raise dns.exception.SyntaxError
except Exception:
rdclass = self.zone_rdclass
if rdclass != self.zone_rdclass:
raise dns.exception.SyntaxError("RR class is not zone's class")
# Type
try:
rdtype = dns.rdatatype.from_text(token.value)
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError
except Exception:
raise dns.exception.SyntaxError("unknown rdatatype '%s'" %
token.value)
# rhs (required)
rhs = token.value
# The code currently only supports base 'd', so the last value
# in the tuple _parse_modify returns is ignored
lmod, lsign, loffset, lwidth, _ = self._parse_modify(lhs)
rmod, rsign, roffset, rwidth, _ = self._parse_modify(rhs)
for i in range(start, stop + 1, step):
# +1 because bind is inclusive and python is exclusive
if lsign == '+':
lindex = i + int(loffset)
elif lsign == '-':
lindex = i - int(loffset)
if rsign == '-':
rindex = i - int(roffset)
elif rsign == '+':
rindex = i + int(roffset)
lzfindex = str(lindex).zfill(int(lwidth))
rzfindex = str(rindex).zfill(int(rwidth))
name = lhs.replace('$%s' % (lmod), lzfindex)
rdata = rhs.replace('$%s' % (rmod), rzfindex)
self.last_name = dns.name.from_text(name, self.current_origin,
self.tok.idna_codec)
name = self.last_name
if not name.is_subdomain(self.zone_origin):
self._eat_line()
return
if self.relativize:
name = name.relativize(self.zone_origin)
try:
rd = dns.rdata.from_text(rdclass, rdtype, rdata,
self.current_origin, self.relativize,
self.zone_origin)
except dns.exception.SyntaxError:
# Catch and reraise.
raise
except Exception:
# All exceptions that occur in the processing of rdata
# are treated as syntax errors. This is not strictly
# correct, but it is correct almost all of the time.
# We convert them to syntax errors so that we can emit
# helpful filename:line info.
(ty, va) = sys.exc_info()[:2]
raise dns.exception.SyntaxError("caught exception %s: %s" %
(str(ty), str(va)))
self.txn.add(name, ttl, rd)
def read(self):
"""Read a DNS zone file and build a zone object.
@raises dns.zone.NoSOA: No SOA RR was found at the zone origin
@raises dns.zone.NoNS: No NS RRset was found at the zone origin
"""
try:
while 1:
token = self.tok.get(True, True)
if token.is_eof():
if self.current_file is not None:
self.current_file.close()
if len(self.saved_state) > 0:
(self.tok,
self.current_origin,
self.last_name,
self.current_file,
self.last_ttl,
self.last_ttl_known,
self.default_ttl,
self.default_ttl_known) = self.saved_state.pop(-1)
continue
break
elif token.is_eol():
continue
elif token.is_comment():
self.tok.get_eol()
continue
elif token.value[0] == '$' and self.allow_directives:
c = token.value.upper()
if c == '$TTL':
token = self.tok.get()
if not token.is_identifier():
raise dns.exception.SyntaxError("bad $TTL")
self.default_ttl = dns.ttl.from_text(token.value)
self.default_ttl_known = True
self.tok.get_eol()
elif c == '$ORIGIN':
self.current_origin = self.tok.get_name()
self.tok.get_eol()
if self.zone_origin is None:
self.zone_origin = self.current_origin
self.txn._set_origin(self.current_origin)
elif c == '$INCLUDE' and self.allow_include:
token = self.tok.get()
filename = token.value
token = self.tok.get()
if token.is_identifier():
new_origin =\
dns.name.from_text(token.value,
self.current_origin,
self.tok.idna_codec)
self.tok.get_eol()
elif not token.is_eol_or_eof():
raise dns.exception.SyntaxError(
"bad origin in $INCLUDE")
else:
new_origin = self.current_origin
self.saved_state.append((self.tok,
self.current_origin,
self.last_name,
self.current_file,
self.last_ttl,
self.last_ttl_known,
self.default_ttl,
self.default_ttl_known))
self.current_file = open(filename, 'r')
self.tok = dns.tokenizer.Tokenizer(self.current_file,
filename)
self.current_origin = new_origin
elif c == '$GENERATE':
self._generate_line()
else:
raise dns.exception.SyntaxError(
"Unknown zone file directive '" + c + "'")
continue
self.tok.unget(token)
self._rr_line()
except dns.exception.SyntaxError as detail:
(filename, line_number) = self.tok.where()
if detail is None:
detail = "syntax error"
ex = dns.exception.SyntaxError(
"%s:%d: %s" % (filename, line_number, detail))
tb = sys.exc_info()[2]
raise ex.with_traceback(tb) from None
class RRsetsReaderTransaction(dns.transaction.Transaction):
def __init__(self, manager, replacement, read_only):
assert not read_only
super().__init__(manager, replacement, read_only)
self.rdatasets = {}
def _get_rdataset(self, name, rdtype, covers):
return self.rdatasets.get((name, rdtype, covers))
def _get_node(self, name):
rdatasets = []
for (rdataset_name, _, _), rdataset in self.rdatasets.items():
if name == rdataset_name:
rdatasets.append(rdataset)
if len(rdatasets) == 0:
return None
node = dns.node.Node()
node.rdatasets = rdatasets
return node
def _put_rdataset(self, name, rdataset):
self.rdatasets[(name, rdataset.rdtype, rdataset.covers)] = rdataset
def _delete_name(self, name):
# First remove any changes involving the name
remove = []
for key in self.rdatasets:
if key[0] == name:
remove.append(key)
if len(remove) > 0:
for key in remove:
del self.rdatasets[key]
def _delete_rdataset(self, name, rdtype, covers):
try:
del self.rdatasets[(name, rdtype, covers)]
except KeyError:
pass
def _name_exists(self, name):
for (n, _, _) in self.rdatasets:
if n == name:
return True
return False
def _changed(self):
return len(self.rdatasets) > 0
def _end_transaction(self, commit):
if commit and self._changed():
rrsets = []
for (name, _, _), rdataset in self.rdatasets.items():
rrset = dns.rrset.RRset(name, rdataset.rdclass, rdataset.rdtype,
rdataset.covers)
rrset.update(rdataset)
rrsets.append(rrset)
self.manager.set_rrsets(rrsets)
def _set_origin(self, origin):
pass
class RRSetsReaderManager(dns.transaction.TransactionManager):
def __init__(self, origin=dns.name.root, relativize=False,
rdclass=dns.rdataclass.IN):
self.origin = origin
self.relativize = relativize
self.rdclass = rdclass
self.rrsets = []
def writer(self, replacement=False):
assert replacement is True
return RRsetsReaderTransaction(self, True, False)
def get_class(self):
return self.rdclass
def origin_information(self):
if self.relativize:
effective = dns.name.empty
else:
effective = self.origin
return (self.origin, self.relativize, effective)
def set_rrsets(self, rrsets):
self.rrsets = rrsets
def read_rrsets(text, name=None, ttl=None, rdclass=dns.rdataclass.IN,
default_rdclass=dns.rdataclass.IN,
rdtype=None, default_ttl=None, idna_codec=None,
origin=dns.name.root, relativize=False):
"""Read one or more rrsets from the specified text, possibly subject
to restrictions.
*text*, a file object or a string, is the input to process.
*name*, a string, ``dns.name.Name``, or ``None``, is the owner name of
the rrset. If not ``None``, then the owner name is "forced", and the
input must not specify an owner name. If ``None``, then any owner names
are allowed and must be present in the input.
    *ttl*, an ``int``, string, or None. If not ``None``, then the TTL is
forced to be the specified value and the input must not specify a TTL.
If ``None``, then a TTL may be specified in the input. If it is not
specified, then the *default_ttl* will be used.
*rdclass*, a ``dns.rdataclass.RdataClass``, string, or ``None``. If
not ``None``, then the class is forced to the specified value, and the
input must not specify a class. If ``None``, then the input may specify
a class that matches *default_rdclass*. Note that it is not possible to
return rrsets with differing classes; specifying ``None`` for the class
simply allows the user to optionally type a class as that may be convenient
when cutting and pasting.
*default_rdclass*, a ``dns.rdataclass.RdataClass`` or string. The class
of the returned rrsets.
*rdtype*, a ``dns.rdatatype.RdataType``, string, or ``None``. If not
``None``, then the type is forced to the specified value, and the
input must not specify a type. If ``None``, then a type must be present
for each RR.
*default_ttl*, an ``int``, string, or ``None``. If not ``None``, then if
the TTL is not forced and is not specified, then this value will be used.
    If ``None``, then an error will occur if the TTL is neither forced nor
    specified in the input.
*idna_codec*, a ``dns.name.IDNACodec``, specifies the IDNA
encoder/decoder. If ``None``, the default IDNA 2003 encoder/decoder
is used. Note that codecs only apply to the owner name; dnspython does
not do IDNA for names in rdata, as there is no IDNA zonefile format.
*origin*, a string, ``dns.name.Name``, or ``None``, is the origin for any
relative names in the input, and also the origin to relativize to if
*relativize* is ``True``.
*relativize*, a bool. If ``True``, names are relativized to the *origin*;
if ``False`` then any relative names in the input are made absolute by
appending the *origin*.
"""
if isinstance(origin, str):
origin = dns.name.from_text(origin, dns.name.root, idna_codec)
if isinstance(name, str):
name = dns.name.from_text(name, origin, idna_codec)
if isinstance(ttl, str):
ttl = dns.ttl.from_text(ttl)
if isinstance(default_ttl, str):
default_ttl = dns.ttl.from_text(default_ttl)
if rdclass is not None:
rdclass = dns.rdataclass.RdataClass.make(rdclass)
default_rdclass = dns.rdataclass.RdataClass.make(default_rdclass)
if rdtype is not None:
rdtype = dns.rdatatype.RdataType.make(rdtype)
manager = RRSetsReaderManager(origin, relativize, default_rdclass)
with manager.writer(True) as txn:
tok = dns.tokenizer.Tokenizer(text, '<input>', idna_codec=idna_codec)
reader = Reader(tok, default_rdclass, txn, allow_directives=False,
force_name=name, force_ttl=ttl, force_rdclass=rdclass,
force_rdtype=rdtype, default_ttl=default_ttl)
reader.read()
return manager.rrsets
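# Illustrative usage sketch (added; not part of the original module). The zone
# text and origin below are made-up example values; read_rrsets() is assumed to
# behave as defined above and to return a list of dns.rrset.RRset objects:
#
#   import dns.zonefile
#   rrsets = dns.zonefile.read_rrsets("ns1 3600 IN A 10.0.0.1\nns2 3600 IN A 10.0.0.2\n",
#                                     origin="example.", relativize=True)
#   for rrset in rrsets:
#       print(rrset)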
| 1.765625 | 2 |
swift/common/daemon.py | fossabot/swift-1 | 0 | 6201 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import os
import sys
import time
import signal
from re import sub
import eventlet.debug
from eventlet.hubs import use_hub
from swift.common import utils
class Daemon(object):
"""
Daemon base class
A daemon has a run method that accepts a ``once`` kwarg and will dispatch
to :meth:`run_once` or :meth:`run_forever`.
A subclass of Daemon must implement :meth:`run_once` and
:meth:`run_forever`.
    A subclass of Daemon may override :meth:`get_worker_args` to dispatch
    arguments to individual child process workers, and :meth:`is_healthy` to
    perform context-specific periodic wellness checks which can reset worker
    arguments.
    Implementations of Daemon do not know *how* to daemonize or how to execute
    multiple daemonized workers; they simply provide the behavior of the daemon
    and context-specific knowledge about how workers should be started.
"""
def __init__(self, conf):
self.conf = conf
self.logger = utils.get_logger(conf, log_route='daemon')
def run_once(self, *args, **kwargs):
"""Override this to run the script once"""
raise NotImplementedError('run_once not implemented')
def run_forever(self, *args, **kwargs):
"""Override this to run forever"""
raise NotImplementedError('run_forever not implemented')
def run(self, once=False, **kwargs):
if once:
self.run_once(**kwargs)
else:
self.run_forever(**kwargs)
def post_multiprocess_run(self):
"""
Override this to do something after running using multiple worker
processes. This method is called in the parent process.
This is probably only useful for run-once mode since there is no
"after running" in run-forever mode.
"""
pass
def get_worker_args(self, once=False, **kwargs):
"""
For each worker yield a (possibly empty) dict of kwargs to pass along
        to the daemon's :meth:`run` method after fork. The number of elements
        returned from this method determines the number of processes created.
        If the returned iterable is empty, the Strategy will fall back to the
        run-inline strategy.
:param once: False if the worker(s) will be daemonized, True if the
worker(s) will be run once
:param kwargs: plumbed through via command line argparser
:returns: an iterable of dicts, each element represents the kwargs to
be passed to a single worker's :meth:`run` method after fork.
"""
return []
def is_healthy(self):
"""
This method is called very frequently on the instance of the daemon
held by the parent process. If it returns False, all child workers are
terminated, and new workers will be created.
:returns: a boolean, True only if all workers should continue to run
"""
return True
class DaemonStrategy(object):
"""
This is the execution strategy for using subclasses of Daemon. The default
behavior is to invoke the daemon's :meth:`Daemon.run` method from within
the parent process. When the :meth:`Daemon.run` method returns the parent
process will exit.
However, if the Daemon returns a non-empty iterable from
:meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run` method will
be invoked in child processes, with the arguments provided from the parent
process's instance of the daemon. If a child process exits it will be
restarted with the same options, unless it was executed in once mode.
:param daemon: an instance of a :class:`Daemon` (has a `run` method)
:param logger: a logger instance
"""
def __init__(self, daemon, logger):
self.daemon = daemon
self.logger = logger
self.running = False
# only used by multi-worker strategy
self.options_by_pid = {}
self.unspawned_worker_options = []
def setup(self, **kwargs):
utils.validate_configuration()
utils.drop_privileges(self.daemon.conf.get('user', 'swift'))
utils.clean_up_daemon_hygiene()
utils.capture_stdio(self.logger, **kwargs)
def kill_children(*args):
self.running = False
self.logger.info('SIGTERM received')
signal.signal(signal.SIGTERM, signal.SIG_IGN)
os.killpg(0, signal.SIGTERM)
os._exit(0)
signal.signal(signal.SIGTERM, kill_children)
self.running = True
def _run_inline(self, once=False, **kwargs):
"""Run the daemon"""
self.daemon.run(once=once, **kwargs)
def run(self, once=False, **kwargs):
"""Daemonize and execute our strategy"""
self.setup(**kwargs)
try:
self._run(once=once, **kwargs)
except KeyboardInterrupt:
self.logger.notice('User quit')
finally:
self.cleanup()
self.running = False
def _fork(self, once, **kwargs):
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGHUP, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.daemon.run(once, **kwargs)
self.logger.debug('Forked worker %s finished', os.getpid())
# do not return from this stack, nor execute any finally blocks
os._exit(0)
else:
self.register_worker_start(pid, kwargs)
return pid
def iter_unspawned_workers(self):
while True:
try:
per_worker_options = self.unspawned_worker_options.pop()
except IndexError:
return
yield per_worker_options
def spawned_pids(self):
return list(self.options_by_pid.keys())
def register_worker_start(self, pid, per_worker_options):
self.logger.debug('Spawned worker %s with %r', pid, per_worker_options)
self.options_by_pid[pid] = per_worker_options
def register_worker_exit(self, pid):
self.unspawned_worker_options.append(self.options_by_pid.pop(pid))
def ask_daemon_to_prepare_workers(self, once, **kwargs):
self.unspawned_worker_options = list(
self.daemon.get_worker_args(once=once, **kwargs))
def abort_workers_if_daemon_would_like(self):
if not self.daemon.is_healthy():
self.logger.debug(
'Daemon needs to change options, aborting workers')
self.cleanup()
return True
return False
def check_on_all_running_workers(self):
for p in self.spawned_pids():
try:
pid, status = os.waitpid(p, os.WNOHANG)
except OSError as err:
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
self.logger.notice('Worker %s died', p)
else:
if pid == 0:
# child still running
continue
self.logger.debug('Worker %s exited', p)
self.register_worker_exit(p)
def _run(self, once, **kwargs):
self.ask_daemon_to_prepare_workers(once, **kwargs)
if not self.unspawned_worker_options:
return self._run_inline(once, **kwargs)
for per_worker_options in self.iter_unspawned_workers():
if self._fork(once, **per_worker_options) == 0:
return 0
while self.running:
if self.abort_workers_if_daemon_would_like():
self.ask_daemon_to_prepare_workers(once, **kwargs)
self.check_on_all_running_workers()
if not once:
for per_worker_options in self.iter_unspawned_workers():
if self._fork(once, **per_worker_options) == 0:
return 0
else:
if not self.spawned_pids():
self.logger.notice('Finished %s', os.getpid())
break
time.sleep(0.1)
self.daemon.post_multiprocess_run()
return 0
def cleanup(self):
for p in self.spawned_pids():
try:
os.kill(p, signal.SIGTERM)
except OSError as err:
if err.errno not in (errno.ESRCH, errno.EINTR, errno.ECHILD):
raise
self.register_worker_exit(p)
self.logger.debug('Cleaned up worker %s', p)
def run_daemon(klass, conf_file, section_name='', once=False, **kwargs):
"""
Loads settings from conf, then instantiates daemon ``klass`` and runs the
daemon with the specified ``once`` kwarg. The section_name will be derived
from the daemon ``klass`` if not provided (e.g. ObjectReplicator =>
object-replicator).
:param klass: Class to instantiate, subclass of :class:`Daemon`
:param conf_file: Path to configuration file
:param section_name: Section name from conf file to load config from
:param once: Passed to daemon :meth:`Daemon.run` method
"""
# very often the config section_name is based on the class name
# the None singleton will be passed through to readconf as is
if section_name == '':
section_name = sub(r'([a-z])([A-Z])', r'\1-\2',
klass.__name__).lower()
try:
conf = utils.readconf(conf_file, section_name,
log_name=kwargs.get('log_name'))
except (ValueError, IOError) as e:
# The message will be printed to stderr
# and results in an exit code of 1.
sys.exit(e)
use_hub(utils.get_hub())
    # 'once' on the command line (i.e. daemonize=false) will override the config
once = once or not utils.config_true_value(conf.get('daemonize', 'true'))
# pre-configure logger
if 'logger' in kwargs:
logger = kwargs.pop('logger')
else:
logger = utils.get_logger(conf, conf.get('log_name', section_name),
log_to_console=kwargs.pop('verbose', False),
log_route=section_name)
# optional nice/ionice priority scheduling
utils.modify_priority(conf, logger)
# disable fallocate if desired
if utils.config_true_value(conf.get('disable_fallocate', 'no')):
utils.disable_fallocate()
# set utils.FALLOCATE_RESERVE if desired
utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \
utils.config_fallocate_value(conf.get('fallocate_reserve', '1%'))
# By default, disable eventlet printing stacktraces
eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no'))
eventlet.debug.hub_exceptions(eventlet_debug)
# Ensure TZ environment variable exists to avoid stat('/etc/localtime') on
# some platforms. This locks in reported times to UTC.
os.environ['TZ'] = 'UTC+0'
time.tzset()
logger.notice('Starting %s', os.getpid())
try:
DaemonStrategy(klass(conf), logger).run(once=once, **kwargs)
except KeyboardInterrupt:
logger.info('User quit')
logger.notice('Exited %s', os.getpid())
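# Illustrative sketch (added; not part of the original module): a minimal
# Daemon subclass wiring together the contract described in the docstrings
# above. The class name, the 'interval'/'devices' options and the config path
# are made-up examples; real daemons live in their own modules and are
# launched via bin/ scripts.
class ExampleSweeper(Daemon):
    def run_once(self, *args, **kwargs):
        # one pass of whatever work this daemon does
        self.logger.info('example sweep complete: %r', kwargs)

    def run_forever(self, *args, **kwargs):
        # loop forever, sleeping between passes
        while True:
            self.run_once(*args, **kwargs)
            time.sleep(float(self.conf.get('interval', 30)))

    def get_worker_args(self, once=False, **kwargs):
        # fork one worker per configured device; an empty list means the
        # daemon runs inline in the parent process instead
        return [{'device': dev}
                for dev in self.conf.get('devices', '').split()]


if __name__ == '__main__':
    # e.g. run_daemon(ExampleSweeper, '/etc/swift/example-sweeper.conf',
    #                 once=True)
    pass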
| 2.328125 | 2 |
backend/resource_files_sample.py | Bhaskers-Blu-Org1/multicloud-incident-response-navigator | 0 | 6202 | import resource_files
resources = resource_files.ResourceFiles()
# sample use case of getting yamls
print(resources.get_yaml("Pod", "jumpy-shark-gbapp-frontend-844fdccf55-ggkbf", "default", "mycluster"))
# sample use case of getting events
print(resources.get_events('mycluster','default','78abd8c9-ac06-11e9-b68f-0e70a6ce6d3a'))
# sample use case of getting logs
print(resources.get_logs('mycluster', 'default', "jumpy-shark-gbapp-frontend-844fdccf55-ggkbf"))
| 2.0625 | 2 |
backend/api/v1/auth_module/auth_api.py | aroraenterprise/projecteos | 0 | 6203 | """
Project: flask-rest
Author: <NAME>
Description: Handle auth endpoints such as auth/signup, auth/login
"""
from api.v1 import make_json_ok_response, SageController, SageMethod
from api.v1.fundamentals import helper
from .auth_controller import AuthController
def sage_auth_signup_function(self, resource, **kwargs):
_UserModel = resource.get_account_model()
args = helper.parse_args_for_model(_UserModel)
user = _UserModel(**args) # user has been created
user.put() # save to get a key for the user
result, params = AuthController.create_unique_for_user(user.key)
if not result: # not successful
user.key.delete()
raise params # this holds the error message
else:
return params # this holds accesskey and refresh token
def sage_auth_authenticate_function(self, resource, **kwargs):
result, params = AuthController.authenticate_client()
if not result: # not successful
raise params # this holds the error message
else:
return params # this holds the refresh token and the access token
auth_controller = {
'signup': SageController(sage_auth_signup_function, SageMethod.POST, authenticate=False),
'authenticate': SageController(sage_auth_authenticate_function, SageMethod.POST, authenticate=False)
} | 2.78125 | 3 |
tests/QuantumToolboxIntegration/test_singleQubitOpenDynamics.py | AngsarM/QuanGuru | 9 | 6204 | import random as rn
import numpy as np
# Open-system dynamics of a qubit: compare numerical results with the analytical calculations.
# NOTE these are also TUTORIALS of the library, so see the Tutorials for what these are doing and for the analytical
# calculations.
# Currently includes 2 cases: (i) decay only, and (ii) unitary evolution obtained by calling the Liouville method
# without giving any collapse operators. For now, it only looks at excited-state populations.
# TODO this is an unfinished test. The two tests below are identical, and they do not actually test open-system dynamics.
decayRateSM = rn.random()
excitedPopulation = lambda t: 0.5*np.exp(-(0.00001*(decayRateSM+1)*2+1j)*50*t)
populations = {'excitedAnalytical':[], 'excitedNumerical':[]}
# this is used as the calculate attribute of the qubit, and the singleQubit fixture evolve method calls this at every
# step of the evolution. It stores both numerical and analytical excited state populations into the dictionary above.
def singleQubitDecayCalculate(qub, state, i):
populations['excitedAnalytical'].append(excitedPopulation(i*qub.stepSize))
populations['excitedNumerical'].append(state[0, 0])
def test_qubitUnitaryEvolutionFromLiouville(singleQubit):
for k in populations:
populations[k] = []
singleQubit.evolutionMethod = singleQubit.openEvolution
singleQubit.calculate = singleQubitDecayCalculate
singleQubit.evolve()
assert singleQubit.stepCount == len(populations['excitedNumerical'])
def test_qubitDecay(singleQubit):
for k in populations:
populations[k] = []
singleQubit.evolutionMethod = singleQubit.openEvolution
singleQubit.calculate = singleQubitDecayCalculate
singleQubit.evolve()
assert singleQubit.stepCount == len(populations['excitedNumerical'])
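# Added illustrative check (not in the original test module): at t=0 the
# analytical excited-state population defined above reduces to 0.5, regardless
# of the randomly drawn decayRateSM.
def test_excitedPopulationInitialValue():
    assert np.isclose(excitedPopulation(0), 0.5)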
| 2.75 | 3 |
QuGraphy/state.py | Mohamed-ShehabEldin/QuGraphy | 0 | 6205 | # This file contains functions related to vector states.
from .density import * #we may use some functions from them and dependencies
def row2col(vec):
if np.ndim(vec)==1:
col=[]
for element in vec:
col.append([element])
return col
else:
return vec
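# Illustrative example (added): row2col turns a plain row vector into a column
# vector and leaves an already-2-D input untouched, e.g.
#   row2col([1, 0])      ->  [[1], [0]]
#   row2col([[1], [0]])  ->  [[1], [0]]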
def check_state(state):
    state = row2col(state)  # work with a column-vector form of the input
if np.shape(state)[1]>1:
raise Exception("invalid state, not a vector!")
    if not np.isclose(schmidt_inner(state, state), 1):
raise Exception("invalid state, not normalized!") | 3.046875 | 3 |
uncoverml/metadata_profiler.py | GeoscienceAustralia/uncoverml | 34 | 6206 | #! /usr/bin/env python
"""
Description:
Gather Metadata for the uncover-ml prediction output results:
Reference: email 2019-05-24
Overview
Creator: (person who generated the model)
Model;
Name:
Type and date:
Algorithm:
Extent: Lat/long - location on Australia map?
SB Notes: None of the above is required as this information will be captured in the yaml file.
Model inputs:
1. Covariates - list (in full)
2. Targets: path to shapefile: csv file
    SB Notes: Only covariate list file. Targets and path to shapefile are not required as this is available in the yaml file. Maybe the full path to the shapefile has some merit, as one can specify a partial path.
Model performance
JSON file (in full)
SB Notes: Yes
Model outputs
1. Prediction grid including path
2. Quantiles Q5; Q95
3. Variance:
4. Entropy:
5. Feature rank file
6. Raw covariates file (target value - covariate value)
7. Optimisation output
8. Others ??
SB Notes: Not required as these are model dependent, and the metadata will be contained in each of the output geotif file.
Model parameters:
1. YAML file (in full)
2. .SH file (in full)
SB Notes: The .sh file is not required. YAML file is read as a python dictionary in uncoverml which can be dumped in the metadata.
CreationDate: 31/05/19
Developer: <EMAIL>
Revision History:
LastUpdate: 31/05/19 FZ
LastUpdate: dd/mm/yyyy Who Optional description
"""
# import section
import os
import sys
import json
import pickle
import datetime
import getpass
import socket
from ppretty import ppretty
import uncoverml
class MetadataSummary():
"""
Summary Description of the ML prediction output
"""
def __init__(self, model, config):
self.model = model
self.description = "Metadata for the ML results"
username = getpass.getuser()
hostname = socket.gethostname()
self.creator = username
self.computename = hostname
self.datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.version = uncoverml.__version__
model_str = ppretty(self.model, indent=' ', show_protected=True, show_static=True,
show_address=False, str_length=50)
self.config = config
self.name = self.config.name # 'demo_regression'
self.algorithm = self.config.algorithm # 'svr'
self.extent = ((-10, 100),(-40, 140))
if config.cross_validate and os.path.exists(config.crossval_scores_file):
with open(config.crossval_scores_file) as sf:
self.model_performance_metrics = json.load(sf)
else:
self.model_performance_metrics = None
def write_metadata(self, out_filename):
"""
        Write the metadata for this prediction result into a human-readable txt file,
        in order to make the ML results traceable and reproducible (provenance).
"""
with open(out_filename, 'w') as outf:
outf.write("# Metadata Profile for the Prediction Results")
outf.write("\n\n############ Software Environment ###########\n\n")
outf.write("Creator = %s \n"%self.creator)
outf.write("Computer = %s \n"%self.computename)
outf.write("ML Algorithm = %s \n"%self.algorithm)
outf.write("Version = %s\n"%self.version)
outf.write("Datetime = %s \n"%self.datetime)
outf.write("\n\n############ Performance Matrics ###########\n\n")
if self.model_performance_metrics:
for keys, values in self.model_performance_metrics.items():
outf.write("%s = %s\n"%(keys, values))
outf.write("\n\n############ Configuration ###########\n\n")
conf_str = ppretty(self.config, indent=' ', width=200, seq_length=200,
show_protected=True, show_static=True, show_properties=True,
show_address=False, str_length=200)
outf.write(conf_str)
outf.write("\n\n############ Model ###########\n\n")
model_str = ppretty(self.model, indent=' ', show_protected=True, show_static=True,
show_address=False, str_length=50)
outf.write(model_str)
outf.write("\n\n############ The End of Metadata ###########\n\n")
return out_filename
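# Illustrative usage sketch (added; not part of uncover-ml): how the class
# above is typically driven once a trained model and a config object exist.
# The names `model` and `config` are placeholders supplied elsewhere by the
# uncoverml pipeline, and the output file name is a made-up example:
#
#   summary = MetadataSummary(model, config)
#   summary.write_metadata("prediction_metadata.txt")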
| 2.328125 | 2 |
testjpkg/jsonify/hij.py | thisisishara/test_pypi_cli | 0 | 6207 | print("hiiiiiiiiiiiiiiiix")
def sayhi():
print("2nd pkg said hi")
| 2.140625 | 2 |
asv_bench/benchmarks/algorithms.py | raspbian-packages/pandas | 0 | 6208 | import numpy as np
import pandas as pd
from pandas.util import testing as tm
class algorithm(object):
goal_time = 0.2
def setup(self):
N = 100000
self.int_unique = pd.Int64Index(np.arange(N * 5))
# cache is_unique
self.int_unique.is_unique
self.int = pd.Int64Index(np.arange(N).repeat(5))
self.float = pd.Float64Index(np.random.randn(N).repeat(5))
# Convenience naming.
self.checked_add = pd.core.nanops._checked_add_with_arr
self.arr = np.arange(1000000)
self.arrpos = np.arange(1000000)
self.arrneg = np.arange(-1000000, 0)
self.arrmixed = np.array([1, -1]).repeat(500000)
def time_int_factorize(self):
self.int.factorize()
def time_float_factorize(self):
        self.float.factorize()
def time_int_unique_duplicated(self):
self.int_unique.duplicated()
def time_int_duplicated(self):
self.int.duplicated()
def time_float_duplicated(self):
self.float.duplicated()
def time_add_overflow_pos_scalar(self):
self.checked_add(self.arr, 1)
def time_add_overflow_neg_scalar(self):
self.checked_add(self.arr, -1)
def time_add_overflow_zero_scalar(self):
self.checked_add(self.arr, 0)
def time_add_overflow_pos_arr(self):
self.checked_add(self.arr, self.arrpos)
def time_add_overflow_neg_arr(self):
self.checked_add(self.arr, self.arrneg)
def time_add_overflow_mixed_arr(self):
self.checked_add(self.arr, self.arrmixed)
class hashing(object):
goal_time = 0.2
def setup(self):
N = 100000
self.df = pd.DataFrame(
{'A': pd.Series(tm.makeStringIndex(100).take(
np.random.randint(0, 100, size=N))),
'B': pd.Series(tm.makeStringIndex(10000).take(
np.random.randint(0, 10000, size=N))),
'D': np.random.randn(N),
'E': np.arange(N),
'F': pd.date_range('20110101', freq='s', periods=N),
'G': pd.timedelta_range('1 day', freq='s', periods=N),
})
self.df['C'] = self.df['B'].astype('category')
self.df.iloc[10:20] = np.nan
def time_frame(self):
self.df.hash()
def time_series_int(self):
self.df.E.hash()
def time_series_string(self):
self.df.B.hash()
def time_series_categorical(self):
self.df.C.hash()
| 2.625 | 3 |
RMtools_1D/do_RMsynth_1D.py | lh-astro/RM-Tools | 0 | 6209 | #!/usr/bin/env python
#=============================================================================#
# #
# NAME: do_RMsynth_1D.py #
# #
# PURPOSE: API for runnning RM-synthesis on an ASCII Stokes I, Q & U spectrum.#
# #
# MODIFIED: 16-Nov-2018 by <NAME> #
# MODIFIED: 23-October-2019 by <NAME> #
# #
#=============================================================================#
# #
# The MIT License (MIT) #
# #
# Copyright (c) 2015 - 2018 <NAME> #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the "Software"), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
#=============================================================================#
import sys
import os
import time
import traceback
import json
import math as m
import numpy as np
import matplotlib.pyplot as plt
from RMutils.util_RM import do_rmsynth
from RMutils.util_RM import do_rmsynth_planes
from RMutils.util_RM import get_rmsf_planes
from RMutils.util_RM import measure_FDF_parms
from RMutils.util_RM import measure_qu_complexity
from RMutils.util_RM import measure_fdf_complexity
from RMutils.util_misc import nanmedian
from RMutils.util_misc import toscalar
from RMutils.util_misc import create_frac_spectra
from RMutils.util_misc import poly5
from RMutils.util_misc import MAD
from RMutils.util_plotTk import plot_Ipqu_spectra_fig
from RMutils.util_plotTk import plot_rmsf_fdf_fig
from RMutils.util_plotTk import plot_complexity_fig
from RMutils.util_plotTk import CustomNavbar
from RMutils.util_plotTk import plot_rmsIQU_vs_nu_ax
if sys.version_info.major == 2:
print('RM-tools will no longer run with Python 2! Please use Python 3.')
exit()
C = 2.997924538e8 # Speed of light [m/s]
#-----------------------------------------------------------------------------#
def run_rmsynth(data, polyOrd=3, phiMax_radm2=None, dPhi_radm2=None,
nSamples=10.0, weightType="variance", fitRMSF=False,
noStokesI=False, phiNoise_radm2=1e6, nBits=32, showPlots=False,
debug=False, verbose=False, log=print,units='Jy/beam', prefixOut="prefixOut", args=None):
"""Run RM synthesis on 1D data.
Args:
data (list): Contains frequency and polarization data as either:
[freq_Hz, I, Q, U, dI, dQ, dU]
freq_Hz (array_like): Frequency of each channel in Hz.
I (array_like): Stokes I intensity in each channel.
Q (array_like): Stokes Q intensity in each channel.
U (array_like): Stokes U intensity in each channel.
dI (array_like): Error in Stokes I intensity in each channel.
dQ (array_like): Error in Stokes Q intensity in each channel.
dU (array_like): Error in Stokes U intensity in each channel.
or
[freq_Hz, q, u, dq, du]
freq_Hz (array_like): Frequency of each channel in Hz.
q (array_like): Fractional Stokes Q intensity (Q/I) in each channel.
u (array_like): Fractional Stokes U intensity (U/I) in each channel.
dq (array_like): Error in fractional Stokes Q intensity in each channel.
du (array_like): Error in fractional Stokes U intensity in each channel.
Kwargs:
polyOrd (int): Order of polynomial to fit to Stokes I spectrum.
phiMax_radm2 (float): Maximum absolute Faraday depth (rad/m^2).
dPhi_radm2 (float): Faraday depth channel size (rad/m^2).
nSamples (float): Number of samples across the RMSF.
weightType (str): Can be "variance" or "uniform"
"variance" -- Weight by uncertainty in Q and U.
"uniform" -- Weight uniformly (i.e. with 1s)
fitRMSF (bool): Fit a Gaussian to the RMSF?
        noStokesI (bool): True if no Stokes I data is provided (q and u only).
phiNoise_radm2 (float): ????
nBits (int): Precision of floating point numbers.
showPlots (bool): Show plots?
debug (bool): Turn on debugging messages & plots?
verbose (bool): Verbosity.
log (function): Which logging function to use.
units (str): Units of data.
Returns:
mDict (dict): Summary of RM synthesis results.
aDict (dict): Data output by RM synthesis.
"""
# Sanity checks
if not os.path.exists(args.dataFile[0]):
print("File does not exist: '%s'." % args.dataFile[0])
sys.exit()
prefixOut, ext = os.path.splitext(args.dataFile[0])
# Default data types
dtFloat = "float" + str(nBits)
dtComplex = "complex" + str(2*nBits)
# freq_Hz, I, Q, U, dI, dQ, dU
try:
if verbose: log("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
(freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr) = data
if verbose: log("... success.")
except Exception:
if verbose: log("...failed.")
# freq_Hz, q, u, dq, du
try:
if verbose: log("> Trying [freq_Hz, q, u, dq, du]", end=' ')
(freqArr_Hz, QArr, UArr, dQArr, dUArr) = data
if verbose: log("... success.")
noStokesI = True
except Exception:
if verbose: log("...failed.")
if debug:
log(traceback.format_exc())
sys.exit()
if verbose: log("Successfully read in the Stokes spectra.")
# If no Stokes I present, create a dummy spectrum = unity
if noStokesI:
if verbose: log("Warn: no Stokes I data in use.")
IArr = np.ones_like(QArr)
dIArr = np.zeros_like(QArr)
# Convert to GHz for convenience
freqArr_GHz = freqArr_Hz / 1e9
dQUArr = (dQArr + dUArr)/2.0
# Fit the Stokes I spectrum and create the fractional spectra
IModArr, qArr, uArr, dqArr, duArr, fitDict = \
create_frac_spectra(freqArr = freqArr_GHz,
IArr = IArr,
QArr = QArr,
UArr = UArr,
dIArr = dIArr,
dQArr = dQArr,
dUArr = dUArr,
polyOrd = polyOrd,
verbose = True,
debug = debug)
# Plot the data and the Stokes I model fit
if verbose: log("Plotting the input data and spectral index fit.")
freqHirArr_Hz = np.linspace(freqArr_Hz[0], freqArr_Hz[-1], 10000)
IModHirArr = poly5(fitDict["p"])(freqHirArr_Hz/1e9)
specFig = plt.figure(figsize=(12.0, 8))
plot_Ipqu_spectra_fig(freqArr_Hz = freqArr_Hz,
IArr = IArr,
qArr = qArr,
uArr = uArr,
dIArr = dIArr,
dqArr = dqArr,
duArr = duArr,
freqHirArr_Hz = freqHirArr_Hz,
IModArr = IModHirArr,
fig = specFig,
units = units)
# Use the custom navigation toolbar (does not work on Mac OS X)
# try:
# specFig.canvas.toolbar.pack_forget()
# CustomNavbar(specFig.canvas, specFig.canvas.toolbar.window)
# except Exception:
# pass
# Display the figure
# if not plt.isinteractive():
# specFig.show()
# DEBUG (plot the Q, U and average RMS spectrum)
if debug:
rmsFig = plt.figure(figsize=(12.0, 8))
ax = rmsFig.add_subplot(111)
ax.plot(freqArr_Hz/1e9, dQUArr, marker='o', color='k', lw=0.5,
label='rms <QU>')
ax.plot(freqArr_Hz/1e9, dQArr, marker='o', color='b', lw=0.5,
label='rms Q')
ax.plot(freqArr_Hz/1e9, dUArr, marker='o', color='r', lw=0.5,
label='rms U')
xRange = (np.nanmax(freqArr_Hz)-np.nanmin(freqArr_Hz))/1e9
ax.set_xlim( np.min(freqArr_Hz)/1e9 - xRange*0.05,
np.max(freqArr_Hz)/1e9 + xRange*0.05)
ax.set_xlabel('$\\nu$ (GHz)')
ax.set_ylabel('RMS '+units)
ax.set_title("RMS noise in Stokes Q, U and <Q,U> spectra")
# rmsFig.show()
#-------------------------------------------------------------------------#
# Calculate some wavelength parameters
lambdaSqArr_m2 = np.power(C/freqArr_Hz, 2.0)
dFreq_Hz = np.nanmin(np.abs(np.diff(freqArr_Hz)))
lambdaSqRange_m2 = ( np.nanmax(lambdaSqArr_m2) -
np.nanmin(lambdaSqArr_m2) )
dLambdaSqMin_m2 = np.nanmin(np.abs(np.diff(lambdaSqArr_m2)))
dLambdaSqMax_m2 = np.nanmax(np.abs(np.diff(lambdaSqArr_m2)))
# Set the Faraday depth range
fwhmRMSF_radm2 = 2.0 * m.sqrt(3.0) / lambdaSqRange_m2
if dPhi_radm2 is None:
dPhi_radm2 = fwhmRMSF_radm2 / nSamples
if phiMax_radm2 is None:
phiMax_radm2 = m.sqrt(3.0) / dLambdaSqMax_m2
phiMax_radm2 = max(phiMax_radm2, fwhmRMSF_radm2*10.) # Force the minimum phiMax to 10 FWHM
# Faraday depth sampling. Zero always centred on middle channel
nChanRM = int(round(abs((phiMax_radm2 - 0.0) / dPhi_radm2)) * 2.0 + 1.0)
startPhi_radm2 = - (nChanRM-1.0) * dPhi_radm2 / 2.0
stopPhi_radm2 = + (nChanRM-1.0) * dPhi_radm2 / 2.0
phiArr_radm2 = np.linspace(startPhi_radm2, stopPhi_radm2, nChanRM)
phiArr_radm2 = phiArr_radm2.astype(dtFloat)
if verbose: log("PhiArr = %.2f to %.2f by %.2f (%d chans)." % (phiArr_radm2[0],
phiArr_radm2[-1],
float(dPhi_radm2),
nChanRM))
# Calculate the weighting as 1/sigma^2 or all 1s (uniform)
if weightType=="variance":
weightArr = 1.0 / np.power(dQUArr, 2.0)
else:
weightType = "uniform"
weightArr = np.ones(freqArr_Hz.shape, dtype=dtFloat)
if verbose: log("Weight type is '%s'." % weightType)
startTime = time.time()
# Perform RM-synthesis on the spectrum
dirtyFDF, lam0Sq_m2 = do_rmsynth_planes(dataQ = qArr,
dataU = uArr,
lambdaSqArr_m2 = lambdaSqArr_m2,
phiArr_radm2 = phiArr_radm2,
weightArr = weightArr,
nBits = nBits,
verbose = verbose,
log = log)
# Calculate the Rotation Measure Spread Function
RMSFArr, phi2Arr_radm2, fwhmRMSFArr, fitStatArr = \
get_rmsf_planes(lambdaSqArr_m2 = lambdaSqArr_m2,
phiArr_radm2 = phiArr_radm2,
weightArr = weightArr,
mskArr = ~np.isfinite(qArr),
lam0Sq_m2 = lam0Sq_m2,
double = True,
fitRMSF = fitRMSF,
fitRMSFreal = False,
nBits = nBits,
verbose = verbose,
log = log)
fwhmRMSF = float(fwhmRMSFArr)
# ALTERNATE RM-SYNTHESIS CODE --------------------------------------------#
#dirtyFDF, [phi2Arr_radm2, RMSFArr], lam0Sq_m2, fwhmRMSF = \
# do_rmsynth(qArr, uArr, lambdaSqArr_m2, phiArr_radm2, weightArr)
#-------------------------------------------------------------------------#
endTime = time.time()
cputime = (endTime - startTime)
if verbose: log("> RM-synthesis completed in %.2f seconds." % cputime)
# Determine the Stokes I value at lam0Sq_m2 from the Stokes I model
# Multiply the dirty FDF by Ifreq0 to recover the PI
freq0_Hz = C / m.sqrt(lam0Sq_m2)
Ifreq0 = poly5(fitDict["p"])(freq0_Hz/1e9)
dirtyFDF *= (Ifreq0) # FDF is in fracpol units initially, convert back to flux
    # Calculate the theoretical noise in the FDF. NOTE: this old formula only works for variance weights!
weightArr = np.where(np.isnan(weightArr), 0.0, weightArr)
dFDFth = np.sqrt( np.sum(weightArr**2 * np.nan_to_num(dQUArr)**2) / (np.sum(weightArr))**2 )
# Measure the parameters of the dirty FDF
# Use the theoretical noise to calculate uncertainties
mDict = measure_FDF_parms(FDF = dirtyFDF,
phiArr = phiArr_radm2,
fwhmRMSF = fwhmRMSF,
dFDF = dFDFth,
lamSqArr_m2 = lambdaSqArr_m2,
lam0Sq = lam0Sq_m2)
mDict["Ifreq0"] = toscalar(Ifreq0)
mDict["polyCoeffs"] = ",".join([str(x) for x in fitDict["p"]])
mDict["IfitStat"] = fitDict["fitStatus"]
mDict["IfitChiSqRed"] = fitDict["chiSqRed"]
mDict["lam0Sq_m2"] = toscalar(lam0Sq_m2)
mDict["freq0_Hz"] = toscalar(freq0_Hz)
mDict["fwhmRMSF"] = toscalar(fwhmRMSF)
mDict["dQU"] = toscalar(nanmedian(dQUArr))
mDict["dFDFth"] = toscalar(dFDFth)
mDict["units"] = units
if fitDict["fitStatus"] >= 128:
log("WARNING: Stokes I model contains negative values!")
elif fitDict["fitStatus"] >= 64:
log("Caution: Stokes I model has low signal-to-noise.")
#Add information on nature of channels:
good_channels=np.where(np.logical_and(weightArr != 0,np.isfinite(qArr)))[0]
mDict["min_freq"]=float(np.min(freqArr_Hz[good_channels]))
mDict["max_freq"]=float(np.max(freqArr_Hz[good_channels]))
mDict["N_channels"]=good_channels.size
mDict["median_channel_width"]=float(np.median(np.diff(freqArr_Hz)))
# Measure the complexity of the q and u spectra
mDict["fracPol"] = mDict["ampPeakPIfit"]/(Ifreq0)
mD, pD = measure_qu_complexity(freqArr_Hz = freqArr_Hz,
qArr = qArr,
uArr = uArr,
dqArr = dqArr,
duArr = duArr,
fracPol = mDict["fracPol"],
psi0_deg = mDict["polAngle0Fit_deg"],
RM_radm2 = mDict["phiPeakPIfit_rm2"])
mDict.update(mD)
# Debugging plots for spectral complexity measure
if debug:
tmpFig = plot_complexity_fig(xArr=pD["xArrQ"],
qArr=pD["yArrQ"],
dqArr=pD["dyArrQ"],
sigmaAddqArr=pD["sigmaAddArrQ"],
chiSqRedqArr=pD["chiSqRedArrQ"],
probqArr=pD["probArrQ"],
uArr=pD["yArrU"],
duArr=pD["dyArrU"],
sigmaAdduArr=pD["sigmaAddArrU"],
chiSqReduArr=pD["chiSqRedArrU"],
probuArr=pD["probArrU"],
mDict=mDict)
if saveOutput:
if verbose: print("Saving debug plots:")
outFilePlot = prefixOut + ".debug-plots.pdf"
if verbose: print("> " + outFilePlot)
tmpFig.savefig(outFilePlot, bbox_inches = 'tight')
else:
tmpFig.show()
#add array dictionary
aDict = dict()
aDict["phiArr_radm2"] = phiArr_radm2
aDict["phi2Arr_radm2"] = phi2Arr_radm2
aDict["RMSFArr"] = RMSFArr
aDict["freqArr_Hz"] = freqArr_Hz
aDict["weightArr"]=weightArr
aDict["dirtyFDF"]=dirtyFDF
if verbose:
# Print the results to the screen
log()
log('-'*80)
log('RESULTS:\n')
log('FWHM RMSF = %.4g rad/m^2' % (mDict["fwhmRMSF"]))
log('Pol Angle = %.4g (+/-%.4g) deg' % (mDict["polAngleFit_deg"],
mDict["dPolAngleFit_deg"]))
log('Pol Angle 0 = %.4g (+/-%.4g) deg' % (mDict["polAngle0Fit_deg"],
mDict["dPolAngle0Fit_deg"]))
log('Peak FD = %.4g (+/-%.4g) rad/m^2' % (mDict["phiPeakPIfit_rm2"],
mDict["dPhiPeakPIfit_rm2"]))
log('freq0_GHz = %.4g ' % (mDict["freq0_Hz"]/1e9))
log('I freq0 = %.4g %s' % (mDict["Ifreq0"],units))
log('Peak PI = %.4g (+/-%.4g) %s' % (mDict["ampPeakPIfit"],
mDict["dAmpPeakPIfit"],units))
log('QU Noise = %.4g %s' % (mDict["dQU"],units))
log('FDF Noise (theory) = %.4g %s' % (mDict["dFDFth"],units))
log('FDF Noise (Corrected MAD) = %.4g %s' % (mDict["dFDFcorMAD"],units))
log('FDF Noise (rms) = %.4g %s' % (mDict["dFDFrms"],units))
log('FDF SNR = %.4g ' % (mDict["snrPIfit"]))
log('sigma_add(q) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddQ"],
mDict["dSigmaAddPlusQ"],
mDict["dSigmaAddMinusQ"]))
log('sigma_add(u) = %.4g (+%.4g, -%.4g)' % (mDict["sigmaAddU"],
mDict["dSigmaAddPlusU"],
mDict["dSigmaAddMinusU"]))
log()
log('-'*80)
# Plot the RM Spread Function and dirty FDF
if showPlots or saveOutput:
fdfFig = plt.figure(figsize=(12.0, 8))
plot_rmsf_fdf_fig(phiArr = phiArr_radm2,
FDF = dirtyFDF,
phi2Arr = phi2Arr_radm2,
RMSFArr = RMSFArr,
fwhmRMSF = fwhmRMSF,
vLine = mDict["phiPeakPIfit_rm2"],
fig = fdfFig,
units = units)
# Use the custom navigation toolbar
# try:
# fdfFig.canvas.toolbar.pack_forget()
# CustomNavbar(fdfFig.canvas, fdfFig.canvas.toolbar.window)
# except Exception:
# pass
# Display the figure
# fdfFig.show()
# Pause if plotting enabled
if showPlots:
plt.show()
elif saveOutput or debug:
if verbose: print("Saving RMSF and dirty FDF plot:")
outFilePlot = prefixOut + ".RMSF-dirtyFDF-plots.pdf"
if verbose: print("> " + outFilePlot)
fdfFig.savefig(outFilePlot, bbox_inches = 'tight')
# #if verbose: print "Press <RETURN> to exit ...",
# input()
return mDict, aDict
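#-----------------------------------------------------------------------------#
# Illustrative sketch (added; not part of the original script): driving
# run_rmsynth() from another Python script. The data file name is a
# placeholder, and an argparse-style namespace is still needed because this
# version of run_rmsynth() re-checks args.dataFile internally.
#
#   from types import SimpleNamespace
#   args = SimpleNamespace(dataFile=["StokesIQU.dat"])
#   data = readFile(args.dataFile[0], nBits=32, verbose=True)
#   mDict, aDict = run_rmsynth(data, polyOrd=2, nSamples=10.0,
#                              weightType="variance", verbose=True, args=args)
#   print(mDict["phiPeakPIfit_rm2"], "+/-", mDict["dPhiPeakPIfit_rm2"])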
def readFile(dataFile, nBits, verbose=True, debug=False):
"""
Read the I, Q & U data from the ASCII file.
Inputs:
datafile (str): relative or absolute path to file.
nBits (int): number of bits to store the data as.
verbose (bool): Print verbose messages to terminal?
debug (bool): Print full traceback in case of failure?
Returns:
data (list of arrays): List containing the columns found in the file.
If Stokes I is present, this will be [freq_Hz, I, Q, U, dI, dQ, dU],
else [freq_Hz, q, u, dq, du].
"""
# Default data types
dtFloat = "float" + str(nBits)
dtComplex = "complex" + str(2*nBits)
# Output prefix is derived from the input file name
# Read the data-file. Format=space-delimited, comments="#".
if verbose: print("Reading the data file '%s':" % dataFile)
# freq_Hz, I, Q, U, dI, dQ, dU
try:
if verbose: print("> Trying [freq_Hz, I, Q, U, dI, dQ, dU]", end=' ')
(freqArr_Hz, IArr, QArr, UArr,
dIArr, dQArr, dUArr) = \
np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
if verbose: print("... success.")
data=[freqArr_Hz, IArr, QArr, UArr, dIArr, dQArr, dUArr]
except Exception:
if verbose: print("...failed.")
# freq_Hz, q, u, dq, du
try:
if verbose: print("> Trying [freq_Hz, q, u, dq, du]", end=' ')
(freqArr_Hz, QArr, UArr, dQArr, dUArr) = \
np.loadtxt(dataFile, unpack=True, dtype=dtFloat)
if verbose: print("... success.")
data=[freqArr_Hz, QArr, UArr, dQArr, dUArr]
noStokesI = True
except Exception:
if verbose: print("...failed.")
if debug:
print(traceback.format_exc())
sys.exit()
if verbose: print("Successfully read in the Stokes spectra.")
return data
def saveOutput(outdict, arrdict, prefixOut, verbose):
# Save the dirty FDF, RMSF and weight array to ASCII files
if verbose: print("Saving the dirty FDF, RMSF weight arrays to ASCII files.")
outFile = prefixOut + "_FDFdirty.dat"
if verbose:
print("> %s" % outFile)
np.savetxt(outFile, list(zip(arrdict["phiArr_radm2"], arrdict["dirtyFDF"].real, arrdict["dirtyFDF"].imag)))
outFile = prefixOut + "_RMSF.dat"
if verbose:
print("> %s" % outFile)
np.savetxt(outFile, list(zip(arrdict["phi2Arr_radm2"], arrdict["RMSFArr"].real, arrdict["RMSFArr"].imag)))
outFile = prefixOut + "_weight.dat"
if verbose:
print("> %s" % outFile)
np.savetxt(outFile, list(zip(arrdict["freqArr_Hz"], arrdict["weightArr"])))
# Save the measurements to a "key=value" text file
outFile = prefixOut + "_RMsynth.dat"
if verbose:
print("Saving the measurements on the FDF in 'key=val' and JSON formats.")
print("> %s" % outFile)
FH = open(outFile, "w")
for k, v in outdict.items():
FH.write("%s=%s\n" % (k, v))
FH.close()
outFile = prefixOut + "_RMsynth.json"
if verbose:
print("> %s" % outFile)
json.dump(dict(outdict), open(outFile, "w"))
#-----------------------------------------------------------------------------#
def main():
import argparse
"""
Start the function to perform RM-synthesis if called from the command line.
"""
# Help string to be shown using the -h option
descStr = """
Run RM-synthesis on Stokes I, Q and U spectra (1D) stored in an ASCII
file. The Stokes I spectrum is first fit with a polynomial and the
resulting model used to create fractional q = Q/I and u = U/I spectra.
    The ASCII file should contain the following columns, in a space-separated format:
[freq_Hz, I, Q, U, I_err, Q_err, U_err]
OR
[freq_Hz, Q, U, Q_err, U_err]
To get outputs, one or more of the following flags must be set: -S, -p, -v.
"""
epilog_text="""
Outputs with -S flag:
_FDFdirty.dat: Dirty FDF/RM Spectrum [Phi, Q, U]
_RMSF.dat: Computed RMSF [Phi, Q, U]
_RMsynth.dat: list of derived parameters for RM spectrum
(approximately equivalent to -v flag output)
_RMsynth.json: dictionary of derived parameters for RM spectrum
_weight.dat: Calculated channel weights [freq_Hz, weight]
"""
# Parse the command line options
parser = argparse.ArgumentParser(description=descStr,epilog=epilog_text,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("dataFile", metavar="dataFile.dat", nargs=1,
help="ASCII file containing Stokes spectra & errors.")
parser.add_argument("-t", dest="fitRMSF", action="store_true",
help="fit a Gaussian to the RMSF [False]")
parser.add_argument("-l", dest="phiMax_radm2", type=float, default=None,
help="absolute max Faraday depth sampled [Auto].")
parser.add_argument("-d", dest="dPhi_radm2", type=float, default=None,
help="width of Faraday depth channel [Auto].\n(overrides -s NSAMPLES flag)")
parser.add_argument("-s", dest="nSamples", type=float, default=10,
help="number of samples across the RMSF lobe [10].")
parser.add_argument("-w", dest="weightType", default="variance",
help="weighting [inverse variance] or 'uniform' (all 1s).")
parser.add_argument("-o", dest="polyOrd", type=int, default=2,
help="polynomial order to fit to I spectrum [2].")
parser.add_argument("-i", dest="noStokesI", action="store_true",
help="ignore the Stokes I spectrum [False].")
parser.add_argument("-b", dest="bit64", action="store_true",
help="use 64-bit floating point precision [False (uses 32-bit)]")
parser.add_argument("-p", dest="showPlots", action="store_true",
help="show the plots [False].")
parser.add_argument("-v", dest="verbose", action="store_true",
help="verbose output [False].")
parser.add_argument("-S", dest="saveOutput", action="store_true",
help="save the arrays and plots [False].")
parser.add_argument("-D", dest="debug", action="store_true",
help="turn on debugging messages & plots [False].")
parser.add_argument("-U", dest="units", type=str, default="Jy/beam",
help="Intensity units of the data. [Jy/beam]")
args = parser.parse_args()
# Sanity checks
if not os.path.exists(args.dataFile[0]):
print("File does not exist: '%s'." % args.dataFile[0])
sys.exit()
prefixOut, ext = os.path.splitext(args.dataFile[0])
dataDir, dummy = os.path.split(args.dataFile[0])
# Set the floating point precision
nBits = 32
if args.bit64:
nBits = 64
verbose=args.verbose
data = readFile(args.dataFile[0],nBits, verbose=verbose, debug=args.debug)
# Run RM-synthesis on the spectra
mDict, aDict = run_rmsynth(data = data,
polyOrd = args.polyOrd,
phiMax_radm2 = args.phiMax_radm2,
dPhi_radm2 = args.dPhi_radm2,
nSamples = args.nSamples,
weightType = args.weightType,
fitRMSF = args.fitRMSF,
noStokesI = args.noStokesI,
nBits = nBits,
showPlots = args.showPlots,
debug = args.debug,
verbose = verbose,
units = args.units,
prefixOut = prefixOut,
args = args,
)
if args.saveOutput:
saveOutput(mDict, aDict, prefixOut, verbose)
#-----------------------------------------------------------------------------#
if __name__ == "__main__":
main()
| 1.679688 | 2 |
cogdl/modules/conv/__init__.py | awesome-archive/cogdl | 8 | 6210 | from .message_passing import MessagePassing
from .gcn_conv import GCNConv
from .gat_conv import GATConv
from .se_layer import SELayer
from .aggregator import Meanaggregator
from .maggregator import meanaggr
__all__ = [
'MessagePassing',
'GCNConv',
'GATConv',
'SELayer',
'Meanaggregator'
]
| 1.085938 | 1 |
netket/utils/jax.py | gpescia/MyNetKet | 1 | 6211 | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
from . import struct
def get_afun_if_module(mod_or_fun) -> Callable:
"""Returns the apply function if it's a module. Does nothing otherwise."""
if hasattr(mod_or_fun, "apply"):
return mod_or_fun.apply
else:
return mod_or_fun
@struct.dataclass
class WrappedApplyFun:
"""Wraps a callable to be a module-like object with the method `apply`."""
apply: Callable
"""The wrapped callable."""
def __repr__(self):
return f"{type(self).__name__}(apply={self.apply}, hash={hash(self)})"
def wrap_afun(mod_or_fun):
"""Wraps a callable to be a module-like object with the method `apply`.
Does nothing if it already has an apply method.
"""
if hasattr(mod_or_fun, "apply"):
return mod_or_fun
else:
return WrappedApplyFun(mod_or_fun)
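# Illustrative sketch (not part of the original module): wrap_afun leaves
# module-like objects untouched and wraps bare callables so that both expose
# an `apply` method. The function below is a made-up example.
#
#     def log_psi(params, x):            # hypothetical apply function
#         return params["w"] * x
#
#     wrapped = wrap_afun(log_psi)
#     value = wrapped.apply({"w": 2.0}, 3.0)   # -> 6.0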
| 2.3125 | 2 |
geetools/batch/featurecollection.py | Kungreye/gee_tools | 0 | 6212 | # coding=utf-8
import ee
from . import utils
import json
import csv
from .. import tools
def fromShapefile(filename, crs=None, start=None, end=None):
""" Convert an ESRI file (.shp and .dbf must be present) to a
ee.FeatureCollection
At the moment only works for shapes with less than 1000 records and doesn't
handle complex shapes.
:param filename: the name of the filename. If the shape is not in the
same path than the script, specify a path instead.
:type filename: str
:param start:
:return: the FeatureCollection
:rtype: ee.FeatureCollection
"""
import shapefile
wgs84 = ee.Projection('EPSG:4326')
# read the filename
reader = shapefile.Reader(filename)
fields = reader.fields[1:]
field_names = [field[0] for field in fields]
field_types = [field[1] for field in fields]
types = dict(zip(field_names, field_types))
features = []
projection = utils.getProjection(filename) if not crs else crs
# catch a string with format "EPSG:XXX"
if isinstance(projection, str):
if 'EPSG:' in projection:
projection = projection.split(':')[1]
projection = 'EPSG:{}'.format(projection)
# filter records with start and end
start = start if start else 0
if not end:
records = reader.shapeRecords()
end = len(records)
else:
end = end + 1
if (end-start)>1000:
msg = "Can't process more than 1000 records at a time. Found {}"
raise ValueError(msg.format(end-start))
for i in range(start, end):
# atr = dict(zip(field_names, sr.record))
sr = reader.shapeRecord(i)
atr = {}
for fld, rec in zip(field_names, sr.record):
fld_type = types[fld]
if fld_type == 'D':
value = ee.Date(rec.isoformat()).millis().getInfo()
elif fld_type in ['C', 'N', 'F']:
value = rec
else:
continue
atr[fld] = value
geom = sr.shape.__geo_interface__
if projection is not None:
geometry = ee.Geometry(geom, projection) \
.transform(wgs84, 1)
else:
geometry = ee.Geometry(geom)
feat = ee.Feature(geometry, atr)
features.append(feat)
return ee.FeatureCollection(features)
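# Hedged usage sketch ("parcels.shp" and the CRS below are assumptions made
# for illustration; any ESRI shapefile with its .dbf present should work):
#
#     fc = fromShapefile("parcels.shp", crs="EPSG:4326", start=0, end=100)
#     print(fc.size().getInfo())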
def fromGeoJSON(filename=None, data=None, crs=None):
""" Create a list of Features from a GeoJSON file. Return a python tuple
with ee.Feature inside. This is due to failing when attempting to create a
FeatureCollection (Broken Pipe ERROR) out of the list. You can try creating
it yourself casting the result of this function to a ee.List or using it
directly as a FeatureCollection argument.
:param filename: the name of the file to load
:type filename: str
:param crs: a coordinate reference system in EPSG format. If not specified
        it will try to get it from the geoJSON, and if not there it will raise
an error
:type: crs: str
:return: a tuple of features.
"""
if filename:
with open(filename, 'r') as geoj:
content = geoj.read()
geodict = json.loads(content)
else:
geodict = data
features = []
# Get crs from GeoJSON
if not crs:
filecrs = geodict.get('crs')
if filecrs:
name = filecrs.get('properties').get('name')
splitcrs = name.split(':')
cleancrs = [part for part in splitcrs if part]
try:
if cleancrs[-1] == 'CRS84':
crs = 'EPSG:4326'
elif cleancrs[-2] == 'EPSG':
crs = '{}:{}'.format(cleancrs[-2], cleancrs[-1])
else:
raise ValueError('{} not recognized'.format(name))
except IndexError:
raise ValueError('{} not recognized'.format(name))
else:
crs = 'EPSG:4326'
for n, feat in enumerate(geodict.get('features')):
properties = feat.get('properties')
geom = feat.get('geometry')
ty = geom.get('type')
coords = geom.get('coordinates')
if ty == 'GeometryCollection':
ee_geom = utils.GEOMETRY_TYPES.get(ty)(geom, opt_proj=crs)
else:
if ty == 'Polygon':
coords = utils.removeZ(coords) if utils.hasZ(coords) else coords
ee_geom = utils.GEOMETRY_TYPES.get(ty)(coords, proj=ee.Projection(crs))
ee_feat = ee.feature.Feature(ee_geom, properties)
features.append(ee_feat)
return tuple(features)
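# Hedged usage sketch ("sites.geojson" is an assumed file name):
#
#     feats = fromGeoJSON(filename="sites.geojson")
#     fc = ee.FeatureCollection(list(feats))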
def fromKML(filename=None, data=None, crs=None, encoding=None):
""" Create a list of Features from a KML file. Return a python tuple
with ee.Feature inside. This is due to failing when attempting to create a
FeatureCollection (Broken Pipe ERROR) out of the list. You can try creating
it yourself casting the result of this function to a ee.List or using it
directly as a FeatureCollection argument.
:param filename: the name of the file to load
:type filename: str
:param crs: a coordinate reference system in EPSG format. If not specified
        it will try to get it from the geoJSON, and if not there it will raise
an error
:type: crs: str
:return: a tuple of features.
"""
geojsondict = utils.kmlToGeoJsonDict(filename, data, encoding)
features = geojsondict['features']
for feat in features:
# remove styleUrl
prop = feat['properties']
if 'styleUrl' in prop:
prop.pop('styleUrl')
# remove Z value if needed
geom = feat['geometry']
ty = geom['type']
if ty == 'GeometryCollection':
geometries = geom['geometries']
for g in geometries:
c = g['coordinates']
utils.removeZ(c)
else:
coords = geom['coordinates']
utils.removeZ(coords)
return fromGeoJSON(data=geojsondict, crs=crs)
def toDict(collection, split_at=4000):
""" Get the FeatureCollection as a dict object """
size = collection.size()
condition = size.gte(4999)
def greater():
size = collection.size()
seq = tools.ee_list.sequence(0, size, split_at)
limits = ee.List.zip(seq.slice(1), seq)
def over_limits(n):
n = ee.List(n)
ini = ee.Number(n.get(0))
end = ee.Number(n.get(1))
return ee.FeatureCollection(collection.toList(ini, end))
return limits.map(over_limits)
collections = ee.List(
ee.Algorithms.If(condition,
greater(),
ee.List([collection])))
collections_size = collections.size().getInfo()
col = ee.FeatureCollection(collections.get(0))
content = col.getInfo()
feats = content['features']
    for i in range(1, collections_size):
c = ee.FeatureCollection(collections.get(i))
content_c = c.getInfo()
feats_c = content_c['features']
feats = feats + feats_c
content['features'] = feats
return content
def toGeoJSON(collection, name, path=None, split_at=4000):
""" Export a FeatureCollection to a GeoJSON file
:param collection: The collection to export
:type collection: ee.FeatureCollection
:param name: name of the resulting file
:type name: str
:param path: The path where to save the file. If None, will be saved
in the current folder
:type path: str
:param split_at: limit to avoid an EE Exception
:type split_at: int
:return: A GeoJSON (.geojson) file.
:rtype: file
"""
import json
import os
if not path:
path = os.getcwd()
# name
    if not name.endswith('.geojson'):
        fname = name + '.geojson'
    else:
        fname = name
content = toDict(collection, split_at)
with open(os.path.join(path, fname), 'w') as thefile:
thefile.write(json.dumps(content))
return thefile
def toCSV(collection, filename, split_at=4000):
""" Alternative to download a FeatureCollection as a CSV """
d = toDict(collection, split_at)
fields = list(d['columns'].keys())
fields.append('geometry')
features = d['features']
ext = filename[-4:]
if ext != '.csv':
filename += '.csv'
with open(filename, 'w') as thecsv:
writer = csv.DictWriter(thecsv, fields)
writer.writeheader()
# write rows
for feature in features:
properties = feature['properties']
fid = feature['id']
geom = feature['geometry']['type']
# match fields
properties['system:index'] = fid
properties['geometry'] = geom
# write row
writer.writerow(properties)
return thecsv
def toLocal(collection, filename, filetype=None, selectors=None, path=None):
""" Download a FeatureCollection to a local file a CSV or geoJSON file.
This uses a different method than `toGeoJSON` and `toCSV`
:param filetype: The filetype of download, either CSV or JSON.
Defaults to CSV.
:param selectors: The selectors that should be used to determine which
attributes will be downloaded.
:param filename: The name of the file to be downloaded
"""
if not filetype:
filetype = 'CSV'
url = collection.getDownloadURL(filetype, selectors, filename)
thefile = utils.downloadFile(url, filename, filetype, path)
return thefile
def toAsset(table, assetPath, name=None, create=True, verbose=False, **kwargs):
""" This function can create folders and ImageCollections on the fly.
The rest is the same to Export.image.toAsset. You can pass the same
params as the original function
:param table: the feature collection to upload
:type table: ee.FeatureCollection
:param assetPath: path to upload the image (only PATH, without
filename)
:type assetPath: str
:param name: filename for the image (AssetID will be assetPath + name)
:type name: str
:return: the tasks
:rtype: ee.batch.Task
"""
# Check if the user is specified in the asset path
is_user = (assetPath.split('/')[0] == 'users')
if not is_user:
user = ee.batch.data.getAssetRoots()[0]['id']
assetPath = "{}/{}".format(user, assetPath)
if create:
# Recrusive create path
path2create = assetPath # '/'.join(assetPath.split('/')[:-1])
utils.createAssets([path2create], 'Folder', True)
# Asset ID (Path + name)
assetId = '/'.join([assetPath, name])
# Description
description = utils.matchDescription(name)
# Init task
task = ee.batch.Export.table.toAsset(table, assetId=assetId,
description=description, **kwargs)
task.start()
if verbose:
print('Exporting {} to {}'.format(name, assetPath))
return task | 3 | 3 |
index.py | extwiii/Rock-paper-scissors-lizard-Spock | 1 | 6213 | # Rock-paper-scissors-lizard-Spock template
# The key idea of this program is to equate the strings
# "rock", "paper", "scissors", "lizard", "Spock" to numbers
# as follows:
#
# 0 - rock
# 1 - Spock
# 2 - paper
# 3 - lizard
# 4 - scissors
import random
def name_to_number(name):
if name == "rock":
return 0
elif name == 'Spock':
return 1
elif name == 'paper':
return 2
elif name == 'lizard':
return 3
elif name == 'scissors':
return 4
else :
return None
def number_to_name(number):
if number == 0:
return "rock"
elif number == 1:
return 'Spock'
elif number == 2:
return 'paper'
elif number == 3:
return 'lizard'
elif number == 4:
return 'scissors'
else :
return None
def rpsls(player_choice):
print ""
print "Player chooses",player_choice
player_number = name_to_number(player_choice)
comp_number = random.randrange(5)
comp_choice = number_to_name(comp_number)
print "Computer chooses",comp_choice
diff = (player_number - comp_number)%5
if (diff == 1) or (diff == 2):
print "Player wins!"
elif (diff == 3) or (diff == 4):
print "Computer wins!"
else :
print "Tie!"
rpsls("rock")
rpsls("Spock")
rpsls("paper")
rpsls("lizard")
rpsls("scissors")
| 4 | 4 |
libs/clustering/ensembles/utils.py | greenelab/phenoplier | 3 | 6214 | """
Contains functions to generate and combine a clustering ensemble.
"""
import numpy as np
import pandas as pd
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score as ari
from sklearn.metrics import adjusted_mutual_info_score as ami
from sklearn.metrics import normalized_mutual_info_score as nmi
from tqdm import tqdm
from clustering.utils import reset_estimator, compare_arrays
def generate_ensemble(data, clusterers: dict, attributes: list, affinity_matrix=None):
"""
It generates an ensemble from the data given a set of clusterers (a
clusterer is an instance of a clustering algorithm with a fixed set of
parameters).
Args:
data:
A numpy array, pandas dataframe, or any other structure supported
by the clusterers as data input.
clusterers:
A dictionary with clusterers specified in this format: { 'k-means
#1': KMeans(n_clusters=2), ... }
attributes:
A list of attributes to save in the final dataframe; for example,
including "n_clusters" will extract this attribute from the
estimator and include it in the final dataframe returned.
affinity_matrix:
If the clustering algorithm is AgglomerativeClustering (from
sklearn) and the linkage method is different than ward (which only
support euclidean distance), the affinity_matrix is given as data
input to the estimator instead of data.
Returns:
A pandas DataFrame with all the partitions generated by the clusterers.
Columns include the clusterer name/id, the partition, the estimator
parameters (obtained with the get_params() method) and any other
attribute specified.
"""
ensemble = []
for clus_name, clus_obj in tqdm(clusterers.items(), total=len(clusterers)):
# get partition
#
# for agglomerative clustering both data and affinity_matrix should be
# given; for ward linkage, data is used, and for the other linkage
# methods the affinity_matrix is used
if (type(clus_obj).__name__ == "AgglomerativeClustering") and (
clus_obj.linkage != "ward"
):
partition = clus_obj.fit_predict(affinity_matrix).astype(float)
else:
partition = clus_obj.fit_predict(data).astype(float)
# remove from partition noisy points (for example, if using DBSCAN)
partition[partition < 0] = np.nan
# get number of clusters
partition_no_nan = partition[~np.isnan(partition)]
n_clusters = np.unique(partition_no_nan).shape[0]
# stop if n_clusters <= 1
if n_clusters <= 1:
reset_estimator(clus_obj)
continue
res = pd.Series(
{
"clusterer_id": clus_name,
"clusterer_params": str(clus_obj.get_params()),
"partition": partition,
}
)
for attr in attributes:
if attr == "n_clusters" and not hasattr(clus_obj, attr):
res[attr] = n_clusters
else:
res[attr] = getattr(clus_obj, attr)
ensemble.append(res)
# for some estimators such as DBSCAN this is needed, because otherwise
# the estimator saves references of huge data structures not needed in
# this context
reset_estimator(clus_obj)
return pd.DataFrame(ensemble).set_index("clusterer_id")
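# Hedged usage sketch (the clusterer names and KMeans settings are
# illustrative assumptions, mirroring the format described in the docstring;
# `data` stands for the user's feature matrix of shape (n_samples, n_features)):
#
#     from sklearn.cluster import KMeans
#     clusterers = {f"k-means #{k}": KMeans(n_clusters=k) for k in (2, 3, 4)}
#     ensemble_df = generate_ensemble(data, clusterers, attributes=["n_clusters"])
#     ensemble = np.vstack(ensemble_df["partition"].values)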
def get_ensemble_distance_matrix(ensemble, n_jobs=1):
"""
Given an ensemble, it computes the coassociation matrix (a distance matrix
for all objects using the ensemble information). For each object pair, the
coassociation matrix contains the percentage of times the pair of objects
was clustered together in the ensemble.
Args:
ensemble:
A numpy array representing a set of clustering solutions on the same
data. Each row is a clustering solution (partition) and columns are
objects.
n_jobs:
The number of jobs used by the pairwise_distance matrix from
sklearn.
Returns:
A numpy array representing a square distance matrix for all objects
(coassociation matrix).
"""
def _compare(x, y):
xy = np.array([x, y]).T
xy = xy[~np.isnan(xy).any(axis=1)]
return (xy[:, 0] != xy[:, 1]).sum() / xy.shape[0]
return pairwise_distances(
ensemble.T, metric=_compare, n_jobs=n_jobs, force_all_finite="allow-nan"
)
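# Tiny worked example (illustrative only): for an ensemble of two partitions
# of three objects, [[0, 0, 1], [0, 1, 1]], objects 0 and 1 are clustered
# together in one of the two partitions, so their coassociation distance is
# 0.5; the diagonal is 0.
#
#     dist = get_ensemble_distance_matrix(np.array([[0, 0, 1], [0, 1, 1]]))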
def supraconsensus(ensemble, k, methods, selection_criterion, n_jobs=1, use_tqdm=False):
"""
It combines a clustering ensemble using a set of methods that the user can
specify. Each of these methods combines the ensemble and returns a single
partition. This function returns the combined partition that maximizes the
selection criterion.
Args:
ensemble:
a clustering ensemble (rows are partitions, columns are objects).
k:
the final number of clusters for the combined partition.
methods:
a list of methods to apply on the ensemble; each returns a combined
partition.
selection_criterion:
a function that represents the selection criterion; this function
has to accept an ensemble as the first argument, and a partition as
the second one.
n_jobs:
number of jobs.
use_tqdm:
            enables/disables the use of tqdm to show a progress bar.
Returns:
Returns a tuple: (partition, best method name, best criterion value)
"""
from concurrent.futures import ProcessPoolExecutor, as_completed
methods_results = {}
with ProcessPoolExecutor(max_workers=n_jobs) as executor:
tasks = {executor.submit(m, ensemble, k): m.__name__ for m in methods}
for future in tqdm(
as_completed(tasks),
total=len(tasks),
disable=(not use_tqdm),
ncols=100,
):
method_name = tasks[future]
part = future.result()
criterion_value = selection_criterion(ensemble, part)
methods_results[method_name] = {
"partition": part,
"criterion_value": criterion_value,
}
# select the best performing method according to the selection criterion
best_method = max(
methods_results, key=lambda x: methods_results[x]["criterion_value"]
)
best_method_results = methods_results[best_method]
return (
best_method_results["partition"],
best_method,
best_method_results["criterion_value"],
)
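# Hedged sketch of a possible call (the consensus functions named here are
# placeholders, not functions defined in this module):
#
#     def mean_ami(ensemble, partition):
#         return np.mean([compare_arrays(m, partition, ami, use_weighting=True)
#                         for m in ensemble])
#
#     best_part, best_method, best_value = supraconsensus(
#         ensemble, k=3, methods=(eac_single_linkage, eac_average_linkage),
#         selection_criterion=mean_ami, n_jobs=2)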
def run_method_and_compute_agreement(method_func, ensemble_data, ensemble, k, **kwargs):
"""
Runs a consensus clustering method on the ensemble data, obtains the
consolidated partition with the desired number of clusters, and computes
a series of performance measures.
Args:
method_func:
A consensus function (first argument is either the ensemble or
the coassociation matrix derived from the ensemble).
ensemble_data:
A numpy array with the ensemble data that will be given to the
specified method. For evidence accumulation methods, this is the
coassociation matrix (a square matrix with the distance between
object pairs derived from the ensemble).
ensemble:
A numpy array representing the ensemble (partitions in rows, objects
in columns).
k:
The number of clusters to obtain from the ensemble data using the
specified method.
kwargs:
Other parameters passed to `method_func`.
Returns:
It returns a tuple with the data partition derived from the ensemble
data using the specified method, and some performance measures of this
partition.
"""
part = method_func(ensemble_data, k, **kwargs)
nmi_values = np.array(
[
compare_arrays(ensemble_member, part, nmi, use_weighting=True)
for ensemble_member in ensemble
]
)
ami_values = np.array(
[
compare_arrays(ensemble_member, part, ami, use_weighting=True)
for ensemble_member in ensemble
]
)
ari_values = np.array(
[
compare_arrays(ensemble_member, part, ari, use_weighting=True)
for ensemble_member in ensemble
]
)
performance_values = {
"ari_mean": np.mean(ari_values),
"ari_median": np.median(ari_values),
"ari_std": np.std(ari_values),
"ami_mean": np.mean(ami_values),
"ami_median": np.median(ami_values),
"ami_std": np.std(ami_values),
"nmi_mean": np.mean(nmi_values),
"nmi_median": np.median(nmi_values),
"nmi_std": np.std(nmi_values),
}
return part, performance_values
| 3.25 | 3 |
backend/chart/application/service/employees.py | toshi-click/chart_app | 0 | 6215 | import logging
from django.db import transaction, connection
from django.utils import timezone
from django.utils.timezone import localtime
from chart.application.enums.department_type import DepartmentType
from chart.application.enums.gender_type import GenderType
from chart.application.service.app_logic_base import AppLogicBaseService
from chart.models import Employees, Departments
"""
Class that operates on the employees table.
"""
class EmployeesService(AppLogicBaseService):
def __init__(self):
super().__init__()
@staticmethod
@transaction.atomic()
def create_employees():
"""
        Create Employees records.
"""
service = EmployeesService()
for emp_no in range(1, 11):
if Employees.objects.filter(emp_no=emp_no, delete_flag=0).count() == 0:
if emp_no <= 5:
department_no = DepartmentType.SALES.value
else:
department_no = DepartmentType.MARKETING.value
select_model = Departments.objects.filter(department_no=department_no).values("id").first()
                # Register the data
service._regist_employees(select_model['id'], emp_no)
@staticmethod
@transaction.atomic()
def create_departments():
"""
        Create Departments records.
"""
service = EmployeesService()
        # Delete all existing data
        # Because a ForeignKey is defined, run the delete command
Departments.objects.all().delete()
for department_type in DepartmentType:
department_no = department_type.value
if Departments.objects.filter(department_no=department_no, delete_flag=0).count() == 0:
                # Register the data
service._regist_departments(department_no, department_type.en_name)
@staticmethod
@transaction.atomic()
def update_employees():
"""
        Update Employees records.
"""
service = EmployeesService()
        # Narrow down the queryset with filter
        # gt means greater than (>), lt means less than (<)
for employees_item in Employees.objects.filter(emp_no__gt=1, emp_no__lt=3, delete_flag=0):
employees_id = employees_item.id
select_model = Departments.objects.filter(department_no=DepartmentType.PRODUCTION.value).values(
"id").first()
department_id = select_model['id']
department_date_from = 20190903
            # Update the data
service._update_employees_department(employees_id, department_id, department_date_from)
        # Narrow down the queryset with filter
        # gte means greater than or equal (>=), lte means less than or equal (<=)
for employees_item in Employees.objects.filter(emp_no__gte=7, emp_no__lte=9, delete_flag=0):
employees_id = employees_item.id
select_model = Departments.objects.filter(department_no=DepartmentType.SALES.value).values("id").first()
department_id = select_model['id']
department_date_from = 20190905
            # Update the data
service._update_employees_department(employees_id, department_id, department_date_from)
@staticmethod
def select_employees():
"""
        Search Employees records.
        """
        # Specifying table_name__field_name produces an INNER JOIN
        # A query is issued every time the referenced table is accessed
for employees_item in Employees.objects.filter(department__department_no=DepartmentType.SALES.value,
delete_flag=0):
logging.debug("reference:emp_no={}".format(employees_item.emp_no))
logging.debug("reference:department_no={}".format(employees_item.department.department_no))
logging.debug("reference:department_name={}".format(employees_item.department.department_name))
logging.debug("reference:first_name={}".format(employees_item.first_name))
logging.debug("reference:last_name={}".format(employees_item.last_name))
        # Fetch and cache the related objects by using select_related
        # Only one query is issued
for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).select_related("department"):
logging.debug("select_related:emp_no={}".format(employees_item.emp_no))
logging.debug("select_related:first_name={}".format(employees_item.first_name))
logging.debug("select_related:last_name={}".format(employees_item.last_name))
logging.debug("select_related:department_no={}".format(employees_item.department.department_no))
logging.debug("select_related:department_name={}".format(employees_item.department.department_name))
        # Fetch and cache the related objects by using prefetch_related
        # Two queries are issued and joined on the ForeignKey
for employees_item in Employees.objects.filter(emp_no__gte=7, delete_flag=0).prefetch_related(
"department__employees_set"):
logging.debug("prefetch_related:emp_no={}".format(employees_item.emp_no))
logging.debug("prefetch_related:first_name={}".format(employees_item.first_name))
logging.debug("prefetch_related:last_name={}".format(employees_item.last_name))
logging.debug("prefetch_related:department_no={}".format(employees_item.department.department_no))
logging.debug("prefetch_related:department_name={}".format(employees_item.department.department_name))
@staticmethod
@transaction.atomic()
def truncate_employees():
"""
        Truncate the table.
"""
cursor = connection.cursor()
cursor.execute('TRUNCATE TABLE {0}'.format(Employees._meta.db_table))
def _regist_employees(self, department_id, emp_no):
"""
        Register an employees record.
"""
self.regist_model = Employees()
self.regist_model.emp_no = emp_no
self.regist_model.department_id = department_id
self.regist_model.first_name = "first_name_" + str(emp_no).zfill(3)
self.regist_model.last_name = "last_name_" + str(emp_no).zfill(3)
self.regist_model.gender = GenderType.MAN.value
self.regist_model.department_date_from = "20190902"
self.regist_model.delete_flag = 0
self.regist_model.regist_dt = localtime(timezone.now())
self.regist_model.update_dt = localtime(timezone.now())
self.regist_model.save()
return self.regist_model.id
def _regist_departments(self, department_no, department_name):
"""
        Register a departments record.
"""
self.regist_model = Departments()
self.regist_model.department_no = department_no
self.regist_model.department_name = department_name
self.regist_model.delete_flag = 0
self.regist_model.regist_dt = localtime(timezone.now())
self.regist_model.update_dt = localtime(timezone.now())
self.regist_model.save()
def _update_employees_department(self, employees_id, department_id, department_date_from):
"""
        Update the department assignment information.
"""
self.update_model = Employees()
self.update_model.pk = employees_id
self.update_model.department_id = department_id
self.update_model.department_date_from = department_date_from
self.update_model.update_dt = localtime(timezone.now())
self.update_model.save(update_fields=['department_id', 'department_date_from', 'update_dt'])
| 2.046875 | 2 |
DataQualityTester/views/pages.py | pwyf/data-quality-tester | 0 | 6216 | from flask import render_template
def home():
return render_template('upload.html')
def about():
return render_template('about.html')
| 1.960938 | 2 |
hastakayit_gui.py | roselight/Image-Recognition-with-OpenCv | 2 | 6217 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '.\hastakayit_gui.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import mysql.connector
from PyQt5.QtWidgets import QMessageBox,QWidget,QMainWindow
from PyQt5.QtCore import Qt, QDate, QDateTime
# The SQL connection statement for the database is created here.
db = mysql.connector.connect(
host="localhost",
user="root",
passwd="<PASSWORD>",
database="cilth_vt"
)
cursor = db.cursor()
class Ui_MainWindow2(QMainWindow):
def setupUi2(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(600, 205)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("../heartbeat.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
MainWindow.setWindowIcon(icon)
MainWindow.setTabShape(QtWidgets.QTabWidget.Triangular)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.btn_kayit = QtWidgets.QPushButton(self.centralwidget)
self.btn_kayit.setGeometry(QtCore.QRect(180, 150, 121, 31))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("../avatar.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
self.btn_kayit.setIcon(icon1)
self.btn_kayit.setObjectName("btn_kayit")
self.btn_kayit.clicked.connect(self.kayitekle)
self.btn_cikis = QtWidgets.QPushButton(self.centralwidget)
self.btn_cikis.setGeometry(QtCore.QRect(310, 150, 121, 31))
self.btn_cikis.setObjectName("btn_cikis")
self.btn_cikis.clicked.connect(self.close)
self.gridLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(10, 10, 571, 128))
self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
self.gridLayout_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setObjectName("gridLayout_3")
self.lbl_htc = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.lbl_htc.setObjectName("lbl_htc")
self.gridLayout_3.addWidget(self.lbl_htc, 0, 0, 1, 1)
self.lbl_hadsoyad = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.lbl_hadsoyad.setObjectName("lbl_hadsoyad")
self.gridLayout_3.addWidget(self.lbl_hadsoyad, 1, 0, 1, 1)
self.lbl_hcinsiyet = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.lbl_hcinsiyet.setObjectName("lbl_hcinsiyet")
self.gridLayout_3.addWidget(self.lbl_hcinsiyet, 2, 0, 1, 1)
self.lineEdit_2 = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.lineEdit_2.setObjectName("lineEdit_2")
self.gridLayout_3.addWidget(self.lineEdit_2, 1, 1, 1, 1)
self.lineEdit_3 = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.lineEdit_3.setObjectName("lineEdit_3")
self.gridLayout_3.addWidget(self.lineEdit_3, 2, 1, 1, 1)
self.lineEdit = QtWidgets.QLineEdit(self.gridLayoutWidget_2)
self.lineEdit.setObjectName("lineEdit")
self.gridLayout_3.addWidget(self.lineEdit, 0, 1, 1, 1)
self.lbl_hdt = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.lbl_hdt.setObjectName("lbl_hdt")
self.gridLayout_3.addWidget(self.lbl_hdt, 3, 0, 1, 1)
self.dt_hdt = QtWidgets.QDateEdit(self.gridLayoutWidget_2)
self.dt_hdt.setObjectName("dt_hdt")
self.dt_hdt.setDateTime(QtCore.QDateTime(QtCore.QDate(2019, 1, 1), QtCore.QTime(0, 0, 0)))
self.gridLayout_3.addWidget(self.dt_hdt, 3, 1, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def kayitekle(self):
        # Sends the data taken from the k_ad/k_sfire line edits to the query.
h_tc=self.lineEdit.text()
h_ads=self.lineEdit_2.text()
h_csyt=self.lineEdit_3.text()
h_dt=self.dt_hdt.text()
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("heartbeat.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
QMessageBox.setWindowIcon(self, icon)
try:
hasta_ekle = ("INSERT INTO hasta(h_tc,h_ad_sad,h_cins,h_dt) VALUES (%s,%s,%s,%s)")
cursor.execute(hasta_ekle,(h_tc,h_ads,h_csyt,h_dt))
db.commit()
veri = cursor.rowcount
except:
veri=2
if (veri == 1):
QMessageBox.information(self, 'BİLGİLENDİRME', "İşlem Başarılı.")
else:
QMessageBox.information(self, 'BİLGİLENDİRME', "İşlem Başarısız")
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Cilt Hastalıkları Tespit Uygulaması-Hasta Kayıt Ekranı"))
self.btn_kayit.setText(_translate("MainWindow", "ONAYLA"))
self.btn_cikis.setText(_translate("MainWindow", "İPTAL"))
self.lbl_htc.setText(_translate("MainWindow", "TC Kimlik No:"))
self.lbl_hadsoyad.setText(_translate("MainWindow", "Hasta Adı Soyadı:"))
self.lbl_hcinsiyet.setText(_translate("MainWindow", "Cinsiyet: "))
self.lbl_hdt.setText(_translate("MainWindow", "Doğum Tarihi:"))
self.dt_hdt.setDisplayFormat(_translate("MainWindow", "yyyy.MM.dd"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow2()
ui.setupUi2(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 2.328125 | 2 |
.travis/manage_daily_builds.py | loonwerks/AGREE | 5 | 6218 | #!/usr/bin/env python3
'''
Copyright (c) 2021, Collins Aerospace.
Developed with the sponsorship of Defense Advanced Research Projects Agency (DARPA).
Permission is hereby granted, free of charge, to any person obtaining a copy of this data,
including any software or models in source or binary form, as well as any drawings, specifications,
and documentation (collectively "the Data"), to deal in the Data without restriction, including
without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Data, and to permit persons to whom the Data is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Data.
THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
'''
import os
import re
import sys
from github3 import GitHub
from pprint import pformat
GITHUB_API = 'https://api.github.com/repos'
GITHUB_RELEASES = 'releases'
AUTH_TOKEN = os.environ['GH_TOKEN'] if 'GH_TOKEN' in os.environ.keys() else None
REPOSITORY_OWNER = 'loonwerks'
REPOSITORY_REPO = 'AGREE'
PRODUCT_ASSET_PATTERN = re.compile(r'com.rockwellcollins.atc.agree.repository-\d+\.\d+\.\d+(-(\d{12}))?-.*')
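# Illustrative match (the file name is made up): for an asset named
# "com.rockwellcollins.atc.agree.repository-2.9.1-202105171330-win32.zip",
# group(1) is "-202105171330" and group(2) is the 12-digit build timestamp
# "202105171330" used below to find the latest build.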
def manage_daily_builds(sname):
print('Managing builds matching %s' % (sname))
# obtain git handle
gh = GitHub(GITHUB_API, token=AUTH_TOKEN)
repository = gh.repository(REPOSITORY_OWNER, REPOSITORY_REPO)
# get list of releases
releases = repository.releases()
# extract keys and sort by build date
release_keys = {x.id : x.created_at for x in releases if sname in x.name}
sorted_keys = sorted(release_keys.items(), reverse=True, key=lambda x: x[1])
print('%s' % (pformat(sorted_keys)))
# filter to obtain the keys to delete
delete_keys = [v[0] for v in sorted_keys[2:]]
print('Deleting releases: %s' % (pformat(delete_keys)))
# iterate, deleting the releases and corresponding tags
for rel in releases:
print('examining rel %d from %s...' % (rel.id, str(rel.created_at)))
if rel.id in delete_keys and rel.tag_name is not None:
print(' deleting release id %d and tag %s.' % (rel.id, rel.tag_name))
rel_tag_ref = repository.ref('tags/%s' % (rel.tag_name))
rel.delete()
if rel_tag_ref is not None:
print(' deleting tag %s' % (rel_tag_ref.ref))
rel_tag_ref.delete()
else:
# Look for stale files in the release
assets = rel.assets()
print('In release %s found assets:' % (rel.name))
for asset in assets:
match = PRODUCT_ASSET_PATTERN.search(asset.name)
print(' asset named %s matches %s' % (asset.name, match.group(1) if match is not None else 'None'))
build_times = sorted([PRODUCT_ASSET_PATTERN.search(x.name).group(1) for x in assets if PRODUCT_ASSET_PATTERN.search(x.name)])
latest_build_time = build_times[-1] if build_times else None
print('Lastest build time is %s' % (latest_build_time))
for asset in assets:
match = PRODUCT_ASSET_PATTERN.search(asset.name)
# print(' asset named %s matches %s' % (asset.name, match.group(1) if match is not None else 'None'))
if match is not None:
asset_build_time = match.group(1)
if asset_build_time != latest_build_time:
print('deleting stale asset %s' % (asset.name))
asset.delete()
if __name__ == '__main__':
manage_daily_builds(sys.argv[1])
| 1.75 | 2 |
tests/crowdsourcing/tasks/turn_annotations_static/test_turn_annotations_static_analysis.py | KaihuiLiang/ParlAI | 0 | 6219 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Test components of specific crowdsourcing tasks.
"""
import json
import os
import unittest
import pandas as pd
import parlai.utils.testing as testing_utils
try:
from parlai.crowdsourcing.tasks.turn_annotations_static.analysis.compile_results import (
TurnAnnotationsStaticResultsCompiler,
)
from parlai.crowdsourcing.utils.tests import check_stdout
class TestAnalysis(unittest.TestCase):
"""
Test the analysis code for the static turn annotations task.
"""
def test_compile_results(self):
"""
Test compiling results on a dummy set of data.
"""
with testing_utils.tempdir() as tmpdir:
# Define expected stdout
# Paths
analysis_samples_folder = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'analysis_samples'
)
analysis_outputs_folder = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'test_turn_annotations_static_analysis',
)
expected_stdout_path = os.path.join(
analysis_outputs_folder, 'test_stdout.txt'
)
temp_gold_annotations_path = os.path.join(
tmpdir, 'gold_annotations.json'
)
# Save a file of gold annotations
gold_annotations = {
"1_0_5": {
"bucket_0": False,
"bucket_1": False,
"bucket_2": False,
"bucket_3": False,
"bucket_4": False,
"none_all_good": True,
},
"1_1_5": {
"bucket_0": False,
"bucket_1": False,
"bucket_2": False,
"bucket_3": False,
"bucket_4": True,
"none_all_good": False,
},
"2_0_5": {
"bucket_0": False,
"bucket_1": True,
"bucket_2": False,
"bucket_3": False,
"bucket_4": False,
"none_all_good": False,
},
"2_1_5": {
"bucket_0": False,
"bucket_1": False,
"bucket_2": False,
"bucket_3": False,
"bucket_4": True,
"none_all_good": False,
},
}
with open(temp_gold_annotations_path, 'w') as f:
json.dump(gold_annotations, f)
# Run compilation of results
parser = TurnAnnotationsStaticResultsCompiler.setup_args()
parser.set_defaults(
**{
'results_folders': analysis_samples_folder,
'output_folder': tmpdir,
'onboarding_in_flight_data_file': os.path.join(
analysis_samples_folder, 'onboarding_in_flight.jsonl'
),
'gold_annotations_file': temp_gold_annotations_path,
}
)
args = parser.parse_args([])
with testing_utils.capture_output() as output:
compiler = TurnAnnotationsStaticResultsCompiler(vars(args))
compiler.NUM_SUBTASKS = 3
compiler.NUM_ANNOTATIONS = 3
compiler.compile_results()
actual_stdout = output.getvalue()
# Check the output against what it should be
check_stdout(
actual_stdout=actual_stdout,
expected_stdout_path=expected_stdout_path,
)
# Check that the saved results file is what it should be
sort_columns = ['hit_id', 'worker_id', 'conversation_id', 'turn_idx']
expected_results_path = os.path.join(
analysis_outputs_folder, 'expected_results.csv'
)
expected_results = (
pd.read_csv(expected_results_path)
.drop('folder', axis=1)
.sort_values(sort_columns)
.reset_index(drop=True)
)
# Drop the 'folder' column, which contains a system-dependent path string
actual_results_rel_path = [
obj for obj in os.listdir(tmpdir) if obj.startswith('results')
][0]
actual_results_path = os.path.join(tmpdir, actual_results_rel_path)
actual_results = (
pd.read_csv(actual_results_path)
.drop('folder', axis=1)
.sort_values(sort_columns)
.reset_index(drop=True)
)
if not actual_results.equals(expected_results):
raise ValueError(
f'\n\n\tExpected results:\n{expected_results.to_csv()}'
f'\n\n\tActual results:\n{actual_results.to_csv()}'
)
except ImportError:
pass
if __name__ == "__main__":
unittest.main()
| 2.5 | 2 |
scripts/selectors.py | bartongroup/slivka-bio | 0 | 6220 | def example_selector(*args, **kwargs): return "default"
| 1.429688 | 1 |
applied_python/applied_python/lib/python2.7/site-packages/ansible/modules/extras/messaging/rabbitmq_plugin.py | mith1979/ansible_automation | 1 | 6221 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Chatham Financial <<EMAIL>>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rabbitmq_plugin
short_description: Adds or removes plugins to RabbitMQ
description:
- Enables or disables RabbitMQ plugins
version_added: "1.1"
author: <NAME>
options:
names:
description:
- Comma-separated list of plugin names
required: true
default: null
aliases: [name]
new_only:
description:
- Only enable missing plugins
- Does not disable plugins that are not in the names list
required: false
default: "no"
choices: [ "yes", "no" ]
state:
description:
- Specify if plugins are to be enabled or disabled
required: false
default: enabled
choices: [enabled, disabled]
prefix:
description:
- Specify a custom install prefix to a Rabbit
required: false
version_added: "1.3"
default: null
'''
EXAMPLES = '''
# Enables the rabbitmq_management plugin
- rabbitmq_plugin: names=rabbitmq_management state=enabled
'''
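# A further illustrative task (an assumption, not part of the shipped EXAMPLES
# string): disabling a plugin for a RabbitMQ installed under a custom prefix.
#
# - rabbitmq_plugin: names=rabbitmq_shovel state=disabled prefix=/usr/lib/rabbitmq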
class RabbitMqPlugins(object):
def __init__(self, module):
self.module = module
if module.params['prefix']:
self._rabbitmq_plugins = module.params['prefix'] + "/sbin/rabbitmq-plugins"
else:
self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True)
def _exec(self, args, run_in_check_mode=False):
if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
cmd = [self._rabbitmq_plugins]
rc, out, err = self.module.run_command(cmd + args, check_rc=True)
return out.splitlines()
return list()
def get_all(self):
return self._exec(['list', '-E', '-m'], True)
def enable(self, name):
self._exec(['enable', name])
def disable(self, name):
self._exec(['disable', name])
def main():
arg_spec = dict(
names=dict(required=True, aliases=['name']),
new_only=dict(default='no', type='bool'),
state=dict(default='enabled', choices=['enabled', 'disabled']),
prefix=dict(required=False, default=None)
)
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
names = module.params['names'].split(',')
new_only = module.params['new_only']
state = module.params['state']
rabbitmq_plugins = RabbitMqPlugins(module)
enabled_plugins = rabbitmq_plugins.get_all()
enabled = []
disabled = []
if state == 'enabled':
if not new_only:
for plugin in enabled_plugins:
if plugin not in names:
rabbitmq_plugins.disable(plugin)
disabled.append(plugin)
for name in names:
if name not in enabled_plugins:
rabbitmq_plugins.enable(name)
enabled.append(name)
else:
for plugin in enabled_plugins:
if plugin in names:
rabbitmq_plugins.disable(plugin)
disabled.append(plugin)
changed = len(enabled) > 0 or len(disabled) > 0
module.exit_json(changed=changed, enabled=enabled, disabled=disabled)
# import module snippets
from ansible.module_utils.basic import *
main()
| 1.484375 | 1 |
vitrage/datasources/static/driver.py | HoonMinJeongUm/Hunmin-vitrage | 0 | 6222 | # Copyright 2016 - Nokia, ZTE
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from itertools import chain
from six.moves import reduce
from oslo_log import log
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.common.constants import GraphAction
from vitrage.datasources.driver_base import DriverBase
from vitrage.datasources.static import STATIC_DATASOURCE
from vitrage.datasources.static import StaticFields
from vitrage.utils import file as file_utils
LOG = log.getLogger(__name__)
class StaticDriver(DriverBase):
# base fields are required for all entities, others are treated as metadata
BASE_FIELDS = {StaticFields.STATIC_ID,
StaticFields.TYPE,
StaticFields.ID}
def __init__(self, conf):
super(StaticDriver, self).__init__()
self.cfg = conf
self.entities_cache = []
@staticmethod
def _is_valid_config(config):
"""check for validity of configuration"""
# TODO(yujunz) check with yaml schema or reuse template validation
return StaticFields.DEFINITIONS in config
@staticmethod
def get_event_types():
return []
def enrich_event(self, event, event_type):
pass
def get_all(self, datasource_action):
return self.make_pickleable(self._get_and_cache_all_entities(),
STATIC_DATASOURCE,
datasource_action)
def get_changes(self, datasource_action):
return self.make_pickleable(self._get_and_cache_changed_entities(),
STATIC_DATASOURCE,
datasource_action)
def _get_and_cache_all_entities(self):
self.entities_cache = self._get_all_entities()
return self.entities_cache
def _get_all_entities(self):
files = file_utils.list_files(self.cfg.static.directory, '.yaml', True)
return list(reduce(chain, [self._get_entities_from_file(path)
for path in files], []))
def _get_and_cache_changed_entities(self):
changed_entities = []
new_entities = self._get_all_entities()
for new_entity in new_entities:
old_entity = self._find_entity(new_entity, self.entities_cache)
if old_entity:
# Add modified entities
if not self._equal_entities(old_entity, new_entity):
changed_entities.append(new_entity.copy())
else:
# Add new entities
changed_entities.append(new_entity.copy())
# Add deleted entities
for old_entity in self.entities_cache:
if not self._find_entity(old_entity, new_entities):
old_entity_copy = old_entity.copy()
old_entity_copy[DSProps.EVENT_TYPE] = GraphAction.DELETE_ENTITY
changed_entities.append(old_entity_copy)
self.entities_cache = new_entities
return changed_entities
@classmethod
def _get_entities_from_file(cls, path):
config = file_utils.load_yaml_file(path)
if not cls._is_valid_config(config):
LOG.warning("Skipped invalid config (possible obsoleted): {}"
.format(path))
return []
definitions = config[StaticFields.DEFINITIONS]
entities = definitions[StaticFields.ENTITIES]
relationships = definitions[StaticFields.RELATIONSHIPS]
return cls._pack(entities, relationships)
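    # Hedged illustration of the expected YAML layout (the values are made up
    # and the key names are assumed to follow the StaticFields constants used
    # by _is_valid_config and _pack):
    #
    #     definitions:
    #       entities:
    #         - static_id: s1
    #           type: switch
    #           id: switch-1
    #         - static_id: h1
    #           type: nova.host
    #           id: host-1
    #       relationships:
    #         - source: s1
    #           target: h1
    #           relationship_type: attached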
@classmethod
def _pack(cls, entities, relationships):
entities_dict = {}
for entity in entities:
cls._pack_entity(entities_dict, entity)
for rel in relationships:
cls._pack_rel(entities_dict, rel)
return entities_dict.values()
@classmethod
def _pack_entity(cls, entities_dict, entity):
static_id = entity[StaticFields.STATIC_ID]
if static_id not in entities_dict:
metadata = {key: value for key, value in entity.items()
if key not in cls.BASE_FIELDS}
entities_dict[static_id] = entity
entity[StaticFields.RELATIONSHIPS] = []
entity[StaticFields.METADATA] = metadata
else:
LOG.warning("Skipped duplicated entity: {}".format(entity))
@classmethod
def _pack_rel(cls, entities_dict, rel):
source_id = rel[StaticFields.SOURCE]
target_id = rel[StaticFields.TARGET]
if source_id == target_id:
# self pointing relationship
entities_dict[source_id][StaticFields.RELATIONSHIPS].append(rel)
else:
source, target = entities_dict[source_id], entities_dict[target_id]
source[StaticFields.RELATIONSHIPS].append(
cls._expand_neighbor(rel, target))
@staticmethod
def _expand_neighbor(rel, neighbor):
"""Expand config id to neighbor entity
rel={'source': 's1', 'target': 'r1', 'relationship_type': 'attached'}
neighbor={'static_id': 'h1', 'vitrage_type': 'host.nova', 'id': 1}
result={'relationship_type': 'attached', 'source': 's1',
'target': {'static_id': 'h1',
'vitrage_type': 'host.nova',
'id': 1}}
"""
rel = rel.copy()
if rel[StaticFields.SOURCE] == neighbor[StaticFields.STATIC_ID]:
rel[StaticFields.SOURCE] = neighbor
elif rel[StaticFields.TARGET] == neighbor[StaticFields.STATIC_ID]:
rel[StaticFields.TARGET] = neighbor
else:
# TODO(yujunz) raise exception and ignore invalid relationship
LOG.error("Invalid neighbor {} for relationship {}"
.format(neighbor, rel))
return None
return rel
@staticmethod
def _find_entity(search_entity, entities):
# naive implementation since we don't expect many static entities
for entity in entities:
if entity[StaticFields.TYPE] == search_entity[StaticFields.TYPE] \
and entity[StaticFields.ID] == \
search_entity[StaticFields.ID]:
return entity
@staticmethod
def _equal_entities(old_entity, new_entity):
# TODO(iafek): compare also the relationships
return old_entity.get(StaticFields.TYPE) == \
new_entity.get(StaticFields.TYPE) and \
old_entity.get(StaticFields.ID) == \
new_entity.get(StaticFields.ID) and \
old_entity.get(StaticFields.NAME) == \
new_entity.get(StaticFields.NAME) and \
old_entity.get(StaticFields.STATE) == \
new_entity.get(StaticFields.STATE)
| 1.710938 | 2 |
napari/layers/shapes/mesh.py | marshuang80/napari | 0 | 6223 | import numpy as np
class Mesh:
"""Contains meshses of shapes that will ultimately get rendered.
Attributes
----------
vertices : np.ndarray
Qx2 array of vertices of all triangles for shapes including edges and
faces
vertices_centers : np.ndarray
Qx2 array of centers of vertices of triangles for shapes. For vertices
corresponding to faces these are the same as the actual vertices. For
vertices corresponding to edges these values should be added to a
scaled `vertices_offsets` to get the actual vertex positions.
The scaling corresponds to the width of the edge
vertices_offsets : np.ndarray
Qx2 array of offsets of vertices of triangles for shapes. For vertices
corresponding to faces these are 0. For vertices corresponding to
edges these values should be scaled and added to the
`vertices_centers` to get the actual vertex positions.
The scaling corresponds to the width of the edge
vertices_index : np.ndarray
Qx2 array of the index (0, ..., N-1) of each shape that each vertex
corresponds and the mesh type (0, 1) for face or edge.
triangles : np.ndarray
Px3 array of vertex indices that form the mesh triangles
triangles_index : np.ndarray
Px2 array of the index (0, ..., N-1) of each shape that each triangle
corresponds and the mesh type (0, 1) for face or edge.
triangles_colors : np.ndarray
Px4 array of the rgba color of each triangle
triangles_z_order : np.ndarray
Length P array of the z order of each triangle. Must be a permutation
of (0, ..., P-1)
Extended Summary
----------
_types : list
Length two list of the different mesh types corresponding to faces and
edges
"""
_types = ['face', 'edge']
def __init__(self):
self.clear()
def clear(self):
"""Resets mesh data
"""
self.vertices = np.empty((0, 2))
self.vertices_centers = np.empty((0, 2))
self.vertices_offsets = np.empty((0, 2))
self.vertices_index = np.empty((0, 2), dtype=int)
self.triangles = np.empty((0, 3), dtype=np.uint32)
self.triangles_index = np.empty((0, 2), dtype=int)
self.triangles_colors = np.empty((0, 4))
self.triangles_z_order = np.empty((0), dtype=int)
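# Illustrative sketch (not from the napari codebase): a mesh holding a single
# face triangle for shape index 0 would be populated roughly like this
# (mesh type 0 means 'face', per _types above).
#
#     m = Mesh()
#     m.vertices = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
#     m.vertices_centers = m.vertices.copy()
#     m.vertices_offsets = np.zeros((3, 2))
#     m.vertices_index = np.array([[0, 0], [0, 0], [0, 0]])
#     m.triangles = np.array([[0, 1, 2]], dtype=np.uint32)
#     m.triangles_index = np.array([[0, 0]])
#     m.triangles_colors = np.array([[1.0, 1.0, 1.0, 1.0]])
#     m.triangles_z_order = np.array([0])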
| 2.859375 | 3 |
python/helpers.py | cdacos/astrophysics_with_a_pc | 0 | 6224 | import sys
def start_parameter(text, i):
if len(sys.argv) > i:
print('{0}{1}'.format(text, sys.argv[i]))
return float(sys.argv[i])
else:
return float(raw_input(text))
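# Hedged usage sketch (the prompt text and argument position are assumptions):
#
#     mass = start_parameter('Mass of the star (solar masses): ', 1)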
| 2.796875 | 3 |
configs/docker-ubuntu-img/para.py | MarioCarrilloA/stx-packaging | 1 | 6225 | #!/usr/bin/python3
# vim:se tw=0 sts=4 ts=4 et ai:
"""
Copyright © 2014 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import os
import pwd
import sys
import time
import debmake.read
###########################################################################
# undefined environment variable -> ''
def env(var):
try:
return os.environ[var]
except KeyError:
return ''
#######################################################################
# Initialize parameters
#######################################################################
def para(para):
debmail = env('DEBEMAIL')
if not debmail:
#debmail = os.getlogin() + '@localhost'
        debmail = pwd.getpwuid(os.getuid())[0] + '@localhost'
debfullname = env('DEBFULLNAME')
if not debfullname:
# os.getlogin may not work well: #769392
#debfullname = pwd.getpwnam(os.getlogin())[4].split(',')[0]
debfullname = pwd.getpwuid(os.getuid())[4].split(',')[0]
#######################################################################
# command line setting
#######################################################################
p = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description = '''\
{0}: make Debian source package Version: {1}
{2}
{0} helps to build the Debian package from the upstream source.
Normally, this is done as follows:
* The upstream tarball is downloaded as the package-version.tar.gz file.
* It is untared to create many files under the package-version/ directory.
* {0} is invoked in the package-version/ directory possibly without any arguments.
* Files in the package-version/debian/ directory are manually adjusted.
* dpkg-buildpackage (usually from its wrapper debuild or pdebuild) is invoked in the package-version/ directory to make debian packages.
Argument may need to be quoted to protect from the shell.
'''.format(
para['program_name'],
para['program_version'],
para['program_copyright']),
epilog='See debmake(1) manpage for more.')
ck = p.add_mutually_exclusive_group()
ck.add_argument(
'-c',
'--copyright',
action = 'count',
default = 0,
help = 'scan source for copyright+license text and exit')
ck.add_argument(
'-k',
'--kludge',
action = 'count',
default = 0,
help = 'compare debian/copyright with the source and exit')
sp = p.add_mutually_exclusive_group()
sp.add_argument(
'-n',
'--native',
action = 'store_true',
default = False,
help = 'make a native source package without .orig.tar.gz')
sp.add_argument(
'-a',
'--archive',
type = str,
action = 'store',
default = '',
help = 'use the upstream source tarball directly (-p, -u, -z: overridden)',
metavar = 'package-version.tar.gz')
sp.add_argument(
'-d',
'--dist',
action = 'store_true',
default = False,
help = 'run "make dist" equivalent first to generate upstream tarball and use it')
sp.add_argument(
'-t',
'--tar',
action = 'store_true',
default = False,
help = 'run "tar" to generate upstream tarball and use it')
p.add_argument(
'-p',
'--package',
action = 'store',
default = '',
help = 'set the Debian package name',
metavar = 'package')
p.add_argument(
'-u',
'--upstreamversion',
action = 'store',
default = '',
help = 'set the upstream package version',
metavar = 'version')
p.add_argument(
'-r',
'--revision',
action = 'store',
default = '',
help = 'set the Debian package revision',
metavar = 'revision')
p.add_argument(
'-z',
'--targz',
action = 'store',
default = '',
help = 'set the tarball type, extension=(tar.gz|tar.bz2|tar.xz)',
metavar = 'extension')
p.add_argument(
'-b',
'--binaryspec',
action = 'store',
default = '',
help = 'set binary package specs as comma separated list of "binarypackage":"type" pairs, e.g., in full form "foo:bin,foo-doc:doc,libfoo1:lib,libfoo1-dbg:dbg,libfoo-dev:dev" or in short form ",-doc,libfoo1,libfoo1-dbg, libfoo-dev". Here, "binarypackage" is the binary package name; and optional "type" is chosen from "bin", "data", "dbg", "dev", "doc", "lib", "perl", "python", "python3", "ruby", and "script". If "type" is not specified but obvious, it is set by "binarypackage". Otherwise it is set to "bin" for the compiled ELF binary.',
metavar = 'binarypackage[:type]')
p.add_argument(
'-e',
'--email',
action = 'store',
default = debmail,
help = 'set e-mail address',
metavar = '<EMAIL>')
p.add_argument(
'-f',
'--fullname',
action = 'store',
default = debfullname,
help = 'set the fullname',
metavar = '"firstname lastname"')
# p.add_argument(
# '-g',
# '--gui',
# action = 'store_true',
# default = False,
# help = 'run GUI configuration')
#
# -h : used by argparse for --help
ep = p.add_mutually_exclusive_group()
ep.add_argument(
'-i',
'--invoke',
default = '',
action = 'store',
help = 'invoke package build tool',
metavar = '[debuild|pdebuild|...]')
ep.add_argument(
'-j',
'--judge',
action = 'store_true',
default = False,
help = 'run "dpkg-depcheck" to judge build dependencies and identify file paths')
p.add_argument(
'-l',
'--license',
default = '',
action = 'store',
help = 'add formatted license to debian/copyright',
metavar = '"license_file"')
p.add_argument(
'-m',
'--monoarch',
action = 'store_true',
default = False,
help = 'force packages to be non-multiarch')
p.add_argument(
'-o',
'--option',
default = '',
action = 'store',
help = 'read optional parameters from "file"',
metavar = '"file"')
p.add_argument(
'-q',
'--quitearly',
action = 'store_true',
default = False,
help='quit early before creating files in the debian directory')
p.add_argument(
'-s',
'--spec',
action = 'store_true',
default = False,
help = 'use upstream spec')
p.add_argument(
'-v',
'--version',
action = 'store_true',
default = False,
help = 'show version information')
p.add_argument(
'-w',
'--with',
action = 'store',
default = '',
dest = 'withargs',
help = 'set additional "dh --with" option arguments',
metavar = 'args')
p.add_argument(
'-x',
'--extra',
default = '',
action = 'store',
help = 'generate extra configuration files as templates',
metavar = '[01234]')
p.add_argument(
'-y',
'--yes',
action = 'count',
default = 0,
help = '"force yes" for all prompts')
p.add_argument(
'-L',
'--local',
action = 'store_true',
default = False,
help='generate configuration files for the local package')
p.add_argument(
'-P',
'--pedantic',
action = 'store_true',
default = False,
help='pedantically check auto-generated files')
p.add_argument(
'-T',
'--tutorial',
action = 'store_true',
default = False,
help='output tutorial comment lines in template files')
args = p.parse_args()
#######################################################################
# Set parameter values
#######################################################################
############################################# -a
if args.archive:
para['archive'] = True
para['tarball'] = args.archive
else:
para['archive'] = False
para['tarball'] = ''
#############################################
para['binaryspec'] = args.binaryspec # -b
para['copyright'] = min(args.copyright, 6) # -c
if para['copyright'] >=4:
para['copyright'] = 3 - para['copyright']
# 0: debian/copyright, +/-1: simple, +/-2: standard +/-3: extensive
para['dist'] = args.dist # -d
para['email'] = args.email # -e
para['fullname'] = args.fullname # -f
# para['gui'] = args.gui # -g
para['invoke'] = args.invoke # -i
para['judge'] = args.judge # -j
if para['judge']:
para['override'].update({'judge'})
para['kludge'] = args.kludge # -k
############################################# -l
# --license: args.license -> para['license'] as set
if args.license == '':
para['license'] = set({'[Cc][Oo][Pp][Yy][Ii][Nn][Gg]*',
'[Ll][Ii][Cc][Ee][Nn][Ss][Ee]*'}) # default
else:
        para['license'] = set(args.license.split(','))
#############################################
para['monoarch'] = args.monoarch # -m
para['native'] = args.native # -n
para['package'] = args.package.lower() # -p
#############################################
para['quitearly'] = args.quitearly # -q
para['revision'] = args.revision # -r
para['spec'] = args.spec # -s
para['tar'] = args.tar # -t
para['version'] = args.upstreamversion # -u
para['print_version'] = args.version # -v
############################################# -w
# --with: args.withargs -> para['dh_with'] as set
if args.withargs == '':
para['dh_with'] = set() # default is empty set
else:
para['dh_with'] = set(args.withargs.split(','))
#############################################
para['extra'] = args.extra # -x
para['yes'] = min(args.yes, 2) # -y
# 0: ask, 1: yes, 2: no
para['targz'] = args.targz # -z
para['local'] = args.local # -L
para['pedantic'] = args.pedantic # -P
para['tutorial'] = args.tutorial # -T
############################################# -o
if args.option:
exec(debmake.read.read(args.option))
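        # Editor's note: the option file read here is plain Python that is exec'ed with the
        # already-populated 'para' dict in scope; a hypothetical example of its contents:
        #     para['monoarch'] = True
        #     para['dh_with'].add('python3')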
#######################################################################
# return command line parameters
#######################################################################
return para
#######################################################################
# Test code
#######################################################################
if __name__ == '__main__':
    # editor's note: para() needs a seed dict; these placeholder values are assumptions for
    # standalone testing only and are not taken from debmake itself
    for p, v in para({'program_name': 'debmake',
                      'program_version': '0.0',
                      'program_copyright': '',
                      'override': set()}).items():
print("para['{}'] = \"{}\"".format(p,v))
| 1.890625 | 2 |
build/lib/jet_django/views/model.py | lukejamison/jet-dasboard | 193 | 6226 | from django.core.exceptions import NON_FIELD_ERRORS
from rest_framework import status, viewsets, serializers
from rest_framework.decorators import list_route
from rest_framework.response import Response
from rest_framework.serializers import ModelSerializer
from jet_django.filters.model_aggregate import AggregateFilter
from jet_django.filters.model_group import GroupFilter
from jet_django.pagination import CustomPageNumberPagination
from jet_django.permissions import HasProjectPermissions, ModifyNotInDemo
from jet_django.serializers.reorder import reorder_serializer_factory
class AggregateSerializer(serializers.Serializer):
y_func = serializers.IntegerField()
def __init__(self, *args, **kwargs):
if 'y_func_serializer' in kwargs:
self.fields['y_func'] = kwargs.pop('y_func_serializer')
super().__init__(*args, **kwargs)
class GroupSerializer(serializers.Serializer):
group = serializers.CharField()
y_func = serializers.IntegerField()
def __init__(self, *args, **kwargs):
if 'group_serializer' in kwargs:
self.fields['group'] = kwargs.pop('group_serializer')
if 'y_func_serializer' in kwargs:
self.fields['y_func'] = kwargs.pop('y_func_serializer')
super().__init__(*args, **kwargs)
def model_viewset_factory(build_model, build_filter_class, build_serializer_class, build_detail_serializer_class, build_queryset, build_actions, ordering_field):
ReorderSerializer = reorder_serializer_factory(build_queryset, ordering_field)
class Viewset(viewsets.ModelViewSet):
model = build_model
queryset = build_queryset
pagination_class = CustomPageNumberPagination
filter_class = build_filter_class
authentication_classes = ()
permission_classes = (HasProjectPermissions, ModifyNotInDemo)
def get_serializer_class(self):
if self.action == 'aggregate':
return AggregateSerializer
elif self.action == 'group':
return GroupSerializer
elif self.action == 'retrieve':
return build_detail_serializer_class
else:
return build_serializer_class
@list_route(methods=['get'])
def aggregate(self, request):
queryset = self.filter_queryset(self.get_queryset())
y_func = request.GET['_y_func'].lower()
y_column = request.GET.get('_y_column', 'id')
y_field = self.model._meta.get_field(y_column)
y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field)
y_serializer = y_serializer_class(**y_serializer_kwargs)
queryset = AggregateFilter().filter(queryset, {
'y_func': y_func,
'y_column': y_column
})
serializer = self.get_serializer(
queryset,
y_func_serializer=y_serializer
)
return Response(serializer.data)
@list_route(methods=['get'])
def group(self, request):
queryset = self.filter_queryset(self.get_queryset())
x_column = request.GET['_x_column']
x_lookup_name = request.GET.get('_x_lookup')
y_func = request.GET['_y_func'].lower()
y_column = request.GET.get('_y_column', 'id')
x_field = self.model._meta.get_field(x_column)
x_lookup = x_field.class_lookups.get(x_lookup_name)
y_field = self.model._meta.get_field(y_column)
if x_lookup:
x_field = x_lookup('none').output_field
x_serializer_class, x_serializer_kwargs = ModelSerializer().build_standard_field(x_column, x_field)
x_serializer = x_serializer_class(**x_serializer_kwargs)
y_serializer_class, y_serializer_kwargs = ModelSerializer().build_standard_field(y_column, y_field)
y_serializer = y_serializer_class(**y_serializer_kwargs)
queryset = GroupFilter().filter(queryset, {
'x_column': x_column,
'x_lookup': x_lookup,
'y_func': y_func,
'y_column': y_column
})
serializer = self.get_serializer(
queryset,
many=True,
group_serializer=x_serializer,
y_func_serializer=y_serializer
)
return Response(serializer.data)
def get_serializer(self, *args, **kwargs):
"""
Return the serializer instance that should be used for validating and
deserializing input, and for serializing output.
"""
serializer_class = self.get_serializer_class()
kwargs['context'] = self.get_serializer_context()
return serializer_class(*args, **kwargs)
@list_route(methods=['post'])
def reorder(self, request):
serializer = ReorderSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
@list_route(methods=['post'])
def reset_order(self, request):
i = 1
for instance in build_queryset:
setattr(instance, ordering_field, i)
instance.save()
i += 1
return Response({})
for action in build_actions:
def route(self, request):
form = action(data=request.data)
if not form.is_valid():
return Response(form.errors, status=status.HTTP_400_BAD_REQUEST)
queryset = form.filer_queryset(self.get_queryset())
try:
result = form.save(queryset)
except Exception as e:
return Response({NON_FIELD_ERRORS: str(e)}, status=status.HTTP_400_BAD_REQUEST)
return Response({'action': form._meta.name, 'result': result})
decorator = list_route(methods=['post'])
route = decorator(route)
setattr(Viewset, action._meta.name, route)
return Viewset
| 1.914063 | 2 |
python_minecraft_tut_2021/weatherCraft.py | LeGamermc/ursina_tutorials | 13 | 6227 | """
Weather functions.
"""
from ursina import color, window, time
from nMap import nMap
class Weather:
def __init__(this, rate=1):
this.red = 0
this.green = 200
this.blue = 211
this.darkling = 0
this.rate = rate
this.towardsNight = 1
def setSky(this):
r = nMap(this.darkling,0,100,0,this.red)
g = nMap(this.darkling,0,100,0,this.green)
b = nMap(this.darkling,0,100,0,this.blue)
window.color = color.rgb(r,g,b)
def update(this):
this.darkling -= ( this.rate *
this.towardsNight *
time.dt)
if this.darkling < 0:
this.towardsNight *= -1
this.darkling = 0
this.setSky()
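
# Editor's note: a hypothetical way to drive this class from an ursina scene (not part of
# the original tutorial file); ursina calls a module-level update() every frame.
#   weather = Weather(rate=5)
#   def update():
#       weather.update()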
| 3.21875 | 3 |
davenetgame/dispatch/dispatcher.py | davefancella/davenetgame | 0 | 6228 | #!/usr/bin/env python3
'''
Copyright 2016 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import threading, time
from davenetgame.dispatch.base import DispatcherBase
from davenetgame.protocol import connection
## @file dispatcher
#
# This file contains the standard, generic EventDispatcher class. It's the one you use if
# the library doesn't support your preferred game engine, or if you'd rather manage the library
# independently of your game engine.
## This is the standard EventDispatcher.
class EventDispatcher(DispatcherBase):
pass
## This is a special server-oriented EventDispatcher that provides for an interactive console
# on the server when run in a terminal. This is probably most useful for testing the library,
# though it's not unheard of for a server to run in a terminal and have a console.
class EventDispatcherServer(DispatcherBase):
__console = None
__consolecommands = None
def __init__(self, **args):
super().__init__(**args)
self.__console = ConsoleInput()
self.__consolecommands = []
# Register the standard commands available to every game server.
self.RegisterCommand('show', self.consoleShow, "show (connections)", "Show whatever you want to see.")
self.RegisterCommand('help', self.consoleHelp, "help [command]", "print this helpful text. Alternately, type in a command to see its helpful text.")
self.RegisterCommand('quit', self.consoleQuit, "quit", "Quit the server.")
def Start(self):
self.__console.Start()
super().Start()
def Update(self, timestep):
try:
while self.__console.HasPending():
msg = self.__console.pop()
args = msg.split(" ")
command = args.pop(0)
command = command.lower()
# Ignore simple presses of enter
if command == '':
continue
foundcommand = False
for a in self.__consolecommands:
if a.command() == command:
a.callback(*args)
foundcommand = True
if not foundcommand:
print("Command not recognized: " + command)
except:
pass
super().Update(timestep)
## @name Console API
#
# These methods give access to the built-in server console and the various commands that
# can be created.
#@{
## Console command: show
def consoleShow(self, *args):
if len(args) != 1:
print("Usage: show (connections)")
else:
if args[0] == "connections":
if len(self.GetConnections() ) == 0:
print("There are no connections at this time.")
else:
for a in self.GetConnections():
print("{0:3}: {1:40} {2:10} {3:4}".format(a.id(), str(a), connection.statuslist[a.Status()][1], int(a.GetConnectionPing() * 1000) ) )
else:
print("Unknown thing to show: " + args[0])
## Console command: help
def consoleHelp(self, *args):
if len(args) > 0:
for a in self.__consolecommands:
if a.command() == args[0]:
print("%10s : %s" % (args[0], a.helplong() ))
print("%13s %s" % (" ", a.helpshort() ))
print
else:
print("Command not found.")
else:
for a in self.__consolecommands:
print("%10s : %s" % (a.command(), a.helplong() ))
print("%13s %s" % (" ", a.helpshort() ))
print()
## Console command: quit
def consoleQuit(self, *args):
print("Quit signaled from console.")
self.Stop()
self.__console.Stop()
## Call to register console commands with the server. The library implements a number of standard
# commands, but games may need their own commands. In that case, you will need your own callbacks.
def RegisterCommand(self, command, callback, helpshort, helplong):
self.__consolecommands.append(ConsoleCommand(
command = command,
callback = callback,
helpshort = helpshort,
helplong = helplong
)
)
#@}
## This class implements console commands. To create a new console command, simply make an instance of
# this class, giving all the keyword arguments in the constructor.
# @param 'command' : the name of the command, what the user types to use it.
# @param 'callback' : a function that will process the command when the user types it.
# @param 'helpshort' : short help text, usually one line of text, preferably not more than 50 characters.
# In output, it will be prepended with "Usage: "
# @param 'helplong' : long help text, can be as long as needed, as many lines as needed. Do not put
# line endings, however. Those will be added as needed. You may put line endings to
# signify paragraph breaks, if need be.
class ConsoleCommand(object):
__command = None
__callback = None
__helpshort = None
__helplong = None
def __init__(self, **args):
# Ensure the command is always lowercase
self.__command = args['command'].strip().lower()
self.__callback = args['callback']
self.__helpshort = args['helpshort']
self.__helplong = args['helplong']
def callback(self, *args):
self.__callback(*args)
def command(self):
return self.__command
def helpshort(self):
return self.__helpshort
def helplong(self):
return self.__helplong
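
## Editor's note: a minimal, hypothetical sketch of how a game-specific command could be
## registered, following the ConsoleCommand/RegisterCommand parameters documented above.
## The 'status' command and its callback are illustrative only, not part of davenetgame.
def _example_register_status_command(dispatcher):
    def console_status(*args):
        print("dispatcher is alive")
    dispatcher.RegisterCommand('status', console_status,
                               "status",
                               "Print a one-line liveness message to the console.")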
## This class makes the console input non-blocking.
class ConsoleInput(threading.Thread):
## This is the lock that must be called to avoid thread collisions
__lock = None
## This is a queue of commands, unparsed.
__pcommands = None
def __init__(self, **args):
threading.Thread.__init__(self, **args)
self.__lock = threading.RLock()
self.__pcommands = []
## Call to start the client.
def Start(self):
self.__continue = True
self.start()
## Stops the server. It may still take a few seconds or so. If blocking is "True", then the call will
# block until the server has shut down.
def Stop(self, blocking=False):
self.__continue = False
if blocking:
self.join()
## Returns true if there are pending lines from stdin to work with
def HasPending(self):
if len(self.__pcommands) > 0:
return True
return False
## Starts the console input. Don't call this directly, instead call Start().
def run(self):
while self.__continue:
msg = input(': ')
self.__lock.acquire()
self.__pcommands.append(msg.strip() )
self.__lock.release()
time.sleep(0.01)
## Pops the first item off the commands list and returns it.
def pop(self):
theCommand = None
if len(self.__pcommands) > 0:
self.__lock.acquire()
theCommand = self.__pcommands.pop(0)
self.__lock.release()
return theCommand
| 2.4375 | 2 |
account/migrations/0003_customuser_phone_number.py | zenofewords/thebrushstash | 0 | 6229 | # Generated by Django 2.2.7 on 2019-11-17 17:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('account', '0002_remove_customuser_full_name'),
]
operations = [
migrations.AddField(
model_name='customuser',
name='phone_number',
field=models.CharField(blank=True, max_length=500),
),
]
| 1.578125 | 2 |
03_Estrutura_de_Repeticao/13_potenciacao.py | gabrieldcpadilha/ListaDeExercicios-PythonBrasil | 0 | 6230 | base = int(input('Enter the base value: '))
expoente = 0
while expoente <= 0:
    expoente = int(input('Enter the exponent value: '))
    if expoente <= 0:
        print('The exponent must be positive')
potencia = 1
for c in range(1, expoente + 1):
potencia *= base
print(f'{base}^ {expoente} = {potencia}')
| 3.953125 | 4 |
accounting/accounting/doctype/journal_entry/journal_entry.py | noahjacob/Accounting | 1 | 6231 | # -*- coding: utf-8 -*-
# Copyright (c) 2021, <NAME> and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.utils import flt
from accounting.accounting.general_ledger import make_gl_entry, make_reverse_gl_entry
class JournalEntry(Document):
def validate(self):
calc_total_debit_credit(self)
if self.difference:
frappe.throw("The total debit and credit must be equal. The current difference is {}".format(self.difference))
if self.total_credit == 0 or self.total_debit == 0 :
frappe.throw('Total Cannot be Zero')
if not self.accounts:
frappe.throw('Account Entries are required')
else:
self.title = self.accounts[0].account
def on_submit(self):
for entry in self.accounts:
make_gl_entry(self,entry.account,entry.debit,entry.credit)
def on_cancel(self):
# cancel gl entry
make_reverse_gl_entry(self,self.doctype,self.name)
def calc_total_debit_credit(self):
self.total_debit, self.total_credit,self.difference = 0,0,0
for entry in self.accounts:
self.total_debit = flt(self.total_debit) +flt(entry.debit)
self.total_credit = flt(self.total_credit) + flt(entry.credit)
self.difference = flt(self.total_debit) - (self.total_credit) | 2.4375 | 2 |
polls/models.py | mmeooo/test_django | 0 | 6232 | from django.db import models
# Create your models here.
# Class feature: inheritance
class Question(models.Model): # Table
question_text= models.CharField(max_length= 100) # column, datatype
public_date= models.CharField(max_length= 100)
votes= models.DecimalField(max_digits= 20, decimal_places= 10)
# Building the model class with the two field types above is enough
# link, string-> CharField, data-> DecimalField
# max_length=100 is commonly used
class Economics(models.Model):
title= models.CharField(max_length= 100)
href= models.CharField(max_length= 100)
create_date= models.CharField(max_length= 100)
| 2.40625 | 2 |
ipcam/test_snap.py | jack139/HF | 10 | 6233 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys,os,time
if len(sys.argv)<2:
print "usage: test_snap.py <check|show>"
sys.exit(2)
kam_cmd=sys.argv[1]
path='/var/data2/snap_store'
a=os.listdir(path)
a.remove('535e1a5c1ecffb2fa372fd7d') # this is a camera not used in HF system
if kam_cmd=='show' or kam_cmd=='check':
last_sub=int(time.time()/600)
for i in a:
sub='%s/%s' % (path, i)
b=os.listdir(sub)
if 'capture' in b:
b.remove('capture')
b.sort()
sub2='%s/%s' % (sub, b[-1])
c=os.listdir(sub2)
if kam_cmd=='show' or last_sub-int(b[-1])>3:
print "%s - %d, %s - %d, (%d)" % (i, len(b), b[-1], len(c), last_sub-int(b[-1]))
else:
print "usage: test_snap.py <check|show>"
sys.exit(2)
| 2.4375 | 2 |
sources/datasets/client_dataset_definitions/client_dataset.py | M4rukku/impact_of_non_iid_data_in_federated_learning | 0 | 6234 | import functools
import gc
from abc import ABC
from sources.datasets.client_dataset_definitions.client_dataset_loaders.client_dataset_loader import ClientDatasetLoader, DatasetComponents
from sources.datasets.client_dataset_definitions.client_dataset_processors.client_dataset_processor import ClientDatasetProcessor
from sources.utils.exception_definitions import OutsideOfContextError
def throw_error_outside_context(func):
@functools.wraps(func)
def wrapper_decorator(self, *args, **kwargs):
if not self.within_context:
raise OutsideOfContextError(
"""Error: Tried to access client Dataset outside of context
manager. This might lead to data leaks and bad use of
memory. Please wrap the usage of ClientDataset.dataset_x
inside a "with statement". """)
else:
value = func(self, *args, **kwargs)
return value
return wrapper_decorator
class ClientDataset(ABC):
def __init__(self,
client_identifier: str,
client_dataset_loader: ClientDatasetLoader,
client_dataset_processor: ClientDatasetProcessor,
):
self.client_identifier = client_identifier
self.client_dataset_loader = client_dataset_loader
self.client_dataset_processor = client_dataset_processor
self._train_data = None
self._test_data = None
self._validation_data = None
self.within_context = False
def process_x(self, raw_x_batch):
"""Pre-processes each batch of features
before being fed to the model."""
return self.client_dataset_processor.process_x(raw_x_batch)
def process_y(self, raw_y_batch):
"""Pre-processes each batch of labels before being fed to the model."""
return self.client_dataset_processor.process_y(raw_y_batch)
def _lazy_initialise_data(self, data, dataset_component: DatasetComponents):
if data is None:
data = self.client_dataset_loader.load_dataset(self.client_identifier,
dataset_component)
return self.process_x(data["x"]), self.process_y(data["y"])
else:
return data
@property
@throw_error_outside_context
def training_data(self):
"""Returns the Training Data as pair of arrays containing the samples x,
and classification y"""
self._train_data = self._lazy_initialise_data(self._train_data,
DatasetComponents.TRAIN)
return self._train_data
@property
@throw_error_outside_context
def training_data_x(self):
"""Returns the Training Data as an array of samples"""
self._train_data = self._lazy_initialise_data(self._train_data,
DatasetComponents.TRAIN)
return self._train_data[0]
@property
@throw_error_outside_context
def training_data_y(self):
"""Returns the Classifications for the Training Data as array"""
self._train_data = self._lazy_initialise_data(self._train_data,
DatasetComponents.TRAIN)
return self._train_data[1]
@property
@throw_error_outside_context
def test_data(self):
"""Returns the Training Data as pair of arrays containing the samples x,
and classification y"""
self._test_data = self._lazy_initialise_data(self._test_data,
DatasetComponents.TEST)
return self._test_data
@property
@throw_error_outside_context
def test_data_x(self):
"""Returns the Test Data as an array of samples"""
self._test_data = self._lazy_initialise_data(self._test_data,
DatasetComponents.TEST)
return self._test_data[0]
@property
@throw_error_outside_context
def test_data_y(self):
"""Returns the Classifications for the Test Data as array"""
self._test_data = self._lazy_initialise_data(self._test_data,
DatasetComponents.TEST)
return self._test_data[1]
@property
@throw_error_outside_context
def validation_data(self):
"""Returns the Validation Data as pair of arrays containing the
samples x,
and classification y"""
self._validation_data = self._lazy_initialise_data(
self._validation_data, DatasetComponents.VALIDATION)
return self._validation_data
@property
@throw_error_outside_context
def validation_data_x(self):
"""Returns the Validation Data as an array of samples"""
self._validation_data = self._lazy_initialise_data(
self._validation_data, DatasetComponents.VALIDATION)
return self._validation_data[0]
@property
@throw_error_outside_context
def validation_data_y(self):
"""Returns the Classifications for the Validation Data as array"""
self._validation_data = self._lazy_initialise_data(
self._validation_data, DatasetComponents.VALIDATION)
return self._validation_data[1]
def __enter__(self):
self.within_context = True
def __exit__(self, exc_type, exc_value, exc_traceback):
self.within_context = False
self._train_data = None
self._test_data = None
self._validation_data = None
gc.collect()
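
# Editor's note: a minimal usage sketch (hypothetical loader/processor instances, not part of
# this module). The context manager is mandatory: reading the data properties outside a
# 'with' block raises OutsideOfContextError.
#   dataset = ClientDataset("client_0", my_loader, my_processor)
#   with dataset:
#       x_train, y_train = dataset.training_data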
| 2.359375 | 2 |
src/rmt/kinematics.py | mfrigerio17/robot-model-tools | 2 | 6235 | import logging
import numpy
import kgprim.motions as motions
import kgprim.ct.frommotions as frommotions
import kgprim.ct.repr.mxrepr as mxrepr
import motiondsl.motiondsl as motdsl
logger = logging.getLogger(__name__)
class RobotKinematics:
'''The composition of the constant poses and the joint poses of a robot.
This class is a simple aggregation of the geometry model and the joint-poses
    model. By merging the two, this class has access to the full robot
kinematics.
Thanks to gr.motions.ConnectedFramesInspector, an arbitrary relative pose
between two frames on the robot can be obtained.
'''
def __init__(self, geometry, jointPoses):
self.robotGeometry = geometry
self.jointPoses = jointPoses
self.baseFrame = geometry.framesModel.linkFrames[ geometry.connectivityModel.base ]
allPoses = geometry.posesModel.mergeModel( jointPoses.jointPosesModel )
self.framesConnectivity = motions.ConnectedFramesInspector(allPoses)
def base_H_ee(kinematics, framename):
if framename not in kinematics.robotGeometry.framesModel.framesByName:
logger.error("Could not find frame '{0}' in model '{1}'".format(framename, kinematics.robotGeometry.robotName))
return None
ee = kinematics.robotGeometry.framesModel.framesByName[ framename ]
if not kinematics.framesConnectivity.hasRelativePose(ee, kinematics.baseFrame):
logger.error("Frame '{0}' and the base frame do not seem to be connected".format(framename))
return None
poseSpec = kinematics.framesConnectivity.getPoseSpec(ee, kinematics.baseFrame)
cotr = frommotions.toCoordinateTransform(poseSpec)
H = mxrepr.hCoordinatesSymbolic(cotr)
q = numpy.zeros( len(H.variables) )
H = H.setVariablesValue( valueslist=q )
return H
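
# Editor's note: hypothetical call (frame name assumed to exist in the loaded model); it
# returns the 4x4 homogeneous transform of the frame relative to the base, evaluated at q = 0.
#   H = base_H_ee(robot_kinematics, "wrist_frame")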
def serializeToMotionDSLModel(robotKinematics, ostream):
header ='''
Model {modelname}
Convention = currentFrame
'''.format(modelname=robotKinematics.robotGeometry.robotName)
ostream.write(header)
for jp in robotKinematics.jointPoses.poseSpecByJoint.values():
text = motdsl.poseSpecToMotionDSLSnippet( jp )
ostream.write(text)
ostream.write('\n')
for cp in robotKinematics.robotGeometry.byPose.values() :
text = motdsl.poseSpecToMotionDSLSnippet( cp )
ostream.write(text)
ostream.write('\n')
| 2.171875 | 2 |
awx/main/management/commands/run_dispatcher.py | atr0s/awx | 0 | 6236 | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
import os
import logging
from multiprocessing import Process
from django.conf import settings
from django.core.cache import cache as django_cache
from django.core.management.base import BaseCommand
from django.db import connection as django_connection
from kombu import Connection, Exchange, Queue
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.dispatch.control import Control
from awx.main.dispatch.pool import AutoscalePool
from awx.main.dispatch.worker import AWXConsumer, TaskWorker
logger = logging.getLogger('awx.main.dispatch')
def construct_bcast_queue_name(common_name):
return common_name.encode('utf8') + '_' + settings.CLUSTER_HOST_ID
class Command(BaseCommand):
help = 'Launch the task dispatcher'
def add_arguments(self, parser):
parser.add_argument('--status', dest='status', action='store_true',
help='print the internal state of any running dispatchers')
parser.add_argument('--running', dest='running', action='store_true',
help='print the UUIDs of any tasked managed by this dispatcher')
parser.add_argument('--reload', dest='reload', action='store_true',
help=('cause the dispatcher to recycle all of its worker processes;'
'running jobs will run to completion first'))
def beat(self):
from celery import Celery
from celery.beat import PersistentScheduler
from celery.apps import beat
class AWXScheduler(PersistentScheduler):
def __init__(self, *args, **kwargs):
self.ppid = os.getppid()
super(AWXScheduler, self).__init__(*args, **kwargs)
def setup_schedule(self):
super(AWXScheduler, self).setup_schedule()
self.update_from_dict(settings.CELERYBEAT_SCHEDULE)
def tick(self, *args, **kwargs):
if os.getppid() != self.ppid:
# if the parent PID changes, this process has been orphaned
# via e.g., segfault or sigkill, we should exit too
raise SystemExit()
return super(AWXScheduler, self).tick(*args, **kwargs)
def apply_async(self, entry, producer=None, advance=True, **kwargs):
task = TaskWorker.resolve_callable(entry.task)
result, queue = task.apply_async()
class TaskResult(object):
id = result['uuid']
return TaskResult()
app = Celery()
app.conf.BROKER_URL = settings.BROKER_URL
app.conf.CELERY_TASK_RESULT_EXPIRES = False
beat.Beat(
30,
app,
schedule='/var/lib/awx/beat.db', scheduler_cls=AWXScheduler
).run()
def handle(self, *arg, **options):
if options.get('status'):
print Control('dispatcher').status()
return
if options.get('running'):
print Control('dispatcher').running()
return
if options.get('reload'):
return Control('dispatcher').control({'control': 'reload'})
# It's important to close these because we're _about_ to fork, and we
# don't want the forked processes to inherit the open sockets
# for the DB and memcached connections (that way lies race conditions)
django_connection.close()
django_cache.close()
beat = Process(target=self.beat)
beat.daemon = True
beat.start()
reaper.reap()
consumer = None
with Connection(settings.BROKER_URL) as conn:
try:
bcast = 'tower_broadcast_all'
queues = [
Queue(q, Exchange(q), routing_key=q)
for q in (settings.AWX_CELERY_QUEUES_STATIC + [get_local_queuename()])
]
queues.append(
Queue(
construct_bcast_queue_name(bcast),
exchange=Exchange(bcast, type='fanout'),
routing_key=bcast,
reply=True
)
)
consumer = AWXConsumer(
'dispatcher',
conn,
TaskWorker(),
queues,
AutoscalePool(min_workers=4)
)
consumer.run()
except KeyboardInterrupt:
logger.debug('Terminating Task Dispatcher')
if consumer:
consumer.stop()
| 2.0625 | 2 |
pcdet/utils/box_coder_utils.py | Nuri-benbarka/PCDet | 7 | 6237 | import numpy as np
import torch
from . import common_utils
class ResidualCoder(object):
def __init__(self, code_size=7):
super().__init__()
self.code_size = code_size
@staticmethod
def encode_np(boxes, anchors):
"""
:param boxes: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param anchors: (N, 7 + ?)
:return:
"""
box_ndim = anchors.shape[-1]
xa, ya, za, wa, la, ha, ra, *cas = np.split(anchors, box_ndim, axis=-1)
xg, yg, zg, wg, lg, hg, rg, *cgs = np.split(boxes, box_ndim, axis=-1)
# need to convert boxes to z-center format
zg = zg + hg / 2
za = za + ha / 2
diagonal = np.sqrt(la ** 2 + wa ** 2) # 4.3
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / ha # 1.6
lt = np.log(lg / la)
wt = np.log(wg / wa)
ht = np.log(hg / ha)
rt = rg - ra
cts = [g - a for g, a in zip(cgs, cas)]
return np.concatenate([xt, yt, zt, wt, lt, ht, rt, *cts], axis=-1)
@staticmethod
def decode_np(box_encodings, anchors):
"""
:param box_encodings: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param anchors: (N, 7 + ?)
:return:
"""
box_ndim = anchors.shape[-1]
xa, ya, za, wa, la, ha, ra, *cas = np.split(anchors, box_ndim, axis=-1)
xt, yt, zt, wt, lt, ht, rt, *cts = np.split(box_encodings, box_ndim, axis=-1)
# need to convert box_encodings to z-bottom format
za = za + ha / 2
diagonal = np.sqrt(la ** 2 + wa ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * ha + za
lg = np.exp(lt) * la
wg = np.exp(wt) * wa
hg = np.exp(ht) * ha
rg = rt + ra
zg = zg - hg / 2
cgs = [t + a for t, a in zip(cts, cas)]
return np.concatenate([xg, yg, zg, wg, lg, hg, rg, *cgs], axis=-1)
@staticmethod
def encode_torch(boxes, anchors):
"""
:param boxes: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param anchors: (N, 7 + ?)
:return:
"""
xa, ya, za, wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1)
xg, yg, zg, wg, lg, hg, rg, *cgs = torch.split(boxes, 1, dim=-1)
za = za + ha / 2
zg = zg + hg / 2
diagonal = torch.sqrt(la ** 2 + wa ** 2)
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / ha
lt = torch.log(lg / la)
wt = torch.log(wg / wa)
ht = torch.log(hg / ha)
rt = rg - ra
cts = [g - a for g, a in zip(cgs, cas)]
return torch.cat([xt, yt, zt, wt, lt, ht, rt, *cts], dim=-1)
@staticmethod
def decode_torch(box_encodings, anchors):
"""
:param box_encodings: (N, 7 + ?) x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param anchors: (N, 7 + ?)
:return:
"""
xa, ya, za, wa, la, ha, ra, *cas = torch.split(anchors, 1, dim=-1)
xt, yt, zt, wt, lt, ht, rt, *cts = torch.split(box_encodings, 1, dim=-1)
za = za + ha / 2
diagonal = torch.sqrt(la ** 2 + wa ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * ha + za
lg = torch.exp(lt) * la
wg = torch.exp(wt) * wa
hg = torch.exp(ht) * ha
rg = rt + ra
zg = zg - hg / 2
cgs = [t + a for t, a in zip(cts, cas)]
return torch.cat([xg, yg, zg, wg, lg, hg, rg, *cgs], dim=-1)
def decode_with_head_direction_torch(self, box_preds, anchors, dir_cls_preds,
num_dir_bins, dir_offset, dir_limit_offset, use_binary_dir_classifier=False):
"""
:param box_preds: (batch_size, N, 7 + ?), x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param anchors: (batch_size, N, 7 + ?), x, y, z, w, l, h, r, custom values, z is the box center in z-axis
:param dir_cls_preds: (batch_size, H, W, num_anchors_per_locations*2)
:return:
"""
batch_box_preds = self.decode_torch(box_preds, anchors)
if dir_cls_preds is not None:
dir_cls_preds = dir_cls_preds.view(box_preds.shape[0], box_preds.shape[1], -1)
if use_binary_dir_classifier:
dir_labels = torch.max(dir_cls_preds, dim=-1)[1]
opp_labels = (batch_box_preds[..., -1] > 0) ^ dir_labels.byte()
batch_box_preds[..., -1] += torch.where(
opp_labels,
torch.tensor(np.pi).type_as(batch_box_preds),
torch.tensor(0.0).type_as(batch_box_preds)
)
else:
dir_labels = torch.max(dir_cls_preds, dim=-1)[1]
period = (2 * np.pi / num_dir_bins)
dir_rot = common_utils.limit_period_torch(
batch_box_preds[..., 6] - dir_offset, dir_limit_offset, period
)
batch_box_preds[..., 6] = dir_rot + dir_offset + period * dir_labels.to(batch_box_preds.dtype)
return batch_box_preds
if __name__ == '__main__':
pass
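    # Editor's sketch (not in the original file): round-trip sanity check of the residual
    # box coder with synthetic anchor/box values in (x, y, z, w, l, h, r) order.
    coder = ResidualCoder()
    anchors = np.array([[0., 0., -1., 1.6, 3.9, 1.56, 0.]])
    boxes = np.array([[0.5, 0.2, -0.9, 1.7, 4.0, 1.5, 0.1]])
    encoded = coder.encode_np(boxes, anchors)
    print(np.allclose(coder.decode_np(encoded, anchors), boxes))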
| 2.234375 | 2 |
utils/utils.py | jainajinkya/deep_bingham | 0 | 6238 | """ Utilities for learning pipeline."""
from __future__ import print_function
import copy
import dill
import hashlib
import itertools
import third_party.deep_bingham.bingham_distribution as ms
import math
import numpy as np
import os
import scipy
import scipy.integrate as integrate
import scipy.special
import sys
import torch
from pathos.multiprocessing import ProcessingPool as Pool
from pathos.multiprocessing import cpu_count
def convert_euler_to_quaternion(roll, yaw, pitch):
"""Converts roll, yaw, pitch to a quaternion.
"""
# roll (z), yaw (y), pitch (x)
cy = math.cos(math.radians(roll) * 0.5)
sy = math.sin(math.radians(roll) * 0.5)
cp = math.cos(math.radians(yaw) * 0.5)
sp = math.sin(math.radians(yaw) * 0.5)
cr = math.cos(math.radians(pitch) * 0.5)
sr = math.sin(math.radians(pitch) * 0.5)
w = cy * cp * cr + sy * sp * sr
x = cy * cp * sr - sy * sp * cr
y = sy * cp * sr + cy * sp * cr
z = sy * cp * cr - cy * sp * sr
quat = np.array([w, x, y, z])
quat = quat / np.linalg.norm(quat)
return quat
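
# Editor's note (not in the original): zero angles give the identity quaternion, e.g.
#   convert_euler_to_quaternion(0., 0., 0.)   # -> array([1., 0., 0., 0.])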
def radians(degree_tensor):
"""
Method to convert a torch tensor of angles in degree format to radians.
Arguments:
degree_tensor (torch.Tensor): Tensor consisting of angles in degree format.
Returns:
radian_tensor (torch.Tensor): Tensor consisting of angles in radian format.
"""
radian_tensor = degree_tensor/180 * math.pi
return radian_tensor
def generate_coordinates(coords):
"""
A function that returns all possible triples of coords
Parameters:
coords: a numpy array of coordinates
Returns:
x: the first coordinate of possible triples
y: the second coordinate of possible triples
z the third coordinate of possible triples
"""
x = coords.reshape(-1, 1).repeat(1, len(coords) * len(coords)).flatten()
y = coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords))
z = coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords))
return x, y, z
def ensure_dir_exists(path):
""" Checks if a directory exists and creates it otherwise. """
if not os.path.exists(path):
os.makedirs(path)
def load_lookup_table(path):
"""
Loads lookup table from dill serialized file.
    Returns a table-specific tuple. For the Bingham case, the tuple contains:
table_type (str):
options (dict): The options used to generate the lookup table.
res_tensor (numpy.ndarray): The actual lookup table data.
coords (numpy.ndarray): Coordinates at which lookup table was evaluated.
For the von Mises case, it contains:
options (dict): The options used to generate the lookup table.
res_tensor (numpy.ndarray): The actual lookup table data.
"""
assert os.path.exists(path), "Lookup table file not found."
with open(path, "rb") as dillfile:
return dill.load(dillfile)
def eaad_von_mises(kappas, integral_options=None):
""" Expected Absolute Angular Deviation of Bingham Random Vector
Arguments:
kappas: Von Mises kappa parameters for roll, pitch, yaw.
integral_options: Options to pass on to the scipy integrator for
computing the eaad and the bingham normalization constant.
"""
def aad(quat_a, quat_b):
acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b)))
diff_ang = 2.0 * acos_val
return diff_ang
if integral_options is None:
integral_options = {"epsrel": 1e-2, "epsabs": 1e-2}
param_mu = np.array([0., 0., 0.]) # radians
quat_mu = convert_euler_to_quaternion(
math.degrees(param_mu[0]), math.degrees(param_mu[1]),
math.degrees(param_mu[2])
)
param_kappa = kappas
direct_norm_const = 8.0 * (np.pi ** 3) \
* scipy.special.iv(0, param_kappa[0]) \
* scipy.special.iv(0, param_kappa[1]) \
* scipy.special.iv(0, param_kappa[2])
def integrand_aad(phi1, phi2, phi3):
return np.exp(param_kappa[0] * np.cos(phi1)) \
* np.exp(param_kappa[1] * np.cos(phi2)) \
* np.exp(param_kappa[2] * np.cos(phi3)) \
* aad(quat_mu,
convert_euler_to_quaternion(
math.degrees(phi1), math.degrees(phi2),
math.degrees(phi3)
))
eaad_int = integrate.tplquad(
integrand_aad,
0.0, 2.0 * np.pi, # phi3
lambda x: 0.0, lambda x: 2. * np.pi, # phi2
lambda x, y: 0.0, lambda x, y: 2. * np.pi, # phi1
**integral_options
)
return eaad_int[0]/direct_norm_const
def eaad_bingham(bingham_z, integral_options=None):
""" Expected Absolute Angular Deviation of Bingham Random Vector
Arguments:
bingham_z: Bingham dispersion parameter in the format expected by the
manstats BinghamDistribution class.
integral_options: Options to pass on to the scipy integrator for
computing the eaad and the bingham normalization constant.
"""
def aad(quat_a, quat_b):
# acos_val = np.arccos(np.dot(quat_a, quat_b))
# diff_ang = 2 * np.min([acos_val, np.pi - acos_val])
acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b)))
diff_ang = 2 * acos_val
return diff_ang
if integral_options is None:
integral_options = {"epsrel": 1e-4, "epsabs": 1e-4}
bd = ms.BinghamDistribution(
np.eye(4), bingham_z,
{"norm_const_mode": "numerical",
"norm_const_options": integral_options}
)
def integrand_transformed(x):
# To avoid unnecessary divisions, this term does not contain the
# normalization constant. At the end, the result of the integration is
# divided by it.
return aad(x, bd.mode) \
* np.exp(np.dot(x, np.dot(np.diag(bingham_z), x)))
def integrand(phi1, phi2, phi3):
sp1 = np.sin(phi1)
sp2 = np.sin(phi2)
return integrand_transformed(np.array([
sp1 * sp2 * np.sin(phi3),
sp1 * sp2 * np.cos(phi3),
sp1 * np.cos(phi2),
np.cos(phi1)
])) * (sp1 ** 2.) * sp2
eaad_int = integrate.tplquad(
integrand,
0.0, 2.0 * np.pi, # phi3
lambda x: 0.0, lambda x: np.pi, # phi2
lambda x, y: 0.0, lambda x, y: np.pi, # phi1
**integral_options
)
return eaad_int[0] / bd.norm_const
def build_bd_lookup_table(table_type, options, path=None):
"""
Builds a lookup table for interpolating the bingham normalization
constant. If a lookup table with the given options already exists, it is
loaded and returned instead of building a new one.
Arguments:
table_type: Type of lookup table used. May be 'uniform' or 'nonuniform'
        options: Dict containing type-specific options.
If type is "uniform" this dict must contain:
"bounds" = Tuple (lower_bound, upper_bound) representing bounds.
"num_points" = Number of points per dimension.
If type is "nonuniform" this dict must contain a key "coords" which
            is a numpy array representing the coordinates at which the
interpolation is evaluated.
path: absolute path for the lookup table (optional). The default is to
create a hash based on the options and to use this for constructing
a file name and placing the file in the precomputed folder.
"""
hash_obj = hashlib.sha256()
hash_obj.update(table_type.encode('utf-8'))
hash_obj.update(dill.dumps(options))
config_hash = hash_obj.hexdigest()
if not path:
path = os.path.dirname(__file__) \
+ "/../precomputed/lookup_{}.dill".format(config_hash)
# Load existing table or create new one.
if os.path.exists(path):
with open(path, "rb") as dillfile:
(serialized_type, serialized_options, res_table, coords) \
= dill.load(dillfile)
hash_obj = hashlib.sha256()
hash_obj.update(serialized_type)
hash_obj.update(dill.dumps(serialized_options))
file_config_hash = hash_obj.hexdigest()
assert file_config_hash == config_hash, \
"Serialized lookup table does not match given type & options."
elif table_type == "uniform":
# Number of points per axis.
(lbound, rbound) = options["bounds"]
num_points = options["num_points"]
assert num_points > 1, \
"Grid must have more than one point per dimension."
nc_options = {"epsrel": 1e-3, "epsabs": 1e-7}
coords = np.linspace(lbound, rbound, num_points)
res_table = _compute_bd_lookup_table(coords, nc_options)
with open(path, "wb") as dillfile:
dill.dump((table_type, options, res_table, coords), dillfile)
elif table_type == "nonuniform":
nc_options = {"epsrel": 1e-3, "epsabs": 1e-7}
coords = options["coords"]
res_table = _compute_bd_lookup_table(coords, nc_options)
with open(path, "wb") as dillfile:
dill.dump((table_type, options, res_table, coords), dillfile)
else:
sys.exit("Unknown lookup table type")
return res_table
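
# Editor's note: a hypothetical call for a small uniform grid (values illustrative only);
# building the table is expensive because every grid point needs a numerical integration.
#   table = build_bd_lookup_table("uniform", {"bounds": (-100., 0.), "num_points": 5})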
def build_vm_lookup_table(options, path=None):
"""
Builds a lookup table for interpolating the bingham normalization
constant. If a lookup table with the given options already exists, it is
loaded and returned instead of building a new one.
Arguments:
        options: Dict containing table options. It must contain a key "coords"
            which is a numpy array representing the coordinates at which the
interpolation is evaluated.
path: absolute path for the lookup table (optional). The default is to
create a hash based on the options and to use this for constructing
a file name and placing the file in the precomputed folder.
"""
hash_obj = hashlib.sha256()
hash_obj.update(dill.dumps(options))
config_hash = hash_obj.hexdigest()
if not path:
path = os.path.dirname(__file__) \
+ "/../precomputed/lookup_{}.dill".format(config_hash)
# Load existing table or create new one.
if os.path.exists(path):
with open(path, "rb") as dillfile:
(serialized_options, res_table) \
= dill.load(dillfile)
hash_obj = hashlib.sha256()
hash_obj.update(dill.dumps(serialized_options))
file_config_hash = hash_obj.hexdigest()
assert file_config_hash == config_hash, \
"Serialized lookup table does not match given type & options."
else:
coords = options["coords"]
res_table = _compute_vm_lookup_table(coords)
with open(path, "wb") as dillfile:
dill.dump((options, res_table), dillfile)
return res_table
def _compute_bd_lookup_table(coords, nc_options):
num_points = len(coords)
pool = Pool(max(cpu_count()//2, 1))
def nc_wrapper(idx):
pt_idx = point_indices[idx]
# Indexing pt_idx in the order 2,1,0 vs. 0,1,2 has no impact
# on the result as the Bingham normalization constant is agnostic to it.
# However, the numpy integration that is used to compute it, combines
# numerical 2d and 1d integration which is why the order matters for the
# actual computation time.
#
# TODO: Make pymanstats choose best order automatically.
norm_const = ms.BinghamDistribution.normalization_constant(
np.array(
[coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], 0.]),
"numerical", nc_options)
print("Computing NC for Z=[{}, {}, {}, 0.0]: {}".format(
coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]],
norm_const))
return norm_const
point_indices = list(itertools.combinations_with_replacement(
range(0, num_points), 3))
results = pool.map(nc_wrapper, range(len(point_indices)))
res_tensor = -np.ones((num_points, num_points, num_points))
for idx_pos, pt_idx in enumerate(point_indices):
res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos]
return res_tensor
class AverageMeter(object):
"""Computes and stores the averages over a numbers or dicts of numbers.
For the dict, this class assumes that no new keys are added during
the computation.
"""
def __init__(self):
self.last_val = 0
self.avg = 0
self.count = 0
def update(self, val, n=1):
self.last_val = val
n = float(n)
if type(val) == dict:
if self.count == 0:
self.avg = copy.deepcopy(val)
else:
for key in val:
self.avg[key] *= self.count / (self.count + n)
self.avg[key] += val[key] * n / (self.count + n)
else:
self.avg *= self.count / (self.count + n)
self.avg += val * n / (self.count + n)
self.count += n
self.last_val = val
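
# Editor's note: small usage sketch (not part of the original module).
#   meter = AverageMeter()
#   meter.update({"loss": 1.0}, n=2)
#   meter.update({"loss": 0.4}, n=2)
#   meter.avg["loss"]   # -> 0.7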
def _compute_vm_lookup_table(coords):
num_points = len(coords)
pool = Pool()
def nc_wrapper(idx):
cur_pt_idx = point_indices[idx]
log_norm_const = np.log(8.0) + (3. * np.log(np.pi)) \
+ np.log(scipy.special.iv(0, coords[cur_pt_idx[0]])) \
+ np.log(scipy.special.iv(0, coords[cur_pt_idx[1]])) \
+ np.log(scipy.special.iv(0, coords[cur_pt_idx[2]]))
print("Computing NC for kappas=[{}, {}, {}]: {}".format(
coords[cur_pt_idx[2]], coords[cur_pt_idx[1]], coords[cur_pt_idx[0]],
log_norm_const))
return log_norm_const
point_indices = list(itertools.combinations_with_replacement(
range(0, num_points), 3))
results = pool.map(nc_wrapper, range(len(point_indices)))
res_tensor = -np.ones((num_points, num_points, num_points))
for idx_pos, pt_idx in enumerate(point_indices):
res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos]
return res_tensor
def vec_to_bingham_z_many(y):
z = -torch.exp(y).cumsum(1)[:, [2, 1, 0]].unsqueeze(0)
return z
def vec_to_bingham_z(y):
z = -torch.exp(y).cumsum(0)[[2, 1, 0]].unsqueeze(0)
if not all(z[0][:-1] <= z[0][1:]):
print(z)
return z
| 2.3125 | 2 |
cli_ui.py | obatsis/Distributed-NTUA | 0 | 6239 | import requests
import os
from PyInquirer import style_from_dict, Token, prompt
import sys
import utils.config as config
import utils.ends as ends
from utils.colorfy import *
from auto.testing import test_trans
import time
import json
style = style_from_dict({
Token.QuestionMark: '#E91E63 bold',
Token.Selected: '#673AB7 bold',
Token.Instruction: '#<PASSWORD>',
Token.Answer: '#<PASSWORD> bold',
Token.Question: '#<PASSWORD>16 bold',
})
def client(ip, port):
os.system('clear')
cyan('What a beautiful day to enter the cult...')
baseURL = 'http://' + ip + ':' + port
while True:
print('----------------------------------------------------------------------')
method_q = {
'type': 'list',
'name': 'method',
'message': 'Select action:',
'choices': ['Network Overlay', \
'Insert a Song', \
'Search for a Song', \
'Delete a Song', \
'Depart from Chord', \
'Run automated test', \
'Help', \
'Exit']
}
method_a = prompt(method_q, style=style)['method']
os.system('clear')
if method_a == 'Depart from Chord':
print(cyan("Preparing Node to depart from Chord..."))
try:
response = requests.get(baseURL + ends.c_depart)
if response.status_code == 200:
if response.text == "Left the Chord":
print(response.text)
print(green("Node is out of Toychord network"))
else:
print(red(response.text))
else :
print(red("Got a bad response status code " + response.status_code))
except:
print(red("Could not establish connection with Node. Node didnt depart..."))
print(red("Unfortunately exiting..."))
break
elif method_a == 'Insert a Song':
print('Insert a Title-Value pair for the song you wish to insert')
fetch_q = [
{
'type': 'input',
'name': 'key',
'message': 'Song Title:',
'filter': lambda val: str(val)
},
{
'type': 'input',
'name': 'value',
'message': 'Value:',
'filter': lambda val: str(val)
}
]
fetch_a = prompt(fetch_q, style=style)
print(cyan("Inserting Song: ") + fetch_a['key'] + cyan("..."))
try:
response = requests.post(baseURL + ends.c_insert ,data={'key':fetch_a['key'],'value':fetch_a['value']})
if response.status_code == 200:
print(cyan("Inserted by node with id: ") + green(response.text.split(" ")[0]))
else :
print(red("Got a bad response status code " + response.status_code))
except:
print(red("Could not establish connection with Node. Song wasnt inserted..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Delete a Song':
print('Insert the Song Title you wish to delete')
fetch_q = [
{
'type': 'input',
'name': 'key',
'message': 'Song Title:',
'filter': lambda val: str(val)
}]
fetch_a = prompt(fetch_q, style=style)
print(cyan("Deleting Song: ") + fetch_a['key'] + cyan("..."))
try:
response = requests.post(baseURL + ends.c_delete ,data={'key':fetch_a['key']})
if response.status_code == 200 and response.text.split(" ")[1] != "@!@":
# print(cyan("Deleting Song: ") + green(response.text.split(" ")[1]) + )
print(cyan("Deleted by node with id: ") + green(response.text.split(" ")[0]))
else :
print(yellow("Song doesnt exist in the Chord"))
print(yellow("Couldnt delete it"))
except:
print(red("Could not establish connection with Node. Song wasnt deleted..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Search for a Song':
print('Insert the Song Title you wish to Search or * to get all songs of the Chord')
fetch_q = [
{
'type': 'input',
'name': 'key',
'message': 'Song Title:',
'filter': lambda val: str(val)
}]
fetch_a = prompt(fetch_q, style=style)
if fetch_a['key'] == "*":
print(cyan("Fetching all the songs of the Chord..."))
try:
response = requests.get(baseURL + ends.c_query_star)
if response.status_code == 200:
nodes_list = json.loads(response.text)
# print(green(response.text))
# print(cyan()))
for node in nodes_list["res"]:
print(header("\n" + node["uid"]) + " " + underline(node["ip"] + ":" + node["port"]))
for song in node["song"]:
print(" -" + green(song["key"]) + " " + song["value"])
else:
print(yellow("Something went Wrong...") + response.status_code)
except:
print(red("Could not establish connection with Node. Couldnt search for song..."))
print(red("Unfortunately exiting..."))
exit(0)
else:
print(cyan("Searching Song: ") + fetch_a['key'] + cyan("..."))
try:
response = requests.post(baseURL + ends.c_query ,data={'key':fetch_a['key']})
if response.status_code == 200 and response.text.split(" ")[1] != "@!@":
print("Song found in node with id: ",green(response.text.split(" ")[0]))
print("Song value: " + green(response.text.split(" ")[1]))
else:
print(yellow("Song doesnt exist in the Chord"))
except:
print(red("Could not establish connection with Node. Couldnt search for song..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Network Overlay':
print(cyan("Initiating Network Overlay..."))
try:
response = requests.get(baseURL + ends.c_overlay)
if response.status_code == 200:
nodes_list = json.loads(response.text)
print('\n')
for node in nodes_list["res"]:
print(green(node["ip"] + ":" + node["port"]), end = '')
if node != nodes_list["res"][-1]:
print(" -> ", end = '')
print('\n')
else :
print(red("Got a bad response status code " + response.status_code))
except:
print(red("Could not establish connection with Node..."))
print(red("Unfortunately exiting..."))
exit(0)
continue
elif method_a == 'Help':
print('-------------------------------- Help --------------------------------\n')
overlayHelp=header("Overlay: ") + cyan("This functions recreates and prints the current Network Topology(eg. Node1 -> Node2 -> ...)\n")
insertHelp=header("Insert Song: ") + cyan("This functions expects a Song Title and a Song Value and inserts them in the Chord\n")
queryHelp=header("Search Song: ") + cyan("This function expects a Song Title and returns the Node in whitch the song is stored and the value of the song\n")
deleteHelp=header("Delete Song: ") + cyan("This function expects a Song Title and returns the Node who deleted the song\n")
departHelp=header("Depart: ") + cyan("This function makes the node connected to this cli leave the Chord\n")
autoTests=header("Run automated tests: ") + cyan("This function expects a test number (1=insert, 2=query, 3=requests), runs the test and returns the chord throughput")
print( " -",overlayHelp,"\n"
" -",insertHelp,"\n",
"-",queryHelp,"\n",
"-",deleteHelp,"\n",
"-",departHelp,"\n",
"-",autoTests,"\n",
)
continue
elif method_a == 'Run automated test':
print('Select which test you wish to run (1 = insert, 2 = query, 3 = requests)')
fetch_q = [
{
'type': 'input',
'name': 'test_n',
'message': 'Test:',
'filter': lambda val: str(val)
}
]
fetch_a = prompt(fetch_q, style=style)
test_number = fetch_a['test_n'] if fetch_a['test_n'] else 's'
if test_number not in ('1', '2', '3'):
print(yellow("Wrong test number (give 1, 2 or 3)"))
continue
print(cyan("Running automated test: ") + ("insert" if test_number == '1' else ("query" if test_number == '2' else "requests")) + cyan("..."))
print(blue(test_trans(test_number)))
print(cyan("Done!"))
continue
elif method_a == 'Exit':
os.system('clear')
break
else:
os.system('clear')
continue
if __name__ == '__main__':
if len(sys.argv) < 3:
print("!! you must tell me the port. Ex. -p 5000 !!")
exit(0)
if sys.argv[1] in ("-p", "-P"):
my_port = sys.argv[2]
my_ip = os.popen('ip addr show ' + config.NETIFACE + ' | grep "\<inet\>" | awk \'{ print $2 }\' | awk -F "/" \'{ print $1 }\'').read().strip()
client(my_ip, my_port)
| 2.84375 | 3 |
Contents/scripts/siweighteditor/weight.py | jdrese/SIWeightEditor | 1 | 6240 | <reponame>jdrese/SIWeightEditor<filename>Contents/scripts/siweighteditor/weight.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from maya import mel
from maya import cmds
from . import lang
from . import common
import os
import json
import re
class WeightCopyPaste():
def main(self, skinMeshes, mode='copy', saveName='default', method='index', weightFile='auto',
threshold=0.2, engine='maya', tgt=1, path='default', viewmsg=False):
if viewmsg:
cmds.inViewMessage( amg='<hl>Simple Weight</hl> : '+mode, pos='midCenterTop', fade=True, ta=0.75, a=0.5)
        '''
        Save / load skin weight data.
        mode -> whether to copy or paste: 'copy' or 'paste'
        saveName -> name of the folder the weight data is saved under. Set this when you want to
                    separate the data per tool or per model name.
        method -> how to paste: 'index', 'nearest', 'barycentric' or 'over'.
            The 'index' method maps weights onto the object using vertex indices. It is the most
            convenient method when the destination object and the exported data share the same topology.
            The 'nearest' method searches the imported data for the nearest vertex and uses its weight.
            It is best when mapping a high-resolution mesh onto a low-resolution mesh.
            The 'barycentric' method is supported only for polygon meshes. It searches for the nearest
            triangle on the target geometry and rescales the weights according to the distance between
            the source point and the vertices. It is normally used for a coarse mesh mapped onto a
            high-resolution mesh.
            The 'over' method is similar to 'index', but the target mesh weights are not cleared before
            mapping, so weights on unmatched indices are kept as they are.
            Note: nearest and barycentric are currently unusable because of a bug (the process never
            finishes), as of 2016/11/03.
            -> barycentric and bilinear are available from Maya 2016 Extension 2.
        weightFile -> specify a path here when you want to pick the file manually instead of searching
                      by mesh name; meant to be used together with the nearest / barycentric methods.
                      -> Note that specifying a file name when copying in Maya prevents saving multiple files.
        threshold -> position search range for nearest / barycentric.
        '''
self.skinMeshes = skinMeshes
self.saveName = saveName
self.method = method
self.weightFile = weightFile
self.threshold = threshold
self.engine = engine
self.memShapes = {}
self.target = tgt
self.pasteMode = {'index':1, 'nearest':3}
        # Convert to a list if the input is not already a list
if not isinstance(self.skinMeshes, list):
temp = self.skinMeshes
self.skinMeshes = []
self.skinMeshes.append(temp)
        # Build the file paths up front
if path == 'default':
self.filePath = os.getenv('MAYA_APP_DIR') + '\\Scripting_Files\\weight\\' + self.saveName
elif path == 'project':
self.scene_path = '/'.join(cmds.file(q=True, sceneName=True).split('/')[:-1])
self.protect_path = os.path.join(self.scene_path, 'weight_protector')
try:
if not os.path.exists(self.protect_path):
os.makedirs(self.protect_path)
except Exception as e:
print e.message
return
            self.filePath = self.protect_path + '\\' + self.saveName
self.fileName = os.path.join(self.filePath, self.saveName + '.json')
self.apiName = os.path.join(self.filePath, self.saveName + '.skn')
        # Call either copy or paste
if mode == 'copy':
self.weightCopy()
if mode == 'paste':
self.weightPaste()
def weightPaste(self):
dummy = cmds.spaceLocator()
for skinMesh in self.skinMeshes:
            # Save file name to read from; if 'auto', use the mesh name
if self.weightFile == 'auto':
weightFile = skinMesh
else:
weightFile = self.weightFile
dstSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
            # If there is no skin cluster, bind it using the information gathered beforehand
if not dstSkinCluster:
meshName = str(weightFile).replace('|', '__pipe__')
if os.path.exists(self.fileName):
try:
                        with open(self.fileName, 'r') as f:  # open the file ('r' = read mode, 'w' = write mode)
                            saveData = json.load(f)  # load the saved data
                            # self.visibility = saveData['visibility']  # read from the save data
skinningMethod = saveData[';skinningMethod']
dropoffRate = saveData[';dropoffRate']
maintainMaxInfluences = saveData[';maintainMaxInfluences']
maxInfluences = saveData[';maxInfluences']
bindMethod = saveData[';bindMethod']
normalizeWeights = saveData[';normalizeWeights']
influences = saveData[';influences']
                            # If the child nodes are transforms, parent them to a dummy node to keep them out of the way
common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='cut')
influences = cmds.ls(influences, l=True, tr=True)
                            # Bind
dstSkinCluster = cmds.skinCluster(
skinMesh,
influences,
omi=maintainMaxInfluences,
mi=maxInfluences,
dr=dropoffRate,
sm=skinningMethod,
nw=normalizeWeights,
tsb=True,
)
dstSkinCluster = dstSkinCluster[0]
                            # Restore the original parenting
common.TemporaryReparent().main(skinMesh, dummyParent=dummy, mode='parent')
tempSkinNode = skinMesh#親を取得するためスキンクラスタのあるノードを保存しておく
except Exception as e:
print e.message
print 'Error !! Skin bind failed : ' + skinMesh
continue
else:
dstSkinCluster = dstSkinCluster[0]
                tempSkinNode = skinMesh  # keep the node that has the skin cluster so its parent can be retrieved later
if self.engine == 'maya':
files = os.listdir(self.filePath)
print files
if len(files) == 2:
for file in files:
name, ext = os.path.splitext(file)
if ext == '.xml':
xml_name = file
else:
                # '|' (pipe) is not allowed in file names, so convert it
                meshName = str(weightFile).replace('|', '__pipe__')
                # ':' (colon) is not allowed in file names, so convert it
                meshName = str(meshName).replace(':', '__colon__')
                xml_name = meshName + '.xml'
if os.path.isfile(self.filePath + '\\' + xml_name):
if self.method == 'index' or self.method == 'over':
cmds.deformerWeights(xml_name,
im=True,
method=self.method,
deformer=dstSkinCluster,
path=self.filePath + '\\')
else:
cmds.deformerWeights(xml_name,
im=True,
deformer=dstSkinCluster,
method=self.method,
worldSpace=True,
positionTolerance=self.threshold,
path=self.filePath + '\\')
cmds.skinCluster(dstSkinCluster, e=True, forceNormalizeWeights=True)
print 'Weight paste to : ' + str(skinMesh)
else:
print 'Not exist seved weight XML file : ' + skinMesh
        # Delete the dummy parent
cmds.delete(dummy)
cmds.select(self.skinMeshes, r=True)
    # Save the weight information of the selected meshes
def weightCopy(self):
saveData = {}
        # Create the save directory if it does not exist
        if not os.path.exists(self.filePath):
            os.makedirs(os.path.dirname(self.filePath + '\\'))  # note: the trailing backslash is required
        else:  # if it already exists, delete its contents
files = os.listdir(self.filePath)
if files is not None:
for file in files:
os.remove(self.filePath + '\\' + file)
skinFlag = False
all_influences = []
for skinMesh in self.skinMeshes:
try:
cmds.bakePartialHistory(skinMesh, ppt=True)
except:
pass
            # Get the skin cluster from the node's history (the node directly above inMesh is not necessarily the skinCluster)
            srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
            if not srcSkinCluster:
                continue  # move on to the next mesh if there is no skin cluster
            tempSkinNode = skinMesh  # keep the node that has the skin cluster so its parent can be retrieved later
            # Collect the various skin cluster parameters
srcSkinCluster = srcSkinCluster[0]
skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm')
dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr')
maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi')
maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi')
bindMethod = cmds.getAttr(srcSkinCluster + ' .bm')
normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw')
influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)
saveData[';skinningMethod'] = skinningMethod
saveData[';dropoffRate'] = dropoffRate
saveData[';maintainMaxInfluences'] = maintainMaxInfluences
saveData[';maxInfluences'] = maxInfluences
saveData[';bindMethod'] = bindMethod
saveData[';normalizeWeights'] = normalizeWeights
all_influences += influences
#saveData[';influences'] = influences
skinFlag = True
all_influences = list(set(all_influences))
saveData[';influences'] = all_influences
        # Add any missing influences beforehand so the copy survives changes in the influence count
for skinMesh in self.skinMeshes:
srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
if not srcSkinCluster:
                continue  # move on to the next mesh if there is no skin cluster
srcSkinCluster = srcSkinCluster[0]
influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)
sub_influences = list(set(all_influences) - set(influences))
if sub_influences:
cmds.skinCluster(skinMesh, e=True, ai=sub_influences, lw=True, ug=True, wt=0, ps=0)
if self.engine == 'maya':
                # Save file name to read from; if 'auto', use the mesh name
if self.weightFile == 'auto':
weightFile = skinMesh
else:
weightFile = self.weightFile
                # '|' (pipe) is not allowed in file names, so convert it
                meshName = str(weightFile).replace('|', '__pipe__')
                # ':' (colon) is not allowed in file names, so convert it
                meshName = str(meshName).replace(':', '__colon__')
cmds.deformerWeights(meshName + '.xml', export=True, deformer=srcSkinCluster, path=self.filePath + '\\')
        with open(self.fileName, 'w') as f:  # open the file ('w' = write mode)
json.dump(saveData, f)
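# A minimal usage sketch for WeightCopyPaste (the mesh and folder names below are
# placeholders, not part of the tool):
#
#   WeightCopyPaste().main(['body_mesh'], mode='copy', saveName='my_character')
#   WeightCopyPaste().main(['body_mesh'], mode='paste', saveName='my_character', method='index')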
def transfer_weight(skinMesh, transferedMesh, transferWeight=True, returnInfluences=False, logTransfer=True):
    '''
    Transfer skin weights between meshes.
    If the destination object is not bound yet, it is bound automatically using the
    bind information of the source mesh.
    Arguments:
        skinMesh -> source mesh (one mesh; a list is also accepted, only the first entry is used)
        transferedMesh -> target mesh(es) (list form, multiple allowed; a single object also works)
        transferWeight -> whether to transfer the weights. Optional, default True
        logTransfer -> whether to print log messages
        returnInfluences -> whether to return the bound influence information. Optional, default False
    '''
massege01 = lang.Lang(
en=': It does not perform the transfer of weight because it is not a skin mesh.',
ja=u': スキンメッシュではないのでウェイトの転送を行いません'
).output()
massege02 = lang.Lang(
en='Transfer the weight:',
ja=u'ウェイトを転送:'
).output()
massege03 = lang.Lang(
en='Transfer bind influences:',
ja=u'バインド状態を転送:'
).output()
    if isinstance(skinMesh, list):  # if the source is a list, take only the first mesh
        skinMesh = skinMesh[0]  # safety for when a list is passed in
    # Get the skin cluster from the node's history (the node directly above inMesh is not necessarily the skinCluster)
srcSkinCluster = cmds.ls(cmds.listHistory(skinMesh), type='skinCluster')
# srcSkinCluster = cmds.listConnections(skinMesh+'.inMesh', s=True, d=False)
if not srcSkinCluster:
if logTransfer:
print skinMesh + massege01
        return False  # leave the function if there is no skin cluster
    # Collect the various skin cluster parameters
srcSkinCluster = srcSkinCluster[0]
skinningMethod = cmds.getAttr(srcSkinCluster + ' .skm')
dropoffRate = cmds.getAttr(srcSkinCluster + ' .dr')
maintainMaxInfluences = cmds.getAttr(srcSkinCluster + ' .mmi')
maxInfluences = cmds.getAttr(srcSkinCluster + ' .mi')
bindMethod = cmds.getAttr(srcSkinCluster + ' .bm')
normalizeWeights = cmds.getAttr(srcSkinCluster + ' .nw')
    influences = cmds.skinCluster(srcSkinCluster, q=True, inf=True)  # the q flag is query mode; e would be edit mode
    # Convert to a list if the input is not already a list
if not isinstance(transferedMesh, list):
temp = transferedMesh
transferedMesh = []
transferedMesh.append(temp)
for dst in transferedMesh:
        # Prepare a dummy parent used to park the child nodes
dummy = common.TemporaryReparent().main(mode='create')
common.TemporaryReparent().main(dst,dummyParent=dummy, mode='cut')
shapes = cmds.listRelatives(dst, s=True, pa=True, type='mesh')
        if not shapes:  # if there is no mesh
            continue  # abort and move on to the next object
        # Check whether a skin cluster already exists
dstSkinCluster = cmds.ls(cmds.listHistory(shapes[0]), type='skinCluster')
        # If there is no skin cluster, bind it using the information gathered beforehand
if not dstSkinCluster:
            # Bind
dstSkinCluster = cmds.skinCluster(
dst,
influences,
omi=maintainMaxInfluences,
mi=maxInfluences,
dr=dropoffRate,
sm=skinningMethod,
nw=normalizeWeights,
tsb=True,
)
if logTransfer:
print massege03 + '[' + skinMesh + '] >>> [' + dst + ']'
dstSkinCluster = dstSkinCluster[0]
if transferWeight:
cmds.copySkinWeights(
ss=srcSkinCluster,
ds=dstSkinCluster,
surfaceAssociation='closestPoint',
influenceAssociation=['name', 'closestJoint', 'oneToOne'],
normalize=True,
noMirror=True
)
if logTransfer:
print massege02 + '[' + skinMesh + '] >>> [' + dst + ']'
        # Restore the original parenting
        common.TemporaryReparent().main(dst,dummyParent=dummy, mode='parent')
        # Delete the dummy parent
        common.TemporaryReparent().main(dummyParent=dummy, mode='delete')
if returnInfluences:
return influences
else:
return True
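# A minimal usage sketch (object names are placeholders): copy the bind and the
# weights from a source mesh onto a target mesh.
#
#   transfer_weight('body_hi', ['body_lo'], transferWeight=True, logTransfer=True)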
def symmetry_weight(srcNode=None, dstNode=None, symWeight=True):
    '''
    Mirror skin weights symmetrically.
    srcNode -> source of the mirror
    dstNode -> destination of the mirror
    symWeight -> whether to mirror the weights as well
    '''
    # Get the skin cluster
if srcNode is None:
return
srcShapes = cmds.listRelatives(srcNode, s=True, pa=True, type='mesh')
if srcShapes:
srcSkinCluster = cmds.ls(cmds.listHistory(srcNode), type='skinCluster')
        # If a skin cluster exists, set the joint labels and mirror the weights
if srcSkinCluster:
            # Call the function that transfers the bind state
            skinJointAll = cmds.skinCluster(srcSkinCluster, q=True, inf=True)  # get the joints
for skinJoint in skinJointAll:
                # Call the joint label setup function
joint_label(skinJoint, visibility=False)
if symWeight is False or dstNode is None:
return
transfer_weight(srcNode, dstNode, transferWeight=False, returnInfluences=True)
dstShapes = cmds.listRelatives(dstNode, s=True, pa=True, type='mesh')
dstSkinCluster = cmds.listConnections(dstShapes[0] + '.inMesh', s=True, d=False)
cmds.copySkinWeights(ss=srcSkinCluster[0], ds=dstSkinCluster[0],
mirrorMode='YZ', surfaceAssociation='closestComponent',
influenceAssociation='label', normalize=True)
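# A minimal usage sketch (object names are placeholders): mirror the weights of the
# left mesh onto the right mesh.
#
#   symmetry_weight(srcNode='body_L', dstNode='body_R', symWeight=True)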
def load_joint_label_rules():
    # Default values used when the rule files cannot be loaded
start_l_list = ['L_', 'l_', 'Left_', 'left_']
start_r_list = ['R_', 'r_', 'Right_', 'right_']
mid_l_list = ['_L_', '_l_', '_Left_', '_left_']
mid_r_list = ['_R_', '_r_', '_Right_', '_right_']
end_l_list = ['_L', '_l', '_L.', '_l.', '_L..', '_l..', '_Left', '_left']
end_r_list = ['_R', '_r', '_R.', '_r.', '_R..', '_r..', '_Right', '_right']
def_left_list_list = [start_l_list, mid_l_list, end_l_list]
def_right_list_list = [start_r_list, mid_r_list, end_r_list]
    # Load the rules from the left/right symmetry settings files
dir_path = os.path.join(
        os.getenv('MAYA_APP_DIR'),
'Scripting_Files')
start_file = dir_path+'/joint_rule_start.json'
middle_file = dir_path+'/joint_rule_middle.json'
end_file = dir_path+'/joint_rule_end.json'
save_files = [start_file, middle_file, end_file]
left_list_list = []
right_list_list = []
for i, save_file in enumerate(save_files):
        if os.path.exists(save_file):  # if the save file exists
try:
with open(save_file, 'r') as f:
save_data = json.load(f)
l_list = save_data.keys()
r_list = save_data.values()
left_list_list.append(l_list)
right_list_list.append(r_list)
except Exception as e:
print e.message
left_list_list.append(def_left_list_list[i])
right_list_list.append(def_right_list_list[i])
else:
left_list_list.append(def_left_list_list[i])
right_list_list.append(def_right_list_list[i])
return left_list_list, right_list_list
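# Each rule file is expected to hold a flat JSON object mapping a left-side pattern to
# its right-side counterpart, for example (a sketch, not shipped data):
#
#   joint_rule_start.json -> {"L_": "R_", "l_": "r_", "Left_": "Right_", "left_": "right_"}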
def joint_label(object, visibility=False):
    '''
    Set joint labels.
    object -> object(s); a list is accepted
    visibility -> label visibility, optional. Default is False.
    '''
    # Load the labelling rules
left_list_list, right_list_list = load_joint_label_rules()
    # Convert to a list if the input is not already a list
if not isinstance(object, list):
temp = object
object = []
object.append(temp)
for skinJoint in object:
objTypeName = cmds.objectType(skinJoint)
if objTypeName == 'joint':
split_name = skinJoint.split('|')[-1]
            # Determine whether the skeleton name contains a left/right marker
side = 0
side_name = ''
for i, (l_list, r_list) in enumerate(zip(left_list_list, right_list_list)):
for j, lr_list in enumerate([l_list, r_list]):
for k, lr in enumerate(lr_list):
if i == 0:
if re.match(lr, split_name):
side = j + 1
if i == 1:
if re.search(lr, split_name):
side = j + 1
if i == 2:
if re.match(lr[::-1], split_name[::-1]):
side = j + 1
                        if side:  # if a match was found, break out of all the loops
side_name = lr
break
if side:
break
if side:
break
#print 'joint setting :', split_name, side, side_name
            # Set the left/right label; use centre when the name is neither
            cmds.setAttr(skinJoint + '.side', side)
            # Set the label type to "Other"
            cmds.setAttr(skinJoint + '.type', 18)
            new_joint_name = split_name.replace(side_name.replace('.', ''), '')
            # Set the skeleton name
            cmds.setAttr(skinJoint + '.otherType', new_joint_name, type='string')
            # Set the label visibility
            cmds.setAttr(skinJoint + '.drawLabel', visibility)
else:
print(str(skinJoint) + ' : ' + str(objTypeName) + ' Skip Command')
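# A minimal usage sketch (joint names are placeholders):
#
#   joint_label(['L_arm_jnt', 'R_arm_jnt'], visibility=False)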
# Toggle skinning mute on and off
def toggle_mute_skinning():
msg01 = lang.Lang(
en='No mesh selection.\nWould you like to process all of mesh in this scene?.',
ja=u'選択メッシュがありません。\nシーン内のすべてのメッシュを処理しますか?').output()
msg02 = lang.Lang(en='Yes', ja=u'はい').output()
msg03 = lang.Lang(en='No', ja=u'いいえ').output()
msg04 = lang.Lang(
en='Skinning is disabled',
ja=u'スキニングは無効になりました') .output()
msg05 = lang.Lang(
en='Skinning is enabled',
ja=u'スキニングが有効になりました') .output()
cmds.selectMode(o=True)
objects = cmds.ls(sl=True, l=True)
ad_node = []
for node in objects:
children = cmds.ls(cmds.listRelatives(node, ad=True, f=True), type ='transform')
ad_node += [node]+children
#print len(ad_node)
objects = set(ad_node)
#print len(objects)
if not objects:
all_mesh = cmds.confirmDialog(m=msg01, t='', b= [msg02, msg03], db=msg02, cb=msg03, icn='question',ds=msg03)
if all_mesh == msg02:
objects = cmds.ls(type='transform')
if not objects:
return
mute_flag = 1
skin_list = []
for node in objects:
skin = cmds.ls(cmds.listHistory(node), type='skinCluster')
if not skin:
continue
skin_list.append(skin)
if cmds.getAttr(skin[0]+'.envelope') > 0:
mute_flag = 0
for skin in skin_list:
cmds.setAttr(skin[0]+'.envelope', mute_flag)
if mute_flag == 0:
cmds.confirmDialog(m=msg04)
if mute_flag == 1:
cmds.confirmDialog(m=msg05) | 2.046875 | 2 |
pyConTextNLP/__init__.py | Blulab-Utah/pyConTextPipeline | 1 | 6241 | <filename>pyConTextNLP/__init__.py<gh_stars>1-10
#Copyright 2010 <NAME>
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""This is an alternative implementation of the pyConText package where I make
use of graphs to indicate relationships between targets and modifiers. Nodes of
the graphs are the targets and modifiers identified in the text; edges of the
graphs are relationships between the targets. This provides for much simpler
code than what exists in the other version of pyConText where each object has a
dictionary of __modifies and __modifiedby that must be kept in sync with each
other.
Also it is hoped that the use of a directional graph could ultimately simplify
our itemData structures as we could chain together items"""
import os
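# Read __version__ out of version.py by executing it, so the version string is defined in one place.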
version = {}
with open(os.path.join(os.path.dirname(__file__),"version.py")) as f0:
exec(f0.read(), version)
__version__ = version['__version__']
| 2.15625 | 2 |
pypeit/metadata.py | rcooke-ast/PYPIT | 0 | 6242 | """
Provides a class that handles the fits metadata required by PypeIt.
.. include common links, assuming primary doc root is up one directory
.. include:: ../include/links.rst
"""
import os
import io
import string
from copy import deepcopy
import datetime
from IPython import embed
import numpy as np
import yaml
from astropy import table, coordinates, time, units
from pypeit import msgs
from pypeit import utils
from pypeit.core import framematch
from pypeit.core import flux_calib
from pypeit.core import parse
from pypeit.core import meta
from pypeit.io import dict_to_lines
from pypeit.par import PypeItPar
from pypeit.par.util import make_pypeit_file
from pypeit.bitmask import BitMask
# TODO: Turn this into a DataContainer
# Initially tried to subclass this from astropy.table.Table, but that
# proved too difficult.
class PypeItMetaData:
"""
Provides a table and interface to the relevant fits file metadata
used during the reduction.
The content of the fits table is dictated by the header keywords
specified for the provided spectrograph. It is expected that this
table can be used to set the frame type of each file.
The metadata is validated using checks specified by the provided
spectrograph class.
For the data table, one should typically provide either the file
list from which to grab the data from the fits headers or the
data directly. If neither are provided the table is instantiated
without any data.
Args:
spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
The spectrograph used to collect the data save to each file.
The class is used to provide the header keyword data to
include in the table and specify any validation checks.
par (:obj:`pypeit.par.pypeitpar.PypeItPar`):
PypeIt parameters used to set the code behavior.
files (:obj:`str`, :obj:`list`, optional):
The list of files to include in the table.
data (table-like, optional):
The data to include in the table. The type can be anything
allowed by the instantiation of
:class:`astropy.table.Table`.
usrdata (:obj:`astropy.table.Table`, optional):
A user provided set of data used to supplement or overwrite
metadata read from the file headers. The table must have a
`filename` column that is used to match to the metadata
table generated within PypeIt. **Note**: This is ignored if
`data` is also provided. This functionality is only used
when building the metadata from the fits files.
strict (:obj:`bool`, optional):
Function will fault if there is a problem with the reading
the header for any of the provided files; see
:func:`pypeit.spectrographs.spectrograph.get_headarr`. Set
to False to instead report a warning and continue.
Attributes:
spectrograph
(:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
The spectrograph used to collect the data save to each file.
The class is used to provide the header keyword data to
include in the table and specify any validation checks.
par (:class:`pypeit.par.pypeitpar.PypeItPar`):
PypeIt parameters used to set the code behavior. If not
provided, the default parameters specific to the provided
spectrograph are used.
configs (:obj:`dict`):
A dictionary of the unique configurations identified.
type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`):
The bitmask used to set the frame type of each fits file.
calib_bitmask (:class:`BitMask`):
The bitmask used to keep track of the calibration group bits.
table (:class:`astropy.table.Table`):
The table with the relevant metadata for each fits file to
use in the data reduction.
"""
def __init__(self, spectrograph, par, files=None, data=None, usrdata=None,
strict=True):
if data is None and files is None:
# Warn that table will be empty
msgs.warn('Both data and files are None in the instantiation of PypeItMetaData.'
' The table will be empty!')
# Initialize internals
self.spectrograph = spectrograph
self.par = par
if not isinstance(self.par, PypeItPar):
raise TypeError('Input parameter set must be of type PypeItPar.')
self.type_bitmask = framematch.FrameTypeBitMask()
# Build table
self.table = table.Table(data if files is None
else self._build(files, strict=strict,
usrdata=usrdata))
# Merge with user data, if present
if usrdata is not None:
self.merge(usrdata)
# Impose types on specific columns
self._impose_types(['comb_id', 'bkg_id', 'manual'], [int, int, str])
# Initialize internal attributes
self.configs = None
self.calib_bitmask = None
# Initialize columns that the user might add
self.set_user_added_columns()
# Validate instrument name
self.spectrograph.vet_instrument(self.table)
def _impose_types(self, columns, types):
"""
Impose a set of types on certain columns.
.. note::
:attr:`table` is edited in place.
Args:
columns (:obj:`list`):
List of column names
types (:obj:`list`):
List of types
"""
for c,t in zip(columns, types):
if c in self.keys():
self.table[c] = self.table[c].astype(t)
def _build(self, files, strict=True, usrdata=None):
"""
Generate the fitstbl that will be at the heart of PypeItMetaData.
Args:
files (:obj:`str`, :obj:`list`):
One or more files to use to build the table.
strict (:obj:`bool`, optional):
Function will fault if :func:`fits.getheader` fails to
read any of the headers. Set to False to report a
warning and continue.
usrdata (astropy.table.Table, optional):
Parsed for frametype for a few instruments (e.g. VLT)
where meta data may not be required
Returns:
dict: Dictionary with the data to assign to :attr:`table`.
"""
# Allow for single files
_files = files if hasattr(files, '__len__') else [files]
# Build lists to fill
data = {k:[] for k in self.spectrograph.meta.keys()}
data['directory'] = ['None']*len(_files)
data['filename'] = ['None']*len(_files)
# Build the table
for idx, ifile in enumerate(_files):
# User data (for frame type)
if usrdata is None:
usr_row = None
else:
# TODO: This check should be done elsewhere
# Check
if os.path.basename(ifile) != usrdata['filename'][idx]:
msgs.error('File name list does not match user-provided metadata table. See '
'usrdata argument of instantiation of PypeItMetaData.')
usr_row = usrdata[idx]
# Add the directory and file name to the table
data['directory'][idx], data['filename'][idx] = os.path.split(ifile)
if not data['directory'][idx]:
data['directory'][idx] = '.'
# Read the fits headers
headarr = self.spectrograph.get_headarr(ifile, strict=strict)
# Grab Meta
for meta_key in self.spectrograph.meta.keys():
value = self.spectrograph.get_meta_value(headarr, meta_key,
required=strict,
usr_row=usr_row,
ignore_bad_header = self.par['rdx']['ignore_bad_headers'])
if isinstance(value, str) and '#' in value:
value = value.replace('#', '')
msgs.warn('Removing troublesome # character from {0}. Returning {1}.'.format(
meta_key, value))
data[meta_key].append(value)
msgs.info('Added metadata for {0}'.format(os.path.split(ifile)[1]))
# JFH Changed the below to not crash if some files have None in
# their MJD. This is the desired behavior since if there are
# empty or corrupt files we still want this to run.
# Validate, print out a warning if there is problem
try:
time.Time(data['mjd'], format='mjd')
except ValueError:
mjd = np.asarray(data['mjd'])
filenames = np.asarray(data['filename'])
bad_files = filenames[mjd == None]
# Print status message
msg = 'Time invalid for {0} files.\n'.format(len(bad_files))
msg += 'Continuing, but the following frames may be empty or have corrupt headers:\n'
for file in bad_files:
msg += ' {0}\n'.format(file)
msgs.warn(msg)
# Return
return data
# TODO: In this implementation, slicing the PypeItMetaData object
# will return an astropy.table.Table, not a PypeItMetaData object.
def __getitem__(self, item):
return self.table.__getitem__(item)
def __setitem__(self, item, value):
return self.table.__setitem__(item, value)
def __len__(self):
return self.table.__len__()
def __repr__(self):
return self.table._base_repr_(html=False,
descr_vals=['PypeItMetaData:\n',
' spectrograph={0}\n'.format(
self.spectrograph.name),
' length={0}\n'.format(len(self))])
def _repr_html_(self):
return self.table._base_repr_(html=True, max_width=-1,
descr_vals=['PypeItMetaData: spectrograph={0}, length={1}\n'.format(
self.spectrograph.name, len(self))])
@staticmethod
def default_keys():
return [ 'directory', 'filename', 'instrume' ]
def keys(self):
return self.table.keys()
def sort(self, col):
return self.table.sort(col)
def merge(self, usrdata, match_type=True):
"""
Use the provided table to supplement or overwrite the metadata.
If the internal table already contains the column in `usrdata`,
the function will try to match the data type of the `usrdata`
column to the existing data type. If it can't it will just add
the column anyway, with the type in `usrdata`. You can avoid
this step by setting `match_type=False`.
Args:
usrdata (:obj:`astropy.table.Table`):
A user provided set of data used to supplement or
overwrite metadata read from the file headers. The
table must have a `filename` column that is used to
match to the metadata table generated within PypeIt.
match_type (:obj:`bool`, optional):
Attempt to match the data type in `usrdata` to the type
in the internal table. See above.
Raises:
TypeError:
Raised if `usrdata` is not an `astropy.io.table.Table`
KeyError:
Raised if `filename` is not a key in the provided table.
"""
meta_data_model = meta.get_meta_data_model()
# Check the input
if not isinstance(usrdata, table.Table):
raise TypeError('Must provide an astropy.io.table.Table instance.')
if 'filename' not in usrdata.keys():
raise KeyError('The user-provided table must have \'filename\' column!')
# Make sure the data are correctly ordered
srt = [np.where(f == self.table['filename'])[0][0] for f in usrdata['filename']]
# Convert types if possible
existing_keys = list(set(self.table.keys()) & set(usrdata.keys()))
radec_done = False
if len(existing_keys) > 0 and match_type:
for key in existing_keys:
if len(self.table[key].shape) > 1: # NOT ALLOWED!!
# TODO: This should be converted to an assert statement...
raise ValueError('CODING ERROR: Found high-dimensional column.')
#embed(header='372 of metadata')
elif key in meta_data_model.keys(): # Is this meta data??
dtype = meta_data_model[key]['dtype']
else:
dtype = self.table[key].dtype
# Deal with None's properly
nones = usrdata[key] == 'None'
usrdata[key][nones] = None
# Rest
# Allow for str RA, DEC (backwards compatability)
if key in ['ra', 'dec'] and not radec_done:
ras, decs = meta.convert_radec(usrdata['ra'][~nones].data,
usrdata['dec'][~nones].data)
usrdata['ra'][~nones] = ras.astype(dtype)
usrdata['dec'][~nones] = decs.astype(dtype)
radec_done = True
else:
usrdata[key][~nones] = usrdata[key][~nones].astype(dtype)
# Include the user data in the table
for key in usrdata.keys():
self.table[key] = usrdata[key][srt]
def finalize_usr_build(self, frametype, setup):
"""
Finalize the build of the table based on user-provided data,
typically pulled from the PypeIt file.
This function:
- sets the frame types based on the provided object
- sets all the configurations to the provided `setup`
- assigns all frames to a single calibration group, if the
'calib' column does not exist
- if the 'comb_id' column does not exist, this sets the
combination groups to be either undefined or to be unique
for each science or standard frame, see
:func:`set_combination_groups`.
.. note::
This should only be run if all files are from a single
instrument configuration. :attr:`table` is modified
in-place.
See also: :func:`pypeit.pypeitsetup.PypeItSetup.run`.
.. todo::
- Why isn't frametype just in the user-provided data? It
may be (see get_frame_types) and I'm just not using it...
Args:
frametype (:obj:`dict`):
A dictionary with the types designated by the user. The
file name and type are expected to be the key and value
of the dictionary, respectively. The number of keys
therefore *must* match the number of files in
:attr:`table`. For frames that have multiple types, the
types should be provided as a string with
comma-separated types.
setup (:obj:`str`):
                If the 'setup' column does not exist, fill the
configuration setup columns with this single identifier.
"""
self.get_frame_types(user=frametype)
# TODO: Add in a call to clean_configurations? I didn't add it
# here, because this method is only called for a preconstructed
# pypeit file, which should nominally follow an execution of
# pypeit_setup. If the user edits back in a frame that has an
# invalid key, at least for now the DEIMOS image reader will
# fault.
self.set_configurations(fill=setup)
self.set_calibration_groups(default=True)
self.set_combination_groups()
def get_configuration(self, indx, cfg_keys=None):
"""
Return the configuration dictionary for a given frame.
This is not the same as the backwards compatible "setup"
dictionary.
Args:
indx (:obj:`int`):
The index of the table row to use to construct the
configuration.
cfg_keys (:obj:`list`, optional):
The list of metadata keys to use to construct the
configuration. If None, the `configuration_keys` of
:attr:`spectrograph` is used.
Returns:
dict: A dictionary with the metadata values from the
selected row.
"""
_cfg_keys = self.spectrograph.configuration_keys() if cfg_keys is None else cfg_keys
return {k:self.table[k][indx] for k in _cfg_keys}
def master_key(self, row, det=1):
"""
Construct the master key for the file in the provided row.
The master key is the combination of the configuration, the
calibration group, and the detector. The configuration ID is
the same as included in the configuration column (A, B, C, etc),
the calibration group is the same as the calibration bit number,
and the detector number is provided as an argument and converted
to a zero-filled string with two digits (the maximum number of
detectors is 99).
Using the calibration bit in the keyword allows MasterFrames to
be used with multiple calibration groups.
Args:
row (:obj:`int`):
The 0-indexed row used to construct the key.
det (:obj:`int`, :obj:`tuple`, optional):
The 1-indexed detector number(s). If a tuple, it must include
detectors designated as a viable mosaic for
:attr:`spectrograph`; see
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`.
Returns:
:obj:`str`: Master key with configuration, calibration group(s), and
detector.
Raises:
PypeItError:
Raised if the 'setup' or 'calibbit' columns
haven't been defined.
"""
if 'setup' not in self.keys() or 'calibbit' not in self.keys():
msgs.error('Cannot provide master key string without setup and calibbit; '
'run set_configurations and set_calibration_groups.')
det_name = self.spectrograph.get_det_name(det)
return f"{self['setup'][row]}_{self['calibbit'][row]}_{det_name}"
def construct_obstime(self, row):
"""
Construct the MJD of when the frame was observed.
.. todo::
- Consolidate with :func:`convert_time` ?
Args:
row (:obj:`int`):
The 0-indexed row of the frame.
Returns:
astropy.time.Time: The MJD of the observation.
"""
return time.Time(self['mjd'][row], format='mjd')
def construct_basename(self, row, obstime=None):
"""
Construct the root name primarily for PypeIt file output.
Args:
row (:obj:`int`):
The 0-indexed row of the frame.
obstime (:class:`astropy.time.Time`, optional):
The MJD of the observation. If None, constructed using
:func:`construct_obstime`.
Returns:
str: The root name for file output.
"""
_obstime = self.construct_obstime(row) if obstime is None else obstime
tiso = time.Time(_obstime, format='isot')
dtime = datetime.datetime.strptime(tiso.value, '%Y-%m-%dT%H:%M:%S.%f')
return '{0}-{1}_{2}_{3}{4}'.format(self['filename'][row].split('.fits')[0],
self['target'][row].replace(" ", ""),
self.spectrograph.camera,
datetime.datetime.strftime(dtime, '%Y%m%dT'),
tiso.value.split("T")[1].replace(':',''))
def get_setup(self, row, det=None, config_only=False):
"""
Construct the setup dictionary.
.. todo::
- This is for backwards compatibility, but we should
consider reformatting it. And it may be something to put
in the relevant spectrograph class.
Args:
row (:obj:`int`):
The 0-indexed row used to construct the setup.
det (:obj:`int`, optional):
The 1-indexed detector to include. If None, all
detectors are included.
config_only (:obj:`bool`, optional):
Just return the dictionary with the configuration, don't
include the top-level designation of the configuration
itself.
Returns:
dict: The pypeit setup dictionary with the default format.
Raises:
PypeItError:
                Raised if the 'setup' column hasn't been defined.
"""
if 'setup' not in self.keys():
msgs.error('Cannot provide instrument setup without \'setup\' column; '
'run set_configurations.')
dispname = 'none' if 'dispname' not in self.keys() else self['dispname'][row]
dispangle = 'none' if 'dispangle' not in self.keys() else self['dispangle'][row]
dichroic = 'none' if 'dichroic' not in self.keys() else self['dichroic'][row]
decker = 'none' if 'decker' not in self.keys() else self['decker'][row]
slitwid = 'none' if 'slitwid' not in self.keys() else self['slitwid'][row]
slitlen = 'none' if 'slitlen' not in self.keys() else self['slitlen'][row]
binning = '1,1' if 'binning' not in self.keys() else self['binning'][row]
skey = 'Setup {}'.format(self['setup'][row])
# Key names *must* match configuration_keys() for spectrographs
setup = {skey:
{'--':
{'disperser': {'dispname': dispname, 'dispangle':dispangle},
'dichroic': dichroic,
'slit': {'decker': decker, 'slitwid':slitwid, 'slitlen':slitlen},
'binning': binning, # PypeIt orientation binning of a science image
}
}
}
#_det = np.arange(self.spectrograph.ndet)+1 if det is None else [det]
#for d in _det:
# setup[skey][str(d).zfill(2)] \
# = {'binning': binning, 'det': d,
# 'namp': self.spectrograph.detector[d-1]['numamplifiers']}
return setup[skey] if config_only else setup
def get_configuration_names(self, ignore=None, return_index=False, configs=None):
"""
Get the list of the unique configuration names.
This provides just the list of setup identifiers ('A', 'B',
etc.) and the row index where it first occurs. This is
different from :func:`unique_configurations` because the latter
determines and provides the configurations themselves.
This is mostly a convenience function for the writing routines.
Args:
ignore (:obj:`list`, optional):
Ignore configurations in the provided list.
return_index (:obj:`bool`, optional):
Return row indices with the first occurence of these
configurations.
configs (:obj:`str`, :obj:`list`, optional):
One or more strings used to select the configurations
to include in the returned objects. If ``'all'``,
pass back all configurations. Otherwise, only return
the configurations matched to this provided string or
list of strings (e.g., ['A','C']).
Returns:
numpy.array: The list of unique setup names. A second
returned object provides the indices of the first occurrence
of these setups, if requested.
Raises:
PypeItError:
                Raised if the 'setup' column hasn't been defined.
"""
if 'setup' not in self.keys():
msgs.error('Cannot get setup names; run set_configurations.')
# Unique configurations
setups, indx = np.unique(self['setup'], return_index=True)
if ignore is not None:
# Remove the selected configurations to ignore
rm = np.logical_not(np.isin(setups, ignore))
setups = setups[rm]
indx = indx[rm]
# Restrict
_configs = None if configs is None else np.atleast_1d(configs)
# TODO: Why do we need to specify 'all' here? Can't `configs is
# None` mean that you want all the configurations? Or can we
# make the default 'all'?
if configs is not None and 'all' not in _configs:
use = np.isin(setups, _configs)
setups = setups[use]
indx = indx[use]
        return (setups, indx) if return_index else setups
def _get_cfgs(self, copy=False, rm_none=False):
"""
Convenience method to return :attr:`configs` with possible
alterations.
This method *should not* be called by any method outside of
this class; use :func:`unique_configurations` instead.
Args:
copy (:obj:`bool`, optional):
Return a deep copy of :attr:`configs` instead of the
object itself.
rm_none (:obj:`bool`, optional):
Remove any configurations set to 'None'. If copy is
True, this is done *after* :attr:`configs` is copied
to a new dictionary.
Returns:
:obj:`dict`: A nested dictionary, one dictionary per
configuration with the associated metadata for each.
"""
_cfg = deepcopy(self.configs) if copy else self.configs
if rm_none and 'None' in _cfg.keys():
del _cfg['None']
return _cfg
def unique_configurations(self, force=False, copy=False, rm_none=False):
"""
Return the unique instrument configurations.
If run before the ``'setup'`` column is initialized, this function
determines the unique instrument configurations by finding
unique combinations of the items in the metadata table listed by
the spectrograph ``configuration_keys`` method.
If run after the ``'setup'`` column has been set, this simply
constructs the configuration dictionary using the unique
configurations in that column.
This is used to set the internal :attr:`configs`. If this
attribute is not None, this function simply returns
:attr:`config` (cf. ``force``).
.. warning::
Any frame types returned by the
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames`
method for :attr:`spectrograph` will be ignored in the
construction of the unique configurations. If
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.config_independent_frames`
does not return None and the frame types have not yet
been defined (see :func:`get_frame_types`), this method
will fault!
Args:
force (:obj:`bool`, optional):
Force the configurations to be redetermined. Otherwise
the configurations are only determined if
:attr:`configs` has not yet been defined.
copy (:obj:`bool`, optional):
Return a deep copy of :attr:`configs` instead of the
object itself.
rm_none (:obj:`bool`, optional):
Remove any configurations set to 'None'. If copy is
True, this is done *after* :attr:`configs` is copied
to a new dictionary.
Returns:
:obj:`dict`: A nested dictionary, one dictionary per
configuration with the associated metadata for each.
Raises:
PypeItError:
Raised if there are list of frame types to ignore but
the frame types have not been defined yet.
"""
if self.configs is not None and not force:
return self._get_cfgs(copy=copy, rm_none=rm_none)
if 'setup' in self.keys():
msgs.info('Setup column already set. Finding unique configurations.')
uniq, indx = np.unique(self['setup'], return_index=True)
ignore = uniq == 'None'
if np.sum(ignore) > 0:
msgs.warn('Ignoring {0} frames with configuration set to None.'.format(
np.sum(ignore)))
self.configs = {}
for i in range(len(uniq)):
if ignore[i]:
continue
self.configs[uniq[i]] = self.get_configuration(indx[i])
msgs.info('Found {0} unique configurations.'.format(len(self.configs)))
return self._get_cfgs(copy=copy, rm_none=rm_none)
msgs.info('Using metadata to determine unique configurations.')
# If the frame types have been set, ignore anything listed in
# the ignore_frames
indx = np.arange(len(self))
ignore_frames = self.spectrograph.config_independent_frames()
if ignore_frames is not None:
if 'frametype' not in self.keys():
msgs.error('To ignore frames, types must have been defined; run get_frame_types.')
ignore_frames = list(ignore_frames.keys())
msgs.info('Unique configurations ignore frames with type: {0}'.format(ignore_frames))
use = np.ones(len(self), dtype=bool)
for ftype in ignore_frames:
use &= np.logical_not(self.find_frames(ftype))
indx = indx[use]
if len(indx) == 0:
msgs.error('No frames to use to define configurations!')
# Get the list of keys to use
cfg_keys = self.spectrograph.configuration_keys()
# Configuration identifiers are iterations through the
# upper-case letters: A, B, C, etc.
double_alphabet = [str_i + str_j for str_i in string.ascii_uppercase for str_j in string.ascii_uppercase]
cfg_iter = list(string.ascii_uppercase) + double_alphabet
cfg_indx = 0
# TODO: Placeholder: Allow an empty set of configuration keys
# meaning that the instrument setup has only one configuration.
if len(cfg_keys) == 0:
self.configs = {}
self.configs[cfg_iter[cfg_indx]] = {}
msgs.info('All files assumed to be from a single configuration.')
return self._get_cfgs(copy=copy, rm_none=rm_none)
# Use the first file to set the first unique configuration
self.configs = {}
self.configs[cfg_iter[cfg_indx]] = self.get_configuration(indx[0], cfg_keys=cfg_keys)
cfg_indx += 1
# Check if any of the other files show a different
# configuration.
for i in indx[1:]:
j = 0
for c in self.configs.values():
if row_match_config(self.table[i], c, self.spectrograph):
break
j += 1
unique = j == len(self.configs)
if unique:
if cfg_indx == len(cfg_iter):
msgs.error('Cannot assign more than {0} configurations!'.format(len(cfg_iter)))
self.configs[cfg_iter[cfg_indx]] = self.get_configuration(i, cfg_keys=cfg_keys)
cfg_indx += 1
msgs.info('Found {0} unique configurations.'.format(len(self.configs)))
return self._get_cfgs(copy=copy, rm_none=rm_none)
def set_configurations(self, configs=None, force=False, fill=None):
"""
Assign each frame to a configuration (setup) and include it
in the metadata table.
The internal table is edited *in place*. If the 'setup'
column already exists, the configurations are **not** reset
unless you call the function with ``force=True``.
Args:
configs (:obj:`dict`, optional):
A nested dictionary, one dictionary per configuration
with the associated values of the metadata associated
with each configuration. The metadata keywords in the
dictionary should be the same as in the table, and the
keywords used to set the configuration should be the
same as returned by the spectrograph
`configuration_keys` method. The latter is not checked.
If None, this is set by :func:`unique_configurations`.
force (:obj:`bool`, optional):
Force the configurations to be reset.
fill (:obj:`str`, optional):
If the 'setup' column does not exist, fill the
configuration setup columns with this single identifier.
Ignores other inputs.
Raises:
PypeItError:
Raised if none of the keywords in the provided
configuration match with the metadata keywords. Also
raised when some frames cannot be assigned to a
configuration, the spectrograph defined frames that
have been ignored in the determination of the unique
configurations, but the frame types have not been set
yet.
"""
# Configurations have already been set
if 'setup' in self.keys() and not force:
return
if 'setup' not in self.keys() and fill is not None:
self['setup'] = fill
return
_configs = self.unique_configurations() if configs is None else configs
for k, cfg in _configs.items():
if len(set(cfg.keys()) - set(self.keys())) > 0:
msgs.error('Configuration {0} defined using unavailable keywords!'.format(k))
self.table['setup'] = 'None'
nrows = len(self)
for i in range(nrows):
for d, cfg in _configs.items():
if row_match_config(self.table[i], cfg, self.spectrograph):
self.table['setup'][i] = d
# Check if any of the configurations are not set
not_setup = self.table['setup'] == 'None'
if not np.any(not_setup):
# All are set, so we're done
return
# Some frame types may have been ignored
ignore_frames = self.spectrograph.config_independent_frames()
if ignore_frames is None:
# Nope, we're still done
return
# At this point, we need the frame type to continue
if 'frametype' not in self.keys():
msgs.error('To account for ignored frames, types must have been defined; run '
'get_frame_types.')
# For each configuration, determine if any of the frames with
# the ignored frame types should be assigned to it:
for cfg_key in _configs.keys():
in_cfg = self.table['setup'] == cfg_key
for ftype, metakey in ignore_frames.items():
# TODO: For now, use this assert to check that the
# metakey is either not set or a string
assert metakey is None or isinstance(metakey, str), \
                    'CODING ERROR: metadata keywords set by config_independent_frames are not ' \
'correctly defined for {0}; values must be None or a string.'.format(
self.spectrograph.__class__.__name__)
# Get the list of frames of this type without a
# configuration
indx = (self.table['setup'] == 'None') & self.find_frames(ftype)
if not np.any(indx):
continue
if metakey is None:
# No matching meta data defined, so just set all
# the frames to this (first) configuration
self.table['setup'][indx] = cfg_key
continue
# Find the unique values of meta for this configuration
uniq_meta = np.unique(self.table[metakey][in_cfg].data)
# Warn the user that the matching meta values are not
# unique for this configuration.
if uniq_meta.size != 1:
msgs.warn('When setting the instrument configuration for {0} '.format(ftype)
+ 'frames, configuration {0} does not have unique '.format(cfg_key)
                              + '{0} values.'.format(metakey))
# Find the frames of this type that match any of the
# meta data values
indx &= np.isin(self.table[metakey], uniq_meta)
self.table['setup'][indx] = cfg_key
def clean_configurations(self):
"""
Ensure that configuration-defining keywords all have values
that will yield good PypeIt reductions. Any frames that do
not are removed from :attr:`table`, meaning this method may
modify that attribute directly.
The valid values for configuration keys is set by
:func:`~pypeit.spectrographs.spectrograph.Spectrograph.valid_configuration_values`.
"""
cfg_limits = self.spectrograph.valid_configuration_values()
if cfg_limits is None:
# No values specified, so we're done
return
good = np.ones(len(self), dtype=bool)
for key in cfg_limits.keys():
# NOTE: For now, check that the configuration values were
# correctly assigned in the spectrograph class definition.
# This should probably go somewhere else or just removed.
assert isinstance(cfg_limits[key], list), \
'CODING ERROR: valid_configuration_values is not correctly defined ' \
'for {0}; values must be a list.'.format(self.spectrograph.__class__.__name__)
# Check that the metadata are valid for this column.
indx = np.isin(self[key], cfg_limits[key])
if not np.all(indx):
msgs.warn('Found frames with invalid {0}.'.format(key))
good &= indx
if np.all(good):
# All values good, so we're done
return
# Alert the user that some of the frames are going to be
# removed
msg = 'The following frames have configurations that cannot be reduced by PypeIt' \
' and will be removed from the metadata table (pypeit file):\n'
indx = np.where(np.logical_not(good))[0]
for i in indx:
msg += ' {0}\n'.format(self['filename'][i])
msgs.warn(msg)
# And remove 'em
self.table = self.table[good]
def _set_calib_group_bits(self):
"""
Set the calibration group bit based on the string values of the
'calib' column.
"""
# Find the number groups by searching for the maximum number
# provided, regardless of whether or not a science frame is
# assigned to that group.
ngroups = 0
for i in range(len(self)):
if self['calib'][i] in ['all', 'None']:
# No information, keep going
continue
# Convert to a list of numbers
l = np.amax([ 0 if len(n) == 0 else int(n)
for n in self['calib'][i].replace(':',',').split(',')])
# Check against current maximum
ngroups = max(l+1, ngroups)
# Define the bitmask and initialize the bits
self.calib_bitmask = BitMask(np.arange(ngroups))
self['calibbit'] = 0
# Set the calibration bits
for i in range(len(self)):
# Convert the string to the group list
grp = parse.str2list(self['calib'][i], ngroups)
if grp is None:
# No group selected
continue
# Assign the group; ensure the integers are unique
self['calibbit'][i] = self.calib_bitmask.turn_on(self['calibbit'][i], grp)
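        # A sketch of the mapping performed above: with three groups defined, a 'calib'
        # string of '0,2' switches on bits 0 and 2, giving a 'calibbit' value of 5.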
def _check_calib_groups(self):
"""
Check that the calibration groups are valid.
This currently only checks that the science frames are
associated with one calibration group.
TODO: Is this appropriate for NIR data?
"""
is_science = self.find_frames('science')
for i in range(len(self)):
if not is_science[i]:
continue
if len(self.calib_bitmask.flagged_bits(self['calibbit'][i])) > 1:
msgs.error('Science frames can only be assigned to a single calibration group.')
@property
def n_calib_groups(self):
"""Return the number of calibration groups."""
return None if self.calib_bitmask is None else self.calib_bitmask.nbits
def set_calibration_groups(self, global_frames=None, default=False, force=False):
"""
Group calibration frames into sets.
Requires the 'setup' column to have been defined. For now this
is a simple grouping of frames with the same configuration.
.. todo::
- Maintain a detailed description of the logic.
The 'calib' column has a string type to make sure that it
matches with what can be read from the pypeit file. The
'calibbit' column is actually what is used to determine the
calibration group of each frame; see :attr:`calib_bitmask`.
Args:
global_frames (:obj:`list`, optional):
A list of strings with the frame types to use in all
calibration groups (e.g., ['bias', 'dark']).
default (:obj:`bool`, optional):
If the 'calib' column is not present, set a single
calibration group *for all rows*.
force (:obj:`bool`, optional):
Force the calibration groups to be reconstructed if
the 'calib' column already exists.
Raises:
PypeItError:
Raised if 'setup' column is not defined, or if
`global_frames` is provided but the frame types have not
been defined yet.
"""
# Set the default if requested and 'calib' doesn't exist yet
if 'calib' not in self.keys() and default:
self['calib'] = '0'
# Make sure the calibbit column does not exist
if 'calibbit' in self.keys():
del self['calibbit']
# Groups have already been set
if 'calib' in self.keys() and 'calibbit' in self.keys() and not force:
return
# Groups have been set but the bits have not (likely because the
# data was read from a pypeit file)
if 'calib' in self.keys() and 'calibbit' not in self.keys() and not force:
self._set_calib_group_bits()
self._check_calib_groups()
return
# TODO: The rest of this just nominally sets the calibration
# group based on the configuration. This will change!
# The configuration must be present to determine the calibration
# group
if 'setup' not in self.keys():
msgs.error('Must have defined \'setup\' column first; try running set_configurations.')
configs = np.unique(self['setup'].data).tolist()
if 'None' in configs:
configs.remove('None') # Ignore frames with undefined configurations
n_cfg = len(configs)
# TODO: Science frames can only have one calibration group
# Assign everything from the same configuration to the same
# calibration group; this needs to have dtype=object, otherwise
# any changes to the strings will be truncated at 4 characters.
self.table['calib'] = np.full(len(self), 'None', dtype=object)
for i in range(n_cfg):
self['calib'][(self['setup'] == configs[i]) & (self['framebit'] > 0)] = str(i)
# Allow some frame types to be used in all calibration groups
# (like biases and darks)
if global_frames is not None:
if 'frametype' not in self.keys():
msgs.error('To set global frames, types must have been defined; '
'run get_frame_types.')
calibs = '0' if n_cfg == 1 else ','.join(np.arange(n_cfg).astype(str))
for ftype in global_frames:
indx = np.where(self.find_frames(ftype))[0]
for i in indx:
self['calib'][i] = calibs
# Set the bits based on the string representation of the groups
self._set_calib_group_bits()
# Check that the groups are valid
self._check_calib_groups()
def find_frames(self, ftype, calib_ID=None, index=False):
"""
Find the rows with the associated frame type.
If the index is provided, the frames must also be matched to the
relevant science frame.
Args:
ftype (str):
The frame type identifier. See the keys for
:class:`pypeit.core.framematch.FrameTypeBitMask`. If
set to the string 'None', this returns all frames
without a known type.
calib_ID (:obj:`int`, optional):
Index of the calibration group that it must match. If None,
any row of the specified frame type is included.
index (:obj:`bool`, optional):
Return an array of 0-indexed indices instead of a
boolean array.
Returns:
numpy.ndarray: A boolean array, or an integer array if
index=True, with the rows that contain the frames of the
requested type.
Raises:
PypeItError:
Raised if the `framebit` column is not set in the table.
"""
if 'framebit' not in self.keys():
msgs.error('Frame types are not set. First run get_frame_types.')
if ftype == 'None':
return self['framebit'] == 0
# Select frames
indx = self.type_bitmask.flagged(self['framebit'], ftype)
if calib_ID is not None:
# Select frames in the same calibration group
indx &= self.find_calib_group(calib_ID)
# Return
return np.where(indx)[0] if index else indx
def find_frame_files(self, ftype, calib_ID=None):
"""
Return the list of files with a given frame type.
The frames must also match the science frame index, if it is
provided.
Args:
ftype (str):
The frame type identifier. See the keys for
:class:`pypeit.core.framematch.FrameTypeBitMask`.
calib_ID (:obj:`int`, optional):
Index of the calibration group that it must match. If None,
any row of the specified frame type is included.
Returns:
list: List of file paths that match the frame type and
science frame ID, if the latter is provided.
"""
return self.frame_paths(self.find_frames(ftype, calib_ID=calib_ID))
def frame_paths(self, indx):
"""
Return the full paths to one or more frames.
Args:
indx (:obj:`int`, array-like):
One or more 0-indexed rows in the table with the frames
to return. Can be an array of indices or a boolean
array of the correct length.
Returns:
list: List of the full paths of one or more frames.
"""
if isinstance(indx, (int,np.integer)):
return os.path.join(self['directory'][indx], self['filename'][indx])
return [os.path.join(d,f) for d,f in zip(self['directory'][indx], self['filename'][indx])]
def set_frame_types(self, type_bits, merge=True):
"""
Set and return a Table with the frame types and bits.
Args:
type_bits (numpy.ndarray):
Integer bitmask with the frame types. The length must
match the existing number of table rows.
merge (:obj:`bool`, optional):
Merge the types and bits into the existing table. This
will *overwrite* any existing columns.
Returns:
`astropy.table.Table`: Table with two columns, the frame
type name and bits.
"""
# Making Columns to pad string array
ftype_colmA = table.Column(self.type_bitmask.type_names(type_bits), name='frametype')
# KLUDGE ME
#
# TODO: It would be good to get around this. Is it related to
# this change?
# http://docs.astropy.org/en/stable/table/access_table.html#bytestring-columns-in-python-3
#
# See also:
#
# http://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.convert_bytestring_to_unicode
#
# Or we can force type_names() in bitmask to always return the
# correct type...
if int(str(ftype_colmA.dtype)[2:]) < 9:
ftype_colm = table.Column(self.type_bitmask.type_names(type_bits), dtype='U9',
name='frametype')
else:
ftype_colm = ftype_colmA
fbits_colm = table.Column(type_bits, name='framebit')
t = table.Table([ftype_colm, fbits_colm])
if merge:
self['frametype'] = t['frametype']
self['framebit'] = t['framebit']
return t
def edit_frame_type(self, indx, frame_type, append=False):
"""
Edit the frame type by hand.
Args:
indx (:obj:`int`):
The 0-indexed row in the table to edit
frame_type (:obj:`str`, :obj:`list`):
One or more frame types to append/overwrite.
append (:obj:`bool`, optional):
Append the frame type. If False, all existing frame
                types are overwritten by the provided type.
"""
if not append:
self['framebit'][indx] = 0
self['framebit'][indx] = self.type_bitmask.turn_on(self['framebit'][indx], flag=frame_type)
self['frametype'][indx] = self.type_bitmask.type_names(self['framebit'][indx])
def get_frame_types(self, flag_unknown=False, user=None, merge=True):
"""
Generate a table of frame types from the input metadata object.
.. todo::
- Here's where we could add a SPIT option.
Args:
flag_unknown (:obj:`bool`, optional):
Instead of crashing out if there are unidentified files,
                leave them without a type and continue.
user (:obj:`dict`, optional):
A dictionary with the types designated by the user. The
file name and type are expected to be the key and value
of the dictionary, respectively. The number of keys
therefore *must* match the number of files in
:attr:`table`. For frames that have multiple types, the
types should be provided as a string with
comma-separated types.
merge (:obj:`bool`, optional):
                Merge the frame typing into the existing table.
Returns:
:obj:`astropy.table.Table`: A Table with two columns, the
type names and the type bits. See
:class:`pypeit.core.framematch.FrameTypeBitMask` for the
allowed frame types.
"""
# Checks
if 'frametype' in self.keys() or 'framebit' in self.keys():
msgs.warn('Removing existing frametype and framebit columns.')
if 'frametype' in self.keys():
del self.table['frametype']
if 'framebit' in self.keys():
del self.table['framebit']
# # TODO: This needs to be moved into each Spectrograph
# if useIDname and 'idname' not in self.keys():
# raise ValueError('idname is not set in table; cannot use it for file typing.')
# Start
msgs.info("Typing files")
type_bits = np.zeros(len(self), dtype=self.type_bitmask.minimum_dtype())
# Use the user-defined frame types from the input dictionary
if user is not None:
if len(user.keys()) != len(self):
raise ValueError('The user-provided dictionary does not match table length.')
msgs.info('Using user-provided frame types.')
for ifile,ftypes in user.items():
indx = self['filename'] == ifile
type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftypes.split(','))
return self.set_frame_types(type_bits, merge=merge)
# Loop over the frame types
for i, ftype in enumerate(self.type_bitmask.keys()):
# # Initialize: Flag frames with the correct ID name or start by
# # flagging all as true
# indx = self['idname'] == self.spectrograph.idname(ftype) if useIDname \
# else np.ones(len(self), dtype=bool)
# Include a combination of instrument-specific checks using
# combinations of the full set of metadata
exprng = self.par['scienceframe']['exprng'] if ftype == 'science' \
else self.par['calibrations']['{0}frame'.format(ftype)]['exprng']
# TODO: Use & or | ? Using idname above gets overwritten by
            # this if the frames do not meet the other checks in this call.
# indx &= self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng)
indx = self.spectrograph.check_frame_type(ftype, self.table, exprng=exprng)
# Turn on the relevant bits
type_bits[indx] = self.type_bitmask.turn_on(type_bits[indx], flag=ftype)
# Find the nearest standard star to each science frame
# TODO: Should this be 'standard' or 'science' or both?
if 'ra' not in self.keys() or 'dec' not in self.keys():
msgs.warn('Cannot associate standard with science frames without sky coordinates.')
else:
# TODO: Do we want to do this here?
indx = self.type_bitmask.flagged(type_bits, flag='standard')
for b, f, ra, dec in zip(type_bits[indx], self['filename'][indx], self['ra'][indx],
self['dec'][indx]):
if ra == 'None' or dec == 'None':
msgs.warn('RA and DEC must not be None for file:' + msgs.newline() + f)
msgs.warn('The above file could be a twilight flat frame that was'
+ msgs.newline() + 'missed by the automatic identification.')
b = self.type_bitmask.turn_off(b, flag='standard')
continue
# If an object exists within 20 arcmins of a listed standard,
# then it is probably a standard star
foundstd = flux_calib.find_standard_file(ra, dec, check=True)
b = self.type_bitmask.turn_off(b, flag='science' if foundstd else 'standard')
# Find the files without any types
indx = np.logical_not(self.type_bitmask.flagged(type_bits))
if np.any(indx):
msgs.info("Couldn't identify the following files:")
for f in self['filename'][indx]:
msgs.info(f)
if not flag_unknown:
msgs.error("Check these files before continuing")
# Finish up (note that this is called above if user is not None!)
msgs.info("Typing completed!")
return self.set_frame_types(type_bits, merge=merge)
def set_pypeit_cols(self, write_bkg_pairs=False, write_manual=False):
"""
Generate the list of columns to be included in the fitstbl
(nearly the complete list).
Args:
write_bkg_pairs (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for calib, comb_id
and bkg_id
write_manual (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for manual extraction
Returns:
            list: List of columns to be used in the fits
            table.
"""
# Columns for output
columns = self.spectrograph.pypeit_file_keys()
extras = []
# comb, bkg columns
if write_bkg_pairs:
extras += ['calib', 'comb_id', 'bkg_id']
# manual
if write_manual:
extras += ['manual']
for key in extras:
if key not in columns:
columns += [key]
# Take only those present
output_cols = np.array(columns)
return output_cols[np.isin(output_cols, self.keys())].tolist()
def set_combination_groups(self, assign_objects=True):
"""
Set combination groups.
.. note::
:attr:`table` is edited in place.
        This function can be used to initialize the combination group
        and background group columns, and/or to assign each object
        (science or standard frame) a unique combination group integer.
If the 'comb_id' or 'bkg_id' columns do not exist, they're set
to -1.
Args:
assign_objects (:obj:`bool`, optional):
If all of 'comb_id' values are less than 0 (meaning
they're unassigned), the combination groups are set to
be unique for each standard and science frame.
"""
if 'comb_id' not in self.keys():
self['comb_id'] = -1
if 'bkg_id' not in self.keys():
self['bkg_id'] = -1
if assign_objects and np.all(self['comb_id'] < 0):
# find_frames will throw an exception if framebit is not
# set...
sci_std_idx = np.where(np.any([self.find_frames('science'),
self.find_frames('standard')], axis=0))[0]
self['comb_id'][sci_std_idx] = np.arange(len(sci_std_idx), dtype=int) + 1
def set_user_added_columns(self):
"""
Set columns that the user *might* add
.. note::
:attr:`table` is edited in place.
This function can be used to initialize columns
that the user might add
"""
if 'manual' not in self.keys():
self['manual'] = ''
def write_sorted(self, ofile, overwrite=True, ignore=None,
write_bkg_pairs=False, write_manual=False):
"""
Write the sorted file.
The sorted file lists all the unique instrument configurations
(setups) and the frames associated with each configuration. The
output data table is identical to the pypeit file output.
.. todo::
- This is for backwards compatibility, but we should
consider reformatting/removing it.
Args:
ofile (:obj:`str`):
Name for the output sorted file.
overwrite (:obj:`bool`, optional):
Overwrite any existing file with the same name.
ignore (:obj:`list`, optional):
Ignore configurations in the provided list.
write_bkg_pairs (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for calib, comb_id
and bkg_id
write_manual (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for manual extraction
Raises:
PypeItError:
                Raised if the 'setup' column has not been defined.
"""
if 'setup' not in self.keys():
msgs.error('Cannot write sorted instrument configuration table without \'setup\' '
'column; run set_configurations.')
if os.path.isfile(ofile) and not overwrite:
            msgs.error('{0} already exists. Use overwrite=True to overwrite.'.format(ofile))
# Grab output columns
output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs,
write_manual=write_manual)
cfgs = self.unique_configurations(copy=ignore is not None)
if ignore is not None:
for key in cfgs.keys():
if key in ignore:
del cfgs[key]
# Construct file
ff = open(ofile, 'w')
for setup in cfgs.keys():
# Get the subtable of frames taken in this configuration
indx = self['setup'] == setup
if not np.any(indx):
continue
subtbl = self.table[output_cols][indx]
# Write the file
ff.write('##########################################################\n')
ff.write('Setup {:s}\n'.format(setup))
ff.write('\n'.join(dict_to_lines(cfgs[setup], level=1)) + '\n')
ff.write('#---------------------------------------------------------\n')
mjd = subtbl['mjd'].copy()
# Deal with possibly None mjds if there were corrupt header cards
mjd[mjd == None] = -99999.0
isort = np.argsort(mjd)
subtbl = subtbl[isort]
subtbl.write(ff, format='ascii.fixed_width')
ff.write('##end\n')
ff.close()
# TODO: Do we need a calib file?
def write_calib(self, ofile, overwrite=True, ignore=None):
"""
Write the calib file.
The calib file provides the unique instrument configurations
(setups) and the association of each frame from that
configuration with a given calibration group.
.. todo::
- This is for backwards compatibility, but we should
consider reformatting/removing it.
- This is complicated by allowing some frame types to have
no association with an instrument configuration
- This is primarily used for QA now; but could probably use the pypeit file instead
Args:
ofile (:obj:`str`):
Name for the output sorted file.
overwrite (:obj:`bool`, optional):
Overwrite any existing file with the same name.
ignore (:obj:`list`, optional):
Ignore calibration groups in the provided list.
Raises:
PypeItError:
Raised if the 'setup' or 'calibbit' columns haven't been
defined.
"""
if 'setup' not in self.keys() or 'calibbit' not in self.keys():
msgs.error('Cannot write calibration groups without \'setup\' and \'calibbit\' '
'columns; run set_configurations and set_calibration_groups.')
if os.path.isfile(ofile) and not overwrite:
            msgs.error('{0} already exists. Use overwrite=True to overwrite.'.format(ofile))
# Construct the setups dictionary
cfg = self.unique_configurations(copy=True, rm_none=True)
# TODO: We should edit the relevant follow-on code so that we
# don't have to do these gymnastics. Or better yet, just stop
# producing/using the *.calib file.
_cfg = {}
for setup in cfg.keys():
_cfg[setup] = {}
_cfg[setup]['--'] = deepcopy(cfg[setup])
cfg = _cfg
# Iterate through the calibration bit names as these are the root of the
# MasterFrames and QA
for icbit in np.unique(self['calibbit'].data):
cbit = int(icbit) # for yaml
# Skip this group
if ignore is not None and cbit in ignore:
continue
# Find the frames in this group
#in_group = self.find_calib_group(i)
in_cbit = self['calibbit'] == cbit
# Find the unique configurations in this group, ignoring any
# undefined ('None') configurations
#setup = np.unique(self['setup'][in_group]).tolist()
setup = np.unique(self['setup'][in_cbit]).tolist()
if 'None' in setup:
setup.remove('None')
# Make sure that each calibration group should only contain
# frames from a single configuration
if len(setup) != 1:
msgs.error('Each calibration group must be from one and only one instrument '
'configuration with a valid letter identifier; i.e., the '
'configuration cannot be None.')
# Find the frames of each type in this group
cfg[setup[0]][cbit] = {}
for key in self.type_bitmask.keys():
#ftype_in_group = self.find_frames(key) & in_group
ftype_in_group = self.find_frames(key) & in_cbit
cfg[setup[0]][cbit][key] = [ os.path.join(d,f)
for d,f in zip(self['directory'][ftype_in_group],
self['filename'][ftype_in_group])]
# Write it
ff = open(ofile, 'w')
ff.write(yaml.dump(utils.yamlify(cfg)))
ff.close()
def write_pypeit(self, output_path=None, cfg_lines=None,
write_bkg_pairs=False, write_manual=False,
configs=None):
"""
Write a pypeit file in data-table format.
The pypeit file is the main configuration file for PypeIt,
configuring the control-flow and algorithmic parameters and
listing the data files to read. This function writes the
columns selected by the
:func:`pypeit.spectrographs.spectrograph.Spectrograph.pypeit_file_keys`,
which can be specific to each instrument.
Args:
output_path (:obj:`str`, optional):
Root path for the output pypeit files. If None, set
to current directory. If the output directory does
not exist, it is created.
cfg_lines (:obj:`list`, optional):
The list of configuration lines to include in the file.
If None are provided, the vanilla configuration is
included.
write_bkg_pairs (:obj:`bool`, optional):
When constructing the
:class:`pypeit.metadata.PypeItMetaData` object, include
two columns called `comb_id` and `bkg_id` that identify
object and background frame pairs.
write_manual (:obj:`bool`, optional):
Add additional ``PypeIt`` columns for manual extraction
configs (:obj:`str`, :obj:`list`, optional):
One or more strings used to select the configurations
to include in the returned objects. If ``'all'``,
pass back all configurations. Otherwise, only return
the configurations matched to this provided string or
list of strings (e.g., ['A','C']). See
:attr:`configs`.
Raises:
PypeItError:
                Raised if the 'setup' column isn't defined.
Returns:
:obj:`list`: List of ``PypeIt`` files generated.
"""
# Set output path
if output_path is None:
output_path = os.getcwd()
# Find unique configurations, always ignoring any 'None'
# configurations...
cfg = self.unique_configurations(copy=True, rm_none=True)
# Get the setups to write
if configs is None or configs == 'all' or configs == ['all']:
cfg_keys = list(cfg.keys())
else:
_configs = configs if isinstance(configs, list) else [configs]
cfg_keys = [key for key in cfg.keys() if key in _configs]
if len(cfg_keys) == 0:
msgs.error('No setups to write!')
# Grab output columns
output_cols = self.set_pypeit_cols(write_bkg_pairs=write_bkg_pairs,
write_manual=write_manual)
# Write the pypeit files
ofiles = [None]*len(cfg_keys)
for j,setup in enumerate(cfg_keys):
# Create the output directory
root = '{0}_{1}'.format(self.spectrograph.name, setup)
odir = os.path.join(output_path, root)
if not os.path.isdir(odir):
os.makedirs(odir)
# Create the output file name
ofiles[j] = os.path.join(odir, '{0}.pypeit'.format(root))
# Get the setup lines
setup_lines = dict_to_lines({'Setup {0}'.format(setup):
utils.yamlify(cfg[setup])}, level=1)
# Get the paths
in_cfg = self['setup'] == setup
if not np.any(in_cfg):
continue
paths = np.unique(self['directory'][in_cfg]).tolist()
# Get the data lines
subtbl = self.table[output_cols][in_cfg]
subtbl.sort(['frametype','filename'])
with io.StringIO() as ff:
subtbl.write(ff, format='ascii.fixed_width')
data_lines = ff.getvalue().split('\n')[:-1]
# Write the file
make_pypeit_file(ofiles[j], self.spectrograph.name, [], cfg_lines=cfg_lines,
setup_lines=setup_lines, sorted_files=data_lines, paths=paths)
# Return
return ofiles
def write(self, output=None, rows=None, columns=None, sort_col=None, overwrite=False,
header=None):
"""
Write the metadata either to a file or to the screen.
The method allows you to set the columns to print and which column to
use for sorting.
Args:
output (:obj:`str`, optional):
Output signature or file name. If None, the table contents
are printed to the screen. If ``'table'``, the table that
would have been printed/written to disk is returned.
Otherwise, the string is interpreted as the name of an ascii
file to which to write the table contents.
rows (`numpy.ndarray`_, optional):
A boolean vector selecting the rows of the table to write. If
None, all rows are written. Shape must match the number of
the rows in the table.
columns (:obj:`str`, :obj:`list`, optional):
A list of columns to include in the output file. Can be
provided as a list directly or as a comma-separated string.
                If None or ``'all'``, all columns are written; if
``'pypeit'``, the columns are the same as those included in
the pypeit file. Each selected column must be a valid pypeit
metadata keyword, specific to :attr:`spectrograph`.
Additional valid keywords, depending on the processing level
of the metadata table, are directory, filename, frametype,
framebit, setup, calib, and calibbit.
sort_col (:obj:`str`, optional):
Name of the column to use for sorting the output. If
None, the table is printed in its current state.
overwrite (:obj:`bool`, optional):
Overwrite any existing file; otherwise raise an
exception.
header (:obj:`str`, :obj:`list`, optional):
                One or more strings to write to the top of the file, one
                string per file line; ``# `` is added to the beginning of
each string. Ignored if ``output`` does not specify an output
file.
Returns:
`astropy.table.Table`: The table object that would have been
written/printed if ``output == 'table'``. Otherwise, the method
always returns None.
Raises:
ValueError:
Raised if the columns to include are not valid, or if the
column to use for sorting is not valid.
FileExistsError:
Raised if overwrite is False and the file exists.
"""
# Check the file can be written (this is here because the spectrograph
# needs to be defined first)
ofile = None if output in [None, 'table'] else output
if ofile is not None and os.path.isfile(ofile) and not overwrite:
raise FileExistsError(f'{ofile} already exists; set flag to overwrite.')
# Check the rows input
if rows is not None and len(rows) != len(self.table):
raise ValueError('Boolean vector selecting output rows has incorrect length.')
# Get the columns to return
if columns in [None, 'all']:
tbl_cols = list(self.keys())
elif columns == 'pypeit':
tbl_cols = self.set_pypeit_cols(write_bkg_pairs=True)
else:
all_cols = list(self.keys())
tbl_cols = columns if isinstance(columns, list) else columns.split(',')
badcol = [col not in all_cols for col in tbl_cols]
if np.any(badcol):
                raise ValueError('The following columns are not valid: {0}'.format(
                    ', '.join([col for col, bad in zip(tbl_cols, badcol) if bad])))
# Make sure the basic parameters are the first few columns; do them in
# reverse order so I can always insert at the beginning of the list
for col in ['framebit', 'frametype', 'filename', 'directory']:
if col not in tbl_cols:
continue
indx = np.where([t == col for t in tbl_cols])[0][0]
if indx != 0:
tbl_cols.insert(0, tbl_cols.pop(indx))
# Make sure the dithers and combination and background IDs are the last
# few columns
ncol = len(tbl_cols)
for col in ['dithpat', 'dithpos', 'dithoff', 'calib', 'comb_id', 'bkg_id']:
if col not in tbl_cols:
continue
indx = np.where([t == col for t in tbl_cols])[0][0]
if indx != ncol-1:
tbl_cols.insert(ncol-1, tbl_cols.pop(indx))
# Copy the internal table so that it is unaltered
output_tbl = self.table.copy()
# Select the output rows if a vector was provided
if rows is not None:
output_tbl = output_tbl[rows]
# Select and sort the data by a given column
if sort_col is not None:
if sort_col not in self.keys():
raise ValueError(f'Cannot sort by {sort_col}. Not a valid column.')
# Ignore any NoneTypes
indx = output_tbl[sort_col] != None
is_None = np.logical_not(indx)
srt = np.append(np.where(is_None)[0],
np.where(indx)[0][np.argsort(output_tbl[sort_col][indx].data)])
output_tbl = output_tbl[tbl_cols][srt]
else:
output_tbl = output_tbl[tbl_cols]
if output == 'table':
# Instead of writing, just return the modified table
return output_tbl
# Always write the table in ascii format
with io.StringIO() as ff:
output_tbl.write(ff, format='ascii.fixed_width')
data_lines = ff.getvalue().split('\n')[:-1]
if ofile is None:
# Output file not defined so just print it
print('\n'.join(data_lines))
return None
# Write the output to an ascii file
with open(ofile, 'w') as f:
if header is not None:
_header = header if isinstance(header, list) else [header]
for h in _header:
f.write(f'# {h}\n')
f.write('\n')
f.write('\n'.join(data_lines))
f.write('\n')
# Just to be explicit that the method returns None when writing to a
# file...
return None
def find_calib_group(self, grp):
"""
Find all the frames associated with the provided calibration group.
Args:
grp (:obj:`int`):
The calibration group integer.
Returns:
numpy.ndarray: Boolean array selecting those frames in the
table included in the selected calibration group.
Raises:
PypeItError:
Raised if the 'calibbit' column is not defined.
"""
if 'calibbit' not in self.keys():
msgs.error('Calibration groups are not set. First run set_calibration_groups.')
return self.calib_bitmask.flagged(self['calibbit'].data, grp)
def find_frame_calib_groups(self, row):
"""
Find the calibration groups associated with a specific frame.
"""
return self.calib_bitmask.flagged_bits(self['calibbit'][row])
# TODO: Is there a reason why this is not an attribute of
# PypeItMetaData?
def row_match_config(row, config, spectrograph):
"""
Queries whether a row from the fitstbl matches the
input configuration
Args:
row (astropy.table.Row): From fitstbl
config (dict): Defines the configuration
spectrograph (pypeit.spectrographs.spectrograph.Spectrograph):
Used to grab the rtol value for float meta (e.g. dispangle)
Returns:
bool: True if the row matches the input configuration
"""
# Loop on keys in config
match = []
for k in config.keys():
# Deal with floating configs (e.g. grating angle)
if isinstance(config[k], float):
if row[k] is None:
match.append(False)
elif np.abs(config[k]-row[k])/config[k] < spectrograph.meta[k]['rtol']:
match.append(True)
else:
match.append(False)
else:
# The np.all allows for arrays in the Table (e.g. binning)
match.append(np.all(config[k] == row[k]))
# Check
return np.all(match)
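
# A minimal usage sketch, not part of the original module: it assumes `fitstbl`
# is a PypeItMetaData instance whose frame types have already been set with
# get_frame_types().  Only methods defined above are used; the function and
# variable names are illustrative.
def _example_select_science_frames(fitstbl):
    # Boolean mask over the table rows typed as science frames
    is_sci = fitstbl.find_frames('science')
    # Full paths to those frames
    sci_files = fitstbl.frame_paths(is_sci)
    # Initialize 'comb_id'/'bkg_id' and assign each object a unique group
    fitstbl.set_combination_groups()
    return sci_files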
| 2.390625 | 2 |
aql/aql/main/aql_builtin_tools.py | menify/sandbox | 0 | 6243 | <filename>aql/aql/main/aql_builtin_tools.py<gh_stars>0
import os.path
import shutil
import errno
from aql.nodes import Builder, FileBuilder
from .aql_tools import Tool
__all__ = ( "ExecuteCommand",
"InstallBuilder",
"BuiltinTool",
)
"""
Unique Value - name + type
value
node
node = ExecuteCommand('gcc --help -v')
tools.cpp.cxx
node = ExecuteCommand( tools.cpp.cxx, '--help -v' )
node = ExecuteMethod( target = my_function )
dir_node = CopyFiles( prog_node, target = dir_name )
dir_node = CopyFilesAs( prog_node, target = dir_name )
dir_node = MoveFiles( prog_node, )
dir_node = MoveFilesAs( prog_node )
dir_node = RemoveFiles( prog_node )
node = FindFiles( dir_node )
dir_node = FileDir( prog_node )
"""
def _makeTagetDirs( path_dir ):
try:
os.makedirs( path_dir )
except OSError as e:
if e.errno != errno.EEXIST:
raise
#//===========================================================================//
class ExecuteCommand (Builder):
def build( self, node ):
cmd = node.getSources()
out = self.execCmd( cmd )
node.setNoTargets()
return out
#//-------------------------------------------------------//
def getBuildStrArgs( self, node, brief ):
cmd = node.getSourceValues()
return (cmd,)
#//===========================================================================//
class InstallBuilder (FileBuilder):
def __init__(self, options, target ):
self.target = os.path.abspath( target )
#//-------------------------------------------------------//
def build( self, node ):
sources = node.getSources()
target = self.target
_makeTagetDirs( target )
for source in sources:
if os.path.isfile( source ):
shutil.copy( source, target )
node.setNoTargets()
#//-------------------------------------------------------//
def getTraceTargets( self, node, brief ):
return self.target
#//===========================================================================//
class BuiltinTool( Tool ):
def ExecuteCommand( self, options ):
return ExecuteCommand( options )
def Install(self, options, target ):
return InstallBuilder( options, target )
def DirName(self, options):
raise NotImplementedError()
def BaseName(self, options):
raise NotImplementedError()
| 2.234375 | 2 |
cms/test_utils/project/placeholderapp/models.py | stefanw/django-cms | 0 | 6244 | from django.core.urlresolvers import reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from cms.models.fields import PlaceholderField
from cms.utils import get_language_from_request
from cms.utils.urlutils import admin_reverse
from hvad.models import TranslatableModel, TranslatedFields
def dynamic_placeholder_1(instance):
return instance.char_1
def dynamic_placeholder_2(instance):
return instance.char_2
@python_2_unicode_compatible
class Example1(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
char_2 = models.CharField(u'char_2', max_length=255)
char_3 = models.CharField(u'char_3', max_length=255)
char_4 = models.CharField(u'char_4', max_length=255)
date_field = models.DateField(null=True)
placeholder = PlaceholderField('placeholder')
static_admin_url = ''
def __init__(self, *args, **kwargs):
super(Example1, self).__init__(*args, **kwargs)
def callable_item(self, request):
return self.char_1
def __str__(self):
return self.char_1
def get_absolute_url(self):
return reverse("example_detail", args=(self.pk,))
def get_draft_url(self):
return self.get_absolute_url()
def get_public_url(self):
return '/public/view/'
def set_static_url(self, request):
language = get_language_from_request(request)
if self.pk:
self.static_admin_url = admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language))
return self.pk
def dynamic_url(self, request):
language = get_language_from_request(request)
return admin_reverse('placeholderapp_example1_edit_field', args=(self.pk, language))
class TwoPlaceholderExample(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
char_2 = models.CharField(u'char_2', max_length=255)
char_3 = models.CharField(u'char_3', max_length=255)
char_4 = models.CharField(u'char_4', max_length=255)
placeholder_1 = PlaceholderField('placeholder_1', related_name='p1')
placeholder_2 = PlaceholderField('placeholder_2', related_name='p2')
class DynamicPlaceholderSlotExample(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
char_2 = models.CharField(u'char_2', max_length=255)
placeholder_1 = PlaceholderField(dynamic_placeholder_1, related_name='dynamic_pl_1')
placeholder_2 = PlaceholderField(dynamic_placeholder_2, related_name='dynamic_pl_2')
@python_2_unicode_compatible
class CharPksExample(models.Model):
char_1 = models.CharField(u'char_1', max_length=255)
slug = models.SlugField(u'char_1', max_length=255, primary_key=True)
placeholder_1 = PlaceholderField('placeholder_1', related_name='charpk_p1')
def __str__(self):
return "%s - %s" % (self.char_1, self.pk)
@python_2_unicode_compatible
class MultilingualExample1(TranslatableModel):
translations = TranslatedFields(
char_1=models.CharField(u'char_1', max_length=255),
char_2=models.CharField(u'char_2', max_length=255),
)
placeholder_1 = PlaceholderField('placeholder_1')
def __str__(self):
return self.char_1
def get_absolute_url(self):
return reverse("detail_multi", args=(self.pk,))
| 2.03125 | 2 |
150-Challenges/Challenges 80 - 87/Challenge 84.py | DGrifferty/Python | 0 | 6245 | # 084
# Ask the user to type in their postcode.Display the first two
# letters in uppercase.
# very simple
print(input('Enter your postcode: ')[0:2].upper()) | 3.6875 | 4 |
api_youtube.py | OnoArnaldo/PythonApiYoutube | 2 | 6246 | <filename>api_youtube.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import json
import urllib2
import codecs
BASE_DIR = os.path.dirname(__file__)
BASE_URL = 'https://www.googleapis.com/youtube/v3/'
API_CHANNELS = 'channels'
API_PLAYLIST = 'playlistItems'
API_KEY = 'YOUR KEY'
CHANNELS = [
'videosimprovaveis',
'nerdologia',
'Kurzgesagt',
'1veritasium',
'minutephysics',
'xadrezverbal',
'estevaoslow',
'Vsauce',
'braincraftvideo',
'CienciaTodoDia',
]
class UrlEncoder(object):
API_URL = ''
def __init__(self, **kwargs):
self.args = kwargs
def _parms(self):
args = []
for k, v in self.args.items():
args.append(k + '=' + str(v))
return '&'.join(args)
def get(self):
parms = '?' + self._parms() if len(self.args) else ''
return self.API_URL + parms
def set(self, key, value):
if value:
self.args[key] = value
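
# Hedged usage sketch, not part of the original script: UrlEncoder simply
# joins 'key=value' pairs onto API_URL, which is what ApiChannel and
# ApiPlayList below rely on.  The API key value here is a placeholder.
def _example_build_url():
    UrlEncoder.API_URL = BASE_URL + API_PLAYLIST
    encoder = UrlEncoder(part='snippet', maxResults=1)
    encoder.set('key', 'YOUR KEY')
    # e.g. .../playlistItems?part=snippet&maxResults=1&key=YOUR KEY
    # (parameter order can vary because the arguments are kept in a dict)
    return encoder.get()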
class ApiChannel(object):
URL = BASE_URL + API_CHANNELS
FILE_NAME = os.path.join(BASE_DIR, 'channels.json')
def __init__(self, channels):
self.encoder = self.build_encoder(API_KEY)
self.channels = channels
def run(self):
data = self.generate_data()
self.save(data)
def generate_data(self):
encoder = self.encoder
ret = {}
for channel in self.channels:
encoder.set('forUsername', channel)
data = self.get_data(encoder.get())
ret[channel] = self.get_playlist_id(data)
return ret
def get_data(self, url):
url = urllib2.urlopen(url)
data = url.read()
return json.loads(data)
def get_playlist_id(self, data):
items = data.get('items')
content = items[0].get('contentDetails')
playlists = content.get('relatedPlaylists')
return playlists.get('uploads')
def save(self, data):
with open(self.FILE_NAME, 'w') as f:
f.write(json.dumps(data))
f.close()
def build_encoder(self, api_key):
UrlEncoder.API_URL = self.URL
encoder = UrlEncoder()
encoder.set('key', api_key)
encoder.set('part', 'contentDetails')
return encoder
class ApiPlayList(object):
URL = BASE_URL + API_PLAYLIST
FILE_NAME = os.path.join(BASE_DIR, 'playlist.txt')
def __init__(self, channels):
self.channels = channels
self.encoder = self.build_encoder(API_KEY)
def run(self):
data = self.generate_data()
self.save(data)
def generate_data(self):
encoder = self.encoder
channels = self.channels
ret = []
for key in channels:
encoder.set('playlistId', channels[key])
data = self.get_data(encoder.get())
ret += [[key] + self.get_info(data)]
return ret
def get_info(self, data):
items = data.get('items')
snippet = items[0].get('snippet')
title = snippet.get('title')
published_at = snippet.get('publishedAt')
description = snippet.get('description')
return [title, published_at, description]
def save(self, data):
fname = os.path.join(BASE_DIR, 'last_update.txt')
with codecs.open(fname, 'w', encoding='utf-8') as f:
for key, title, published_at, description in sorted(data, key=lambda x: x[2]):
f.write('{}: {} - {}\n'.format(published_at[:10], key, title))
f.close()
def get_data(self, url):
url = urllib2.urlopen(url)
data = url.read()
return json.loads(data)
def build_encoder(self, api_key):
UrlEncoder.API_URL = self.URL
encoder = UrlEncoder()
encoder.set('key', api_key)
encoder.set('part', 'snippet')
encoder.set('maxResults', '1')
return encoder
@classmethod
def import_channels(cls, fname):
with open(fname, 'r') as f:
text = f.read()
f.close()
return json.loads(text)
if __name__ == '__main__':
args = sys.argv[1:]
if '-channel' in args:
channel = ApiChannel(CHANNELS)
channel.run()
if '-playlist' in args:
channels = ApiPlayList.import_channels(ApiChannel.FILE_NAME)
play_list = ApiPlayList(channels)
play_list.run()
| 2.953125 | 3 |
python_and_ebpf/train.py | be4r/ssh-miner-detection | 0 | 6247 | <reponame>be4r/ssh-miner-detection
#!/usr/bin/env python3
from sklearn.tree import DecisionTreeClassifier
import pickle
import numpy as np
no = [b'runc:[2:INIT]', b'containerssh-ag', b'apt',b'dpkg']
class model:
def __init__(self):
self.d = DecisionTreeClassifier()
def load(self, filename = 'model.p'):
try:
f = open(filename, 'rb')
self.d = pickle.load(f)
if type(self.d) != DecisionTreeClassifier:
                self.d = None
f.close()
except:
return
def save(self, filename = 'model.p'):
f = open(filename, 'wb')
pickle.dump(self.d, f)
f.close()
def fit(self, x, y):
self.d.fit(x, y)
def predict(self, x):
return self.d.predict(x)
def accuracy(self, y_pred, y_ref):
return sum(np.array(y_pred) == np.array(y_ref)) / len(y_ref)
def f1(self, y_pred, y_ref):
tp = (np.array(y_pred) == 1) * (np.array(y_ref) == 1)
tn = (np.array(y_pred) == 0) * (np.array(y_ref) == 0)
fp = (np.array(y_pred) == 1) * (np.array(y_ref) == 0)
fn = (np.array(y_pred) == 0) * (np.array(y_ref) == 1)
        return tp.sum() / (tp.sum() + (fp.sum() + fn.sum()) / 2)
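
# Hedged usage sketch, not part of the original script: the wrapper above is a
# thin layer over sklearn's DecisionTreeClassifier, so a toy fit/predict round
# trip looks like this.  The feature vectors below are made up for illustration.
def _example_toy_fit():
    clf = model()
    x = [[0, 0, 1], [1, 1, 0], [0, 1, 1], [1, 0, 0]]
    y = [0, 1, 0, 1]
    clf.fit(x, y)
    preds = clf.predict(x)
    return clf.accuracy(preds, y)  # 1.0 on the training data itself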
def ngrams(array, size = 25, overlacing = False):
    res = [array[i:i+size] for i in range(0, len(array)//size * size, 1 if overlacing else size)]
    if sum([len(i) == size for i in res]) != len(res):
        raise Exception('wtf')
    return res
def gen_train(a, is_miner):
#x1,y1,x2,y2 = train_test_split(x,y,0.05)
x = ngrams(a)
y = [1 if is_miner else 0,] * len(x)
return x,y
def train_on_logs(*filenames, is_miner):
classifier = model()
#classifier.load()
x, y = [], []
for id, filename in enumerate(filenames):
l = []
with open(filename, 'r') as f:
l = eval(''.join(f))
codes = []
for i in l:
if i[0] not in no:
codes.append(i[1])
x_, y_ = gen_train(codes, is_miner[id])
x.append(x_)
y.append(y_)
print(x,y)
#classifier.fit(x,y)
#classifier.save()
def predict_on_logs(*filenames, is_miner):
classifier = model()
classifier.load()
x, y = [], []
for id, filename in enumerate(filenames):
l = []
with open(filename, 'r') as f:
l = eval(''.join(f))
codes = []
for i in l:
if i[0] not in no:
codes.append(i[1])
x_, y_ = gen_train(codes, is_miner[id])
x.append(x_)
y.append(y_)
y_pred = classifier.predict(x)
print("Accuracy: ", classifier.accuracy(y_pred, y))
print("F1: ",classifier.f1(y_pred, y))
def predict_on_trace(trace, A = 0.9):
    classifier = model()
    classifier.load()
    codes = []
    for i in trace:
        if i[0] not in no:
            codes.append(i[1])
    x = ngrams(codes)
    y_pred = classifier.predict(x)
    acc = sum(np.array(y_pred)) / len(y_pred)
    return acc > A
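
# Hedged end-to-end sketch, not part of the original script: the log file
# names and labels are illustrative assumptions.  Each log is expected to hold
# a printed list of (process_name, syscall_code) pairs, as parsed by
# train_on_logs above; a trace needs at least 25 events (the default n-gram
# size) before predict_on_trace can form a feature vector.
def _example_workflow(trace):
    # Inspect the n-gram features built from two hypothetical trace logs
    train_on_logs('miner.log', 'benign.log', is_miner=[True, False])
    # Classify a fresh in-memory trace with the persisted model
    return predict_on_trace(trace, A=0.9)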
| 2.265625 | 2 |
data/parse_hipp_data.py | slinderman/pyhsmm-spiketrains | 10 | 6248 | import os
import numpy as np
from scipy.io import loadmat
# The raw .dat file is a flat whitespace-separated stream of floats giving
# (x, y, t) triples, so read it as text rather than with loadmat.
data = np.loadtxt("data/hipp_2dtrack_a/smJun03p2.dat").ravel()
N = 49
data = data.reshape(-1, 3)                  # 43799-by-3

# sampling time
Ts = 0.0333
duration = data.shape[0] * Ts               # in seconds
Tmax = data[-1, 2]
Tmin = data[0, 2]
time_edges = np.arange(Tmin, Tmax, 0.25)    # 250 ms per bin

# interpolated rat's position in time bins
Rat_pos = np.column_stack([np.interp(time_edges, data[:, 2], data[:, 0]),
                           np.interp(time_edges, data[:, 2], data[:, 1])])
vel = np.abs(np.diff(Rat_pos, axis=0))      # row difference
vel = np.vstack([vel[:1], vel])

# 250 ms
rat_vel = 4 * np.sqrt(vel[:, 0]**2 + vel[:, 1]**2)   # unit: cm/s
vel_ind = np.where(rat_vel >= 10)[0]        # RUN velocity threshold

# using RUN only
T = len(vel_ind)
# using RUN + pause periods
T = len(time_edges)

C = N                                       # number of cells (assumes C == N)
AllSpikeData = np.zeros((C, T))
for i in range(1, C + 1):
    # each Cell_num<i> file holds (x, y, t) triples for that cell's spikes
    cell_data = np.loadtxt('Cell_num' + str(i)).reshape(-1, 3)
    spike_time = cell_data[:, 2]
    spike_pos = cell_data[:, :2]
    # spike counts per time bin (column vector in the original MATLAB)
    spike_time_count, _ = np.histogram(spike_time, bins=np.append(time_edges, np.inf))
    # if analyzing the RUN period only uncomment this
    # spike_time_count = spike_time_count[vel_ind]
    AllSpikeData[i - 1, :] = spike_time_count
| 2.40625 | 2 |
repokid/tests/test_roledata.py | tomdev/repokid | 0 | 6249 | # Copyright 2017 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from mock import patch
import repokid.utils.roledata
from repokid.role import Role
from repokid.tests.test_repokid_cli import ROLE_POLICIES, ROLES
AARDVARK_DATA = {
"arn:aws:iam::123456789012:role/all_services_used": [
{"lastAuthenticated": int(time.time()) * 1000,
"serviceNamespace": "iam"},
{"lastAuthenticated": int(time.time()) * 1000,
"serviceNamespace": "s3"}],
"arn:aws:iam::123456789012:role/unused_ec2": [
{"lastAuthenticated": int(time.time()) * 1000,
"serviceNamespace": "iam"},
{"lastAuthenticated": 0,
"serviceNamespace": "ec2"}],
"arn:aws:iam::123456789012:role/young_role": [
{"lastAuthenticated": int(time.time()) * 1000,
"serviceNamespace": "iam"},
{"lastAuthenticated": int(time.time()) * 1000,
"serviceNamespace": "s3"}]
}
class TestRoledata(object):
@patch('repokid.utils.roledata.expand_policy')
@patch('repokid.utils.roledata.get_actions_from_statement')
@patch('repokid.utils.roledata.all_permissions')
def test_get_role_permissions(self, mock_all_permissions, mock_get_actions_from_statement, mock_expand_policy):
test_role = Role(ROLES[0])
all_permissions = ['ec2:associateaddress', 'ec2:attachvolume', 'ec2:createsnapshot', 's3:createbucket',
's3:getobject']
# empty policy to make sure we get the latest
test_role.policies = [{'Policy': ROLE_POLICIES['all_services_used']}, {'Policy': ROLE_POLICIES['unused_ec2']}]
mock_all_permissions.return_value = all_permissions
mock_get_actions_from_statement.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms']
mock_expand_policy.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms']
permissions = repokid.utils.roledata._get_role_permissions(test_role)
assert permissions == set(ROLE_POLICIES['unused_ec2']['ec2_perms'])
@patch('repokid.hooks.call_hooks')
def test_get_repoable_permissions(self, mock_call_hooks):
minimum_age = 1
repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES = ['service_2']
repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS = ['service_1:action_3', 'service_1:action_4']
hooks = {}
permissions = ['service_1:action_1', 'service_1:action_2', 'service_1:action_3', 'service_1:action_4',
'service_2:action_1', 'service_3:action_1', 'service_3:action_2', 'service_4:action_1',
'service_4:action_2']
# service_1 and service_2 both used more than a day ago, which is outside of our test filter for age
aa_data = [{'serviceNamespace': 'service_1', 'lastAuthenticated': (time.time() - 90000) * 1000},
{'serviceNamespace': 'service_2', 'lastAuthenticated': (time.time() - 90000) * 1000},
{'serviceNamespace': 'service_3', 'lastAuthenticated': time.time() * 1000}]
no_repo_permissions = {'service_4:action_1': time.time() - 1, 'service_4:action_2': time.time() + 1000}
repoable_decision = repokid.utils.roledata.RepoablePermissionDecision()
repoable_decision.repoable = True
mock_call_hooks.return_value = {'potentially_repoable_permissions': {'service_1:action_1': repoable_decision,
'service_1:action_2': repoable_decision,
'service_4:action_1': repoable_decision}}
repoable_permissions = repokid.utils.roledata._get_repoable_permissions(None, 'test_name', permissions, aa_data,
no_repo_permissions, minimum_age,
hooks)
# service_1:action_3 and action_4 are unsupported actions, service_2 is an unsupported service, service_3
# was used too recently, service_4 action 2 is in no_repo_permissions and not expired
assert repoable_permissions == set(['service_1:action_1', 'service_1:action_2', 'service_4:action_1'])
@patch('repokid.utils.roledata._get_role_permissions')
@patch('repokid.utils.roledata._get_repoable_permissions')
@patch('repokid.hooks.call_hooks')
def test_calculate_repo_scores(self, mock_call_hooks, mock_get_repoable_permissions, mock_get_role_permissions):
roles = [Role(ROLES[0]), Role(ROLES[1]), Role(ROLES[2])]
roles[0].disqualified_by = []
roles[0].aa_data = 'some_aa_data'
# disqualified by a filter
roles[1].policies = [{'Policy': ROLE_POLICIES['unused_ec2']}]
roles[1].disqualified_by = ['some_filter']
roles[1].aa_data = 'some_aa_data'
# no AA data
roles[2].policies = [{'Policy': ROLE_POLICIES['all_services_used']}]
roles[2].disqualified_by = []
roles[2].aa_data = None
hooks = {}
mock_get_role_permissions.side_effect = [['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy',
'ec2:AllocateHosts', 'ec2:AssociateAddress'],
['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'],
['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']]
mock_call_hooks.return_value = set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])
mock_get_repoable_permissions.side_effect = [set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])]
minimum_age = 90
repokid.utils.roledata._calculate_repo_scores(roles, minimum_age, hooks)
assert roles[0].repoable_permissions == 2
assert roles[0].repoable_services == ['iam']
assert roles[1].repoable_permissions == 0
assert roles[1].repoable_services == []
assert roles[2].repoable_permissions == 0
assert roles[2].repoable_services == []
def test_get_repoed_policy(self):
policies = ROLE_POLICIES['all_services_used']
repoable_permissions = set(['iam:addroletoinstanceprofile', 'iam:attachrolepolicy', 's3:createbucket'])
rewritten_policies, empty_policies = repokid.utils.roledata._get_repoed_policy(policies, repoable_permissions)
assert rewritten_policies == {'s3_perms': {'Version': '2012-10-17',
'Statement': [{'Action': ['s3:deletebucket'],
'Resource': ['*'],
'Effect': 'Allow'}]}}
assert empty_policies == ['iam_perms']
def test_find_newly_added_permissions(self):
old_policy = ROLE_POLICIES['all_services_used']
new_policy = ROLE_POLICIES['unused_ec2']
new_perms = repokid.utils.roledata.find_newly_added_permissions(old_policy, new_policy)
assert new_perms == set(['ec2:allocatehosts', 'ec2:associateaddress'])
def test_convert_repoable_perms_to_perms_and_services(self):
all_perms = ['a:j', 'a:k', 'b:l', 'c:m', 'c:n']
repoable_perms = ['b:l', 'c:m']
expected_repoed_services = ['b']
expected_repoed_permissions = ['c:m']
assert (repokid.utils.roledata._convert_repoable_perms_to_perms_and_services(all_perms, repoable_perms) ==
(expected_repoed_permissions, expected_repoed_services))
def test_convert_repoed_service_to_sorted_perms_and_services(self):
repoed_services = ['route53', 'ec2', 's3:abc', 'dynamodb:def', 'ses:ghi', 'ses:jkl']
expected_services = ['ec2', 'route53']
expected_permissions = ['dynamodb:def', 's3:abc', 'ses:ghi', 'ses:jkl']
assert repokid.utils.roledata._convert_repoed_service_to_sorted_perms_and_services(repoed_services) == (
expected_permissions, expected_services
)
def test_get_epoch_authenticated(self):
assert(repokid.utils.roledata._get_epoch_authenticated(1545787620000) == (1545787620, True))
assert(repokid.utils.roledata._get_epoch_authenticated(1545787620) == (1545787620, True))
assert(repokid.utils.roledata._get_epoch_authenticated(154578762) == (None, False))
def test_filter_scheduled_repoable_perms(self):
assert repokid.utils.roledata._filter_scheduled_repoable_perms(
['a:b', 'a:c', 'b:a'], ['a:c', 'b']) == ['a:c', 'b:a']
assert repokid.utils.roledata._filter_scheduled_repoable_perms(
['a:b', 'a:c', 'b:a'], ['a', 'b']) == ['a:b', 'a:c', 'b:a']
assert repokid.utils.roledata._filter_scheduled_repoable_perms(
['a:b', 'a:c', 'b:a'], ['a:b', 'a:c']) == ['a:b', 'a:c']
| 1.75 | 2 |
DL_Scripts/image_recognition.py | Matnay/KPIT_Deep_Learning | 1 | 6250 | import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import String
from cv_bridge import CvBridge
import cv2
import numpy as np
import tensorflow as tf
import classify_image
class RosTensorFlow():
def __init__(self):
classify_image.maybe_download_and_extract()
self._session = tf.Session()
classify_image.create_graph()
self._cv_bridge = CvBridge()
self._sub = rospy.Subscriber('/usb_cam/image_raw', Image, self.callback, queue_size=1)
self._pub = rospy.Publisher('result', String, queue_size=1)
self.score_threshold = rospy.get_param('~score_threshold', 0.1)
self.use_top_k = rospy.get_param('~use_top_k', 5)
def callback(self, image_msg):
cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
        # copied from classify_image.py
image_data = cv2.imencode('.jpg', cv_image)[1].tostring()
# Creates graph from saved GraphDef.
softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')
predictions = self._session.run(
softmax_tensor, {'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
# Creates node ID --> English string lookup.
node_lookup = classify_image.NodeLookup()
top_k = predictions.argsort()[-self.use_top_k:][::-1]
for node_id in top_k:
human_string = node_lookup.id_to_string(node_id)
score = predictions[node_id]
if score > self.score_threshold:
rospy.loginfo('%s (score = %.5f)' % (human_string, score))
self._pub.publish(human_string)
def main(self):
rospy.spin()
if __name__ == '__main__':
classify_image.setup_args()
rospy.init_node('rostensorflow')
tensor = RosTensorFlow()
tensor.main()
| 2.53125 | 3 |
plugins/grouputils.py | aviskumar/speedo | 0 | 6251 | <filename>plugins/grouputils.py
# Copyright (C) 2020-2021 by TeamSpeed<EMAIL>, < https://github.com/TeamSpeedo >.
#
# This file is part of < https://github.com/TeamSpeedo/FridayUserBot > project,
# and is released under the "GNU v3.0 License Agreement".
# Please see < https://github.com/TeamSpeedo/blob/master/LICENSE >
#
# All rights reserved.
import asyncio
import os
import time
from asyncio import sleep
from pyrogram.types import ChatPermissions
import pyrogram
from main_start.core.decorators import speedo_on_cmd
from main_start.helper_func.basic_helpers import (
edit_or_reply,
edit_or_send_as_file,
get_text,
get_user,
is_admin_or_owner,
)
from main_start.helper_func.logger_s import LogIt
from main_start.helper_func.plugin_helpers import (
convert_to_image,
convert_vid_to_vidnote,
generate_meme,
)
@speedo_on_cmd(
["silentpin"],
only_if_admin=True,
cmd_help={
"help": "Pin Message Without Sending Notification To Members!",
"example": "{ch}silentpin (reply to message)",
},
)
async def spin(client, message):
engine = message.Engine
if not message.reply_to_message:
        await edit_or_reply(message, engine.get_string("REPLY_TO_PIN"))
        return
try:
await client.pin_chat_message(
message.chat.id,
message.reply_to_message.message_id,
disable_notification=True,
)
except BaseException as e:
await edit_or_reply(
message, engine.get_string("UNABLE_TO_PIN").format(e)
)
return
await edit_or_reply(message, engine.get_string("PINNED"))
@speedo_on_cmd(
["pinloud", "pin"],
only_if_admin=True,
cmd_help={
"help": "Pin Message With Sending Notification To Members!",
"example": "{ch}pin (reply to messages)",
},
)
async def lpin(client, message):
engine = message.Engine
if not message.reply_to_message:
        await edit_or_reply(message, engine.get_string("REPLY_TO_PIN"))
        return
try:
await client.pin_chat_message(
message.chat.id, message.reply_to_message.message_id
)
except BaseException as e:
await edit_or_reply(
message, engine.get_string("UNABLE_TO_PIN").format(e)
)
return
await edit_or_reply(message, engine.get_string("PINNED"))
@speedo_on_cmd(
["unpin", "rmpins"],
only_if_admin=True,
cmd_help={"help": "Unpin All Pinned Messages!", "example": "{ch}rmpins"},
)
async def dpins(client, message):
engine = message.Engine
await client.unpin_all_chat_messages(message.chat.id)
await edit_or_reply(message, engine.get_string("UNPINNED"))
@speedo_on_cmd(
["adminlist", "admins"],
cmd_help={"help": "Get Adminlist Of Chat!", "example": "{ch}adminlist"},
)
async def midhunadmin(client, message):
engine = message.Engine
mentions = ""
starky = get_text(message) or message.chat.id
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
try:
X = await client.get_chat_members(starky, filter="administrators")
ujwal = await client.get_chat(starky)
except BaseException as e:
await pablo.edit(engine.get_string("CANT_FETCH_ADMIN").format("Admins", e))
return
for midhun in X:
if not midhun.user.is_deleted:
link = f'✱ <a href="tg://user?id={midhun.user.id}">{midhun.user.first_name}</a>'
userid = f"<code>{midhun.user.id}</code>"
mentions += f"\n{link} {userid}"
holy = ujwal.username or ujwal.id
messag = f"""
<b>Admins in {ujwal.title} | {holy}</b>
{mentions}
"""
await edit_or_send_as_file(
messag,
pablo,
client,
f"`AdminList Of {holy}!`",
"admin-lookup-result",
"html",
)
@speedo_on_cmd(
["botlist", "bot"],
group_only=True,
cmd_help={"help": "Get List Of Bots In Chat!", "example": "{ch}botlist"},
)
async def bothub(client, message):
engine = message.Engine
buts = "**Bot List** \n\n"
starky = get_text(message) or message.chat.id
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
try:
bots = await client.get_chat_members(starky, filter="bots")
except BaseException as e:
await pablo.edit(engine.get_string("CANT_FETCH_ADMIN").format("Bots", e))
return
for nos, ujwal in enumerate(bots, start=1):
buts += f"{nos}〉 [{ujwal.user.first_name}](tg://user?id={ujwal.user.id}) \n"
await pablo.edit(buts)
@speedo_on_cmd(
["zombies", "delusers"],
cmd_help={
"help": "Remove Deleted Accounts In The Group/Channel!",
"example": "{ch}zombies",
},
)
async def ujwalzombie(client, message):
engine = message.Engine
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
if len(message.text.split()) == 1:
dm = 0
da = 0
dc = 0
async for member in client.iter_chat_members(message.chat.id):
if member.user.is_deleted:
await sleep(1)
if member.status == "member":
dm += 1
elif member.status == "administrator":
da += 1
elif member.status == "creator":
dc += 1
text = "**Zombies Report!** \n\n"
if dm > 0:
text += engine.get_string("TOTAL_ZOMBIES_USERS").format(dm)
if da > 0:
text += engine.get_string("TOTAL_ZOMBIES_ADMINS").format(da)
if dc > 0:
text += engine.get_string("GRP_OWNER_IS_ZOMBIE")
d = dm + da + dc
if d > 0:
text += (engine.get_string("WIPE_THEM"))
await pablo.edit(text)
else:
await pablo.edit(engine.get_string("NO_ZOMBIES"))
return
sgname = message.text.split(None, 1)[1]
if sgname.lower().strip() == "clean":
me = client.me
lol = await is_admin_or_owner(message, me.id)
if not lol:
await pablo.edit(engine.get_string("NOT_ADMIN"))
return
s = 0
f = 0
async for member in client.iter_chat_members(message.chat.id):
if member.user.is_deleted:
try:
await client.kick_chat_member(message.chat.id, member.user.id)
s += 1
except:
f += 1
text = ""
if s > 0:
text += engine.get_string("REMOVED_ZOMBIES").format(s)
if f > 0:
text += (engine.get_string("FAILED_ZOMBIES").format(f))
await pablo.edit(text)
@speedo_on_cmd(
["ban", "bun"],
only_if_admin=True,
group_only=True,
cmd_help={
"help": "Ban Replied User or provide his ID!",
"example": "{ch}ban (reply to user message OR provide his ID)",
},
)
async def ban_world(client, message):
engine = message.Engine
bun = await edit_or_reply(message, engine.get_string("PROCESSING"))
me_m = client.me
me_ = await message.chat.get_member(int(me_m.id))
if not me_.can_restrict_members:
await bun.edit(engine.get_string("NOT_ADMIN"))
return
text_ = get_text(message)
userk, reason = get_user(message, text_)
if not userk:
await bun.edit(engine.get_string("TO_DO").format("Ban"))
return
try:
user_ = await client.get_users(userk)
except BaseException as e:
await bun.edit(engine.get_string("USER_MISSING").format(e))
return
userz = user_.id
if not reason:
reason = "Not Specified!"
if userz == me_m.id:
await bun.edit(engine.get_string("TF_DO_IT").format("Ban"))
return
try:
user_ = await client.get_users(userz)
except BaseException as e:
await bun.edit(engine.get_string("USER_MISSING").format(e))
return
try:
await client.kick_chat_member(message.chat.id, int(user_.id))
except BaseException as e:
await bun.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Ban", e))
return
b = f"**#Banned** \n**User :** [{user_.first_name}](tg://user?id={user_.id}) \n**Chat :** `{message.chat.title}` \n**Reason :** `{reason}`"
await bun.edit(b)
log = LogIt(message)
await log.log_msg(client, b)
@speedo_on_cmd(
["unban", "unbun"],
only_if_admin=True,
group_only=True,
cmd_help={
"help": "UnBan Replied User or provide his ID!",
"example": "{ch}unban (reply to user message OR Provide his id)",
},
)
async def unban_world(client, message):
engine = message.Engine
unbun = await edit_or_reply(message, engine.get_string("PROCESSING"))
me_m = client.me
me_ = await message.chat.get_member(int(me_m.id))
if not me_.can_restrict_members:
await unbun.edit(engine.get_string("NOT_ADMIN"))
return
text_ = get_text(message)
userm, reason = get_user(message, text_)
if not userm:
await unbun.edit(
engine.get_string("TO_DO").format("Un-Ban")
)
return
try:
user_ = await client.get_users(userm)
except BaseException as e:
await unbun.edit(engine.get_string("USER_MISSING").format(e))
return
userz = user_.id
if not reason:
reason = "Not Specified!"
if userz == me_m.id:
await unbun.edit(engine.get_string("TF_DO_IT").format("Un-Ban"))
return
try:
await client.unban_chat_member(message.chat.id, int(user_.id))
except BaseException as e:
await unbun.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Un-Ban", e))
ub = f"**#UnBanned** \n**User :** [{user_.first_name}](tg://user?id={user_.id}) \n**Chat :** `{message.chat.title}` \n**Reason :** `{reason}`"
await unbun.edit(ub)
log = LogIt(message)
await log.log_msg(client, ub)
@speedo_on_cmd(
["promote", "prumote"],
only_if_admin=True,
group_only=True,
cmd_help={
"help": "Promote Replied user or provide his ID!",
"example": "{ch}promote (reply to user message OR provide his ID)",
},
)
async def ujwal_mote(client, message):
engine = message.Engine
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
me_m = client.me
me_ = await message.chat.get_member(int(me_m.id))
if not me_.can_promote_members:
await pablo.edit(engine.get_string("NOT_ADMIN"))
return
asplit = get_text(message)
userl, Res = get_user(message, asplit)
if not userl:
await pablo.edit(
engine.get_string("TO_DO").format("Promote")
)
return
try:
user = await client.get_users(userl)
except BaseException as e:
await pablo.edit(engine.get_string("USER_MISSING").format(e))
return
userz = user.id
if not Res:
Res = "Admeme"
if userz == me_m.id:
await pablo.edit(engine.get_string("TF_DO_IT").format("Promote"))
return
try:
await client.promote_chat_member(
message.chat.id,
user.id,
can_change_info=me_.can_change_info,
can_delete_messages=me_.can_delete_messages,
can_restrict_members=me_.can_restrict_members,
can_invite_users=me_.can_invite_users,
can_pin_messages=me_.can_pin_messages,
can_promote_members=me_.can_promote_members,
)
except BaseException as e:
await pablo.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Promote", e))
return
p = f"**#Promote** \n**User :** [{user.first_name}](tg://user?id={user.id}) \n**Chat :** `{message.chat.title}` \n**Title :** `{Res}`"
await pablo.edit(p)
log = LogIt(message)
await log.log_msg(client, p)
try:
if Res:
await client.set_administrator_title(message.chat.id, user.id, Res)
except:
pass
@speedo_on_cmd(
["demote", "demute"],
only_if_admin=True,
group_only=True,
cmd_help={
"help": "Demote Replied user or provide his ID!",
"example": "{ch}demote (reply to user message OR provide his ID)",
},
)
async def ujwal_demote(client, message):
engine = message.Engine
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
me_m = client.me
await message.chat.get_member(int(me_m.id))
asplit = get_text(message)
usero = get_user(message, asplit)[0]
if not usero:
await pablo.edit(
engine.get_string("TO_DO").format("Demote")
)
return
try:
user = await client.get_users(usero)
except BaseException as e:
await pablo.edit(engine.get_string("USER_MISSING").format(e))
return
userz = user.id
if userz == me_m.id:
await pablo.edit(engine.get_string("TF_DO_IT").format("Demote"))
return
try:
await client.promote_chat_member(
message.chat.id,
user.id,
is_anonymous=False,
can_change_info=False,
can_post_messages=False,
can_edit_messages=False,
can_delete_messages=False,
can_restrict_members=False,
can_invite_users=False,
can_pin_messages=False,
can_promote_members=False,
)
except BaseException as e:
await pablo.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Demote", e))
return
d = f"**#Demote** \n**User :** [{user.first_name}](tg://user?id={user.id}) \n**Chat :** `{message.chat.title}`"
await pablo.edit(d)
log = LogIt(message)
await log.log_msg(client, d)
@speedo_on_cmd(
["mute"],
only_if_admin=True,
group_only=True,
cmd_help={
"help": "Mute Replied user or provide his ID!",
"example": "{ch}mute (reply to user message OR provide his ID)",
},
)
async def ujwal_mute(client, message):
engine = message.Engine
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
me_m = client.me
me_ = await message.chat.get_member(int(me_m.id))
if not me_.can_restrict_members:
await pablo.edit(engine.get_string("NOT_ADMIN"))
return
asplit = get_text(message)
userf = get_user(message, asplit)[0]
if not userf:
await pablo.edit(
engine.get_string("TO_DO").format("Mute")
)
return
try:
user = await client.get_users(userf)
except BaseException as e:
await pablo.edit(engine.get_string("USER_MISSING").format(e))
return
userz = user.id
if userz == me_m.id:
await pablo.edit(engine.get_string("TF_DO_IT").format("Mute"))
return
try:
await client.restrict_chat_member(
message.chat.id, user.id, ChatPermissions(can_send_messages=False)
)
except BaseException as e:
await pablo.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Mute", e))
return
m = f"**#Muted** \n**User :** [{user.first_name}](tg://user?id={user.id}) \n**Chat :** `{message.chat.title}`"
await pablo.edit(m)
log = LogIt(message)
await log.log_msg(client, m)
@speedo_on_cmd(
["unmute"],
only_if_admin=True,
group_only=True,
cmd_help={
"help": "Unmute Replied user or provide his ID!",
"example": "{ch}Unmute (reply to user message OR provide his ID)",
},
)
async def ujwal_unmute(client, message):
engine = message.Engine
pablo = await edit_or_reply(message, engine.get_string("PROCESSING"))
me_m = client.me
me_ = await message.chat.get_member(int(me_m.id))
if not me_.can_restrict_members:
await pablo.edit(engine.get_string("NOT_ADMIN"))
return
asplit = get_text(message)
userf = get_user(message, asplit)[0]
if not userf:
await pablo.edit(
engine.get_string("TO_DO").format("Un-Mute")
)
return
try:
user = await client.get_users(userf)
except BaseException as e:
await pablo.edit(engine.get_string("USER_MISSING").format(e))
return
userz = user.id
if userz == me_m.id:
await pablo.edit(engine.get_string("TF_DO_IT").format("un-mute"))
return
try:
await client.restrict_chat_member(
message.chat.id, user.id, ChatPermissions(can_send_messages=True)
)
except BaseException as e:
await pablo.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Un-mute", e))
return
um = f"**#Un_Muted** \n**User :** [{user.first_name}](tg://user?id={user.id}) \n**Chat :** `{message.chat.title}`"
await pablo.edit(um)
log = LogIt(message)
await log.log_msg(client, um)
@speedo_on_cmd(
["chatinfo", "grpinfo"],
group_only=True,
cmd_help={"help": "Get Info Of The Chat!", "example": "{ch}chatinfo"},
)
async def owo_chat_info(client, message):
engine = message.Engine
s = await edit_or_reply(message, engine.get_string("PROCESSING"))
ujwal = await client.get_chat(message.chat.id)
peer = await client.resolve_peer(message.chat.id)
online_ = await client.send(pyrogram.raw.functions.messages.GetOnlines(peer=peer))
msg = "**Chat Info** \n\n"
msg += f"**Chat-ID :** __{ujwal.id}__ \n"
msg += f"**Verified :** __{ujwal.is_verified}__ \n"
msg += f"**Is Scam :** __{ujwal.is_scam}__ \n"
msg += f"**Chat Title :** __{ujwal.title}__ \n"
msg += f"**Users Online :** __{online_.onlines}__ \n"
if ujwal.photo:
msg += f"**Chat DC :** __{ujwal.dc_id}__ \n"
if ujwal.username:
msg += f"**Chat Username :** __{ujwal.username}__ \n"
if ujwal.description:
msg += f"**Chat Description :** __{ujwal.description}__ \n"
msg += f"**Chat Members Count :** __{ujwal.members_count}__ \n"
if ujwal.photo:
kek = await client.download_media(ujwal.photo.big_file_id)
await client.send_photo(message.chat.id, photo=kek, caption=msg)
await s.delete()
else:
await s.edit(msg)
@speedo_on_cmd(
["purge"],
only_if_admin=True,
cmd_help={
"help": "Purge All Messages Till Replied Message!",
"example": "{ch}purge (reply to message)",
},
)
async def purge(client, message):
engine = message.Engine
start_time = time.time()
message_ids = []
purge_len = 0
event = await edit_or_reply(message, engine.get_string("PROCESSING"))
me_m = client.me
if message.chat.type in ["supergroup", "channel"]:
me_ = await message.chat.get_member(int(me_m.id))
if not me_.can_delete_messages:
await event.edit(engine.get_string("NOT_ADMIN"))
return
if not message.reply_to_message:
await event.edit(engine.get_string("NEEDS_REPLY").format("Message To Purge."))
return
async for msg in client.iter_history(
chat_id=message.chat.id,
offset_id=message.reply_to_message.message_id,
reverse=True,
):
if msg.message_id != message.message_id:
purge_len += 1
message_ids.append(msg.message_id)
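            # delete_messages handles at most 100 ids per call, so flush in batches.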
if len(message_ids) >= 100:
await client.delete_messages(
chat_id=message.chat.id, message_ids=message_ids, revoke=True
)
message_ids.clear()
if message_ids:
await client.delete_messages(
chat_id=message.chat.id, message_ids=message_ids, revoke=True
)
end_time = time.time()
u_time = round(end_time - start_time)
await event.edit(
engine.get_string("PURGE_").format(purge_len, u_time)
)
await asyncio.sleep(3)
await event.delete()
@speedo_on_cmd(
["del"],
cmd_help={
"help": "Delete Replied Message!",
"example": "{ch}del (reply to message)",
},
)
async def delmsgs(client, message):
engine = message.Engine
if not message.reply_to_message:
await message.delete()
return
await client.delete_messages(
chat_id=message.chat.id,
message_ids=[message.reply_to_message.message_id],
revoke=True,
)
await message.delete()
@speedo_on_cmd(
["setgrppic", "gpic"],
cmd_help={
"help": "Set Custom Group Pic, For Lazy Peoples!",
"example": "{ch}setgrppic (reply to image)",
},
)
async def magic_grps(client, message):
engine = message.Engine
msg_ = await edit_or_reply(message, engine.get_string("PROCESSING"))
if not message.reply_to_message:
await msg_.edit(engine.get_string("NEEDS_REPLY").format("image"))
return
me_ = await message.chat.get_member(int(client.me.id))
if not me_.can_change_info:
await msg_.edit(engine.get_string("NOT_ADMIN"))
return
cool = await convert_to_image(message, client)
if not cool:
await msg_.edit(engine.get_string("NEEDS_REPLY").format("a valid media"))
return
if not os.path.exists(cool):
await msg_.edit(engine.get_string("INVALID_MEDIA"))
return
try:
await client.set_chat_photo(message.chat.id, photo=cool)
except BaseException as e:
await msg_.edit(f"`Unable To Set Group Photo! TraceBack : {e}")
return
await msg_.edit(engine.get_string("DONE_"))
| 1.695313 | 2 |
carberretta/bot/cogs/feeds.py | Nereg/Carberretta | 0 | 6252 | <filename>carberretta/bot/cogs/feeds.py
"""
FEEDS
Handles YouTube and Twitch feed notifications.
"""
import datetime as dt
import discord
import feedparser
from apscheduler.triggers.cron import CronTrigger
from discord.ext import commands
from carberretta import Config
from carberretta.utils import DEFAULT_EMBED_COLOUR, chron
LIVE_EMBED_COLOUR = 0x9146FF
VOD_EMBED_COLOUR = 0x3498DB
class Feeds(commands.Cog):
def __init__(self, bot: commands.Bot) -> None:
self.bot = bot
async def call_feed(self) -> dict:
url = f"https://www.youtube.com/feeds/videos.xml?channel_id={Config.YOUTUBE_CHANNEL_ID}&{dt.datetime.utcnow()}"
async with self.bot.session.get(url) as response:
if not 200 <= response.status <= 299:
return []
if not (data := feedparser.parse(await response.text()).entries):
return []
return data
async def call_yt_api(self, video_id: str) -> dict:
url = f"https://www.googleapis.com/youtube/v3/videos?part=contentDetails%2CliveStreamingDetails%2Csnippet&id={video_id}&key={Config.YOUTUBE_API_KEY}"
async with self.bot.session.get(url) as response:
if not 200 <= response.status <= 299:
return []
if not (data := await response.json()):
return []
return data["items"][0]
async def call_twitch_api(self) -> dict:
url = f"https://api.twitch.tv/helix/search/channels?query=carberratutorials"
oauthurl = f"https://id.twitch.tv/oauth2/token?client_id={Config.TWITCH_CLIENT_ID}&client_secret={Config.TWITCH_CLIENT_SECRET}&grant_type=client_credentials"
async with self.bot.session.post(url=oauthurl) as response:
if not 200 <= response.status <= 299:
return []
if not (twitch_tok := (await response.json())["access_token"]):
return []
headers = {
"client-id": f"{Config.TWITCH_CLIENT_ID}",
"Authorization": f"Bearer {twitch_tok}",
}
async with self.bot.session.get(url=url, headers=headers) as response:
if not 200 <= response.status <= 299:
return []
if not (data := await response.json()):
return []
return data["data"][0]
@commands.Cog.listener()
async def on_ready(self) -> None:
if not self.bot.ready.booted:
self.videos_channel = self.bot.get_channel(Config.VIDEOS_ID)
self.videos_role = self.bot.guild.get_role(Config.VIDEOS_ROLE_ID)
self.vods_role = self.bot.guild.get_role(Config.VODS_ROLE_ID)
self.streams_role = self.bot.guild.get_role(Config.STREAMS_ROLE_ID)
self.youtube = self.bot.get_cog("YouTube")
if (await self.bot.application_info()).id == 696804435321552906:
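                # Poll every 3 minutes, staggered at seconds 0/15/30/45 so the
                # feed and the YouTube/Twitch APIs are not all hit at once.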
self.bot.scheduler.add_job(self.get_new_videos, CronTrigger(minute="*/3", second=0))
self.bot.scheduler.add_job(self.get_new_vods, CronTrigger(minute="*/3", second=15))
self.bot.scheduler.add_job(self.get_new_premieres, CronTrigger(minute="*/3", second=30))
self.bot.scheduler.add_job(self.get_new_streams, CronTrigger(minute="*/3", second=45))
self.bot.ready.up(self)
async def get_new_vods(self) -> str:
current_vod = await self.bot.db.field("SELECT ContentValue FROM videos WHERE ContentType = ?", "vod")
for item in await self.call_feed():
data = await self.call_yt_api(item.yt_videoid)
thumbnails = data["snippet"]["thumbnails"]
duration = data["contentDetails"]["duration"]
if current_vod == item.yt_videoid:
# We announced this vod already
return
elif "#VOD" in item.summary:
                # This is a VOD we haven't announced yet
await self.videos_channel.send(
f"Hey {self.vods_role.mention}, a new VOD just went live! Catch up on anything you missed from the last stream!",
embed=discord.Embed.from_dict(
{
"title": item.title,
"description": desc if len(desc := item.summary) <= 500 else f"{desc[:500]}...",
"color": VOD_EMBED_COLOUR,
"url": item.link,
"author": {"name": "<NAME>"},
"image": {"url": thumbnails["maxres"]["url"]},
"footer": {"text": f"Runtime: {self.youtube.get_duration(duration, long=True)}"},
}
),
)
await self.bot.db.execute(
"UPDATE videos SET ContentValue = ? WHERE ContentType = ?", item.yt_videoid, "vod"
)
return item.yt_videoid
async def get_new_videos(self) -> str:
current_vid = await self.bot.db.field("SELECT ContentValue FROM videos WHERE ContentType = ?", "video")
for item in await self.call_feed():
data = await self.call_yt_api(item.yt_videoid)
thumbnails = data["snippet"]["thumbnails"]
duration = data["contentDetails"]["duration"]
if item.yt_videoid == current_vid:
# This is a video we already announced
return
elif "liveStreamingDetails" not in data.keys():
                # A new video is live and it was not a premiere
if "#VOD" not in item.summary:
                    # This isn't a VOD
await self.videos_channel.send(
f"Hey {self.videos_role.mention}, a new video just went live! Come check it out!",
embed=discord.Embed.from_dict(
{
"title": item.title,
"description": desc if len(desc := item.summary) <= 500 else f"{desc[:500]}...",
"color": DEFAULT_EMBED_COLOUR,
"url": item.link,
"author": {"name": "<NAME>"},
"image": {"url": thumbnails["maxres"]["url"]},
"footer": {"text": f"Runtime: {self.youtube.get_duration(duration, long=True)}"},
}
),
)
await self.bot.db.execute(
"UPDATE videos SET ContentValue = ? WHERE ContentType = ?", item.yt_videoid, "video"
)
return item.yt_videoid
async def get_new_premieres(self) -> tuple:
known_premieres = {
_id: [_upcoming, _announced]
for _id, _upcoming, _announced in await self.bot.db.records("SELECT * FROM premieres")
}
for item in await self.call_feed():
data = await self.call_yt_api(item.yt_videoid)
thumbnails = data["snippet"]["thumbnails"]
duration = data["contentDetails"]["duration"]
live_content = data["snippet"]["liveBroadcastContent"]
upcoming = known_premieres[item.yt_videoid][0] if item.yt_videoid in known_premieres.keys() else None
announced = known_premieres[item.yt_videoid][1] if item.yt_videoid in known_premieres.keys() else None
if "liveStreamingDetails" in data.keys():
start_time = data["liveStreamingDetails"]["scheduledStartTime"].strip("Z")
scheduled_time = chron.from_iso(start_time)
if not upcoming and duration != "P0D":
# We have not seen this premiere before
if live_content == "upcoming" and not announced:
# This premiere is upcoming and not live
await self.videos_channel.send(
f"Hey {self.videos_role.mention}, a new premiere is scheduled for {chron.long_date_and_time(scheduled_time)} UTC! Hope to see you there!",
embed=discord.Embed.from_dict(
{
"title": item.title,
"description": desc if len(desc := item.summary) <= 500 else f"{desc[:500]}...",
"color": DEFAULT_EMBED_COLOUR,
"url": item.link,
"author": {"name": "<NAME>"},
"image": {"url": thumbnails["maxres"]["url"]},
"footer": {"text": f"Runtime: {self.youtube.get_duration(duration, long=True)}"},
}
),
)
await self.bot.db.execute(
"REPLACE INTO premieres (VideoID, Upcoming, Announced) VALUES (?, ?, ?)",
item.yt_videoid,
1,
0,
)
return item.yt_videoid, False
elif live_content == "live" and not upcoming and not announced:
                    # The premiere was never upcoming and is now live
await self.videos_channel.send(
f"Hey {self.videos_role.mention}, a new premiere started on {chron.long_date_and_time(scheduled_time)} UTC! Come and join us!",
embed=discord.Embed.from_dict(
{
"title": item.title,
"description": desc if len(desc := item.summary) <= 500 else f"{desc[:500]}...",
"color": DEFAULT_EMBED_COLOUR,
"url": item.link,
"author": {"name": "<NAME>"},
"image": {"url": thumbnails["maxres"]["url"]},
"footer": {"text": f"Runtime: {self.youtube.get_duration(duration, long=True)}"},
}
),
)
await self.bot.db.execute(
"REPLACE INTO premieres (VideoID, Upcoming, Announced) VALUES (?, ?, ?)",
item.yt_videoid,
1,
1,
)
return item.yt_videoid, True
elif not announced:
# A premiere was upcoming, and is now live
await self.videos_channel.send(
f"Hey {self.videos_role.mention}, a new premiere started on {chron.long_date_and_time(scheduled_time)} UTC! Come and join us!",
embed=discord.Embed.from_dict(
{
"title": item.title,
"description": desc if len(desc := item.summary) <= 500 else f"{desc[:500]}...",
"color": DEFAULT_EMBED_COLOUR,
"url": item.link,
"author": {"name": "<NAME>"},
"image": {"url": thumbnails["maxres"]["url"]},
"footer": {"text": f"Runtime: {self.youtube.get_duration(duration, long=True)}"},
}
),
)
await self.bot.db.execute(
"REPLACE INTO premieres (VideoID, Upcoming, Announced) VALUES (?, ?, ?)", item.yt_videoid, 1, 1
)
return item.yt_videoid, True
async def get_new_streams(self) -> tuple:
data = await self.call_twitch_api()
if data:
live_now = await self.bot.db.field("SELECT StreamLive FROM streams WHERE ID = 1")
if data["is_live"] and not live_now:
                # The stream is live and we haven't announced it yet
start = chron.from_iso(data["started_at"].strip("Z"))
message = await self.videos_channel.send(
f"Hey {self.streams_role.mention}, I'm live on Twitch now! Come watch!",
embed=discord.Embed.from_dict(
{
"title": data["title"],
"description": f"**Category: {data['game_name']}**",
"color": LIVE_EMBED_COLOUR,
"url": "https://www.twitch.tv/carberratutorials",
"author": {"name": "<NAME>"},
"thumbnail": {"url": data["thumbnail_url"]},
"footer": {"text": f"Started: {chron.long_date_and_time(start)} UTC"},
}
),
)
await self.bot.db.execute(
"UPDATE streams SET StreamLive = ?, StreamStart = ?, StreamMessage= ? WHERE ID = 1",
1,
start,
message.id,
)
return data["title"], False
elif not data["is_live"] and live_now:
                # The stream is no longer live but it was at the last check (the stream has ended)
await self.bot.db.execute(
"UPDATE streams SET StreamLive = ?, StreamEnd = ? WHERE ID = 1", 0, dt.datetime.utcnow()
)
start, stream_message, end = await self.bot.db.record(
"SELECT StreamStart, StreamMessage, StreamEnd FROM streams WHERE ID = 1"
)
duration = chron.from_iso(end) - chron.from_iso(start)
try:
message = await self.videos_channel.fetch_message(stream_message)
except (discord.NotFound, discord.Forbidden, discord.HTTPException):
return
else:
await message.edit(
content=f"Hey {self.streams_role.mention}, I'm live on Twitch now! Come watch!",
embed=discord.Embed.from_dict(
{
"title": "The stream has ended.",
"description": "**Catch you in the next one!**",
"color": LIVE_EMBED_COLOUR,
"url": "https://www.twitch.tv/carberratutorials",
"author": {"name": "<NAME>"},
"thumbnail": {"url": data["thumbnail_url"]},
"footer": {"text": f"Runtime: {chron.long_delta(duration)}"},
}
),
)
return data["title"], True
@commands.group(name="feed", invoke_without_command=True)
@commands.is_owner()
async def group_feed(self, ctx: commands.Context) -> None:
pass
@group_feed.command(name="video")
@commands.is_owner()
async def command_feed_video(self, ctx: commands.Context) -> None:
last_video = await self.get_new_videos()
await ctx.send(f"Announced video: {last_video}." if last_video else "No new videos.")
@group_feed.command(name="vod")
@commands.is_owner()
async def command_feed_vod(self, ctx: commands.Context) -> None:
last_vod = await self.get_new_vods()
await ctx.send(f"Announced VOD: {last_vod}." if last_vod else "No new VODs.")
@group_feed.command(name="premiere")
@commands.is_owner()
async def command_feed_premiere(self, ctx: commands.Context) -> None:
if not (last_premiere := await self.get_new_premieres()):
await ctx.send("No new premieres.")
else:
await ctx.send(
f"Announced live premiere: {last_premiere[0]}."
if last_premiere[1]
else f"Announced upcoming premiere: {last_premiere[0]}."
)
@group_feed.command(name="stream")
@commands.is_owner()
async def command_feed_stream(self, ctx: commands.Context) -> None:
if not (last_stream := await self.get_new_streams()):
await ctx.send("No new streams.")
else:
await ctx.send(
f"Stream ended: {last_stream[0]}." if last_stream[1] else f"Announced stream: {last_stream[0]}."
)
def setup(bot: commands.Bot) -> None:
bot.add_cog(Feeds(bot))
| 2.4375 | 2 |
gdb/proxy.py | abaire/gdb_sniffer | 1 | 6253 | <gh_stars>1-10
"""Provides a GDB logging proxy.
See https://sourceware.org/gdb/onlinedocs/gdb/Remote-Protocol.html
See https://www.embecosm.com/appnotes/ean4/embecosm-howto-rsp-server-ean4-issue-2.html
"""
from __future__ import annotations
import logging
import socket
from typing import Optional
from typing import Tuple
from .packet import GDBPacket
from net import ip_transport
logger = logging.getLogger(__name__)
class GDBProxy(ip_transport.IPTransport):
"""GDB Remote Serial Protocol proxy."""
def __init__(self, target_addr: Tuple[str, int], colorize: bool = False):
super().__init__(process_callback=self._on_gdb_bytes_read)
self.log_acks = False
self.target_addr = target_addr
self._target: Optional[ip_transport.IPTransport] = None
if colorize:
self.target_color = "\x1b[34m\x1b[47m"
self.gdb_color = "\x1b[30m\x1b[47m"
else:
self.target_color = ""
self.gdb_color = ""
self._gdb_read_buffer: bytearray = bytearray()
self._target_read_buffer: bytearray = bytearray()
def set_connection(self, sock, addr):
super().set_connection(sock, addr)
logger.debug(f"{self.target_color}Connecting to target at {self.target_addr}")
try:
target_sock = socket.create_connection(self.target_addr)
except ConnectionRefusedError:
logger.error(f"{self.target_color}Connection to Target@{self.target_addr} refused.")
self.close()
return
self._target = ip_transport.IPTransport(self._on_target_bytes_read, f"Target@{self.target_addr}")
self._target.set_connection(target_sock, self.target_addr)
self._add_sub_connection(self._target)
def _on_gdb_bytes_read(self, _ignored):
buffer = self._read_buffer
self.shift_read_buffer(len(buffer))
self._append_gdb_read_buffer(buffer)
self._target._write_buffer.extend(buffer)
def _on_target_bytes_read(self, _ignored):
buffer = self._target.read_buffer
self._target.shift_read_buffer(len(buffer))
self._append_target_read_buffer(buffer)
self._write_buffer.extend(buffer)
def _append_gdb_read_buffer(self, data: bytes):
self._unescape_and_append(self._gdb_read_buffer, data)
bytes_consumed = self._log_rsp_bytes(f"{self.gdb_color}GDB :", self._gdb_read_buffer)
if bytes_consumed:
self._gdb_read_buffer = bytearray(self._gdb_read_buffer[bytes_consumed:])
def _append_target_read_buffer(self, data: bytes):
self._unescape_and_append(self._target_read_buffer, data)
bytes_consumed = self._log_rsp_bytes(f"{self.target_color}TARGET :", self._target_read_buffer)
if bytes_consumed:
self._target_read_buffer = bytearray(self._target_read_buffer[bytes_consumed:])
@staticmethod
def _unescape_and_append(buffer: bytearray, data: bytes):
# RSP uses '}' as an escape character. Escapes are processed in this method
# before adding to the read buffer to simplify parsing.
if not data:
return
# Process any left over escapes.
if buffer and buffer[-1] == GDBPacket.RSP_ESCAPE_CHAR:
buffer[-1] = data[0] ^ 0x20
data = data[1:]
        escape_char_index = data.find(GDBPacket.RSP_ESCAPE_CHAR)
        while escape_char_index >= 0:
            if escape_char_index == len(data) - 1:
                # If there are no more characters after the escape char, just add it to the buffer and let it be
                # processed when more data is received.
                break
            if escape_char_index:
                buffer.extend(data[:escape_char_index])
            unescaped = data[escape_char_index + 1] ^ 0x20
            buffer.append(unescaped)
            data = data[escape_char_index + 2:]
            # Look for the next escape character in the remaining data.
            escape_char_index = data.find(GDBPacket.RSP_ESCAPE_CHAR)
buffer.extend(data)
def _log_rsp_bytes(self, log_prefix: str, buffer: bytearray) -> int:
total_bytes_consumed = 0
pkt = GDBPacket()
buffer_len = len(buffer)
while total_bytes_consumed < buffer_len:
if buffer[0] == ord("+"):
if self.log_acks:
logger.info(f"{log_prefix} <<ack>>")
total_bytes_consumed += 1
buffer = buffer[1:]
continue
if buffer[0] == ord("-"):
if self.log_acks:
logger.info(f"{log_prefix} <<nack>>")
total_bytes_consumed += 1
buffer = buffer[1:]
continue
if buffer[0] == 0x03:
logger.info(f"{log_prefix} <<Interrupt request>>")
total_bytes_consumed += 1
buffer = buffer[1:]
continue
leader = buffer.find(GDBPacket.PACKET_LEADER)
if leader > 0:
logger.warning(
f"{log_prefix} Skipping {leader} non-leader bytes {buffer[:total_bytes_consumed + leader]}"
)
buffer = buffer[leader:]
bytes_consumed = pkt.parse(buffer)
buffer = buffer[bytes_consumed:]
if not bytes_consumed:
break
total_bytes_consumed += bytes_consumed
if pkt.data:
logger.info(f"{log_prefix} Received packet {pkt}")
else:
logger.info(f"{log_prefix} Received empty packet")
if len(buffer):
logger.debug(
f"{log_prefix} After processing: [{len(buffer)}] {buffer}"
)
return total_bytes_consumed
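# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module):
# accept one debugger connection locally and forward the RSP traffic to a stub
# at 127.0.0.1:2159. Both port numbers are placeholders, and the read/write
# pump that normally services the transports lives in net.ip_transport, so it
# is not reproduced here.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.bind(("127.0.0.1", 7777))
    listener.listen(1)
    proxy = GDBProxy(("127.0.0.1", 2159), colorize=True)
    client_sock, client_addr = listener.accept()
    proxy.set_connection(client_sock, client_addr)
    # From here the application's select()/poll() loop would drive the proxy.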
| 2.25 | 2 |
pylayers/em/openems/test/Rect_Waveguide.py | usmanwardag/pylayers | 143 | 6254 | <filename>pylayers/em/openems/test/Rect_Waveguide.py<gh_stars>100-1000
from openems.openems import *
import numpy as np
# A simple simulation
#
# FDTD Simulation Setting
#
F = FDTD()
F.add(Exc(typ='Sinus',f0=100000))
F.add(BoundaryCond(['PMC','PMC','PEC','PEC','MUR','MUR']))
#
# CSX (Geometry setting)
#
C = CSX()
# The Box is added as a property
C.add(Excitation('excitation'),p=Box(P1=[-10,-10,0],P2=[10,10,0],Pr=0))
C.add(DumpBox('Et'),p=Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0))
C.add(RectilinearGrid(np.arange(-10,11,1),np.arange(-10,11,1),np.arange(-10,11,1)))
C.add(Polyhedron())
S = OpenEMS(F,C)
S.save(filename='RectWaveguide.xml')
#gnd = Matter('gnd')
#sphere = Matter('sphere')
#patch = Matter('patch')
#substrate = Matter('substrate',typ='Ma',Epsilon="3.38",Kappa="0.00046")
#cdgsht = Matter('copper',typ='Cs',conductivity="56e6",thickness="40e-6")
#b1 = Box(P1=[0,0,0],P2=[100,100,200],Pr=0)
#b2 = Box(P1=[0,0,0],P2=[10,20,30],Pr=10)
#b4 = Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0)
#s1 = Sphere(P=[0,0,0],R=100,Pr=50)
#dump = DumpBox()
#C.add(gnd)
#C.add(patch)
#C.add(substrate)
#C.add(sphere)
#C.add(cdgsht)
#C.add(exc)
#C.add(dump)
#C.set('gnd',b1)
#C.set('gnd',b2)
#C.set('sphere',s1)
#C.set('copper',b1)
#C.set('copper',b2)
#C.set('Et',b4)
#C.save(filename='structure.xml')
##C.AddBox(prop='ConductingSheet',name='copper',P1=[0,-50,200],P2=[1000,50,200],Pri=10)
##C.AddCylinder(prop='Metal',name='cyl0',P1=[0,0,0],P2=[0,0,100],Rad=50,Pri=10)
#
| 1.851563 | 2 |
DataPreprocessing/_segment_Y.py | vd1371/CBSA | 0 | 6255 | import numpy as np
def segment_Y(Y, **params):
Y_segments = params.get("Y_segments")
Y_quantile = params.get("Y_quantile")
print("segmenting Y")
Y = Y.values.reshape(-1)
Y_quantile = np.quantile(Y, Y_quantile, axis = 0)
bigger_mask = (Y > Y_quantile).copy()
smaller_mask = (Y <= Y_quantile).copy()
Y[bigger_mask] = 1
Y[smaller_mask] = 0
Y = Y.astype(int)
return Y | 2.796875 | 3 |
WifiEnigma/BattleAI/question.py | Puzzlebox-IMT/Puzzlebox | 0 | 6256 | import mysql.connector
import random
from voice import synthetize_voice, delete_wav
def AllQuestionAI(id_theme):
i = 0
    # Connect to the database
conn = mysql.connector.connect(host="localhost",
user="phpmyadmin", password="<PASSWORD>",
database="Puzzlebox")
cursor = conn.cursor()
    # Execute the query against the database
query = ("SELECT * FROM Question INNER JOIN themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s")
cursor.execute(query, (id_theme, ))
    # Fetch the results
rows = cursor.fetchall()
if rows:
for line in rows:
i += 1
enonce = line[1]
proposition1 = line[2]
proposition2 = line[3]
proposition3 = line[4]
proposition4 = line[5]
reponse = line[5]
print("*******************************************************************************")
print(" QUESTION ",i," ")
print("*******************************************************************************")
print("ENONCE : ", enonce)
print("PROPOSITION 1 : ", proposition1)
print("PROPOSITION 2 : ", proposition2)
print("PROPOSITION 3 : ", proposition3)
print("PROPOSITION 4 : ", proposition4)
print("REPONSE : ", reponse)
else:
print("Ce thème ne contient pas de questions")
def questionAI(id_theme):
i = 0
    # Connect to the database
conn = mysql.connector.connect(host="localhost",
user="phpmyadmin", password="<PASSWORD>",
database="Puzzlebox")
cursor = conn.cursor()
    # Execute the query against the database
query = ("SELECT * FROM Question INNER JOIN themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s")
cursor.execute(query, (id_theme, ))
    # Fetch the results
rows = cursor.fetchall()
if rows:
nb_rows = len(rows)
num_question = random.randint(1, nb_rows)
        # The list index starts at zero, so shift the number down by one
num_question = num_question - 1
question = rows[num_question]
        result = [] # List which stores the query results
        # Collect the tuple fields
result.append(question[1])
result.append(question[2])
result.append(question[3])
result.append(question[4])
result.append(question[5])
result.append(question[5]) #This last one is the answer
print("*******************************************************************************")
print(" QUESTION ",num_question+1," ")
print("*******************************************************************************")
print("ENONCE : ", result[0])
print("PROPOSITION 1 : ", result[1])
print("PROPOSITION 2 : ", result[2])
print("PROPOSITION 3 : ", result[3])
print("PROPOSITION 4 : ", result[4])
print("REPONSE : ", result[5])
#complete_question = ''.join(complete_question) #Convert tuple into string
return result
else:
print("Ce thème ne contient pas de questions")
def tell_question(question):
synthetize_voice(question[0])
for i in range(1,5) :
num_prop = "Proposition {} ".format(i)
num_prop = ''.join(num_prop)
line = ''.join(question[i])
line = num_prop + line
synthetize_voice(line)
delete_wav()
def quiz():
counter = 1
while(counter <= 5):
        questionAI(1)
        counter += 1
if (__name__ == '__main__'):
result = questionAI(1)
tell_question(result)
| 2.703125 | 3 |
toy-amr/flux_functions.py | IanHawke/toy-amr | 5 | 6257 | import numpy
def lax_friedrichs(cons_minus, cons_plus, simulation, tl):
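    # dx/dt is the fastest speed the grid can resolve; Lax-Friedrichs uses it as
    # the dissipation coefficient when averaging the left/right fluxes below.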
alpha = tl.grid.dx / tl.dt
flux = numpy.zeros_like(cons_minus)
prim_minus, aux_minus = simulation.model.cons2all(cons_minus, tl.prim)
prim_plus, aux_plus = simulation.model.cons2all(cons_plus , tl.prim)
f_minus = simulation.model.flux(cons_minus, prim_minus, aux_minus)
f_plus = simulation.model.flux(cons_plus, prim_plus, aux_plus )
flux[:, 1:-1] = 0.5 * ( (f_plus[:,0:-2] + f_minus[:,1:-1]) + \
alpha * (cons_plus[:,0:-2] - cons_minus[:,1:-1]) )
return flux
def upwind(cons_minus, cons_plus, simulation, patch):
flux = numpy.zeros_like(cons_minus)
flux[:, 1:-1] = simulation.model.riemann_problem_flux(cons_plus [:, 0:-2],
cons_minus[:, 1:-1])
return flux
| 2.203125 | 2 |
pi/auth.py | vmagamedov/pi | 7 | 6258 | <gh_stars>1-10
import re
import json
import base64
import codecs
import os.path
import asyncio
import subprocess
_PREFIX = 'docker-credential-'
def read_config():
path = os.path.expanduser('~/.docker/config.json')
if not os.path.exists(path):
return {}
with codecs.open(path, encoding='utf-8') as f:
json_data = f.read()
return json.loads(json_data)
async def _read_creds(creds_store, server):
if not re.match(r'^\w+$', creds_store, re.ASCII):
raise ValueError('Invalid credsStore: {!r}'.format(creds_store))
proc = await asyncio.create_subprocess_exec(
_PREFIX + creds_store, 'get',
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = await proc.communicate(server.encode('ascii'))
if proc.returncode != 0:
return None
else:
data = json.loads(stdout)
return {
'Username': data['Username'],
'Password': data['<PASSWORD>'],
'ServerAddress': server,
}
def _decode_auth(auth_data, server):
auth_data_decoded = base64.b64decode(auth_data).decode('utf-8')
username, _, password = auth_data_decoded.partition(':')
return {
'Username': username,
'Password': password,
'ServerAddress': server,
}
async def resolve_auth(config, server):
config_auths = config.get('auths')
if config_auths is None:
return None
server_auth = config_auths.get(server)
if server_auth is not None:
auth_data = server_auth.get('auth')
if auth_data is not None:
return _decode_auth(auth_data, server)
creds_store = config.get('credsStore')
if creds_store is not None:
return await _read_creds(creds_store, server)
return None
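# ---------------------------------------------------------------------------
# Illustrative helper (not part of the original module) showing how the pieces
# fit together; the image name is a placeholder and attaching the result to an
# HTTP request is left to the caller.
async def _example_registry_auth(image_name='alpine:latest'):
    config = read_config()
    auth = await resolve_auth(config, server_name(image_name))
    # encode_header() yields the base64 payload registries expect in the
    # X-Registry-Auth header; None means no credentials were found.
    return encode_header(auth) if auth else None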
def server_name(image_name):
registry, _, name = image_name.partition('/')
if not name:
return 'docker.io'
else:
return registry
def encode_header(auth):
json_data = json.dumps(auth)
return base64.urlsafe_b64encode(json_data.encode('ascii'))
| 2.25 | 2 |
etl/transform.py | ACWI-SOGW/ngwmn_monitoring_locations_etl | 1 | 6259 | <filename>etl/transform.py
"""
Transform the data into a form that
works with the WELL_REGISTRY_STG table.
"""
import re
def mapping_factory(mapping):
def map_func(key):
if key is not None:
ora_val = mapping.get(key.lower())
else:
ora_val = None
return ora_val
return map_func
WELL_TYPES = {
'surveillance': 1,
'trend': 2,
'special': 3,
}
map_well_type = mapping_factory(WELL_TYPES)
WELL_PURPOSE = {
'dedicated monitoring/observation': 1,
'other': 2
}
map_well_purpose = mapping_factory(WELL_PURPOSE)
QW_WELL_CHARS = {
'background': 1,
'suspected/anticipated changes': 2,
'known changes': 3
}
map_qw_well_chars = mapping_factory(QW_WELL_CHARS)
WL_WELL_CHARS = {
'background': 1,
'suspected/anticipated changes': 2,
'known changes': 3,
'unknown': 999
}
map_wl_well_chars = mapping_factory(WL_WELL_CHARS)
def to_flag(flag):
return '1' if flag else '0'
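# Quick illustration (added comment, not used by the module): lookups through the
# factory-built mappers are case-insensitive and unknown or None keys map to None,
# e.g. map_well_type('Trend') -> 2, map_well_type('bogus') -> None, to_flag(True) -> '1'.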
def transform_mon_loc_data(ml_data):
"""
Map the fields from the API JSON response to
the fields in the WELL_REGISTRY_STG table with
appropriate foreign key values.
"""
mapped_data = dict()
mapped_data['AGENCY_CD'] = ml_data['agency']['agency_cd']
mapped_data['AGENCY_NM'] = ml_data['agency']['agency_nm']
mapped_data['AGENCY_MED'] = ml_data['agency']['agency_med']
mapped_data['SITE_NO'] = ml_data['site_no']
mapped_data['SITE_NAME'] = ml_data['site_name']
mapped_data['DEC_LAT_VA'] = ml_data['dec_lat_va']
mapped_data['DEC_LONG_VA'] = ml_data['dec_long_va']
mapped_data['HORZ_DATUM'] = ml_data['horizontal_datum']
mapped_data['ALT_VA'] = ml_data['alt_va']
mapped_data['ALT_DATUM_CD'] = ml_data['altitude_datum']
try:
mapped_data['NAT_AQUIFER_CD'] = ml_data['nat_aqfr']['nat_aqfr_cd']
mapped_data['NAT_AQFR_DESC'] = ml_data['nat_aqfr']['nat_aqfr_desc']
except (AttributeError, KeyError, TypeError):
mapped_data['NAT_AQUIFER_CD'] = None
mapped_data['NAT_AQFR_DESC'] = None
mapped_data['LOCAL_AQUIFER_NAME'] = ml_data['local_aquifer_name']
mapped_data['AQFR_CHAR'] = ml_data['aqfr_type']
mapped_data['QW_SN_FLAG'] = to_flag(ml_data['qw_sn_flag'])
mapped_data['QW_BASELINE_FLAG'] = to_flag(ml_data['qw_baseline_flag'])
mapped_data['QW_WELL_CHARS'] = map_qw_well_chars(ml_data['qw_well_chars'])
mapped_data['QW_WELL_PURPOSE'] = map_well_purpose(ml_data['qw_well_purpose'])
mapped_data['QW_SYS_NAME'] = ml_data['qw_network_name']
mapped_data['WL_SN_FLAG'] = to_flag(ml_data['wl_sn_flag'])
mapped_data['WL_BASELINE_FLAG'] = to_flag(ml_data['wl_baseline_flag'])
mapped_data['WL_WELL_CHARS'] = map_wl_well_chars(ml_data['wl_well_chars'])
mapped_data['WL_WELL_PURPOSE'] = map_well_purpose(ml_data['wl_well_purpose'])
mapped_data['WL_SYS_NAME'] = ml_data['wl_network_name']
mapped_data['DATA_PROVIDER'] = None
mapped_data['DISPLAY_FLAG'] = to_flag(ml_data['display_flag'])
mapped_data['WL_DATA_PROVIDER'] = None
mapped_data['QW_DATA_PROVIDER'] = None
mapped_data['LITH_DATA_PROVIDER'] = None
mapped_data['CONST_DATA_PROVIDER'] = None
mapped_data['WELL_DEPTH'] = ml_data['well_depth']
mapped_data['LINK'] = ml_data['link']
mapped_data['INSERT_DATE'] = ml_data['insert_date']
mapped_data['UPDATE_DATE'] = ml_data['update_date']
mapped_data['WL_WELL_PURPOSE_NOTES'] = ml_data['wl_well_purpose_notes']
mapped_data['QW_WELL_PURPOSE_NOTES'] = ml_data['qw_well_purpose_notes']
mapped_data['INSERT_USER_ID'] = ml_data['insert_user']
mapped_data['UPDATE_USER_ID'] = ml_data['update_user']
mapped_data['WL_WELL_TYPE'] = map_well_type(ml_data['wl_well_type'])
mapped_data['QW_WELL_TYPE'] = map_well_type(ml_data['qw_well_type'])
mapped_data['LOCAL_AQUIFER_CD'] = None
mapped_data['REVIEW_FLAG'] = None
try:
mapped_data['STATE_CD'] = ml_data['state']['state_cd']
except (AttributeError, KeyError, TypeError):
mapped_data['STATE_CD'] = None
try:
mapped_data['COUNTY_CD'] = ml_data['county']['county_cd']
except (AttributeError, KeyError, TypeError):
mapped_data['COUNTY_CD'] = None
try:
mapped_data['COUNTRY_CD'] = ml_data['country']['country_cd']
except (AttributeError, KeyError, TypeError):
mapped_data['COUNTRY_CD'] = None
mapped_data['WELL_DEPTH_UNITS'] = ml_data['well_depth_units']['unit_id'] if ml_data['well_depth_units'] else None
mapped_data['ALT_UNITS'] = ml_data['altitude_units']['unit_id'] if ml_data['altitude_units'] else None
mapped_data['SITE_TYPE'] = ml_data['site_type']
mapped_data['HORZ_METHOD'] = ml_data['horz_method']
mapped_data['HORZ_ACY'] = ml_data['horz_acy']
mapped_data['ALT_METHOD'] = ml_data['alt_method']
mapped_data['ALT_ACY'] = ml_data['alt_acy']
return mapped_data
def date_format(mapped_data):
# fix missing fractions of a second
if re.match(r".*:\d\dZ$", mapped_data['INSERT_DATE']):
mapped_data['INSERT_DATE'] = mapped_data['INSERT_DATE'][:-1] + ".0Z"
if re.match(r".*:\d\dZ$", mapped_data['UPDATE_DATE']):
mapped_data['UPDATE_DATE'] = mapped_data['UPDATE_DATE'][:-1] + ".0Z"
| 2.875 | 3 |
django_reporter_pro/config/model_configs.py | shamilison/django-reporter-pro | 0 | 6260 | <reponame>shamilison/django-reporter-pro
# Created by shamilsakib at 04/10/20
BASE_MODEL = None | 0.898438 | 1 |
DPSparkImplementations/paf_kernels.py | TEAlab/DPSpark | 0 | 6261 | <filename>DPSparkImplementations/paf_kernels.py
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright (c) 2019 Tealab@SBU"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import numpy as np
import numba as nb
'''
Iterative kernels
'''
def update_iter(u_block, x_block, n, I_, J_, K_):
return _update_iter(np.ascontiguousarray(u_block), np.ascontiguousarray(x_block), n, I_, J_, K_)
@nb.jit(nopython=True)
def _update_iter(u_block, x_block, n, I_, J_, K_):
# For testing purposes, rather than passing f_matrix_broadcast, we call this function
def f_matrix(i, j):
return float(i+j)
for k in range(x_block.shape[0]-1, -1, -1):
K = K_*x_block.shape[0]+k
for j in range(x_block.shape[0]-1, -1, -1):
J = J_*x_block.shape[0]+j
for i in range(x_block.shape[0]-1, -1, -1):
I = I_*x_block.shape[0]+i
min1 = min(K-2, n-3)
min2 = min(J-1, n-4)
if ((K < n) and (K >= 3) and (J <= min1) and (J >= I+1) and (I <= min2)):
x_block[i, j] = max(x_block[i, j], u_block[j+1, k] + f_matrix(J+1, min(K, 2*J-I+1)))
return x_block
def funcA_iter(block_info, n):
((I_, J_), x_block) = block_info
return update_iter(x_block, x_block, n, I_, J_, I_)
def funcX_iter(block_info, u_block_info, n):
((I_, J_), x_block) = block_info
((UI_, UJ_), u_block) = u_block_info
return update_iter(u_block, x_block, n, I_, J_, UJ_)
| 2.296875 | 2 |
terrakg/rates.py | terrapain/terrakg | 0 | 6262 | from terra_sdk.exceptions import LCDResponseError
from terrakg import logger
# Logging
from terrakg.client import ClientContainer
logger = logger.get_logger(__name__)
class Rates:
"""
Access the most recent rates.
"""
def __init__(self, client: ClientContainer):
self.client = client
def get_token_quote_and_fees(self, token_contract: str, pair: str, amount: int = 1000000, reverse: bool = False):
"""
Returns the price for `amount` of the token `pair` (exchange is included in pair).
Set `reverse` to true to get the inverse price.
"""
desc, action, result_key = ("reverse_simulation", "ask_asset", "offer_amount") if reverse else (
"simulation", "offer_asset", "return_amount")
query_msg = {
desc: {
action: {
"amount": str(amount),
"info": {"token": {
"contract_addr": token_contract
}
}
}
}
}
try:
result = self.client.lcd_client.wasm.contract_query(pair, query_msg)
return result[result_key], result['commission_amount']
except LCDResponseError as e:
logger.warning(f"Issue with price query: {e}")
return None
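# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; the contract and pair addresses below are
# placeholders, not real Terra addresses):
#
#   rates = Rates(client)                      # client: a ClientContainer
#   quote = rates.get_token_quote_and_fees(
#       token_contract="terra1<token-contract>",
#       pair="terra1<pair-contract>",
#       amount=1_000_000,                      # 1 token at 6 decimals
#   )
#   if quote is not None:
#       return_amount, commission = quote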
| 2.484375 | 2 |
src/tracking_module.py | HonzaKlicpera/Effective-footage-processing-Blender-add-on | 1 | 6263 | import bpy
import os, glob
from pathlib import Path
from enum import Enum
from abc import ABC, abstractmethod
import csv
from . import keying_module
def export_tracking_data(self, context):
clip = context.space_data.clip
clip_name = os.path.splitext(clip.name)[0]
tracker_name = context.scene.tracking_local.tracker_name
output_path = os.path.join(keying_module.get_abs_output_path(context),clip_name)
keying_module.create_directory(output_path)
file = open(os.path.join(output_path,clip_name+".csv"), "w", newline='')
writer = csv.writer(file, delimiter=',')
multiplier = context.scene.tracking_local.tracking_multiplier
tracker = clip.tracking.tracks.get(tracker_name)
if tracker is not None:
prev = tracker.markers[0].co[0]
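        # Each CSV row is the marker's frame-to-frame horizontal (x) displacement,
        # scaled by the user-configured multiplier.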
for m in tracker.markers:
writer.writerow([(m.co[0] - prev) * multiplier])
prev = m.co[0]
self.report({"INFO"},"TRACKER SUCESSFULLY EXPORTED")
else:
self.report({"ERROR"},"TRACKER NOT FOUND")
file.close()
#----------------------------------------
# PROPERTIES
#----------------------------------------
class TrackingSceneProps(bpy.types.PropertyGroup):
tracker_name: bpy.props.StringProperty \
(
name = "Track name",
description = "Name of the tracker for data export",
)
tracking_multiplier: bpy.props.FloatProperty \
(
name = "Distance multiplier",
description = "The exported tracking distance gets multiplied by this value",
default = 1,
min = 0.0001
)
class TrackingPanel(bpy.types.Panel):
bl_label = "Tracking Panel"
bl_idname = "SCENE_PT_tracking_rendering"
bl_space_type = "CLIP_EDITOR"
bl_region_type = "UI"
bl_context = "render"
def draw(self, context):
layout = self.layout
scene = context.scene
box = layout.box()
box.row().label(text = "Tracking export")
box.row().prop(scene.tracking_local, "tracker_name")
box.row().prop(scene.tracking_local, "tracking_multiplier")
box.row().operator("tracking.export_data")
class TrackingExportDataOp(bpy.types.Operator):
bl_idname = "tracking.export_data"
bl_label = "Export Data"
bl_description = "Export the tracking data of the chosen tracker"
def execute(self, context):
export_tracking_data(self, context)
return {"FINISHED"}
classes = (
TrackingExportDataOp,
TrackingPanel,
TrackingSceneProps
)
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.Scene.tracking_local = bpy.props.PointerProperty(type=TrackingSceneProps)
def unregister():
for cls in reversed(classes):
bpy.utils.unregister_class(cls)
del bpy.types.Scene.tracking_local | 2.40625 | 2 |
nodes/2.x/python/View.ViewTemplate.py | andydandy74/ClockworkForDynamo | 147 | 6264 | import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
def GetViewTemplate(view):
if not view: return None
elif hasattr(view, "ViewTemplateId"):
if view.ViewTemplateId.IntegerValue == -1: return None
else: return view.Document.GetElement(view.ViewTemplateId)
else: return None
views = UnwrapElement(IN[0])
if isinstance(IN[0], list): OUT = [GetViewTemplate(x) for x in views]
else: OUT = GetViewTemplate(views) | 1.96875 | 2 |
infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py | bohdana-kuzmenko/incubator-dlab | 0 | 6265 | <filename>infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
#!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import json
import time
from fabric.api import *
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import sys
import os
import uuid
import logging
from Crypto.PublicKey import RSA
if __name__ == "__main__":
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.INFO,
filename=local_log_filepath)
try:
os.environ['exploratory_name']
except:
os.environ['exploratory_name'] = ''
if os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])):
time.sleep(30)
print('Generating infrastructure names and tags')
dataproc_conf = dict()
try:
dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
except:
dataproc_conf['exploratory_name'] = ''
try:
dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-')
except:
dataproc_conf['computational_name'] = ''
dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
dataproc_conf['key_name'] = os.environ['conf_key_name']
dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
dataproc_conf['region'] = os.environ['gcp_region']
dataproc_conf['zone'] = os.environ['gcp_zone']
dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'])
dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'],
dataproc_conf['exploratory_name'], dataproc_conf['computational_name'])
dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'])
dataproc_conf['bucket_name'] = '{}-{}-bucket'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'])
dataproc_conf['release_label'] = os.environ['dataproc_version']
dataproc_conf['cluster_labels'] = {
os.environ['notebook_instance_name']: "not-configured",
"name": dataproc_conf['cluster_name'],
"sbn": dataproc_conf['service_base_name'],
"user": dataproc_conf['edge_user_name'],
"notebook_name": os.environ['notebook_instance_name'],
"product": "dlab",
"computational_name": dataproc_conf['computational_name']
}
dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],
dataproc_conf['edge_user_name'])
    service_account_email = "<EMAIL>".format(dataproc_conf['dataproc_service_account_name'],
os.environ['gcp_project_id'])
dataproc_conf['edge_instance_hostname'] = '{0}-{1}-edge'.format(dataproc_conf['service_base_name'], dataproc_conf['edge_user_name'])
dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']
edge_status = GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname'])
if edge_status != 'RUNNING':
logging.info('ERROR: Edge node is unavailable! Aborting...')
print('ERROR: Edge node is unavailable! Aborting...')
ssn_hostname = GCPMeta().get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn')
put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'], ssn_hostname)
append_result("Edge node is unavailable")
sys.exit(1)
print("Will create exploratory environment with edge node as access point as following: ".format(json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',', ': '))))
logging.info(json.dumps(dataproc_conf))
local('touch /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
local("echo Waiting for changes to propagate; sleep 10")
dataproc_cluster = json.loads(open('/root/templates/dataengine-service_cluster.json').read().decode('utf-8-sig'))
dataproc_cluster['projectId'] = os.environ['gcp_project_id']
dataproc_cluster['clusterName'] = dataproc_conf['cluster_name']
dataproc_cluster['labels'] = dataproc_conf['cluster_labels']
dataproc_cluster['config']['configBucket'] = dataproc_conf['bucket_name']
dataproc_cluster['config']['gceClusterConfig']['serviceAccount'] = service_account_email
dataproc_cluster['config']['gceClusterConfig']['zoneUri'] = dataproc_conf['zone']
dataproc_cluster['config']['gceClusterConfig']['subnetworkUri'] = dataproc_conf['subnet']
dataproc_cluster['config']['masterConfig']['machineTypeUri'] = os.environ['dataproc_master_instance_type']
dataproc_cluster['config']['workerConfig']['machineTypeUri'] = os.environ['dataproc_slave_instance_type']
dataproc_cluster['config']['masterConfig']['numInstances'] = int(os.environ['dataproc_master_count'])
dataproc_cluster['config']['workerConfig']['numInstances'] = int(os.environ['dataproc_slave_count'])
if int(os.environ['dataproc_preemptible_count']) != 0:
dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(os.environ['dataproc_preemptible_count'])
else:
del dataproc_cluster['config']['secondaryWorkerConfig']
dataproc_cluster['config']['softwareConfig']['imageVersion'] = dataproc_conf['release_label']
ssh_user_pubkey = open(os.environ['conf_key_dir'] + os.environ['edge_user_name'] + '.pub').read()
key = RSA.importKey(open(dataproc_conf['key_path'], 'rb').read())
ssh_admin_pubkey = key.publickey().exportKey("OpenSSH")
dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\n{0}:{2}'.format(dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey)
dataproc_cluster['config']['gceClusterConfig']['tags'][0] = dataproc_conf['cluster_tag']
try:
logging.info('[Creating Dataproc Cluster]')
print('[Creating Dataproc Cluster]')
params = "--region {0} --bucket {1} --params '{2}'".format(dataproc_conf['region'], dataproc_conf['bucket_name'], json.dumps(dataproc_cluster))
try:
local("~/scripts/{}.py {}".format('dataengine-service_create', params))
except:
traceback.print_exc()
raise Exception
keyfile_name = "/root/keys/{}.pem".format(dataproc_conf['key_name'])
local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed to create Dataproc Cluster.", str(err))
local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
sys.exit(1)
| 1.359375 | 1 |
02.py | mattias-lundell/aoc2021 | 0 | 6266 |
test = """forward 5
down 5
forward 8
up 3
down 8
forward 2
"""
def part1(lines):
h = 0
d = 0
for line in lines:
direction, delta = line.split()
delta = int(delta)
if direction == 'forward':
h += delta
elif direction == 'down':
d += delta
elif direction == 'up':
d -= delta
print(h*d)
def part2(lines):
h = 0
d = 0
a = 0
for line in lines:
direction, delta = line.split()
delta = int(delta)
print(direction, delta)
if direction == 'forward':
h += delta
d += (delta * a)
elif direction == 'down':
a += delta
elif direction == 'up':
a -= delta
print(h*d)
if __name__ == '__main__':
part1(test.splitlines())
part1(open('in02.txt').readlines())
part2(test.splitlines())
part2(open('in02.txt').readlines())
| 3.5 | 4 |
associations/migrations/0001_initial.py | ollc-code/django-back | 0 | 6267 | <reponame>ollc-code/django-back<gh_stars>0
# Generated by Django 3.1.3 on 2020-11-09 08:56
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Associations',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('association_name', models.CharField(max_length=100)),
('incharge', models.CharField(max_length=100)),
('about', models.CharField(max_length=500)),
('contacts', models.CharField(max_length=300)),
],
),
]
| 1.84375 | 2 |
tests/pds/test_times.py | seignovert/pyvims | 4 | 6268 | <reponame>seignovert/pyvims
"""Test PDS times modules."""
from datetime import datetime as dt
from pyvims.pds.times import (cassini2utc, cassini_time, dt_date, dt_doy, dt_iso,
dyear, pds_folder, pds_time, utc2cassini)
from pytest import approx, raises
def test_dt_iso():
"""Test parsing ISO time pattern."""
assert str(dt_iso('2005-02-14T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00'
assert str(dt_iso('2005-02-14 18:02:29')) == '2005-02-14 18:02:29+00:00'
assert str(dt_iso('2005-02-14:18:02')) == '2005-02-14 18:02:00+00:00'
assert str(dt_iso('2005-02-14')) == '2005-02-14 00:00:00+00:00'
times = dt_iso('from 2005-02-14T18:02:29 to 2005-02-14T18:03')
assert len(times) == 2
assert str(times[0]) == '2005-02-14 18:02:29+00:00'
assert str(times[1]) == '2005-02-14 18:03:00+00:00'
with raises(ValueError):
_ = dt_iso('2005-045')
def test_dt_doy():
"""Test parsing DOY time pattern."""
assert str(dt_doy('2005-045T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00'
assert str(dt_doy('2005-045 18:02:29')) == '2005-02-14 18:02:29+00:00'
assert str(dt_doy('2005-045:18:02')) == '2005-02-14 18:02:00+00:00'
assert str(dt_doy('2005-045')) == '2005-02-14 00:00:00+00:00'
times = dt_doy('from 2005-045T18:02:29 to 2005-045T18:03')
assert len(times) == 2
assert str(times[0]) == '2005-02-14 18:02:29+00:00'
assert str(times[1]) == '2005-02-14 18:03:00+00:00'
with raises(ValueError):
_ = dt_doy('2005-02-14')
def test_dt_date():
"""Test date pattern."""
assert str(dt_date('Feb 14, 2005')) == '2005-02-14 00:00:00+00:00'
assert str(dt_date('Febr 14, 2005')) == '2005-02-14 00:00:00+00:00'
assert str(dt_date('Feb 14, 2005', eod=True)) == '2005-02-14 23:59:59+00:00'
assert str(dt_date('to Feb 14, 2005')) == '2005-02-14 23:59:59+00:00'
times = dt_date('from Feb 14, 2005 through March 12, 2006')
assert len(times) == 2
assert str(times[0]) == '2005-02-14 00:00:00+00:00'
assert str(times[1]) == '2006-03-12 23:59:59+00:00'
with raises(ValueError):
_ = dt_date('2005-02-14')
def test_pds_time():
"""Test PDS time parsing."""
assert str(pds_time('May 17, 2007')) == '2007-05-17 00:00:00+00:00'
assert str(pds_time('2010-274T00:00:00')) == '2010-10-01 00:00:00+00:00'
assert str(pds_time('2011-10-01T00:02:04.244')) == '2011-10-01 00:02:04.244000+00:00'
t0, t1 = pds_time('… May 17, 2007 through Jun 30, 2007')
assert str(t0) == '2007-05-17 00:00:00+00:00'
assert str(t1) == '2007-06-30 23:59:59+00:00'
t0, t1 = pds_time('… 2010-274T00:00:00 through 2010-365T23:59:59')
assert str(t0) == '2010-10-01 00:00:00+00:00'
assert str(t1) == '2010-12-31 23:59:59+00:00'
t0, t1 = pds_time('… 2011-10-01T00:02:04.244 through 2011-12-31T12:28:45.128')
assert str(t0) == '2011-10-01 00:02:04.244000+00:00'
assert str(t1) == '2011-12-31 12:28:45.128000+00:00'
t0, t1 = pds_time('2005015T175855_2005016T184233/')
assert str(t0) == '2005-01-15 17:58:55+00:00'
assert str(t1) == '2005-01-16 18:42:33+00:00'
with raises(ValueError):
_ = pds_time('No data available')
def test_cassini_time():
"""Test Cassini time parsing."""
assert cassini_time('v1487096932_1.qub') == 1487096932.0
assert cassini_time(1483230358.172) == 1483230358.172
with raises(ValueError):
_ = cassini_time('v123_1')
with raises(ValueError):
_ = cassini_time(123)
def test_cassini2utc():
"""Test Cassini time to UTC converter."""
assert str(cassini2utc('v1487096932_1')) == '2005-02-14 18:02:29'
assert str(cassini2utc(1483230358.172)) == '2005-01-01 00:00:00'
def test_utc2cassini():
"""Test UTC to Cassini time converter."""
assert utc2cassini('2005-02-14T18:02:29') == approx(1487096932.068, abs=1e-3)
times = utc2cassini('May 17, 2007 through Jun 30, 2007')
assert len(times) == 2
assert times[0] == approx(1558053238.602, abs=1e-3)
assert times[1] == approx(1561941262.879, abs=1e-3)
def test_pds_folder():
"""Test convert PDS folder as string."""
assert pds_folder('2005015T175855') == '2005-015T17:58:55'
assert pds_folder('2005015T175855_2005016T184233/') == \
'2005-015T17:58:55 2005-016T18:42:33'
def test_dyear():
"""Test decimal year."""
assert dyear('2005-01-01') == 2005.0
assert dyear('2005-12-31') == 2005.9973
assert dyear('2004-12-31') == 2004.9973
assert dyear(dt(2005, 1, 1)) == 2005.0
assert dyear(dt(2005, 12, 31)) == 2005.9973
assert dyear(dt(2004, 12, 31)) == 2004.9973
| 2.40625 | 2 |
e/mail-relay/web/apps/mail/migrations/0109_auto_20171130_1047.py | zhouli121018/nodejsgm | 0 | 6269 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mail', '0108_auto_20171130_1004'),
]
operations = [
migrations.AlterModelOptions(
name='relaysenderwhitelist',
options={'verbose_name': '\u4e2d\u7ee7\u53d1\u4ef6\u4eba\u767d\u540d\u5355'},
),
migrations.AlterModelOptions(
name='spamrptblacklist',
options={'verbose_name': '\u7f51\u5173\u9694\u79bb\u62a5\u544a\u6536\u4ef6\u4eba\u9ed1\u540d\u5355'},
),
]
| 1.351563 | 1 |
venv/lib/python3.6/site-packages/ansible_test/_data/sanity/code-smell/runtime-metadata.py | usegalaxy-no/usegalaxy | 1 | 6270 | #!/usr/bin/env python
"""Schema validation of ansible-core's ansible_builtin_runtime.yml and collection's meta/runtime.yml"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import re
import sys
from distutils.version import StrictVersion, LooseVersion
from functools import partial
import yaml
from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA
from voluptuous import Required, Schema, Invalid
from voluptuous.humanize import humanize_error
from ansible.module_utils.six import string_types
from ansible.utils.version import SemanticVersion
def isodate(value, check_deprecation_date=False, is_tombstone=False):
"""Validate a datetime.date or ISO 8601 date string."""
# datetime.date objects come from YAML dates, these are ok
if isinstance(value, datetime.date):
removal_date = value
else:
# make sure we have a string
msg = 'Expected ISO 8601 date string (YYYY-MM-DD), or YAML date'
if not isinstance(value, string_types):
raise Invalid(msg)
# From Python 3.7 in, there is datetime.date.fromisoformat(). For older versions,
# we have to do things manually.
if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value):
raise Invalid(msg)
try:
removal_date = datetime.datetime.strptime(value, '%Y-%m-%d').date()
except ValueError:
raise Invalid(msg)
# Make sure date is correct
today = datetime.date.today()
if is_tombstone:
# For a tombstone, the removal date must be in the past
if today < removal_date:
raise Invalid(
'The tombstone removal_date (%s) must not be after today (%s)' % (removal_date, today))
else:
# For a deprecation, the removal date must be in the future. Only test this if
# check_deprecation_date is truish, to avoid checks to suddenly start to fail.
if check_deprecation_date and today > removal_date:
raise Invalid(
'The deprecation removal_date (%s) must be after today (%s)' % (removal_date, today))
return value
def removal_version(value, is_ansible, current_version=None, is_tombstone=False):
"""Validate a removal version string."""
msg = (
'Removal version must be a string' if is_ansible else
'Removal version must be a semantic version (https://semver.org/)'
)
if not isinstance(value, string_types):
raise Invalid(msg)
try:
if is_ansible:
version = StrictVersion()
version.parse(value)
version = LooseVersion(value) # We're storing Ansible's version as a LooseVersion
else:
version = SemanticVersion()
version.parse(value)
if version.major != 0 and (version.minor != 0 or version.patch != 0):
raise Invalid('removal_version (%r) must be a major release, not a minor or patch release '
'(see specification at https://semver.org/)' % (value, ))
if current_version is not None:
if is_tombstone:
# For a tombstone, the removal version must not be in the future
if version > current_version:
raise Invalid('The tombstone removal_version (%r) must not be after the '
'current version (%s)' % (value, current_version))
else:
# For a deprecation, the removal version must be in the future
if version <= current_version:
raise Invalid('The deprecation removal_version (%r) must be after the '
'current version (%s)' % (value, current_version))
except ValueError:
raise Invalid(msg)
return value
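# Illustration of the rules above (comment only, not used by the validation):
# with a collection currently at 1.4.0, removal_version='2.0.0' is accepted for
# a deprecation, '1.5.0' is rejected because it is not a major release, and
# '1.0.0' is rejected because it is not after the current version.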
def any_value(value):
"""Accepts anything."""
return value
def get_ansible_version():
"""Return current ansible-core version"""
from ansible.release import __version__
return LooseVersion('.'.join(__version__.split('.')[:3]))
def get_collection_version():
"""Return current collection version, or None if it is not available"""
import importlib.util
collection_detail_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
'collection_detail.py')
collection_detail_spec = importlib.util.spec_from_file_location('collection_detail', collection_detail_path)
collection_detail = importlib.util.module_from_spec(collection_detail_spec)
sys.modules['collection_detail'] = collection_detail
collection_detail_spec.loader.exec_module(collection_detail)
# noinspection PyBroadException
try:
result = collection_detail.read_manifest_json('.') or collection_detail.read_galaxy_yml('.')
return SemanticVersion(result['version'])
except Exception: # pylint: disable=broad-except
        # We do not care why it fails; if we cannot get the version,
        # just return None to indicate "we don't know".
return None
def validate_metadata_file(path, is_ansible, check_deprecation_dates=False):
"""Validate explicit runtime metadata file"""
try:
with open(path, 'r') as f_path:
routing = yaml.safe_load(f_path)
except yaml.error.MarkedYAMLError as ex:
print('%s:%d:%d: YAML load failed: %s' % (path, ex.context_mark.line +
1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
return
except Exception as ex: # pylint: disable=broad-except
print('%s:%d:%d: YAML load failed: %s' %
(path, 0, 0, re.sub(r'\s+', ' ', str(ex))))
return
if is_ansible:
current_version = get_ansible_version()
else:
current_version = get_collection_version()
# Updates to schema MUST also be reflected in the documentation
    # https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html
# plugin_routing schema
avoid_additional_data = Schema(
Any(
{
Required('removal_version'): any_value,
'warning_text': any_value,
},
{
Required('removal_date'): any_value,
'warning_text': any_value,
}
),
extra=PREVENT_EXTRA
)
deprecation_schema = All(
# The first schema validates the input, and the second makes sure no extra keys are specified
Schema(
{
'removal_version': partial(removal_version, is_ansible=is_ansible,
current_version=current_version),
'removal_date': partial(isodate, check_deprecation_date=check_deprecation_dates),
'warning_text': Any(*string_types),
}
),
avoid_additional_data
)
tombstoning_schema = All(
# The first schema validates the input, and the second makes sure no extra keys are specified
Schema(
{
'removal_version': partial(removal_version, is_ansible=is_ansible,
current_version=current_version, is_tombstone=True),
'removal_date': partial(isodate, is_tombstone=True),
'warning_text': Any(*string_types),
}
),
avoid_additional_data
)
plugin_routing_schema = Any(
Schema({
('deprecation'): Any(deprecation_schema),
('tombstone'): Any(tombstoning_schema),
('redirect'): Any(*string_types),
}, extra=PREVENT_EXTRA),
)
list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema}
for str_type in string_types]
plugin_schema = Schema({
('action'): Any(None, *list_dict_plugin_routing_schema),
('become'): Any(None, *list_dict_plugin_routing_schema),
('cache'): Any(None, *list_dict_plugin_routing_schema),
('callback'): Any(None, *list_dict_plugin_routing_schema),
('cliconf'): Any(None, *list_dict_plugin_routing_schema),
('connection'): Any(None, *list_dict_plugin_routing_schema),
('doc_fragments'): Any(None, *list_dict_plugin_routing_schema),
('filter'): Any(None, *list_dict_plugin_routing_schema),
('httpapi'): Any(None, *list_dict_plugin_routing_schema),
('inventory'): Any(None, *list_dict_plugin_routing_schema),
('lookup'): Any(None, *list_dict_plugin_routing_schema),
('module_utils'): Any(None, *list_dict_plugin_routing_schema),
('modules'): Any(None, *list_dict_plugin_routing_schema),
('netconf'): Any(None, *list_dict_plugin_routing_schema),
('shell'): Any(None, *list_dict_plugin_routing_schema),
('strategy'): Any(None, *list_dict_plugin_routing_schema),
('terminal'): Any(None, *list_dict_plugin_routing_schema),
('test'): Any(None, *list_dict_plugin_routing_schema),
('vars'): Any(None, *list_dict_plugin_routing_schema),
}, extra=PREVENT_EXTRA)
# import_redirection schema
import_redirection_schema = Any(
Schema({
('redirect'): Any(*string_types),
# import_redirect doesn't currently support deprecation
}, extra=PREVENT_EXTRA)
)
list_dict_import_redirection_schema = [{str_type: import_redirection_schema}
for str_type in string_types]
# top level schema
schema = Schema({
# All of these are optional
('plugin_routing'): Any(plugin_schema),
('import_redirection'): Any(None, *list_dict_import_redirection_schema),
# requires_ansible: In the future we should validate this with SpecifierSet
('requires_ansible'): Any(*string_types),
('action_groups'): dict,
}, extra=PREVENT_EXTRA)
# Ensure schema is valid
try:
schema(routing)
except MultipleInvalid as ex:
for error in ex.errors:
# No way to get line/column numbers
print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(routing, error)))
def main():
"""Validate runtime metadata"""
paths = sys.argv[1:] or sys.stdin.read().splitlines()
collection_legacy_file = 'meta/routing.yml'
collection_runtime_file = 'meta/runtime.yml'
# This is currently disabled, because if it is enabled this test can start failing
# at a random date. For this to be properly activated, we (a) need to be able to return
# codes for this test, and (b) make this error optional.
check_deprecation_dates = False
for path in paths:
if path == collection_legacy_file:
print('%s:%d:%d: %s' % (path, 0, 0, ("Should be called '%s'" % collection_runtime_file)))
continue
validate_metadata_file(
path,
is_ansible=path not in (collection_legacy_file, collection_runtime_file),
check_deprecation_dates=check_deprecation_dates)
if __name__ == '__main__':
main()
| 2.296875 | 2 |
catkin_ws/src/o2ac_flexbe/o2ac_flexbe_states/src/o2ac_flexbe_states/align_bearing_holes.py | mitdo/o2ac-ur | 32 | 6271 | <reponame>mitdo/o2ac-ur
#!/usr/bin/env python
from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyActionClient
# example import of required action
from o2ac_msgs.msg import AlignBearingHolesAction, AlignBearingHolesGoal
class AlignBearingHolesActionState(EventState):
'''
Actionlib for aligning the bearing holes
-- task_name string Name of the task
<= success AlignBearingHoles completed successfully.
<= error AlignBearingHoles failed to execute.
'''
def __init__(self, task_name):
super(
AlignBearingHolesActionState,
self).__init__(
outcomes=[
'success',
'error'])
self._topic = 'o2ac_flexbe/align_bearing_holes'
# pass required clients as dict (topic: type)
self._client = ProxyActionClient(
{self._topic: AlignBearingHolesAction})
self._task_name = task_name
self._success = False
def execute(self, userdata):
if not self._success:
return 'error'
if self._client.has_result(self._topic):
result = self._client.get_result(self._topic)
Logger.logwarn('result %s' % str(result))
if not result:
Logger.logwarn('Fail to complete AlignBearingHoles')
self._success = False
return 'error'
else:
Logger.logwarn('Succeed! completed AlignBearingHoles')
self._success = True
return 'success'
def on_enter(self, userdata):
goal = AlignBearingHolesGoal()
goal.task_name = self._task_name
self._success = True
try:
self._client.send_goal(self._topic, goal)
except Exception as e:
Logger.logwarn(
'Failed to send the AlignBearingHoles command:\n%s' %
str(e))
self._success = False
def on_exit(self, userdata):
if not self._client.has_result(self._topic):
self._client.cancel(self._topic)
Logger.loginfo('Cancelled active action goal.')
| 2.28125 | 2 |
find_unicode_control.py | sebastian-philipp/find-unicode-control | 0 | 6272 | #!/usr/bin/env python3
"""Find unicode control characters in source files
By default the script takes one or more files or directories and looks for
unicode control characters in all text files. To narrow down the files, provide
a config file with the -c command line, defining a scan_exclude list, which
should be a list of regular expressions matching paths to exclude from the scan.
There is a second mode enabled with -p which when set to 'all', prints all
control characters and when set to 'bidi', prints only the 9 bidirectional
control characters.
"""
import sys, os, argparse, re, unicodedata, magic
import importlib
from stat import *
scan_exclude = [r'\.git/', r'\.hg/', r'\.desktop$', r'ChangeLog$', r'NEWS$',
r'\.ppd$', r'\.txt$', r'\.directory$']
scan_exclude_mime = [r'text/x-po$', r'text/x-tex$', r'text/x-troff$',
r'text/html$']
verbose_mode = False
# Print to stderr in verbose mode.
def eprint(*args, **kwargs):
if verbose_mode:
print(*args, file=sys.stderr, **kwargs)
# Decode a single latin1 line.
def decodeline(inf):
if isinstance(inf, str):
return inf
return inf.decode('latin-1')
# Make a text string from a file, attempting to decode from latin1 if necessary.
# Other non-utf-8 locales are not supported at the moment.
def getfiletext(filename):
text = None
with open(filename) as infile:
try:
if detailed_mode:
return [decodeline(inf) for inf in infile]
except Exception as e:
eprint('%s: %s' % (filename, e))
return None
try:
text = ''.join(infile)
except UnicodeDecodeError:
eprint('%s: Retrying with latin1' % filename)
try:
text = ''.join([decodeline(inf) for inf in infile])
except Exception as e:
eprint('%s: %s' % (filename, e))
if text:
return set(text)
else:
return None
def analyze_text_detailed(filename, text, disallowed, msg):
line = 0
warned = False
for t in text:
line = line + 1
subset = [c for c in t if c in disallowed]
if subset:
print('%s:%d %s: %s' % (filename, line, msg, subset))
warned = True
if not warned:
eprint('%s: OK' % filename)
# Look for disallowed characters in the text. We reduce all characters into a
# set to speed up analysis. FIXME: Add a slow mode to get line numbers in files
# that have these disallowed chars.
def analyze_text(filename, text, disallowed, msg):
if detailed_mode:
analyze_text_detailed(filename, text, disallowed, msg)
return
if not text.isdisjoint(disallowed):
print('%s: %s: %s' % (filename, msg, text & disallowed))
else:
eprint('%s: OK' % filename)
def should_read(f):
m = magic.detect_from_filename(f)
# Fast check, just the file name.
if [e for e in scan_exclude if re.search(e, f)]:
return False
# Slower check, mime type.
if not 'text/' in m.mime_type \
or [e for e in scan_exclude_mime if re.search(e, m.mime_type)]:
return False
return True
# Get file text and feed into analyze_text.
def analyze_file(f, disallowed, msg):
eprint('%s: Reading file' % f)
if should_read(f):
text = getfiletext(f)
if text:
analyze_text(f, text, disallowed, msg)
else:
eprint('%s: SKIPPED' % f)
# Actual implementation of the recursive descent into directories.
def analyze_any(p, disallowed, msg):
mode = os.stat(p).st_mode
if S_ISDIR(mode):
analyze_dir(p, disallowed, msg)
elif S_ISREG(mode):
analyze_file(p, disallowed, msg)
else:
eprint('%s: UNREADABLE' % p)
# Recursively analyze files in the directory.
def analyze_dir(d, disallowed, msg):
for f in os.listdir(d):
analyze_any(os.path.join(d, f), disallowed, msg)
def analyze_paths(paths, disallowed, msg):
for p in paths:
analyze_any(p, disallowed, msg)
# All control characters. We omit the ascii control characters.
def nonprint_unicode(c):
cat = unicodedata.category(c)
if cat.startswith('C') and cat != 'Cc':
return True
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Look for Unicode control characters")
parser.add_argument('path', metavar='path', nargs='+',
help='Sources to analyze')
parser.add_argument('-p', '--nonprint', required=False,
type=str, choices=['all', 'bidi'],
help='Look for either all non-printable unicode characters or bidirectional control characters.')
parser.add_argument('-v', '--verbose', required=False, action='store_true',
help='Verbose mode.')
parser.add_argument('-d', '--detailed', required=False, action='store_true',
help='Print line numbers where characters occur.')
parser.add_argument('-t', '--notests', required=False,
action='store_true', help='Exclude tests (basically test.* as a component of path).')
parser.add_argument('-c', '--config', required=False, type=str,
help='Configuration file to read settings from.')
args = parser.parse_args()
verbose_mode = args.verbose
detailed_mode = args.detailed
if not args.nonprint:
# Formatting control characters in the unicode space. This includes the
# bidi control characters.
disallowed = set(chr(c) for c in range(sys.maxunicode) if \
unicodedata.category(chr(c)) == 'Cf')
msg = 'unicode control characters'
elif args.nonprint == 'all':
# All control characters.
disallowed = set(chr(c) for c in range(sys.maxunicode) if \
nonprint_unicode(chr(c)))
msg = 'disallowed characters'
else:
# Only bidi control characters.
disallowed = set([
chr(0x202a), chr(0x202b), chr(0x202c), chr(0x202d), chr(0x202e),
chr(0x2066), chr(0x2067), chr(0x2068), chr(0x2069)])
msg = 'bidirectional control characters'
if args.config:
spec = importlib.util.spec_from_file_location("settings", args.config)
settings = importlib.util.module_from_spec(spec)
spec.loader.exec_module(settings)
if hasattr(settings, 'scan_exclude'):
scan_exclude = scan_exclude + settings.scan_exclude
if hasattr(settings, 'scan_exclude_mime'):
scan_exclude_mime = scan_exclude_mime + settings.scan_exclude_mime
if args.notests:
scan_exclude = scan_exclude + [r'/test[^/]+/']
analyze_paths(args.path, disallowed, msg)
| 3.234375 | 3 |
igibson/object_states/aabb.py | mamadbiabon/iGibson | 360 | 6273 | <reponame>mamadbiabon/iGibson
import numpy as np
from igibson.external.pybullet_tools.utils import aabb_union, get_aabb, get_all_links
from igibson.object_states.object_state_base import CachingEnabledObjectState
class AABB(CachingEnabledObjectState):
def _compute_value(self):
body_id = self.obj.get_body_id()
all_links = get_all_links(body_id)
aabbs = [get_aabb(body_id, link=link) for link in all_links]
aabb_low, aabb_hi = aabb_union(aabbs)
if not hasattr(self.obj, "category") or self.obj.category != "floors" or self.obj.room_floor is None:
return np.array(aabb_low), np.array(aabb_hi)
# TODO: remove after split floors
# room_floor will be set to the correct RoomFloor beforehand
room_instance = self.obj.room_floor.room_instance
# Get the x-y values from the room segmentation map
room_aabb_low, room_aabb_hi = self.obj.room_floor.scene.get_aabb_by_room_instance(room_instance)
if room_aabb_low is None:
return np.array(aabb_low), np.array(aabb_hi)
# Use the z values from pybullet
room_aabb_low[2] = aabb_low[2]
room_aabb_hi[2] = aabb_hi[2]
return np.array(room_aabb_low), np.array(room_aabb_hi)
def _set_value(self, new_value):
raise NotImplementedError("AABB state currently does not support setting.")
# Nothing needs to be done to save/load AABB since it will happen due to pose caching.
def _dump(self):
return None
def load(self, data):
return
| 1.898438 | 2 |
vsphere/tests/test_vsphere.py | fujigon/integrations-core | 0 | 6274 | # (C) Datadog, Inc. 2010-2017
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from __future__ import unicode_literals
import time
from datetime import datetime
import mock
import pytest
from mock import MagicMock
from pyVmomi import vim
from datadog_checks.vsphere import VSphereCheck
from datadog_checks.vsphere.cache_config import CacheConfig
from datadog_checks.vsphere.common import SOURCE_TYPE
from datadog_checks.vsphere.errors import BadConfigError, ConnectionError
from datadog_checks.vsphere.vsphere import (
REFRESH_METRICS_METADATA_INTERVAL,
REFRESH_MORLIST_INTERVAL,
RESOURCE_TYPE_METRICS,
SHORT_ROLLUP,
)
from .utils import MockedMOR, assertMOR, disable_thread_pool, get_mocked_server
SERVICE_CHECK_TAGS = ["vcenter_server:vsphere_mock", "vcenter_host:None", "foo:bar"]
def test__init__(instance):
with pytest.raises(BadConfigError):
# Must define a unique 'name' per vCenter instance
VSphereCheck('vsphere', {}, {}, [{'': ''}])
init_config = {
'clean_morlist_interval': 50,
'refresh_morlist_interval': 42,
'refresh_metrics_metadata_interval': -42,
'batch_property_collector_size': -1,
}
check = VSphereCheck('vsphere', init_config, {}, [instance])
i_key = check._instance_key(instance)
assert check.time_started > 0
assert not check.server_instances
assert check.cache_config.get_interval(CacheConfig.Morlist, i_key) == 42
assert check.cache_config.get_interval(CacheConfig.Metadata, i_key) == -42
assert check.clean_morlist_interval == 50
assert len(check.event_config) == 1
assert 'vsphere_mock' in check.event_config
assert not check.registry
assert not check.latest_event_query
assert check.batch_collector_size == 0
assert check.batch_morlist_size == 50
assert check.excluded_host_tags == []
def test_excluded_host_tags(vsphere, instance, aggregator):
# Check default value and precedence of instance config over init config
check = VSphereCheck('vsphere', {}, {}, [instance])
assert check.excluded_host_tags == []
check = VSphereCheck('vsphere', {"excluded_host_tags": ["vsphere_host"]}, {}, [instance])
assert check.excluded_host_tags == ["vsphere_host"]
instance["excluded_host_tags"] = []
check = VSphereCheck('vsphere', {"excluded_host_tags": ["vsphere_host"]}, {}, [instance])
assert check.excluded_host_tags == []
# Test host tags are excluded from external host metadata, but still stored in the cache for metrics
vsphere.excluded_host_tags = ["vsphere_host"]
mocked_vm = MockedMOR(spec="VirtualMachine")
mocked_host = MockedMOR(spec="HostSystem")
mocked_mors_attrs = {
mocked_vm: {
"name": "mocked_vm",
"parent": mocked_host,
"runtime.powerState": vim.VirtualMachinePowerState.poweredOn,
},
mocked_host: {"name": "mocked_host", "parent": None},
}
with mock.patch("datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes", return_value=mocked_mors_attrs):
server_instance = vsphere._get_server_instance(instance)
result = MagicMock()
result.value = [23.4]
server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result], entity=mocked_vm)]
vsphere.metadata_cache = MagicMock()
vsphere.metadata_cache.get_metadata.return_value = {"name": "mymetric", "unit": "kb"}
vsphere.in_compatibility_mode = MagicMock()
vsphere.in_compatibility_mode.return_value = False
vsphere.check(instance)
ext_host_tags = vsphere.get_external_host_tags()
# vsphere_host tag not in external metadata
for host, source_tags in ext_host_tags:
if host == u"mocked_vm":
tags = source_tags["vsphere"]
for tag in tags:
assert "vsphere_host:" not in tag
break
# vsphere_host tag still in cache for sending with metrics
aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname="mocked_vm", count=1)
aggregator.assert_metric_has_tag('vsphere.mymetric', tag="vsphere_host:mocked_host", count=1)
def test__is_excluded():
"""
* Exclude hosts/vms not compliant with the user's `*_include` configuration.
* Exclude "non-labeled" virtual machines when the user configuration instructs to.
"""
# Sample(s)
include_regexes = {'host_include': "f[o]+", 'vm_include': "f[o]+"}
# OK
included_host = MockedMOR(spec="HostSystem", name="foo")
included_vm = MockedMOR(spec="VirtualMachine", name="foo")
assert not VSphereCheck._is_excluded(included_host, {"name": included_host.name}, include_regexes, None)
assert not VSphereCheck._is_excluded(included_vm, {"name": included_vm.name}, include_regexes, None)
# Not OK!
excluded_host = MockedMOR(spec="HostSystem", name="bar")
excluded_vm = MockedMOR(spec="VirtualMachine", name="bar")
assert VSphereCheck._is_excluded(excluded_host, {"name": excluded_host.name}, include_regexes, None)
assert VSphereCheck._is_excluded(excluded_vm, {"name": excluded_vm.name}, include_regexes, None)
# Sample(s)
include_regexes = None
include_only_marked = True
# OK
included_vm = MockedMOR(spec="VirtualMachine", name="foo", label=True)
assert not VSphereCheck._is_excluded(
included_vm, {"customValue": included_vm.customValue}, include_regexes, include_only_marked
)
# Not OK
included_vm = MockedMOR(spec="VirtualMachine", name="foo")
assert VSphereCheck._is_excluded(included_vm, {"customValue": []}, include_regexes, include_only_marked)
def test_vms_in_filtered_host_are_filtered(vsphere, instance):
"""Test that all vms belonging to a filtered host are also filtered"""
server_instance = vsphere._get_server_instance(instance)
filtered_host = MockedMOR(spec="HostSystem")
filtered_vm = MockedMOR(spec="VirtualMachine")
non_filtered_host = MockedMOR(spec="HostSystem")
non_filtered_vm = MockedMOR(spec="VirtualMachine")
mocked_mors_attrs = {
filtered_host: {"name": "filtered_host_number_1", "parent": None},
filtered_vm: {
"name": "this_vm_is_filtered",
"runtime.powerState": vim.VirtualMachinePowerState.poweredOn,
"runtime.host": filtered_host,
},
non_filtered_host: {"name": "non_filtered_host_number_1", "parent": None},
non_filtered_vm: {
"name": "this_vm_is_not_filtered",
"runtime.powerState": vim.VirtualMachinePowerState.poweredOn,
"runtime.host": non_filtered_host,
},
}
regex = {'host_include': '^(?!filtered_.+)'}
with mock.patch("datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes", return_value=mocked_mors_attrs):
obj_list = vsphere._get_all_objs(server_instance, regex, False, [])
assert len(obj_list[vim.VirtualMachine]) == 1
assert len(obj_list[vim.HostSystem]) == 1
assert {
"mor_type": "vm",
"mor": non_filtered_vm,
"hostname": "this_vm_is_not_filtered",
"tags": ["vsphere_host:non_filtered_host_number_1", "vsphere_type:vm"],
} == obj_list[vim.VirtualMachine][0]
assert {
"mor_type": "host",
"mor": non_filtered_host,
"hostname": "non_filtered_host_number_1",
"tags": ["vsphere_type:host"],
} == obj_list[vim.HostSystem][0]
def test__get_all_objs(vsphere, instance):
"""
Test that we don't raise KeyError if the property collector failed to collect some attributes
and that we handle the case were there are missing attributes
"""
server_instance = vsphere._get_server_instance(instance)
vm_no_parent = MockedMOR(spec="VirtualMachine")
vm_no_powerstate = MockedMOR(spec="VirtualMachine")
vm_host_parent = MockedMOR(spec="VirtualMachine")
mocked_host = MockedMOR(spec="HostSystem")
mocked_datastore = MockedMOR(spec="Datastore")
mocked_datacenter = MockedMOR(spec="Datacenter")
mocked_cluster = MockedMOR(spec="ClusterComputeResource")
mocked_mors_attrs = {
vm_no_parent: {"name": "vm_no_parent", "runtime.powerState": vim.VirtualMachinePowerState.poweredOn},
vm_no_powerstate: {"name": "vm_no_powerstate"},
vm_host_parent: {"parent": mocked_host, "runtime.powerState": vim.VirtualMachinePowerState.poweredOn},
mocked_host: {"name": "mocked_host", "parent": None},
mocked_datastore: {},
mocked_cluster: {"name": "cluster"},
mocked_datacenter: {"parent": MockedMOR(spec="Folder", name="unknown folder"), "name": "datacenter"},
}
with mock.patch("datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes", return_value=mocked_mors_attrs):
obj_list = vsphere._get_all_objs(server_instance, None, False, [])
assert len(obj_list[vim.VirtualMachine]) == 2
assert {
"mor_type": "vm",
"mor": vm_no_parent,
"hostname": "vm_no_parent",
"tags": ["vsphere_host:unknown", "vsphere_type:vm"],
} in obj_list[vim.VirtualMachine]
assert {
"mor_type": "vm",
"mor": vm_host_parent,
"hostname": "unknown",
"tags": ["vsphere_host:mocked_host", "vsphere_host:unknown", "vsphere_type:vm"],
} in obj_list[vim.VirtualMachine]
assert len(obj_list[vim.HostSystem]) == 1
assert {
"mor_type": "host",
"mor": mocked_host,
"hostname": "mocked_host",
"tags": ["vsphere_type:host"],
} in obj_list[vim.HostSystem]
assert len(obj_list[vim.Datastore]) == 1
assert {
"mor_type": "datastore",
"mor": mocked_datastore,
"hostname": None,
"tags": ["vsphere_datastore:unknown", "vsphere_type:datastore"],
} in obj_list[vim.Datastore]
assert len(obj_list[vim.Datacenter]) == 1
assert {
"mor_type": "datacenter",
"mor": mocked_datacenter,
"hostname": None,
"tags": ["vsphere_folder:unknown", "vsphere_datacenter:datacenter", "vsphere_type:datacenter"],
} in obj_list[vim.Datacenter]
assert len(obj_list[vim.ClusterComputeResource]) == 1
assert {
"mor_type": "cluster",
"mor": mocked_cluster,
"hostname": None,
"tags": ["vsphere_cluster:cluster", "vsphere_type:cluster"],
} in obj_list[vim.ClusterComputeResource]
def test__collect_mors_and_attributes(vsphere, instance):
"""
Test that we check for errors when collecting properties with property collector
"""
server_instance = vsphere._get_server_instance(instance)
with mock.patch("datadog_checks.vsphere.vsphere.vmodl"):
obj = MagicMock(missingSet=None, obj="obj")
result = MagicMock(token=None, objects=[obj])
server_instance.content.propertyCollector.RetrievePropertiesEx.return_value = result
log = MagicMock()
vsphere.log = log
mor_attrs = vsphere._collect_mors_and_attributes(server_instance)
log.error.assert_not_called()
assert len(mor_attrs) == 1
obj.missingSet = [MagicMock(path="prop", fault="fault")]
mor_attrs = vsphere._collect_mors_and_attributes(server_instance)
log.error.assert_called_once_with('Unable to retrieve property %s for object %s: %s', 'prop', 'obj', 'fault')
assert len(mor_attrs) == 1
def test__cache_morlist_raw(vsphere, instance):
"""
Explore the vCenter infrastructure to discover hosts, virtual machines.
Input topology:
```
rootFolder
- datacenter1
- compute_resource1
- host1 # Filtered out
- host2
- folder1
- datacenter2
- compute_resource2
- host3
- vm1 # Not labeled
- vm2 # Filtered out
- vm3 # Powered off
- vm4
```
"""
# Samples
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
instance["host_include_only_regex"] = "host[2-9]"
instance["vm_include_only_regex"] = "vm[^2]"
instance["include_only_marked"] = True
# Discover hosts and virtual machines
vsphere._cache_morlist_raw(instance)
# Assertions: 1 labeled+monitored VM + 2 hosts + 2 datacenters + 2 clusters + 1 datastore.
assertMOR(vsphere, instance, count=8)
# ...on hosts
assertMOR(vsphere, instance, spec="host", count=2)
tags = [
"vcenter_server:vsphere_mock",
"vsphere_folder:rootFolder",
"vsphere_datacenter:datacenter1",
"vsphere_compute:compute_resource1",
"vsphere_cluster:compute_resource1",
"vsphere_type:host",
]
assertMOR(vsphere, instance, name="host2", spec="host", tags=tags)
tags = [
"vcenter_server:vsphere_mock",
"vsphere_folder:rootFolder",
"vsphere_folder:folder1",
"vsphere_datacenter:datacenter2",
"vsphere_compute:compute_resource2",
"vsphere_cluster:compute_resource2",
"vsphere_type:host",
]
assertMOR(vsphere, instance, name="host3", spec="host", tags=tags)
# ...on VMs
assertMOR(vsphere, instance, spec="vm", count=1)
tags = [
"vcenter_server:vsphere_mock",
"vsphere_folder:folder1",
"vsphere_datacenter:datacenter2",
"vsphere_compute:compute_resource2",
"vsphere_cluster:compute_resource2",
"vsphere_host:host3",
"vsphere_type:vm",
]
assertMOR(vsphere, instance, name="vm4", spec="vm", subset=True, tags=tags)
def test_use_guest_hostname(vsphere, instance):
# Default value
with mock.patch("datadog_checks.vsphere.VSphereCheck._get_all_objs") as mock_get_all_objs, mock.patch(
"datadog_checks.vsphere.vsphere.vmodl"
):
vsphere._cache_morlist_raw(instance)
# Default value
assert not mock_get_all_objs.call_args[1]["use_guest_hostname"]
# use guest hostname
instance["use_guest_hostname"] = True
vsphere._cache_morlist_raw(instance)
assert mock_get_all_objs.call_args[1]["use_guest_hostname"]
with mock.patch("datadog_checks.vsphere.vsphere.vmodl"):
# Discover hosts and virtual machines
instance["use_guest_hostname"] = True
vsphere._cache_morlist_raw(instance)
assertMOR(vsphere, instance, spec="vm", count=3)
# Fallback on VM name when guest hostname not available
assertMOR(vsphere, instance, name="vm1", spec="vm", subset=True)
assertMOR(vsphere, instance, name="vm2_guest", spec="vm", subset=True)
assertMOR(vsphere, instance, name="vm4_guest", spec="vm", subset=True)
def test__process_mor_objects_queue(vsphere, instance):
vsphere.log = MagicMock()
vsphere._process_mor_objects_queue_async = MagicMock()
vsphere._process_mor_objects_queue(instance)
# Queue hasn't been initialized
vsphere.log.debug.assert_called_once_with(
"Objects queue is not initialized yet for instance %s, skipping processing", vsphere._instance_key(instance)
)
vsphere.batch_morlist_size = 1
i_key = vsphere._instance_key(instance)
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
vsphere._cache_morlist_raw(instance)
assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 11
vsphere._process_mor_objects_queue(instance)
# Object queue should be empty after processing
assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 0
assert vsphere._process_mor_objects_queue_async.call_count == 0 # realtime only
for call_args in vsphere._process_mor_objects_queue_async.call_args_list:
# query_specs parameter should be a list of size 1 since the batch size is 1
assert len(call_args[0][1]) == 1
instance["collect_realtime_only"] = False
vsphere._cache_morlist_raw(instance)
assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 11
vsphere._process_mor_objects_queue(instance)
# Object queue should be empty after processing
assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 0
assert vsphere._process_mor_objects_queue_async.call_count == 5 # 2 datacenters, 2 clusters, 1 datastore
def test_collect_realtime_only(vsphere, instance):
"""
Test the collect_realtime_only parameter acts as expected
"""
vsphere._process_mor_objects_queue_async = MagicMock()
instance["collect_realtime_only"] = False
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
vsphere._cache_morlist_raw(instance)
vsphere._process_mor_objects_queue(instance)
# Called once to process the 2 datacenters, then 2 clusters, then the datastore
assert vsphere._process_mor_objects_queue_async.call_count == 3
instance["collect_realtime_only"] = True
vsphere._process_mor_objects_queue_async.reset_mock()
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
vsphere._cache_morlist_raw(instance)
vsphere._process_mor_objects_queue(instance)
assert vsphere._process_mor_objects_queue_async.call_count == 0
def test__cache_metrics_metadata(vsphere, instance):
vsphere.metadata_cache = MagicMock()
vsphere._cache_metrics_metadata(instance)
vsphere.metadata_cache.init_instance.assert_called_once_with(vsphere._instance_key(instance))
vsphere.metadata_cache.set_metadata.assert_called_once()
vsphere.metadata_cache.set_metric_ids.assert_called_once()
def test__cache_metrics_metadata_compatibility(vsphere, instance):
server_instance = vsphere._get_server_instance(instance)
i_key = vsphere._instance_key(instance)
counter = MagicMock()
counter.rollupType = "average"
counter.key = 1
vsphere.format_metric_name = MagicMock()
# New way
instance["collection_level"] = 3
server_instance.content.perfManager.QueryPerfCounterByLevel.return_value = [counter]
vsphere._cache_metrics_metadata(instance)
server_instance.content.perfManager.QueryPerfCounterByLevel.assert_called_once_with(3)
assert len(vsphere.metadata_cache._metric_ids[i_key]) == 1
assert len(vsphere.metadata_cache._metadata[i_key]) == 1
vsphere.format_metric_name.assert_called_once_with(counter)
# Compatibility mode
instance["all_metrics"] = False
del instance["collection_level"]
vsphere.format_metric_name.reset_mock()
server_instance.content.perfManager.perfCounter = [counter]
vsphere._cache_metrics_metadata(instance)
assert not vsphere.metadata_cache._metric_ids[i_key]
assert len(vsphere.metadata_cache._metadata[i_key]) == 1
vsphere.format_metric_name.assert_called_once_with(counter, compatibility=True)
def test_in_compatibility_mode(vsphere, instance):
vsphere.log = MagicMock()
instance["collection_level"] = 2
assert not vsphere.in_compatibility_mode(instance)
instance["all_metrics"] = True
assert not vsphere.in_compatibility_mode(instance)
vsphere.log.warning.assert_not_called()
assert not vsphere.in_compatibility_mode(instance, log_warning=True)
vsphere.log.warning.assert_called_once()
del instance["collection_level"]
vsphere.log.reset_mock()
assert vsphere.in_compatibility_mode(instance)
vsphere.log.warning.assert_not_called()
assert vsphere.in_compatibility_mode(instance, log_warning=True)
vsphere.log.warning.assert_called_once()
def test_format_metric_name(vsphere):
counter = MagicMock()
counter.groupInfo.key = "group"
counter.nameInfo.key = "name"
counter.rollupType = "rollup"
assert vsphere.format_metric_name(counter, compatibility=True) == "group.name"
for rollup, short_rollup in SHORT_ROLLUP.items():
counter.rollupType = rollup
assert vsphere.format_metric_name(counter) == "group.name.{}".format(short_rollup)
def test_collect_metrics(vsphere, instance):
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
vsphere.batch_morlist_size = 1
vsphere._collect_metrics_async = MagicMock()
vsphere._cache_metrics_metadata(instance)
vsphere._cache_morlist_raw(instance)
vsphere._process_mor_objects_queue(instance)
vsphere.collect_metrics(instance)
assert vsphere._collect_metrics_async.call_count == 6 # One for each VM/host, datacenters are not collected
for call_args in vsphere._collect_metrics_async.call_args_list:
# query_specs parameter should be a list of size 1 since the batch size is 1
assert len(call_args[0][1]) == 1
def test__collect_metrics_async_compatibility(vsphere, instance):
server_instance = vsphere._get_server_instance(instance)
server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[MagicMock()])]
vsphere.mor_cache = MagicMock()
vsphere.metadata_cache = MagicMock()
vsphere.metadata_cache.get_metadata.return_value = {"name": "unknown"}
vsphere.in_compatibility_mode = MagicMock()
vsphere.log = MagicMock()
vsphere.in_compatibility_mode.return_value = True
vsphere._collect_metrics_async(instance, [])
vsphere.log.debug.assert_called_with('Skipping unknown `%s` metric.', 'unknown')
vsphere.log.reset_mock()
vsphere.in_compatibility_mode.return_value = False
vsphere._collect_metrics_async(instance, [])
vsphere.log.debug.assert_not_called()
def test__collect_metrics_async_hostname(vsphere, instance, aggregator):
server_instance = vsphere._get_server_instance(instance)
result = MagicMock()
result.value = [23.4]
server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result])]
mor = {"hostname": "foo"}
vsphere.mor_cache = MagicMock()
vsphere.mor_cache.get_mor.return_value = mor
vsphere.metadata_cache = MagicMock()
vsphere.metadata_cache.get_metadata.return_value = {"name": "mymetric", "unit": "kb"}
vsphere.in_compatibility_mode = MagicMock()
vsphere.in_compatibility_mode.return_value = False
vsphere._collect_metrics_async(instance, [])
aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname="foo")
def test_check(vsphere, instance):
"""
Test the check() method
"""
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
with mock.patch.object(vsphere, 'set_external_tags') as set_external_tags:
vsphere.check(instance)
set_external_tags.assert_called_once()
all_the_tags = dict(set_external_tags.call_args[0][0])
assert all_the_tags['vm4'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_folder:folder1',
'vsphere_datacenter:datacenter2',
'vsphere_cluster:compute_resource2',
'vsphere_compute:compute_resource2',
'vsphere_host:host3',
'vsphere_host:host3',
'vsphere_type:vm',
]
assert all_the_tags['host1'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_datacenter:datacenter1',
'vsphere_cluster:compute_resource1',
'vsphere_compute:compute_resource1',
'vsphere_type:host',
]
assert all_the_tags['host3'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_folder:folder1',
'vsphere_datacenter:datacenter2',
'vsphere_cluster:compute_resource2',
'vsphere_compute:compute_resource2',
'vsphere_type:host',
]
assert all_the_tags['vm2'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_folder:folder1',
'vsphere_datacenter:datacenter2',
'vsphere_cluster:compute_resource2',
'vsphere_compute:compute_resource2',
'vsphere_host:host3',
'vsphere_host:host3',
'vsphere_type:vm',
]
assert all_the_tags['vm1'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_folder:folder1',
'vsphere_datacenter:datacenter2',
'vsphere_cluster:compute_resource2',
'vsphere_compute:compute_resource2',
'vsphere_host:host3',
'vsphere_host:host3',
'vsphere_type:vm',
]
assert all_the_tags['host2'][SOURCE_TYPE] == [
'vcenter_server:vsphere_mock',
'vsphere_folder:rootFolder',
'vsphere_datacenter:datacenter1',
'vsphere_cluster:compute_resource1',
'vsphere_compute:compute_resource1',
'vsphere_type:host',
]
def test_service_check_ko(aggregator, instance):
check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))
with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect:
# SmartConnect fails
SmartConnect.side_effect = Exception()
with pytest.raises(ConnectionError):
check.check(instance)
aggregator.assert_service_check(
VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS
)
aggregator.reset()
# SmartConnect succeeds, CurrentTime fails
server = MagicMock()
server.CurrentTime.side_effect = Exception()
SmartConnect.side_effect = None
SmartConnect.return_value = server
with pytest.raises(ConnectionError):
check.check(instance)
aggregator.assert_service_check(
VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS
)
def test_service_check_ok(aggregator, instance):
check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance]))
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect:
SmartConnect.return_value = get_mocked_server()
check.check(instance)
aggregator.assert_service_check(
VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.OK, tags=SERVICE_CHECK_TAGS
)
def test__instance_key(vsphere, instance):
assert vsphere._instance_key(instance) == "vsphere_mock"
del instance['name']
with pytest.raises(BadConfigError):
vsphere._instance_key(instance)
def test__should_cache(instance):
now = time.time()
# do not use fixtures for the check instance, some params are set at
# __init__ time and we need to instantiate the check multiple times
check = VSphereCheck('vsphere', {}, {}, [instance])
i_key = check._instance_key(instance)
# first run should always cache
assert check._should_cache(instance, CacheConfig.Morlist)
assert check._should_cache(instance, CacheConfig.Metadata)
# explicitly set cache expiration times, don't use defaults so we also test
# configuration is properly propagated
init_config = {
'refresh_morlist_interval': 2 * REFRESH_MORLIST_INTERVAL,
'refresh_metrics_metadata_interval': 2 * REFRESH_METRICS_METADATA_INTERVAL,
}
check = VSphereCheck('vsphere', init_config, {}, [instance])
# simulate previous runs, set the last execution time in the past
check.cache_config.set_last(CacheConfig.Morlist, i_key, now - (2 * REFRESH_MORLIST_INTERVAL))
check.cache_config.set_last(CacheConfig.Metadata, i_key, now - (2 * REFRESH_METRICS_METADATA_INTERVAL))
with mock.patch("time.time", return_value=now):
assert not check._should_cache(instance, CacheConfig.Morlist)
assert not check._should_cache(instance, CacheConfig.Metadata)
def alarm_event(from_status='green', to_status='red', message='Some error'):
now = datetime.utcnow()
vm = MockedMOR(spec='VirtualMachine')
dc = MockedMOR(spec="Datacenter")
dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1')
alarm = MockedMOR(spec="Alarm")
alarm_arg = vim.event.AlarmEventArgument(alarm=alarm, name='alarm1')
entity = vim.event.ManagedEntityEventArgument(entity=vm, name='vm1')
event = vim.event.AlarmStatusChangedEvent(
entity=entity, fullFormattedMessage=message, createdTime=now, to=to_status, datacenter=dc_arg, alarm=alarm_arg
)
setattr(event, 'from', from_status) # noqa: B009
return event
def migrated_event():
now = datetime.utcnow()
vm = MockedMOR(spec='VirtualMachine', name='vm1')
vm_arg = vim.event.VmEventArgument(vm=vm)
host = MockedMOR(spec='HostSystem')
host_arg = vim.event.HostEventArgument(host=host, name='host1')
host_dest = MockedMOR(spec='HostSystem')
host_dest_arg = vim.event.HostEventArgument(host=host_dest, name='host2')
dc = MockedMOR(spec='Datacenter')
dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1')
dc_dest = MockedMOR(spec='Datacenter')
dc_dest_arg = vim.event.DatacenterEventArgument(datacenter=dc_dest, name='dc2')
ds = MockedMOR(spec='Datastore')
ds_arg = vim.event.DatastoreEventArgument(datastore=ds, name='ds1')
ds_dest = MockedMOR(spec='Datastore')
ds_dest_arg = vim.event.DatastoreEventArgument(datastore=ds_dest, name='ds2')
event = vim.event.VmBeingHotMigratedEvent(
vm=vm_arg,
userName='John',
fullFormattedMessage='Some error',
createdTime=now,
host=host_arg,
destHost=host_dest_arg,
datacenter=dc_arg,
destDatacenter=dc_dest_arg,
ds=ds_arg,
destDatastore=ds_dest_arg,
)
return event
def test_events(aggregator, vsphere, instance):
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
server_instance = vsphere._get_server_instance(instance)
server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()]
vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}
vsphere.check(instance)
aggregator.assert_event(
"vCenter monitor status changed on this alarm, it was green and it's now red.", tags=['foo:bar']
)
def test_events_tags(aggregator, vsphere, instance):
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
server_instance = vsphere._get_server_instance(instance)
server_instance.content.eventManager.QueryEvents.return_value = [migrated_event()]
vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}
vsphere.check(instance)
aggregator.assert_event(
"John has launched a hot migration of this virtual machine",
exact_match=False,
tags=[
'foo:bar',
'vsphere_host:host1',
'vsphere_host:host2',
'vsphere_datacenter:dc1',
'vsphere_datacenter:dc2',
],
)
server_instance = vsphere._get_server_instance(instance)
server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()]
vsphere.check(instance)
aggregator.assert_event(
"vCenter monitor status changed on this alarm, it was green and it's now red.", tags=['foo:bar']
)
def test_events_gray_handled(aggregator, vsphere, instance):
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
server_instance = vsphere._get_server_instance(instance)
event = alarm_event(from_status='gray', message='Went from Gray to Red')
server_instance.content.eventManager.QueryEvents.return_value = [event]
vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}
vsphere.check(instance)
aggregator.assert_event(
"vCenter monitor status changed on this alarm, it was gray and it's now red.", tags=['foo:bar']
)
event = alarm_event(from_status='yellow', to_status='gray', message='Went from Yellow to Gray')
server_instance.content.eventManager.QueryEvents.return_value = [event]
vsphere.check(instance)
aggregator.assert_event(
"vCenter monitor status changed on this alarm, it was yellow and it's now gray.",
tags=['foo:bar'],
alert_type='info',
)
def test_events_gray_ignored(aggregator, vsphere, instance):
with mock.patch('datadog_checks.vsphere.vsphere.vmodl'):
server_instance = vsphere._get_server_instance(instance)
event = alarm_event(from_status='gray', to_status='green', message='Went from Gray to Green')
server_instance.content.eventManager.QueryEvents.return_value = [event]
vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True}
vsphere.check(instance)
assert not aggregator.events
event = alarm_event(from_status='green', to_status='gray', message='Went from Green to Gray')
server_instance.content.eventManager.QueryEvents.return_value = [event]
vsphere.check(instance)
assert not aggregator.events
| 1.75 | 2 |
data_steward/constants/validation/email_notification.py | jp3477/curation | 1 | 6275 | MANDRILL_API_KEY = 'MANDRILL_API_KEY'
UNSET_MANDRILL_API_KEY_MSG = f"Mandrill API key not set in environment variable {MANDRILL_API_KEY}"
CONTACT_LIST_QUERY = """
SELECT *
FROM `{{project}}.{{dataset}}.{{contact_table}}`
"""
EHR_OPERATIONS = 'EHR Ops'
EHR_OPS_ZENDESK = '<EMAIL>'
DATA_CURATION_LISTSERV = '<EMAIL>'
NO_REPLY_ADDRESS = '<EMAIL>'
NO_DATA_STEWARD = 'no data steward'
# HPO contact list table columns
SITE_NAME = 'site_name'
HPO_ID = 'hpo_id'
SITE_POINT_OF_CONTACT = 'site_point_of_contact'
# Mandrill API constants
MAIL_TO = 'mail_to'
EHR_OPS_SITE_URL = 'https://sites.google.com/view/ehrupload'
# Email content
EMAIL_BODY = """
<p style="font-size:115%;">Hi {{ site_name }},</p>
<p style="font-size:115%;">Your submission <b>{{ folder }}</b>
{% if submission_error %}was NOT successfully loaded on {{ timestamp }}.<br>
{% else %}was successfully loaded on {{ timestamp }}.<br>
{% endif %}
Please review the <code>results.html</code> submission report attached to this email{% if submission_error %}<br>
and resolve the errors before making a new submission{% endif %}.<br>
If any of your files have not been successfully uploaded, please run the
<a href="https://github.com/all-of-us/aou-ehr-file-check">local file check</a> before making your submission.<br>
To view the full set of curation reports, please visit the submission folder in your
GCS bucket <a href="{{ submission_folder_url }}">here</a>.<br>
For more information on the reports and how to download them, please refer to our
<a href="{{ ehr_ops_site_url }}">EHR Ops website</a>.</p>
<p style="font-size:115%;">You are receiving this email because you are listed as a point of contact
for HPO Site <em>{{ site_name }}</em>.<br>
If you have additional questions or wish to no longer receive these emails, please reply/send an
email to <a href="mailto:{{ eo_zendesk }}">{{ eo_zendesk }}</a>.</p>
<p style="font-size:115%;">EHR Ops team, DRC<br>
<em>All of Us</em> Research Program<br>
<img src="cid:{{ aou_logo }}"/></p>
"""
AOU_LOGO = 'aou_logo'
AOU_LOGO_PNG = 'all-of-us-logo.png'
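# Illustrative rendering of EMAIL_BODY (not part of the original module); it is
# a Jinja2 template, and the values below are made-up placeholders:
#
#   from jinja2 import Template
#   html = Template(EMAIL_BODY).render(
#       site_name='Example Site', folder='2022-01-01-v1', timestamp='2022-01-02 10:00',
#       submission_error=False, submission_folder_url='https://example.org/bucket/folder',
#       ehr_ops_site_url=EHR_OPS_SITE_URL, eo_zendesk=EHR_OPS_ZENDESK, aou_logo=AOU_LOGO)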
| 1.5625 | 2 |
clip/clip.py | keshav11/clip | 1 | 6276 | <filename>clip/clip.py
import os
import argparse
from pathlib import Path
CLIP_FILE = os.path.join(Path.home(), '.clip')
TEMP_FILE = '.TEMP_FILE'
def add_text(key, text):
if os.path.exists(CLIP_FILE):
open_mode = 'a'
else:
open_mode = 'w+'
with open(CLIP_FILE, open_mode) as clip_file:
clip_file.write(key + ": " + text + "\n")
def list_texts():
with open(CLIP_FILE, 'r') as clip_file:
for text in clip_file.read().split('\n'):
print(text)
def get_text(key):
with open(CLIP_FILE, 'r') as clip_file:
for text in clip_file.read().split('\n'):
key_val = text.split(':')
if key_val[0].strip() == key:
print(key_val[1].strip(), end='')
def delete_text(key):
exists = False
with open(TEMP_FILE, 'w+') as temp_file:
with open(CLIP_FILE, 'r') as clip_file:
for text in clip_file.read().split('\n'):
if text.strip() == "":
continue
key_val = text.split(':')
if key_val[0].strip() != key:
temp_file.write(text+"\n")
else:
exists = True
if not exists:
print("key:", key, "was not found in the clip store")
try:
os.rename(TEMP_FILE, CLIP_FILE)
except Exception as ex:
os.remove(TEMP_FILE)
print('remove text failed.', ex)
def main():
parser = argparse.ArgumentParser(description='clips and saves texts from the command line')
parser.add_argument('-a', '--add', nargs=2)
parser.add_argument('-g', '--get', nargs=1)
parser.add_argument('-d', '--delete', nargs=1)
parser.add_argument('-l', '--list', action='store_true')
args = parser.parse_args()
if args.add:
key, value = args.add[0], args.add[1]
add_text(key, value)
elif args.list:
list_texts()
elif args.get:
key = args.get[0]
get_text(key)
elif args.delete:
key = args.delete[0]
delete_text(key)
else:
parser.print_usage()
if __name__ == '__main__':
main()
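# Illustrative command-line usage (not part of the original script); the key and
# text are made-up examples:
#
#   clip.py --add server "ssh user@example.org"
#   clip.py --get server
#   clip.py --list
#   clip.py --delete server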
| 3.28125 | 3 |
tests/unit/test_nsga2.py | learsi1911/GAMA_pygmo_v4 | 49 | 6277 | <filename>tests/unit/test_nsga2.py
from typing import List, Tuple
from gama.genetic_programming.nsga2 import (
NSGAMeta,
fast_non_dominated_sort,
crowding_distance_assignment,
)
def _tuples_to_NSGAMeta(tuples: List[Tuple]) -> List[NSGAMeta]:
""" Converts a list of tuples to NSGAMeta objects. """
# Can't declare it directly in a loop as it does not create a new scope.
def fetch_value(i):
return lambda x: x[i]
metrics = [fetch_value(i) for i in range(len(tuples[0]))]
return [NSGAMeta(t, metrics) for t in tuples]
def test_nsgameta_value_assignment():
pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4)])
three_five, five_three, four_four = pareto
assert three_five.values == (3, 5)
assert five_three.values == (5, 3)
assert four_four.values == (4, 4)
def test_dominates():
pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (2, 4)])
three_five, five_three, two_four = pareto
assert not three_five.dominates(five_three)
assert not five_three.dominates(three_five)
assert three_five.dominates(two_four)
assert not two_four.dominates(three_five)
assert not five_three.dominates(two_four)
assert not two_four.dominates(five_three)
def test_crowding_distance_assignment():
pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4)])
three_five, five_three, four_four = pareto
crowding_distance_assignment(pareto)
assert three_five.distance == float("inf")
assert five_three.distance == float("inf")
assert four_four.distance == 2
def test_crowding_distance_assignment_inf():
pareto = _tuples_to_NSGAMeta([(3, float("inf")), (5, 3), (4, 4)])
three_inf, five_three, four_four = pareto
crowding_distance_assignment(pareto)
assert three_inf.distance == float("inf")
assert five_three.distance == float("inf")
# In our implementation, we ignore 'axis' that contain inf values.
assert four_four.distance == 1
def test_crowd_compare():
pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4), (4.01, 3.99), (4.5, 3.5)])
three_five, five_three, four_four, approx_four_four, half_half = pareto
fast_non_dominated_sort(pareto) # assigns rank
crowding_distance_assignment(pareto) # assigns distance
assert all([three_five.crowd_compare(other) == -1 for other in pareto[2:]])
assert all([five_three.crowd_compare(other) == -1 for other in pareto[2:]])
| 2.59375 | 3 |
stac_ingest/utils/tds.py | crim-ca/stac-ingest | 0 | 6278 | # File taken from https://github.com/Ouranosinc/pavics-vdb/blob/master/catalog/tds.py
"""Utility function to parse metadata from a THREDDS Data Server catalog."""
def walk(cat, depth=1):
"""Return a generator walking a THREDDS data catalog for datasets.
Parameters
----------
cat : TDSCatalog
THREDDS catalog.
depth : int
Maximum recursive depth. Setting 0 will return only datasets within the top-level catalog. If None,
depth is set to 1000.
"""
yield from cat.datasets.items()
if depth is None:
depth = 1000
if depth > 0:
for name, ref in cat.catalog_refs.items():
child = ref.follow()
yield from walk(child, depth=depth-1)
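# Illustrative usage of walk() (not part of the original module); assumes
# Siphon's TDSCatalog, and the catalog URL is a placeholder, not a real server:
#
#   from siphon.catalog import TDSCatalog
#   cat = TDSCatalog("https://example.org/thredds/catalog.xml")
#   for name, ds in walk(cat, depth=1):
#       print(name, ds.access_urls.get("NCML"))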
def attrs_from_ds(ds):
"""Extract attributes from TDS Dataset."""
url = ds.access_urls["NCML"]
attrs = attrs_from_ncml(url)
attrs["__services__"] = ds.access_urls
return attrs
def attrs_from_ncml(url):
"""Extract attributes from NcML file.
Parameters
----------
url : str
Link to NcML service of THREDDS server for a dataset.
Returns
-------
dict
Global attribute values keyed by facet names, with variable attributes in `__variable__` nested dict, and
additional specialized attributes in `__group__` nested dict.
"""
import lxml.etree
import requests
parser = lxml.etree.XMLParser(encoding='UTF-8')
ns = {"ncml": "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2"}
# Parse XML content - UTF-8 encoded documents need to be read as bytes
xml = requests.get(url).content
doc = lxml.etree.fromstring(xml, parser=parser)
nc = doc.xpath("/ncml:netcdf", namespaces=ns)[0]
# Extract global attributes
out = _attrib_to_dict(nc.xpath("ncml:attribute", namespaces=ns))
# Extract group attributes
gr = {}
for group in nc.xpath("ncml:group", namespaces=ns):
gr[group.attrib["name"]] = _attrib_to_dict(group.xpath("ncml:attribute", namespaces=ns))
# Extract variable attributes
va = {}
for variable in nc.xpath("ncml:variable", namespaces=ns):
if '_CoordinateAxisType' in variable.xpath("ncml:attribute/@name", namespaces=ns):
continue
va[variable.attrib["name"]] = _attrib_to_dict(variable.xpath("ncml:attribute", namespaces=ns))
out["__group__"] = gr
out["__variable__"] = va
return out
def _attrib_to_dict(elems):
"""Convert element attributes to dictionary.
Ignore attributes with names starting with _
"""
hidden_prefix = "_"
out = {}
for e in elems:
a = e.attrib
if a["name"].startswith(hidden_prefix):
continue
out[a["name"]] = a["value"]
return out | 2.625 | 3 |
tact/util.py | brunel-physics/mva_scikit | 0 | 6279 | <reponame>brunel-physics/mva_scikit
# -*- coding: utf-8 -*-
"""
Module containing miscellaneous utility functions.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
import itertools
import numpy as np
class BinaryTree(object):
def __init__(self):
self.left = None
self.right = None
self.val = None
def deep_update(d1, d2):
"""
Adds key-value pairs in d2 to d1. Conflicts are resolved in favour of d2.
Recurses into all values in d2 which belong to the collections.Mapping
abstract base class.
Parameters
----------
d1 : collections.Mapping
Base dictionary
d2 : collections.Mapping
Dictionary with updated values
Returns
-------
d1 : collections.Mapping
Updated dictionary
"""
for k, v in d2.iteritems():
if isinstance(v, collections.Mapping):
d1[k] = deep_update(d1.get(k, {}), v)
else:
d1[k] = v
return d1
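# Illustrative behaviour (not part of the original module): values from d2 win
# on conflict, and nested mappings are merged rather than replaced.
#
#   deep_update({"a": {"x": 1, "y": 2}, "b": 3}, {"a": {"y": 20}, "c": 4})
#   # -> {"a": {"x": 1, "y": 20}, "b": 3, "c": 4}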
def nodes(tree):
"""
Return a list of values at every node of a tree.
Parameters
----------
tree : BinaryTree
BinaryTree to extract nodes from.
Returns
-------
nodelist : list
List of values at tree nodes.
"""
nodelist = []
def _get_nodes(tree):
"""
Build up a list of nodes.
Parameters
----------
tree : BinaryTree
BinaryTree to extract nodes from.
Returns
-------
None
"""
nodelist.append(tree.val)
try:
_get_nodes(tree.left)
except AttributeError:
nodelist.append(tree.left)
try:
_get_nodes(tree.right)
except AttributeError:
nodelist.append(tree.right)
_get_nodes(tree)
return nodelist
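# Illustrative behaviour (not part of the original module): leaves that are not
# BinaryTree instances (including None children) are reported as-is.
#
#   t = BinaryTree(); t.val = 1
#   t.left = BinaryTree(); t.left.val = 2
#   nodes(t)  # -> [1, 2, None, None, None]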
def maenumerate(marr):
"""
Multidimensional index iterator for masked arrays.
Return an iterator yielding pairs of array coordinates and values, with
masked values skipped.
Parameters
----------
marr : MaskedArray
Input array.
"""
for i, m in itertools.izip(np.ndenumerate(marr), ~marr.mask.ravel()):
if m:
yield i
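# Illustrative usage (not part of the original module): masked entries are skipped.
#
#   marr = np.ma.masked_array([1.0, 2.0, 3.0], mask=[False, True, False])
#   list(maenumerate(marr))  # -> [((0,), 1.0), ((2,), 3.0)]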
def corrcoef(x, y=None, rowvar=True, fweights=None, aweights=None):
"""
Return Pearson product-moment correlation coefficients.
This is a copy of the implementation found in numpy, with the removal of
    the deprecated bias and ddof keyword arguments, and the addition of
    the fweights and aweights arguments, which are passed to np.cov.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
fweights : array_like, int, optional
1-D array of integer freguency weights; the number of times each
observation vector should be repeated.
aweights : array_like, optional
1-D array of observation vector weights. These relative weights are
typically large for observations considered "important" and smaller for
observations considered less "important". If ``ddof=0`` the array of
weights can be used to assign probabilities to observation vectors.
Returns
-------
R : ndarray
The correlation coefficient matrix of the variables.
"""
c = np.cov(x, y, rowvar, fweights=fweights, aweights=aweights)
try:
d = np.diag(c)
except ValueError:
# scalar covariance
# nan if incorrect value (nan, inf, 0), 1 otherwise
return c / c
stddev = np.sqrt(d.real)
c /= stddev[:, None]
c /= stddev[None, :]
# Clip real and imaginary parts to [-1, 1]. This does not guarantee
# abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without
# excessive work.
np.clip(c.real, -1, 1, out=c.real)
if np.iscomplexobj(c):
np.clip(c.imag, -1, 1, out=c.imag)
return c
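# Illustrative usage (not part of the original module): weighted correlation of
# two variables, with arbitrary example data and observation weights.
#
#   x = np.array([[1.0, 2.0, 3.0, 4.0],
#                 [2.0, 1.0, 4.0, 3.0]])
#   corrcoef(x, aweights=[1.0, 1.0, 2.0, 2.0])  # 2x2 matrix, ones on the diagonal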
| 2.625 | 3 |
src/stochastic_tour.py | DavidNKraemer/ams553-final-project | 0 | 6280 |
import numpy as np
import random
from collections import namedtuple
def generate_prob_matrix(n):
matrix = np.random.rand(n, n)
for i in range(n):
matrix[i][i] = 0
for i in range(n):
matrix[i] = (1/np.sum(matrix[i]))*matrix[i]
return matrix
def categorical(p):
return np.random.choice(len(p), 1, p=p)[0]
Drone = namedtuple('Drone', 'speed probability')
Site = namedtuple('Site', 'location')
class System:
def __init__(self, sites, drones):
self.sites = {}
self.drones = {}
n = len(sites)
for i, drone in enumerate(drones):
self.drones[i] = drone
for i, site in enumerate(sites):
self.sites[i] = site
distance = np.zeros([n, n])
for i in range(n):
for j in range(n):
if i < j:
x = np.subtract(sites[i], sites[j])
d = np.linalg.norm(x)
distance[i][j] = d
distance[j][i] = d
self.distance = distance
def get_site(self, site_id):
return self.sites[site_id]
def get_drone(self, drone_id):
return self.drones[drone_id]
def compute_path_distance(self, path):
n = len(path)
d = 0
for i in range(n - 1):
d += self.distance[path[i]][path[i + 1]]
return d
def compute_path_time(self, path, drone_id):
d = self.compute_path_distance(path)
return d/self.get_drone(drone_id).speed
def generate_path_of_length(self, length, drone_id):
path = []
P = self.get_drone(drone_id).probability
num_sites = len(self.sites)
s = categorical([1/num_sites]*num_sites)
path.append(s)
site = s
for i in range(length):
site = categorical(P[site])
path.append(site)
return path
def generate_path(self, s, t, drone_id):
path = [s]
P = self.get_drone(drone_id).probability
site = categorical(P[s])
path.append(site)
while site != t:
site = categorical(P[site])
path.append(site)
return path
@staticmethod
def generate_random_system(n, k):
locations = np.random.rand(n, 2)
sites = []
for i in locations:
sites.append(Site(i))
drones = []
for i in range(k):
speed = abs(random.random())
probability = generate_prob_matrix(n)
drones.append(Drone(speed, probability))
return System(sites, drones)
def _compute_arrival_times(system, path, drone_id):
    # Accumulate the drone's arrival time at each site along the path.
    arrival_times = []
    t = 0
    for i in range(len(path) - 1):
        t += system.compute_path_time(path[i:i+2], drone_id=drone_id)
        arrival_times.append((drone_id, path[i], path[i+1], t))
    return arrival_times
def _generate_arrival_times(system, num_drones, length):
arrival_times = [[] for _ in range(len(system.sites))]
events = []
    for i in range(num_drones):
        # Each drone walks a random tour of the given length through the sites.
        path = system.generate_path_of_length(length, i)
        events.extend(_compute_arrival_times(system, path, i))
def get_key(item):
return item[3]
events = sorted(events, key=get_key)
for event in events:
drone_id = event[0]
site_id = event[2]
time = event[3]
arrival_times[site_id].append((drone_id, time))
return arrival_times
def compute_cost(system, n):
    # n is taken to be the number of hops in each drone's random tour.
    arrival_times = _generate_arrival_times(system, len(system.drones), n)
interarrival_times = [[] for _ in range(len(system.sites))]
for i in range(len(arrival_times)):
arrivals = arrival_times[i]
for j in range(len(arrivals) - 1):
interarrival_times[i].append(arrivals[j+1][1] - arrivals[j][1])
interarrival_avgs = [compute_average(i) for i in interarrival_times]
return max(interarrival_avgs)
def compute_average(data):
return (1/len(data))*sum(data)
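if __name__ == '__main__':
    # Illustrative sketch (not part of the original script): the system size,
    # drone count and tour length are arbitrary; a long tour makes it very
    # likely every site is visited at least twice, which compute_cost needs in
    # order to form interarrival times.
    demo_system = System.generate_random_system(5, 2)
    print(compute_cost(demo_system, 1000))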
| 3.0625 | 3 |
orbit/actions/conditional_action_test.py | mcasanova1445/models | 1 | 6281 | <filename>orbit/actions/conditional_action_test.py
# Copyright 2022 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for orbit.actions.conditional_action."""
from orbit import actions
import tensorflow as tf
class ConditionalActionTest(tf.test.TestCase):
def test_conditional_action(self):
# Define a function to raise an AssertionError, since we can't in a lambda.
def raise_assertion(arg):
raise AssertionError(str(arg))
conditional_action = actions.ConditionalAction(
condition=lambda x: x['value'], action=raise_assertion)
conditional_action({'value': False}) # Nothing is raised.
with self.assertRaises(AssertionError) as ctx:
conditional_action({'value': True})
    self.assertEqual(str(ctx.exception), "{'value': True}")
if __name__ == '__main__':
tf.test.main()
| 2.75 | 3 |
Customizations/Tagging/show_tags.task.py | phnomcobra/valarie-content | 0 | 6282 | <reponame>phnomcobra/valarie-content
#!/usr/bin/python
################################################################################
# DOCUMENTS
#
# <NAME>
# <EMAIL>
# 614 692 2050
#
# 04/22/2018 Original Construction
################################################################################
import traceback
import json
class Task:
def __init__(self):
self.output = []
self.status = STATUS_NOT_EXECUTED
def execute(self, cli):
try:
keys = cli.AGTCollections("tags")
self.status = STATUS_SUCCESS
for key in keys.find():
#key.set()
self.output.append(json.dumps(key.object, indent = 4))
except Exception:
self.status = STATUS_EXCEPTION
self.output.append(traceback.format_exc())
return self.status | 2.203125 | 2 |
examples/python/masked_hist.py | DerThorsten/seglib | 0 | 6283 | import vigra
import numpy
import pylab
from seglib import cgp2d
from seglib.preprocessing import norm01
import seglib.edge_detectors.pixel as edp
import seglib.region_descriptors.pixel as rdp
from seglib.preprocessing import norm01
from seglib.histogram import jointHistogram,histogram
from seglib.region_descriptors.pixel.sift import denseSift
# change me to your path
img = "img/text.jpg"
img = numpy.squeeze(vigra.readImage(img))#[0:75,0:75,:]
binCount = 30
sigma = 1.5
histImg = numpy.zeros(img.shape[0:2]+(binCount*3,))
imgBig = None
sizes = [3,4,5,8,10,15,20,25,40,100]
scalings = [5,10,15]
for size in sizes:
for scaling in scalings:
size = int (size)
scaling = float(scaling)
print size,scaling
labels ,nseg= vigra.analysis.slicSuperpixels(vigra.colors.transform_RGB2Lab(img),scaling,size)
labels = vigra.analysis.labelImage(labels).astype(numpy.uint64)
cgp,tgrid = cgp2d.cgpFromLabels(labels)
if imgBig is None:
imgBig=vigra.sampling.resize(img,cgp.shape)
#cgp2d.visualize(imgBig,cgp=cgp)
print "accumulate cell "
hist = cgp.accumulateCellHistogram(cellType=2,image=img,binCount=binCount,sigma=sigma)
hist = hist.reshape([cgp.numCells(2),-1])
for c in range(histImg.shape[2]):
histImg[:,:,c] += (size)*cgp.featureToImage(cellType=2,features=hist[:,c],ignoreInactive=False,useTopologicalShape=False)
histImg=numpy.require(histImg,dtype=numpy.float32)
histImg=vigra.taggedView(histImg, 'xyc')
histImg = vigra.gaussianSmoothing(histImg,sigma=1.0)
#for c in range(histImg.shape[2]):
# #print c
# pylab.imshow( numpy.swapaxes( norm01(histImg[:,:,c]) ,0,1) )
# pylab.show()
#
# print "hist",hist.shape
imgdt = rdp.deepDetexturize(srcImg=img,img=histImg,nIteration=10,
nCluster=10,reductionAlg='pca',nldEdgeThreshold=10.0,nldScale=10.0,distance=None)#'cityblock')
| 2.59375 | 3 |
backup/models.py | helwete/simple-backup | 0 | 6284 | <reponame>helwete/simple-backup<filename>backup/models.py
from datetime import date
from django.conf import settings
from django.db import models
# Create your models here.
def user_directory_path(instance, filename):
# file will be uploaded to MEDIA_ROOT/user_<id>/<filename>
today = date.today()
return '{0}/{2}/{1}'.format(instance.user.username, filename, today.strftime("%Y/%m/%d/"))
class Upload(models.Model):
uploaded_file = models.FileField(null=True, blank=True, upload_to=user_directory_path)
file_name = models.CharField(max_length=255, null=True)
date_uploaded = models.DateField(auto_now_add=True, null=True)
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True)
def __str__(self):
return self.uploaded_file.name
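def _example_upload_path():
    # Illustrative sketch (not part of the original app): shows the layout that
    # user_directory_path produces, using a stand-in object instead of a real
    # Upload instance, e.g. 'alice/2024/05/01//notes.txt' for user "alice" and
    # file "notes.txt" (the date segment sits between username and file name).
    from types import SimpleNamespace
    fake_instance = SimpleNamespace(user=SimpleNamespace(username="alice"))
    return user_directory_path(fake_instance, "notes.txt")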
| 2.703125 | 3 |
Kmeans Cluster/Kmeans_Compare.py | Jojoxiao/Machine-Learning-for-Beginner-by-Python3 | 397 | 6285 | #-*- coding:utf-8 -*-
# &Author AnFany
# Import the implementations being compared
import Kmeans_AnFany as K_Af # AnFany
import Kmeans_Sklearn as K_Sk # Sklearn
import matplotlib.pyplot as plt
from pylab import mpl  # for displaying Chinese text in plots
mpl.rcParams['font.sans-serif'] = ['FangSong']  # set FangSong as the Chinese font
mpl.rcParams['axes.unicode_minus'] = False
import numpy as np
# Generate a dataset with sklearn
from sklearn.datasets import make_blobs
X, Y = make_blobs(n_samples=600, centers=6, n_features=2)
# Draw a scatter plot of the training data
def fig_scatter(exdata, eydata, titl='训练数据散点图', co=['r', 'g', 'k', 'b', 'y', 'm'], marker=['o','^','H','v','d','>']):
typeclass = sorted(list(set(eydata)))
for ii in range(len(typeclass)):
datax = exdata[eydata == typeclass[ii]]
plt.scatter(datax[:, 0], datax[:, -1], c=co[ii], s=50, marker=marker[ii])
plt.title(titl)
#plt.legend(['%d类'%i for i in typeclass], bbox_to_anchor=(1.2, 0.9))
plt.xlabel('特征1')
plt.ylabel('特征2')
# Run the different implementations
# AnFany
kresult = K_Af.op_kmeans(X, countcen=6)
# Sklearn
sk = K_Sk.KMeans(init='k-means++', n_clusters=6, n_init=10)
train = sk.fit(X)
result = sk.predict(X)
skru = K_Sk.trans(result)
# Plot the clusters produced by each algorithm
def sca(Xdata, Center, signdict, co=['r', 'g', 'y', 'b', 'c', 'm'], marker=['o','^','H','s','d','*'], titl = 'AnFany 结果'):
du = 1
for jj in signdict:
xdata = Xdata[signdict[jj]]
        plt.scatter(xdata[:, 0], xdata[:, -1], c=co[jj], s=50, marker=marker[jj], label='%d类' % jj)  # plot the samples of this cluster
for ss in Center:
if du:
            plt.scatter(ss[0], ss[1], c='k', s=100, marker='8', label='类别中心')  # plot the cluster centers
du = 0
else:
            plt.scatter(ss[0], ss[1], c='k', s=100, marker='8')  # plot the cluster centers
plt.legend(bbox_to_anchor=(1.2, 1))
plt.title(titl)
plt.xlabel('特征1')
plt.ylabel('特征2')
# Define the Euclidean distance
def dis(sample, center):
cen = np.array([center])
sample = np.array(sample)
if len(sample) != 0:
usb = np.sum((sample - cen) ** 2, axis=1) ** 0.5
return usb
else:
return 0
# Compute the cost of the final clustering
def Cost(Xdata, typedict):
center = {}
for kk in typedict:
        center[kk] = np.mean(Xdata[typedict[kk]], axis=0)  # cluster mean
cio = 0
for cc in typedict:
cio += np.sum(dis(Xdata[typedict[cc]], center[cc]))
return cio
# Display the final results
plt.subplot(2, 2, 1)
fig_scatter(X, Y)
plt.subplot(2, 2, 2)
sca(X, kresult[0], kresult[2])
plt.subplot(2, 2, 3)
sca(X, train.cluster_centers_, skru, titl='Sklearn 结果')
plt.subplot(2, 2, 4)
plt.axis('off')
plt.text(0.3, 0.6, 'AnFany 最终的分类成本值为:%.5f'%Cost(X, kresult[2]))
plt.text(0.3, 0.3, 'Sklearn 最终的分类成本值为:%.5f'%Cost(X, skru))
plt.show()
| 2.703125 | 3 |
control_panel.py | Stayermax/5dof-bartender-robot | 0 | 6286 | #!/usr/bin/env python
"""
Control panel file
"""
import pddl_solver as pddl
import ik
import rospy
from get_object_position import get_object_position
import time
from constants import *
from spawn_models import reset_model_position, reset_all, spawn_model, spawn_all_models
from delete_models import delete_all, delete_model
def control_panel():
robot = ik.MoveGroupPythonIntefaceTutorial()
# robot.go_to_init_state()
# robot.open_gripper()
bottle = 'bottle_1'
    # simulation
current_bottle_orig_pos = get_object_position(bottle)
# real_world
# current_bottle_orig_pos = Real_poses(bottle)
# current_bottle_orig_pos[-1] += BZS
while(True):
print()
cmd = raw_input("Enter command:\n open, close, init,\n gtb, hover, gtc, move,\n pour, cb, rb, ra,\n pgr, parm, pj,\n setj, att, box,\n del, dela, spawn, exit:\n")
if(cmd == 'open'): # open the gripper
robot.open_gripper()
elif(cmd == 'close'): # close the gripper
goal = float(raw_input("Enter closing goal in range [-0.12; 0]:\n"))
if(goal==""):
goal = -0.075
while(goal > 0 or goal < -0.12):
goal = float(raw_input("Enter closing goal in range [-0.12; 0]:\n"))
robot.close_gripper(goal)
elif(cmd == 'init'): # go to initial pose
robot.go_to_init_state()
elif(cmd == 'gtb'): # go to bottle
x,y,z = current_bottle_orig_pos
h = raw_input("Set z level: ")
if(h == ""):
h = BZS
else:
h = float(h)
robot.go_to_xyz(x, y, z + h)
elif(cmd == 'hover'): # hover over the bottle
x,y,z = current_bottle_orig_pos
robot.go_to_xyz(x, y, BUO)
elif(cmd == 'gtc'): # go to cup
# simulation
x,y,z = get_object_position('cup_1')
# real_world
# pos, angle = Real_world_PourPos[cup]
# x,y,z = pos
robot.go_to_xyz(x, y, CUO)
elif(cmd == 'move'): # go to cup
x,y,z = robot.get_arm_pose()
dir = raw_input("Enter coord: x,y or z:\n")
while(dir not in ['x','y','z']):
dir = raw_input("Enter coord: x,y or z:\n")
step = float(raw_input("Enter step size:\n"))
if(dir == 'x'):
x += step
elif(dir == 'y'):
y += step
elif(dir == 'z'):
z += step
robot.go_to_xyz(x, y, z)
elif(cmd == 'pour'): # turn gripper on pouring angle
robot.rotate_gripper(angle = 1)
rospy.sleep(1.5)
robot.rotate_gripper(angle = 0)
elif(cmd == 'cb'): # change bottle
b_n = int(raw_input("Enter bottle number from 1 to 6\n"))
while(b_n not in [1,2,3,4,5,6]):
b_n = int(raw_input("Enter bottle number from 1 to 6\n"))
bottle = 'bottle_' + str(b_n)
            # simulation
current_bottle_orig_pos = get_object_position(bottle)
# real_world
# current_bottle_orig_pos = Real_poses(bottle)
elif(cmd == 'rb'): # reset bottle position
reset_model_position(bottle)
elif(cmd == 'ra'): # reset all models positions
reset_all()
elif(cmd == 'pgr'): # print gripper postiion
pos = robot.get_gripper_pose()
print("Current gripper coordinates: " + str(pos))
elif(cmd == 'parm'): # print arm postiion
pos = robot.get_arm_pose()
print("Current arm coordinates: " + str(pos))
elif(cmd == 'pj'): # print arm joints
current_joints = robot.get_arm_joints()
print("Current joints poistion: " + str(current_joints))
elif(cmd == 'setj'): # set robot joint angles
joints = robot.get_arm_joints()
# joints[0] = float(raw_input("Enter theta_0")) # We don't want to change the arm direction
t1 = raw_input("Enter theta_1: ")
t2 = raw_input("Enter theta_2: ")
t3 = raw_input("Enter theta_3: ")
if(t1 != ''):
joints[1] = float(t1)
if(t2 != ''):
joints[2] = float(t2)
if(t3 != ''):
joints[3] = float(t3)
joints[4] = 0
robot.set_joints(joints)
elif(cmd == 'att'): # attaches object to the gripper
robot.attach_object(bottle)
attached_objects = robot.scene.get_attached_objects([bottle])
print("Attached objects: " + str(attached_objects))
elif(cmd == 'box'):
robot.add_box()
robot.attach_object('box')
attached_objects = robot.scene.get_attached_objects([bottle])
print("Attached objects: " + str(attached_objects))
elif(cmd == 'del'):
delete_model(bottle)
print("Bottle " + str(bottle.split('_')[1]) + " was deleted")
elif(cmd == 'dela'):
delete_all()
print("All models were deleted")
elif(cmd == 'spawn'):
spawn_model(bottle)
print("Bottle " + str(bottle.split('_')[1]) + " was spawned")
elif(cmd == 'exit'): # exit control panel script
print('Finish performance')
return
else:
print('Wrong command')
if __name__ == '__main__':
control_panel() | 2.703125 | 3 |
Enigma/Enigma.py | archanpatkar/Enigma | 3 | 6287 | from Enigma.Rotor import Rotor
from Enigma.Reflector import Reflector
from Enigma.Plugboard import Plugboard
class Enigma:
def __init__(self , rotors = [ Rotor(0,"IC") , Rotor(0,"IIC") , Rotor(0,"IIIC") ] , plugboard = Plugboard() , reflector = Reflector("A")):
self.rotors = rotors
for i in range(len(rotors)):
if i + 1 < len(rotors):
                # Bind i at definition time so each rotor steps its own neighbour.
                rotors[i].on("Sidereal", lambda *args, i=i: rotors[i+1].step())
self.Plugboard = plugboard;
self.Reflector = reflector;
def encrypt(self,data):
data = data.upper().replace(" ","");
string = "";
for char in data:
string += self.each(char,True);
return string;
def decrypt(self,data):
data = data.upper();
string = "";
for char in data:
string += self.each(char,False);
return string;
def each(self,char,flag):
self.rotors[0].step()
output = self.Plugboard.get(char)
for rotor in self.rotors:
if flag:
output = rotor.scramble(output)
else:
output = rotor.unscramble(output)
output = self.Reflector.get(output)
for rotor in self.rotors[::-1]:
if flag:
output = rotor.scramble(output)
else:
output = rotor.unscramble(output)
return self.Plugboard.get(output);
| 2.84375 | 3 |
var/spack/repos/builtin/packages/exiv2/package.py | xiki-tempula/spack | 9 | 6288 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Exiv2(CMakePackage):
"""Exiv2 is a Cross-platform C++ library and a command line utility
to manage image metadata
"""
homepage = "https://www.exiv2.org/"
url = "https://github.com/Exiv2/exiv2/archive/v0.27.2.tar.gz"
version('0.27.2', sha256='3dbcaf01fbc5b98d42f091d1ff0d4b6cd9750dc724de3d9c0d113948570b2934')
depends_on('zlib', type='link')
depends_on('[email protected]:', type='link')
| 1.015625 | 1 |
magicauth/send_token.py | JMIdeaMaker/django-magicauth | 0 | 6289 | import math
from django.contrib.auth import get_user_model
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.template import loader
from magicauth import settings as magicauth_settings
from django.conf import settings as django_settings
from magicauth.models import MagicToken
import sendgrid
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
sg = sendgrid.SendGridAPIClient(django_settings.SENDGRID_API_KEY)
class SendTokenMixin(object):
"""
    Helper for sending an email containing a link that carries the MagicToken.
"""
def create_token(self, user):
token = MagicToken.objects.create(user=user)
return token
def get_user_from_email(self, user_email):
"""
Query the DB for the user corresponding to the email.
- We use get_user_model() instead of User (in case the Django app has customised the User
class)
- We use magicauth_settings.EMAIL_FIELD, which is the name of the field in the user
model. By default "username" but not always.
"""
user_class = get_user_model()
email_field = magicauth_settings.EMAIL_FIELD
field_lookup = {f"{email_field}__iexact": user_email}
user = user_class.objects.get(**field_lookup)
return user
def send_email(self, user, user_email, token, extra_context=None):
email_subject = magicauth_settings.EMAIL_SUBJECT
html_template = magicauth_settings.EMAIL_HTML_TEMPLATE
text_template = magicauth_settings.EMAIL_TEXT_TEMPLATE
from_email = magicauth_settings.FROM_EMAIL
context = {
"token": token,
"user": user,
"site": get_current_site(self.request),
"TOKEN_DURATION_MINUTES": math.floor(magicauth_settings.TOKEN_DURATION_SECONDS / 60),
"TOKEN_DURATION_SECONDS": magicauth_settings.TOKEN_DURATION_SECONDS,
}
if extra_context:
context.update(extra_context)
text_message = loader.render_to_string(text_template, context)
html_message = loader.render_to_string(html_template, context)
mail = Mail(
from_email=(
django_settings.MAGICAUTH_FROM_EMAIL,
django_settings.MAGICAUTH_SENDER
),
to_emails=[user_email],
subject=email_subject,
html_content=html_message
)
sg.send(mail)
def send_token(self, user_email, extra_context=None):
user = self.get_user_from_email(user_email)
token = self.create_token(user)
self.send_email(user, user_email, token, extra_context)
| 2.328125 | 2 |
qcdb/util/paths.py | loriab/qccddb | 8 | 6290 | <gh_stars>1-10
import os
import sys
## {{{ http://code.activestate.com/recipes/52224/ (r1)
def search_file(filename, search_path):
"""Given an os.pathsep divided `search_path`, find first occurrence of
`filename`. Returns full path to file if found or None if unfound.
"""
file_found = False
paths = search_path.split(os.pathsep)
# paths = string.split(search_path, os.pathsep)
for path in paths:
if os.path.exists(os.path.join(path, filename)):
file_found = True
break
if file_found:
return os.path.abspath(os.path.join(path, filename))
else:
return None
## end of http://code.activestate.com/recipes/52224/ }}}
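def _search_file_example():
    # Illustrative sketch (not part of the original module): look for this very
    # file in the current and parent directories; returns an absolute path, or
    # None if unfound.
    return search_file(os.path.basename(__file__), os.pathsep.join([".", ".."]))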
def all_casings(input_string):
"""Function to return a generator of all lettercase permutations
of *input_string*.
"""
if not input_string:
yield ""
else:
first = input_string[:1]
if first.lower() == first.upper():
for sub_casing in all_casings(input_string[1:]):
yield first + sub_casing
else:
for sub_casing in all_casings(input_string[1:]):
yield first.lower() + sub_casing
yield first.upper() + sub_casing
def import_ignorecase(module, lenv=None):
"""Function to import *module* in any possible lettercase
permutation. Returns module object if available, None if not.
`lenv` is list (not str) of addl sys.path members to try.
"""
lenv = [] if lenv is None else lenv
with add_path(lenv):
modobj = None
for per in list(all_casings(module)):
try:
modobj = __import__(per)
except ImportError:
pass
else:
break
return modobj
class add_path:
"""https://stackoverflow.com/a/39855753"""
def __init__(self, paths):
# paths must be list
self.paths = paths
def __enter__(self):
for pth in reversed(self.paths):
sys.path.insert(0, pth)
def __exit__(self, exc_type, exc_value, traceback):
for pth in self.paths:
sys.path.remove(pth)
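def _casing_and_path_example():
    # Illustrative sketch (not part of the original module): all_casings yields
    # every lettercase permutation, and add_path temporarily prepends entries to
    # sys.path; the "/tmp" entry below is only an example value.
    casings = list(all_casings("ab"))  # ['ab', 'Ab', 'aB', 'AB']
    with add_path(["/tmp"]):
        first_entry = sys.path[0]  # '/tmp' while inside the block
    return casings, first_entry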
| 2.953125 | 3 |
tests/models/DCN_test.py | JiangBowen-master/DeepCTR | 1 | 6291 | <filename>tests/models/DCN_test.py
import pytest
import tensorflow as tf
from deepctr.estimator import DCNEstimator
from deepctr.models import DCN
from ..utils import check_model, get_test_data, SAMPLE_SIZE, get_test_data_estimator, check_estimator, \
Estimator_TEST_TF1
@pytest.mark.parametrize(
'cross_num,hidden_size,sparse_feature_num,cross_parameterization',
[(0, (8,), 2, 'vector'), (1, (), 1, 'vector'), (1, (8,), 3, 'vector'),
(0, (8,), 2, 'matrix'), (1, (), 1, 'matrix'), (1, (8,), 3, 'matrix'),
]
)
def test_DCN(cross_num, hidden_size, sparse_feature_num, cross_parameterization):
model_name = "DCN"
sample_size = SAMPLE_SIZE
x, y, feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num,
dense_feature_num=sparse_feature_num)
model = DCN(feature_columns, feature_columns, cross_num=cross_num, cross_parameterization=cross_parameterization,
dnn_hidden_units=hidden_size, dnn_dropout=0.5)
check_model(model, model_name, x, y)
@pytest.mark.parametrize(
'cross_num,hidden_size,sparse_feature_num',
[(1, (8,), 3)
]
)
def test_DCNEstimator(cross_num, hidden_size, sparse_feature_num):
if not Estimator_TEST_TF1 and tf.__version__ < "2.2.0":
return
model_name = "DCN"
sample_size = SAMPLE_SIZE
linear_feature_columns, dnn_feature_columns, input_fn = get_test_data_estimator(sample_size,
sparse_feature_num=sparse_feature_num,
dense_feature_num=sparse_feature_num)
model = DCNEstimator(linear_feature_columns, dnn_feature_columns, cross_num=cross_num, dnn_hidden_units=hidden_size,
dnn_dropout=0.5)
check_estimator(model, input_fn)
# def test_DCN_invalid(embedding_size=8, cross_num=0, hidden_size=()):
# feature_dim_dict = {'sparse': [SparseFeat('sparse_1', 2), SparseFeat('sparse_2', 5), SparseFeat('sparse_3', 10)],
# 'dense': [SparseFeat('dense_1', 1), SparseFeat('dense_1', 1), SparseFeat('dense_1', 1)]}
# with pytest.raises(ValueError):
# _ = DCN(None, embedding_size=embedding_size, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5)
if __name__ == "__main__":
pass
| 2.296875 | 2 |
workflows/post_process_run/fv3post/gsutil.py | jacnugent/fv3net | 5 | 6292 | <filename>workflows/post_process_run/fv3post/gsutil.py<gh_stars>1-10
import os
import subprocess
import backoff
class GSUtilResumableUploadException(Exception):
pass
def _decode_to_str_if_bytes(s, encoding="utf-8"):
if isinstance(s, bytes):
return s.decode(encoding)
else:
return s
def authenticate():
try:
credentials = os.environ["GOOGLE_APPLICATION_CREDENTIALS"]
except KeyError:
pass
else:
subprocess.check_call(
["gcloud", "auth", "activate-service-account", "--key-file", credentials]
)
@backoff.on_exception(backoff.expo, GSUtilResumableUploadException, max_tries=3)
def upload_dir(d, dest):
try:
# Pipe stderr to stdout because gsutil logs upload progress there.
subprocess.check_output(
["gsutil", "-m", "rsync", "-r", "-e", d, dest], stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as e:
output = _decode_to_str_if_bytes(e.output)
if "ResumableUploadException" in output:
raise GSUtilResumableUploadException()
else:
raise e
def download_directory(dir_, dest):
os.makedirs(dest, exist_ok=True)
subprocess.check_call(["gsutil", "-m", "rsync", "-r", dir_, dest])
def cp(source, destination):
subprocess.check_call(["gsutil", "cp", source, destination])
| 2.1875 | 2 |
deploy_tix/__main__.py | rpappalax/deploy-tix | 0 | 6293 | import argparse
from deploy_tix.bugzilla_rest_client import BugzillaRESTClient
from deploy_tix.release_notes import ReleaseNotes
from output_helper import OutputHelper
def main(args=None):
parser = argparse.ArgumentParser(
description='Scripts for creating / updating deployment tickets in \
Bugzilla',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-a', '--application',
help='Example: loop-server',
required=True)
parser.add_argument(
'-B', '--bugzilla-mozilla',
help='Set this switch to post directly to bugzilla.mozilla.org \
(without switch posts to: bugzilla-dev.allizom.org)',
action='store_true',
default=False,
required=False)
subparsers = parser.add_subparsers(help='Ticket action')
# parser for ticket - {create} option
parser_create = \
subparsers.add_parser('NEW', help='Create a NEW deployment ticket.')
parser_create.add_argument(
'-o', '--repo-owner',
help='Example: mozilla-services',
default='mozilla-services',
required=False)
parser_create.add_argument(
'-e', '--environment',
help='Enter: STAGE, PROD',
default='STAGE',
required=False)
parser_create.add_argument(
'-m', '--cc-mail',
help='Example: <EMAIL> \
NOTE: must be a registered username!',
default='',
required=False)
    # parser for ticket - {update} option
parser_update = subparsers.add_parser(
'UPDATE',
help='UPDATE an existing deployment ticket'
)
parser_update.add_argument(
'-i', '--bug-id',
help='Example: 1234567',
required=False)
parser_update.add_argument(
'-c', '--comment',
help='Enter: <your bug comment>',
required=True)
args = vars(parser.parse_args())
application = args['application']
bugzilla_mozilla = args['bugzilla_mozilla']
ticket = BugzillaRESTClient(bugzilla_mozilla)
if all(key in args for key in ['bug_id', 'comment']):
bug_id = args['bug_id']
comment = args['comment']
ticket.bug_update(application, comment, bug_id)
if all(key in args for key in ['repo_owner', 'application', 'environment']): # noqa
repo_owner = args['repo_owner']
environment = args['environment'].lower()
if args['cc_mail']:
cc_mail = args['cc_mail']
else:
cc_mail = ''
status = 'NEW'
output = OutputHelper()
output.log('Create deployment ticket', True, True)
notes = ReleaseNotes(repo_owner, application, environment)
description = notes.get_release_notes()
release_num = notes.last_tag
output.log('Release Notes', True)
output.log(description)
ticket.bug_create(
release_num, application, environment, status, description, cc_mail
)
| 2.265625 | 2 |
site-packages/visual/examples/drape.py | lebarsfa/vpython-wx | 68 | 6294 | from visual import *
print("""
Click to place spheres under falling string.
Right button drag or Ctrl-drag to rotate view.
Middle button drag or Alt-drag to zoom in or out.
On a two-button mouse, middle is left + right.
""")
# <NAME>
scene.title = "Drape"
restlength = 0.02
m = 0.010 * restlength
g = 9.8
dt = 0.002
k = 3
damp = (1-0)**dt
nspheres = 3
floor = 0
# Create the stringy thing:
band = curve( x = arange(-1,1,restlength),
y = 1,
radius = 0.02
)
band.p = band.pos * 0
scene.range = 1.5
scene.autoscale = 0
# Let the user position obstacles:
spheres = []
for i in range(nspheres):
s = sphere( pos = scene.mouse.getclick().pos, #(i*0.6 - 0.7,0.5 + i*0.1,0),
radius = 0.25,
color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) )
spheres.append( s )
while True:
rate(1.0 / dt)
if scene.mouse.clicked:
i = len(spheres)
s = sphere( pos = scene.mouse.getclick().pos,
radius = 0.25,
color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) )
spheres.append( s )
if floor:
below = less(band.pos[:,1],-1)
band.p[:,1] = where( below, 0, band.p[:,1] )
band.pos[:,1] = where( below, -1, band.pos[:,1] )
# need a more physical way to make 'damped springs' than this!
band.p = band.p * damp
#band.p[0] = 0 # nail down left endpoint
#band.p[-1] = 0 # nail down right endpoint
band.pos = band.pos + band.p/m*dt
#gravity
band.p[:,1] = band.p[:,1] - m * g * dt
# force[n] is the force on point n from point n+1 (to the right):
length = (band.pos[1:] - band.pos[:-1])
dist = sqrt(sum(length*length,-1))
force = k * ( dist - restlength )
force = length/dist[:,newaxis] * force[:,newaxis]
band.p[:-1] = band.p[:-1] + force*dt
band.p[1:] = band.p[1:] - force*dt
# color based on "stretch": blue -> white -> red
c = clip( dist/restlength * 0.5, 0, 2 )
# blue (compressed) -> white (relaxed) -> red (tension)
band.red[1:] = where( less(c,1), c, 1 )
band.green[1:] = where( less(c,1), c, 2-c )
band.blue[1:] = where( less(c,1), 1, 2-c )
for s in spheres:
dist = mag( band.pos - s.pos )[:,newaxis]
inside = less( dist, s.radius )
if sometrue(inside):
R = ( band.pos - s.pos ) / dist
surface = s.pos + (s.radius)*R
band.pos = surface*inside + band.pos*(1-inside)
pdotR = sum(asarray(band.p)*asarray(R),-1)
band.p = band.p - R*pdotR[:,newaxis]*inside
| 3.515625 | 4 |
sdc/utilities/sdc_typing_utils.py | dlee992/sdc | 540 | 6295 | # *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
"""
| This file contains SDC utility functions related to typing compilation phase
"""
import numpy
import numba
import sdc
from numba import types
from numba.core.errors import TypingError
from numba.np import numpy_support
from sdc.datatypes.indexes import *
from sdc.str_arr_type import string_array_type, StringArrayType
from sdc.datatypes.categorical.types import Categorical
sdc_old_index_types = (types.Array, StringArrayType, )
sdc_pandas_index_types = (
EmptyIndexType,
PositionalIndexType,
RangeIndexType,
Int64IndexType,
MultiIndexType,
) + sdc_old_index_types
sdc_indexes_range_like = (
PositionalIndexType,
RangeIndexType,
)
# TO-DO: support caching of data allocated for range indexes at request for .values
sdc_indexes_wo_values_cache = (
EmptyIndexType,
PositionalIndexType,
RangeIndexType,
)
sdc_pandas_df_column_types = (
types.Array,
StringArrayType,
Categorical,
)
class TypeChecker:
"""
Validate object type and raise TypingError if the type is invalid, e.g.:
Method nsmallest(). The object n
given: bool
expected: int
"""
msg_template = '{} The object {}\n given: {}\n expected: {}'
def __init__(self, func_name):
"""
Parameters
----------
func_name: :obj:`str`
name of the function where types checking
"""
self.func_name = func_name
def raise_exc(self, data, expected_types, name=''):
"""
Raise exception with unified message
Parameters
----------
data: :obj:`any`
real type of the data
expected_types: :obj:`str`
expected types inserting directly to the exception
name: :obj:`str`
name of the parameter
"""
msg = self.msg_template.format(self.func_name, name, data, expected_types)
raise TypingError(msg)
def check(self, data, accepted_type, name=''):
"""
Check data type belongs to specified type
Parameters
----------
data: :obj:`any`
real type of the data
accepted_type: :obj:`type`
accepted type
name: :obj:`str`
name of the parameter
"""
if not isinstance(data, accepted_type):
self.raise_exc(data, accepted_type.__name__, name=name)
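def _type_checker_example(n):
    # Illustrative sketch (not used by SDC itself): how TypeChecker is typically
    # invoked while typing an overloaded method; raises TypingError if `n` is
    # not an Integer type. The method name below is only an illustration.
    ty_checker = TypeChecker('Method nsmallest().')
    ty_checker.check(n, types.Integer, 'n')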
class SDCLimitation(Exception):
"""Exception to be raised in case of SDC limitation"""
pass
def kwsparams2list(params):
"""Convert parameters dict to a list of string of a format 'key=value'"""
return ['{}={}'.format(k, v) for k, v in params.items()]
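# For instance (illustrative, not from the original code):
# kwsparams2list({'skipna': True, 'axis': 0}) returns ['skipna=True', 'axis=0'].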
def sigparams2list(param_names, defaults):
"""Creates a list of strings of a format 'key=value' from parameter names and default values"""
return [(f'{param}' if param not in defaults else f'{param}={defaults[param]}') for param in param_names]
def has_literal_value(var, value):
"""Used during typing to check that variable var is a Numba literal value equal to value"""
if not isinstance(var, types.Literal):
return False
if value is None:
return isinstance(var, types.NoneType) or var.literal_value is value
elif isinstance(value, type(bool)):
return var.literal_value is value
else:
return var.literal_value == value
def has_python_value(var, value):
"""Used during typing to check that variable var was resolved as Python type and has specific value"""
if not isinstance(var, type(value)):
return False
if value is None or isinstance(value, type(bool)):
return var is value
else:
return var == value
def is_default(var, value):
return has_literal_value(var, value) or has_python_value(var, value) or isinstance(var, types.Omitted)
def check_is_numeric_array(type_var):
"""Used during typing to check that type_var is a numeric numpy arrays"""
return check_is_array_of_dtype(type_var, types.Number)
def check_index_is_numeric(ty_series):
"""Used during typing to check that series has numeric index"""
return isinstance(ty_series.index.dtype, types.Number)
def check_types_comparable(ty_left, ty_right):
"""Used during typing to check that specified types can be compared"""
if hasattr(ty_left, 'dtype'):
ty_left = ty_left.dtype
if hasattr(ty_right, 'dtype'):
ty_right = ty_right.dtype
# add the rest of supported types here
if isinstance(ty_left, types.Number):
return isinstance(ty_right, types.Number)
if isinstance(ty_left, types.UnicodeType):
return isinstance(ty_right, types.UnicodeType)
if isinstance(ty_left, types.Boolean):
return isinstance(ty_right, types.Boolean)
if isinstance(ty_left, (types.Tuple, types.UniTuple)):
# FIXME: just for now to unblock compilation
return ty_left == ty_right
return False
def check_arrays_comparable(ty_left, ty_right):
"""Used during typing to check that underlying arrays of specified types can be compared"""
return ((ty_left == string_array_type and ty_right == string_array_type)
or (check_is_numeric_array(ty_left) and check_is_numeric_array(ty_right)))
def check_is_array_of_dtype(type_var, dtype):
"""Used during typing to check that type_var is a numeric numpy array of specific dtype"""
return isinstance(type_var, types.Array) and isinstance(type_var.dtype, dtype)
def find_common_dtype_from_numpy_dtypes(array_types, scalar_types):
"""Used to find common numba dtype for a sequences of numba dtypes each representing some numpy dtype"""
np_array_dtypes = [numpy_support.as_dtype(dtype) for dtype in array_types]
np_scalar_dtypes = [numpy_support.as_dtype(dtype) for dtype in scalar_types]
np_common_dtype = numpy.find_common_type(np_array_dtypes, np_scalar_dtypes)
numba_common_dtype = numpy_support.from_dtype(np_common_dtype)
return numba_common_dtype
def find_index_common_dtype(left, right):
"""Used to find common dtype for indexes of two series and verify if index dtypes are equal"""
left_index_dtype = left.dtype
right_index_dtype = right.dtype
index_dtypes_match = left_index_dtype == right_index_dtype
if not index_dtypes_match:
numba_index_common_dtype = find_common_dtype_from_numpy_dtypes(
[left_index_dtype, right_index_dtype], [])
else:
numba_index_common_dtype = left_index_dtype
return index_dtypes_match, numba_index_common_dtype
def gen_impl_generator(codegen, impl_name):
"""Generate generator of an implementation"""
def _df_impl_generator(*args, **kwargs):
func_text, global_vars = codegen(*args, **kwargs)
loc_vars = {}
exec(func_text, global_vars, loc_vars)
_impl = loc_vars[impl_name]
return _impl
return _df_impl_generator
def check_signed_integer(ty):
return isinstance(ty, types.Integer) and ty.signed
def _check_dtype_param_type(dtype):
""" Returns True is dtype is a valid type for dtype parameter and False otherwise.
Used in RangeIndex ctor and other methods that take dtype parameter. """
valid_dtype_types = (types.NoneType, types.Omitted, types.UnicodeType, types.NumberClass)
return isinstance(dtype, valid_dtype_types) or dtype is None
| 1.585938 | 2 |
Hackerrank/Contests/Project Euler/euler010.py | PROxZIMA/Competitive-Coding | 1 | 6296 | <reponame>PROxZIMA/Competitive-Coding
from math import sqrt
# Naive method: Loop through N and check if every number is prime or not. If prime add to sum. Time complexity is O(√n). Time of execution ~ 8sec for n = 1000000
def prime(n):
yield 2
yield 3
for p in range(5, n+1, 2):
if p % 3 == 0:
continue
else:
for i in range (5, int(sqrt(p)) + 1, 6):
if p % i == 0 or p % (i + 2) == 0:
break
else:
yield p
s = set(prime(1000000))
for _ in range(int(input())):
n = int(input())
print(sum(i for i in s if i <= n))
# Sieve implementation: Time complexity of O(n*log(log(n))). Time of execution ~ 2sec for n = 1000000
limit = 1000000
sieve = [0] + [1, 0] * 500000
sieve[0], sieve[1], sieve[2] = 0, 0, 2
p = 3
while p <= limit:
if sieve[p]:
sieve[p] = sieve[p-1] + p
for i in range(p*p, limit+1, p):
sieve[i] = 0
else:
sieve[p] = sieve[p-1]
sieve[p+1] = sieve[p]
p += 2
for _ in range(int(input())):
print(sieve[int(input())])
| 3.625 | 4 |
DQM/L1TMonitorClient/python/L1EmulatorErrorFlagClient_cfi.py | ckamtsikis/cmssw | 852 | 6297 | <filename>DQM/L1TMonitorClient/python/L1EmulatorErrorFlagClient_cfi.py<gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
l1EmulatorErrorFlagClient = DQMEDHarvester("L1EmulatorErrorFlagClient",
#
# for each L1 system, give:
# - SystemLabel: system label
# - HwValLabel: system label as used in hardware validation package
# (the package producing the ErrorFlag histogram)
# - SystemMask: system mask: if 1, the system is masked in the summary plot
# - SystemFolder: the folder where the ErrorFlag histogram is looked for
#
# the position in the parameter set gives, in reverse order, the position in the reportSummaryMap
# in the emulator column (left column)
L1Systems = cms.VPSet(
cms.PSet(
SystemLabel = cms.string("ECAL"),
HwValLabel = cms.string("ETP"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("HCAL"),
HwValLabel = cms.string("HTP"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("RCT"),
HwValLabel = cms.string("RCT"),
SystemMask = cms.uint32(0),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("Stage1Layer2"),
HwValLabel = cms.string("Stage1Layer2"),
SystemMask = cms.uint32(0),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("DTTF"),
HwValLabel = cms.string("DTF"),
SystemMask = cms.uint32(0),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("DTTPG"),
HwValLabel = cms.string("DTP"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("CSCTF"),
HwValLabel = cms.string("CTF"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("CSCTPG"),
HwValLabel = cms.string("CTP"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("RPC"),
HwValLabel = cms.string("RPC"),
SystemMask = cms.uint32(0),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("GMT"),
HwValLabel = cms.string("GMT"),
SystemMask = cms.uint32(0),
SystemFolder = cms.string("")
),
cms.PSet(
SystemLabel = cms.string("GT"),
HwValLabel = cms.string("GT"),
SystemMask = cms.uint32(1),
SystemFolder = cms.string("L1TEMU/Stage1GTexpert")
)
)
)
| 2.125 | 2 |
codalab/model/tables.py | jzwang43/codalab-worksheets | 0 | 6298 | <filename>codalab/model/tables.py
"""
The SQLAlchemy table objects for the CodaLab bundle system tables.
"""
# TODO: Replace String and Text columns with Unicode and UnicodeText as appropriate
# This way, SQLAlchemy will automatically perform conversions to and from UTF-8
# encoding, or use appropriate database engine-specific data types for Unicode
# data. Currently, only worksheet.title uses the Unicode column type.
from sqlalchemy import Column, ForeignKey, Index, MetaData, Table, UniqueConstraint
from sqlalchemy.types import (
BigInteger,
Boolean,
DateTime,
Enum,
Float,
Integer,
LargeBinary,
String,
Text,
Unicode,
)
from sqlalchemy.sql.schema import ForeignKeyConstraint
db_metadata = MetaData()
bundle = Table(
'bundle',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('uuid', String(63), nullable=False),
Column('bundle_type', String(63), nullable=False),
# The command will be NULL except for run bundles.
Column('command', Text, nullable=True),
# The data_hash will be NULL if the bundle's value is still being computed.
Column('data_hash', String(63), nullable=True),
Column('state', String(63), nullable=False),
Column('owner_id', String(255), nullable=True),
Column('is_anonymous', Boolean, nullable=False, default=False),
UniqueConstraint('uuid', name='uix_1'),
Index('bundle_data_hash_index', 'data_hash'),
Index('state_index', 'state'), # Needed for the bundle manager.
)
# Includes things like name, description, etc.
bundle_metadata = Table(
'bundle_metadata',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('bundle_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),
Column('metadata_key', String(63), nullable=False),
Column('metadata_value', Text, nullable=False),
Index('metadata_kv_index', 'metadata_key', 'metadata_value', mysql_length=63),
)
# For each child_uuid, we have: key = child_path, target = (parent_uuid, parent_path)
bundle_dependency = Table(
'bundle_dependency',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('child_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),
Column('child_path', Text, nullable=False),
# Deliberately omit ForeignKey(bundle.c.uuid), because bundles can have
# dependencies to bundles not (yet) in the system.
Column('parent_uuid', String(63), nullable=False),
Column('parent_path', Text, nullable=False),
)
# The worksheet table does not have many columns now, but it will eventually
# include columns for owner, group, permissions, etc.
worksheet = Table(
'worksheet',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('uuid', String(63), nullable=False),
Column('name', String(255), nullable=False),
Column('owner_id', String(255), nullable=True),
Column(
'title', Unicode(255), nullable=True
), # Short human-readable description of the worksheet
Column(
'frozen', DateTime, nullable=True
), # When the worksheet was frozen (forever immutable) if it is.
Column('is_anonymous', Boolean, nullable=False, default=False),
Column(
'date_created', DateTime
    ),  # When the worksheet was created; set to null if the worksheet was created before v0.5.31; set to current timestamp by default
Column(
'date_last_modified', DateTime
    ),  # When the worksheet was last modified; set to null if the worksheet was created before v0.5.31; set to current_timestamp by default
UniqueConstraint('uuid', name='uix_1'),
Index('worksheet_name_index', 'name'),
Index('worksheet_owner_index', 'owner_id'),
)
worksheet_item = Table(
'worksheet_item',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False),
# A worksheet item is either:
# - type = bundle (bundle_uuid != null)
# - type = worksheet (subworksheet_uuid != null)
# - type = markup (value != null)
# - type = directive (value != null)
# Deliberately omit ForeignKey(bundle.c.uuid), because worksheets can contain
# bundles and worksheets not (yet) in the system.
Column('bundle_uuid', String(63), nullable=True),
Column('subworksheet_uuid', String(63), nullable=True),
Column('value', Text, nullable=False), # TODO: make this nullable
Column('type', String(20), nullable=False),
Column('sort_key', Integer, nullable=True),
Index('worksheet_item_worksheet_uuid_index', 'worksheet_uuid'),
Index('worksheet_item_bundle_uuid_index', 'bundle_uuid'),
Index('worksheet_item_subworksheet_uuid_index', 'subworksheet_uuid'),
)
# Worksheet tags
worksheet_tag = Table(
'worksheet_tag',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False),
Column('tag', String(63), nullable=False),
Index('worksheet_tag_worksheet_uuid_index', 'worksheet_uuid'),
Index('worksheet_tag_tag_index', 'tag'),
)
group = Table(
'group',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('uuid', String(63), nullable=False),
Column('name', String(255), nullable=False),
Column('user_defined', Boolean),
Column('owner_id', String(255), nullable=True),
UniqueConstraint('uuid', name='uix_1'),
Index('group_name_index', 'name'),
Index('group_owner_id_index', 'owner_id'),
)
user_group = Table(
'user_group',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False),
Column('user_id', String(63), ForeignKey("user.user_id"), nullable=False),
# Whether a user is able to modify this group.
Column('is_admin', Boolean),
Index('group_uuid_index', 'group_uuid'),
Index('user_id_index', 'user_id'),
)
# Permissions for bundles
group_bundle_permission = Table(
'group_bundle_permission',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False),
# Reference to a bundle
Column('object_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),
# Permissions encoded as integer (see below)
Column('permission', Integer, nullable=False),
)
# Permissions for worksheets
group_object_permission = Table(
'group_object_permission',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False),
# Reference to a worksheet object
Column('object_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False),
# Permissions encoded as integer (see below)
Column('permission', Integer, nullable=False),
)
# A permission value is one of the following: none (0), read (1), or all (2).
GROUP_OBJECT_PERMISSION_NONE = 0x00
GROUP_OBJECT_PERMISSION_READ = 0x01
GROUP_OBJECT_PERMISSION_ALL = 0x02
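def _permission_allows_read(permission):
    # Illustrative sketch, not used by the schema itself: under this integer
    # encoding, read access means READ or ALL.
    return permission >= GROUP_OBJECT_PERMISSION_READ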
# A notifications value is one of the following:
NOTIFICATIONS_NONE = 0x00 # Receive no notifications
NOTIFICATIONS_IMPORTANT = 0x01 # Receive only important notifications
NOTIFICATIONS_GENERAL = 0x02 # Receive general notifications (new features)
# Store information about users.
user = Table(
'user',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
# Basic information
Column('user_id', String(63), nullable=False),
Column('user_name', String(63), nullable=False, unique=True),
Column(
'email', String(254), nullable=False, unique=True
), # Length of 254 to be compliant with RFC3696/5321
Column(
'notifications', Integer, nullable=False, default=NOTIFICATIONS_GENERAL
), # Which emails user wants to receive
Column('last_login', DateTime), # Null if user has never logged in
Column(
'is_active', Boolean, nullable=False, default=True
), # Set to False instead of deleting users to maintain foreign key integrity
Column('first_name', String(30, convert_unicode=True)),
Column('last_name', String(30, convert_unicode=True)),
Column('date_joined', DateTime, nullable=False),
Column('has_access', Boolean, default=False, nullable=True),
Column('is_verified', Boolean, nullable=False, default=False),
Column('is_superuser', Boolean, nullable=False, default=False),
Column('password', String(128), nullable=False),
# Additional information
Column('affiliation', String(255, convert_unicode=True), nullable=True),
Column('url', String(255, convert_unicode=True), nullable=True),
# Quotas
Column('time_quota', Float, nullable=False), # Number of seconds allowed
Column('parallel_run_quota', Integer, nullable=False), # Number of parallel jobs allowed
Column('time_used', Float, nullable=False), # Number of seconds already used
Column('disk_quota', Float, nullable=False), # Number of bytes allowed
Column('disk_used', Float, nullable=False), # Number of bytes already used
Index('user_user_id_index', 'user_id'),
Index('user_user_name_index', 'user_name'),
UniqueConstraint('user_id', name='uix_1'),
)
# Stores (email) verification keys
user_verification = Table(
'user_verification',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),
Column('date_created', DateTime, nullable=False),
Column('date_sent', DateTime, nullable=True),
Column('key', String(64), nullable=False),
)
# Stores password reset codes
user_reset_code = Table(
'user_reset_code',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),
Column('date_created', DateTime, nullable=False),
Column('code', String(64), nullable=False),
)
# OAuth2 Tables
oauth2_client = Table(
'oauth2_client',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('client_id', String(63), nullable=False),
Column('name', String(63), nullable=True),
Column('secret', String(255), nullable=True),
Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=True),
Column(
'grant_type',
Enum("authorization_code", "password", "client_credentials", "refresh_token"),
nullable=False,
),
Column('response_type', Enum("code", "token"), nullable=False),
Column('scopes', Text, nullable=False), # comma-separated list of allowed scopes
Column('redirect_uris', Text, nullable=False), # comma-separated list of allowed redirect URIs
UniqueConstraint('client_id', name='uix_1'),
)
oauth2_token = Table(
'oauth2_token',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id), nullable=False),
Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),
Column('scopes', Text, nullable=False),
Column('access_token', String(255), unique=True),
Column('refresh_token', String(255), unique=True),
Column('expires', DateTime, nullable=False),
)
oauth2_auth_code = Table(
'oauth2_auth_code',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
),
Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id), nullable=False),
Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),
Column('scopes', Text, nullable=False),
Column('code', String(100), nullable=False),
Column('expires', DateTime, nullable=False),
Column('redirect_uri', String(255), nullable=False),
)
# Store information about users' questions or feedback.
chat = Table(
'chat',
db_metadata,
Column(
'id',
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
nullable=False,
autoincrement=True,
), # Primary key
Column('time', DateTime, nullable=False), # When did the user send this query?
Column('sender_user_id', String(63), nullable=True), # Who sent it?
Column('recipient_user_id', String(63), nullable=True), # Who received it?
Column('message', Text, nullable=False), # What's the content of the chat?
Column(
'worksheet_uuid', String(63), nullable=True
), # What is the id of the worksheet that the sender is on?
Column(
'bundle_uuid', String(63), nullable=True
), # What is the id of the bundle that the sender is on?
)
# Store information about workers.
worker = Table(
'worker',
db_metadata,
Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False),
Column('worker_id', String(127), primary_key=True, nullable=False),
Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=True),
Column('tag', Text, nullable=True), # Tag that allows for scheduling runs on specific workers.
Column('cpus', Integer, nullable=False), # Number of CPUs on worker.
Column('gpus', Integer, nullable=False), # Number of GPUs on worker.
Column('memory_bytes', BigInteger, nullable=False), # Total memory of worker.
Column('free_disk_bytes', BigInteger, nullable=True), # Available disk space on worker.
Column(
'checkin_time', DateTime, nullable=False
), # When the worker last checked in with the bundle service.
Column('socket_id', Integer, nullable=False), # Socket ID worker listens for messages on.
Column(
'shared_file_system', Boolean, nullable=False
), # Whether the worker and the server have a shared filesystem.
Column(
'tag_exclusive', Boolean, nullable=False
), # Whether worker runs bundles if and only if they match tags.
Column(
'exit_after_num_runs', Integer, nullable=False
), # Number of jobs allowed to run on worker.
Column('is_terminating', Boolean, nullable=False),
)
# Store information about all sockets currently allocated to each worker.
worker_socket = Table(
'worker_socket',
db_metadata,
Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),
Column('worker_id', String(127), nullable=False),
# No foreign key constraint on the worker table so that we can create a socket
# for the worker before adding the worker to the worker table.
Column('socket_id', Integer, primary_key=True, nullable=False),
)
# Store information about the bundles currently running on each worker.
worker_run = Table(
'worker_run',
db_metadata,
Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False),
Column('worker_id', String(127), nullable=False),
ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']),
Column('run_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False),
Index('uuid_index', 'run_uuid'),
)
# Store information about the dependencies available on each worker.
worker_dependency = Table(
'worker_dependency',
db_metadata,
Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False),
Column('worker_id', String(127), primary_key=True, nullable=False),
ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']),
# Serialized list of dependencies for the user/worker combination.
# See WorkerModel for the serialization method.
Column('dependencies', LargeBinary, nullable=False),
)
| 2.234375 | 2 |
grpc/plugins/connection/gnmi.py | hansthienpondt/ansible-networking-collections | 0 | 6299 | <reponame>hansthienpondt/ansible-networking-collections
# (c) 2020 Nokia
#
# Licensed under the BSD 3 Clause license
# SPDX-License-Identifier: BSD-3-Clause
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
author:
- "<NAME> (@HansThienpondt)"
- "<NAME> (@wisotzky)"
connection: gnmi
short_description: Provides a persistent gRPC connection for gNMI API service
description:
- This gRPC plugin provides methods to interact with the gNMI service.
- OpenConfig gNMI specification
https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md
- gNMI API
https://raw.githubusercontent.com/openconfig/gnmi/master/proto/gnmi/gnmi.proto
- This connection plugin provides a persistent communication channel to
remote devices using gRPC including the underlying transport (TLS).
- The plugin binds to the gNMI gRPC service. It provide wrappers for gNMI
requests (Capabilities, Get, Set, Subscribe)
requirements:
- grpcio
- protobuf
options:
host:
description:
- Target host FQDN or IP address to establish gRPC connection.
default: inventory_hostname
vars:
- name: ansible_host
port:
type: int
description:
- Specifies the port on the remote device that listens for connections
when establishing the gRPC connection. If None only the C(host) part
will be used.
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
remote_user:
description:
- The username used to authenticate to the remote device when the gRPC
connection is first established. If the remote_user is not specified,
the connection will use the username of the logged in user.
- Can be configured from the CLI via the C(--user) or C(-u) options.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
password:
description:
- Configures the user password used to authenticate to the remote device
when first establishing the gRPC connection.
vars:
- name: ansible_password
- name: ansible_ssh_pass
private_key_file:
description:
- The PEM encoded private key file used to authenticate to the
        remote device when first establishing the gRPC connection.
ini:
- section: grpc_connection
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
root_certificates_file:
description:
      - The PEM encoded root certificate file used to create an SSL-enabled
channel, if the value is None it reads the root certificates from
a default location chosen by gRPC at runtime.
ini:
- section: grpc_connection
key: root_certificates_file
env:
- name: ANSIBLE_ROOT_CERTIFICATES_FILE
vars:
- name: ansible_root_certificates_file
certificate_chain_file:
description:
      - The PEM encoded certificate chain file used to create an SSL-enabled
channel. If the value is None, no certificate chain is used.
ini:
- section: grpc_connection
key: certificate_chain_file
env:
- name: ANSIBLE_CERTIFICATE_CHAIN_FILE
vars:
- name: ansible_certificate_chain_file
certificate_path:
description:
- Folder to search for certificate and key files
ini:
- section: grpc_connection
key: certificate_path
env:
- name: ANSIBLE_CERTIFICATE_PATH
vars:
- name: ansible_certificate_path
gnmi_encoding:
description:
- Encoding used for gNMI communication
- Must be either JSON or JSON_IETF
- If not provided, will run CapabilityRequest for auto-detection
ini:
- section: grpc_connection
key: gnmi_encoding
env:
- name: ANSIBLE_GNMI_ENCODING
vars:
- name: ansible_gnmi_encoding
grpc_channel_options:
description:
- Key/Value pairs (dict) to define gRPC channel options to be used
- gRPC reference
U(https://grpc.github.io/grpc/core/group__grpc__arg__keys.html)
- Provide the I(ssl_target_name_override) option to override the TLS
subject or subjectAltName (only in the case secure connections are
        used). The option must be provided when the FQDN or IPv4 address used
        to connect to the device differs from the subject name provided in the
        host certificate. This is needed because TLS validates the hostname or
        IP address to avoid man-in-the-middle attacks.
vars:
- name: ansible_grpc_channel_options
grpc_environment:
description:
- Key/Value pairs (dict) to define environment settings specific to gRPC
- The standard mechanism to provide/set the environment in Ansible
cannot be used, because those environment settings are not passed to
the client process that establishes the gRPC connection.
      - Set C(GRPC_VERBOSITY) and C(GRPC_TRACE) to set up gRPC logging. Code to
        forward gRPC-related log messages to the persistent messages log still
        needs to be added (see below).
- Set C(HTTPS_PROXY) to specify your proxy settings (if needed).
- Set C(GRPC_SSL_CIPHER_SUITES) in case the default TLS ciphers do not match
what is offered by the gRPC server.
vars:
- name: ansible_grpc_environment
persistent_connect_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait when trying to
initially establish a persistent connection. If this value expires
before the connection to the remote device is completed, the connection
will fail.
default: 5
ini:
- section: persistent_connection
key: connect_timeout
env:
- name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
vars:
- name: ansible_connect_timeout
persistent_command_timeout:
type: int
description:
- Configures the default timeout value (in seconds) when awaiting a
        response after issuing a call to an RPC. If the RPC does not return
        before the timeout is exceeded, an error is generated and the
        connection is closed.
default: 300
ini:
- section: persistent_connection
key: command_timeout
env:
- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
vars:
- name: ansible_command_timeout
persistent_log_messages:
type: boolean
description:
      - This flag will enable logging of the command executed and the response
        received from the target device in the Ansible log file. For this
        option to work, the 'log_path' Ansible configuration option must be
        set to a file path with write access.
      - Be sure to fully understand the security implications of enabling this
        option, as it could create a security vulnerability by logging
        sensitive information in the log file.
default: False
ini:
- section: persistent_connection
key: log_messages
env:
- name: ANSIBLE_PERSISTENT_LOG_MESSAGES
vars:
- name: ansible_persistent_log_messages
"""
import os
import re
import json
import base64
import datetime
try:
import grpc
HAS_GRPC = True
except ImportError:
HAS_GRPC = False
try:
from google import protobuf
HAS_PROTOBUF = True
except ImportError:
HAS_PROTOBUF = False
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.plugins.connection import NetworkConnectionBase
from ansible.plugins.connection import ensure_connect
from google.protobuf import json_format
from ansible_collections.nokia.grpc.plugins.connection.pb import gnmi_pb2
from ansible.module_utils._text import to_text
class Connection(NetworkConnectionBase):
"""
Connection plugin for gRPC
    To use gRPC connections in Ansible, one (or more) sub-plugin(s) for the
    required gRPC service(s) must be loaded. To load gRPC sub-plugins, use the
method `register_service()` with the name of the sub-plugin to be
registered.
After loading the sub-plugin, Ansible modules can call methods provided by
that sub-plugin. There is a wrapper available that consumes the attribute
name {sub-plugin name}__{method name} to call a specific method of that
sub-plugin.
"""
transport = "nokia.grpc.gnmi"
has_pipelining = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(
play_context, new_stdin, *args, **kwargs
)
self._task_uuid = to_text(kwargs.get("task_uuid", ""))
if not HAS_PROTOBUF:
raise AnsibleError(
"protobuf is required to use gRPC connection type. " +
"Please run 'pip install protobuf'"
)
if not HAS_GRPC:
raise AnsibleError(
"grpcio is required to use gRPC connection type. " +
"Please run 'pip install grpcio'"
)
self._connected = False
def readFile(self, optionName):
"""
Reads a binary certificate/key file
Parameters:
optionName(str): used to read filename from options
Returns:
File content
Raises:
            AnsibleConnectionFailure: file does not exist or cannot be read
"""
path = self.get_option('certificate_path')
if not path:
path = '/etc/ssl:/etc/ssl/certs:/etc/ca-certificates'
filename = self.get_option(optionName)
if filename:
if filename.startswith('~'):
filename = os.path.expanduser(filename)
if not filename.startswith('/'):
for entry in path.split(':'):
if os.path.isfile(os.path.join(entry, filename)):
filename = os.path.join(entry, filename)
break
if os.path.isfile(filename):
try:
with open(filename, 'rb') as f:
return f.read()
except Exception as exc:
raise AnsibleConnectionFailure(
'Failed to read cert/keys file %s: %s' % (filename, exc)
)
else:
raise AnsibleConnectionFailure(
'Cert/keys file %s does not exist' % filename
)
return None
def _connect(self):
"""
Establish gRPC connection to remote node and create gNMI stub.
This method will establish the persistent gRPC connection, if not
already done. After this, the gNMI stub will be created. To get
        visibility about gNMI capabilities of the remote device, a gNMI
        CapabilityRequest will be sent and the result will be persisted.
Parameters:
None
Returns:
None
"""
if self.connected:
            self.queue_message('v', 'gRPC connection to host %s already exists' % self._target)
return
grpcEnv = self.get_option('grpc_environment') or {}
if not isinstance(grpcEnv, dict):
raise AnsibleConnectionFailure("grpc_environment must be a dict")
for key in grpcEnv:
if grpcEnv[key]:
os.environ[key] = str(grpcEnv[key])
else:
try:
del os.environ[key]
except KeyError:
                    # no such setting in the current environment, which is fine
pass
self._login_credentials = [
('username', self.get_option('remote_user')),
('password', self.get_option('password'))
]
host = self.get_option('host')
port = self.get_option('port')
self._target = host if port is None else '%s:%d' % (host, port)
self._timeout = self.get_option('persistent_command_timeout')
certs = {}
certs['root_certificates'] = self.readFile('root_certificates_file')
certs['certificate_chain'] = self.readFile('certificate_chain_file')
certs['private_key'] = self.readFile('private_key_file')
options = self.get_option('grpc_channel_options')
if options:
if not isinstance(options, dict):
raise AnsibleConnectionFailure("grpc_channel_options must be a dict")
options = options.items()
if certs['root_certificates'] or certs['private_key'] or certs['certificate_chain']:
self.queue_message('v', 'Starting secure gRPC connection')
creds = grpc.ssl_channel_credentials(**certs)
self._channel = grpc.secure_channel(self._target, creds, options=options)
else:
self.queue_message('v', 'Starting insecure gRPC connection')
self._channel = grpc.insecure_channel(self._target, options=options)
self.queue_message('v', "gRPC connection established for user %s to %s" %
(self.get_option('remote_user'), self._target))
self.queue_message('v', 'Creating gNMI stub')
self._stub = gnmi_pb2.gNMIStub(self._channel)
self._encoding = self.get_option('gnmi_encoding')
if not self._encoding:
self.queue_message('v', 'Run CapabilityRequest()')
request = gnmi_pb2.CapabilityRequest()
response = self._stub.Capabilities(request, metadata=self._login_credentials)
self.queue_message('v', 'CapabilityRequest() succeeded')
self._gnmiVersion = response.gNMI_version
self._yangModels = response.supported_models
if gnmi_pb2.Encoding.Value('JSON_IETF') in response.supported_encodings:
self._encoding = 'JSON_IETF'
elif gnmi_pb2.Encoding.Value('JSON') in response.supported_encodings:
self._encoding = 'JSON'
else:
raise AnsibleConnectionFailure("No compatible supported encoding found (JSON or JSON_IETF)")
else:
if self._encoding not in ['JSON_IETF', 'JSON']:
raise AnsibleConnectionFailure("Incompatible encoding '%s' requested (JSON or JSON_IETF)" % self._encoding)
self._encoding_value = gnmi_pb2.Encoding.Value(self._encoding)
self._connected = True
        self.queue_message('v', 'gRPC/gNMI connection established successfully')
def close(self):
"""
Closes the active gRPC connection to the target host
Parameters:
None
Returns:
None
"""
if self._connected:
self.queue_message('v', "Closing gRPC connection to target host")
self._channel.close()
super(Connection, self).close()
# -----------------------------------------------------------------------
def _encodeXpath(self, xpath='/'):
"""
Encodes XPATH to dict representation that allows conversion to gnmi_pb.Path object
Parameters:
xpath (str): path string using XPATH syntax
Returns:
(dict): path dict using gnmi_pb2.Path structure for easy conversion
"""
mypath = []
xpath = xpath.strip('\t\n\r /')
if xpath:
            path_elements = re.split(r'''/(?=(?:[^\[\]]|\[[^\[\]]+\])*$)''', xpath)
for e in path_elements:
entry = {'name': e.split("[", 1)[0]}
                eKeys = re.findall(r'\[(.*?)\]', e)
dKeys = dict(x.split('=', 1) for x in eKeys)
if dKeys:
entry['key'] = dKeys
mypath.append(entry)
return {'elem': mypath}
return {}
def _decodeXpath(self, path):
"""
Decodes XPATH from dict representation converted from gnmi_pb.Path object
Parameters:
path (dict): decoded gnmi_pb2.Path object
Returns:
(str): path string using XPATH syntax
"""
result = []
if 'elem' not in path:
return ""
for elem in path['elem']:
tmp = elem['name']
if 'key' in elem:
for k, v in elem['key'].items():
tmp += "[%s=%s]" % (k, v)
result.append(tmp)
return '/'.join(result)
def _encodeVal(self, data):
"""
Encodes value to dict representation that allows conversion to gnmi_pb.TypedValue object
Parameters:
data (ANY): data to be encoded as gnmi_pb.TypedValue object
Returns:
(dict): dict using gnmi_pb.TypedValue structure for easy conversion
"""
value = base64.b64encode(json.dumps(data).encode())
if self._encoding == 'JSON_IETF':
return {'jsonIetfVal': value}
else:
return {'jsonVal': value}
def _decodeVal(self, val):
"""
Decodes value from dict representation converted from gnmi_pb.TypedValue object
Parameters:
val (dict): decoded gnmi_pb.TypedValue object
Returns:
(ANY): extracted data
"""
if 'jsonIetfVal' in val:
return json.loads(base64.b64decode(val['jsonIetfVal']))
elif 'jsonVal' in val:
return json.loads(base64.b64decode(val['jsonVal']))
else:
raise AnsibleConnectionFailure("Ansible gNMI plugin does not support encoding for value: %s" % json.dumps(val))
def _dictToList(self, aDict):
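        """
        Converts dict entries whose keys are prefixed with '___' (keyed list
        entries collected by _mergeToSingleDict) back into plain lists,
        recursing into nested dicts.
        """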
        for key in list(aDict.keys()):
if key.startswith('___'):
aDict[key[3:]] = [self._dictToList(val) if isinstance(val, dict) else val for val in aDict[key].values()]
del aDict[key]
else:
if isinstance(aDict[key], dict):
aDict[key] = self._dictToList(aDict[key])
return aDict
def _mergeToSingleDict(self, rawData):
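        """
        Merges a list of gNMI notifications (GetResponse notifications or ONCE
        subscription messages converted to dicts) into a single nested dict.
        Path elements with keys are stored under '___'-prefixed names and later
        converted back to lists by _dictToList().
        Parameters:
            rawData (list): decoded gNMI notification messages
        Returns:
            (dict): merged object tree
        """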
result = {}
for entry in rawData:
if 'syncResponse' in entry and entry['syncResponse']:
# Ignore: SyncResponse is sent after initial update
break
elif 'update' not in entry:
# Ignore: entry without updates
break
elif 'timestamp' not in entry:
# Subscribe response, enter update context
entry = entry['update']
else:
# Get response, keep context
pass
prfx = result
if ('prefix' in entry) and ('elem' in entry['prefix']):
prfx_elements = entry['prefix']['elem']
else:
prfx_elements = []
for elem in prfx_elements:
eleName = elem['name']
if 'key' in elem:
eleKey = json.dumps(elem['key'])
eleName = '___'+eleName
# Path Element has key => must be list()
if eleName in prfx:
# Path Element exists => Change Context
prfx = prfx[eleName]
if eleKey not in prfx:
# List entry does not exist => Create
prfx[eleKey] = elem['key']
prfx = prfx[eleKey]
else:
# Path Element does not exist => Create
prfx[eleName] = {}
prfx = prfx[eleName]
prfx[eleKey] = elem['key']
prfx = prfx[eleKey]
else:
                    # Path Element has no key => must be dict()
if eleName in prfx:
# Path Element exists => Change Context
prfx = prfx[eleName]
else:
# Path Element does not exist => Create
prfx[eleName] = {}
prfx = prfx[eleName]
for _upd in entry['update']:
if 'val' not in _upd:
# requested path without content (no value) => skip
continue
elif ('path' in _upd) and ('elem' in _upd['path']):
path_elements = _upd['path']['elem']
cPath = prfx
elif prfx_elements:
path_elements = prfx_elements
cPath = result
else:
                    # No path at all, replace the object tree with the value
result = self._decodeVal(_upd['val'])
prfx = result
continue
# If path_elements has more than just a single entry,
# we need to create/navigate to the specified subcontext
for elem in path_elements[:-1]:
eleName = elem['name']
if 'key' in elem:
eleKey = json.dumps(elem['key'])
eleName = '___'+eleName
# Path Element has key => must be list()
if eleName in cPath:
# Path Element exists => Change Context
cPath = cPath[eleName]
if eleKey not in cPath:
# List entry does not exist => Create
cPath[eleKey] = elem['key']
cPath = cPath[eleKey]
else:
# Path Element does not exist => Create
cPath[eleName] = {}
cPath = cPath[eleName]
cPath[eleKey] = elem['key']
cPath = cPath[eleKey]
else:
                        # Path Element has no key => must be dict()
if eleName in cPath:
# Path Element exists => Change Context
cPath = cPath[eleName]
else:
# Path Element does not exist => Create
cPath[eleName] = {}
cPath = cPath[eleName]
# The last entry of path_elements is the leaf element
# that needs to be created/updated
leaf_elem = path_elements[-1]
if 'key' in leaf_elem:
eleKey = json.dumps(leaf_elem['key'])
eleName = '___'+leaf_elem['name']
if eleName not in cPath:
cPath[eleName] = {}
cPath = cPath[eleName]
cPath[eleKey] = self._decodeVal(_upd['val'])
else:
cPath[leaf_elem['name']] = self._decodeVal(_upd['val'])
return self._dictToList(result)
def _simplifyUpdates(self, rawData):
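        """
        Converts streamed gNMI SubscribeResponse messages into simplified dicts
        containing prefix, timestamp and a values mapping (path -> decoded
        value), skipping sync responses and messages without updates.
        Parameters:
            rawData: iterable of gnmi_pb2.SubscribeResponse messages
        Yields:
            (dict): simplified update
        """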
for msg in rawData:
entry = json_format.MessageToDict(msg)
if 'syncResponse' in entry:
# Ignore: SyncResponse is sent after initial update
pass
elif 'update' in entry:
result = {}
update = entry['update']
if 'prefix' in update:
result['prefix'] = '/'+self._decodeXpath(update['prefix'])
if 'timestamp' in update:
result['timestamp'] = datetime.datetime.fromtimestamp(float(update['timestamp'])/1000000000).isoformat()
if 'update' in update:
result['values'] = {self._decodeXpath(u['path']): self._decodeVal(u['val']) for u in update['update']}
yield result
else:
# Ignore: Invalid message format
pass
# -----------------------------------------------------------------------
@ensure_connect
def gnmiCapabilities(self):
"""
Executes a gNMI Capabilities request
Parameters:
None
Returns:
str: gNMI capabilities converted into JSON format
"""
request = gnmi_pb2.CapabilityRequest()
auth = self._login_credentials
try:
response = self._stub.Capabilities(request, metadata=auth)
except grpc.RpcError as e:
raise AnsibleConnectionFailure("%s" % e)
return json_format.MessageToJson(response)
@ensure_connect
def gnmiGet(self, *args, **kwargs):
"""
Executes a gNMI Get request
Encoding that is used for data serialization is automatically determined
        based on the remote device capabilities. This gNMI plugin implements
        support for JSON_IETF (preferred) and JSON (fallback).
Parameters:
type (str): Type of data that is requested: ALL, CONFIG, STATE
prefix (str): Path prefix that is added to all paths (XPATH syntax)
paths (list): List of paths (str) to be captured
Returns:
str: GetResponse message converted into JSON format
"""
# Remove all input parameters from kwargs that are not set
input = dict(filter(lambda x: x[1], kwargs.items()))
        # Adjust input parameters to match specification for gNMI GetRequest
if 'prefix' in input:
input['prefix'] = self._encodeXpath(input['prefix'])
if 'path' in input:
input['path'] = [self._encodeXpath(path) for path in input['path']]
if 'type' in input:
input['type'] = input['type'].upper()
input['encoding'] = self._encoding_value
request = json_format.ParseDict(input, gnmi_pb2.GetRequest())
auth = self._login_credentials
try:
response = self._stub.Get(request, metadata=auth)
except grpc.RpcError as e:
raise AnsibleConnectionFailure("%s" % e)
output = self._mergeToSingleDict(json_format.MessageToDict(response)['notification'])
return json.dumps(output, indent=4).encode()
@ensure_connect
def gnmiSet(self, *args, **kwargs):
"""
Executes a gNMI Set request
Encoding that is used for data serialization is automatically determined
        based on the remote device capabilities. This gNMI plugin implements
        support for JSON_IETF (preferred) and JSON (fallback).
Parameters:
prefix (str): Path prefix that is added to all paths (XPATH syntax)
update (list): Path/Value pairs to be updated
replace (list): Path/Value pairs to be replaced
delete (list): Paths (str) to be deleted
Returns:
str: SetResponse message converted into JSON format
"""
# Remove all input parameters from kwargs that are not set
input = dict(filter(lambda x: x[1], kwargs.items()))
# Backup options are not to be used in gNMI SetRequest
if 'backup' in input:
del input['backup']
if 'backup_options' in input:
del input['backup_options']
# Adjust input parameters to match specification for gNMI SetRequest
if 'prefix' in input:
input['prefix'] = self._encodeXpath(input['prefix'])
if 'delete' in input:
input['delete'] = [self._encodeXpath(entry) for entry in input['delete']]
if 'update' in input:
for entry in input['update']:
entry['path'] = self._encodeXpath(entry['path'])
entry['val'] = self._encodeVal(entry['val'])
if 'replace' in input:
for entry in input['replace']:
entry['path'] = self._encodeXpath(entry['path'])
entry['val'] = self._encodeVal(entry['val'])
request = json_format.ParseDict(input, gnmi_pb2.SetRequest())
auth = self._login_credentials
try:
response = self._stub.Set(request, metadata=auth)
except grpc.RpcError as e:
raise AnsibleConnectionFailure("%s" % e)
output = json_format.MessageToDict(response)
output['timestamp'] = datetime.datetime.fromtimestamp(float(output['timestamp'])/1000000000).isoformat()
if 'prefix' in output:
output['prefix'] = self._decodeXpath(output['prefix'])
for item in output['response']:
item['path'] = self._decodeXpath(item['path'])
return json.dumps(output, indent=4).encode()
@ensure_connect
def gnmiSubscribe(self, *args, **kwargs):
"""
Executes a gNMI Subscribe request
Encoding that is used for data serialization is automatically determined
        based on the remote device capabilities. This gNMI plugin implements
        support for JSON_IETF (preferred) and JSON (fallback).
Parameters:
prefix (str): Path prefix that is added to all paths (XPATH syntax)
mode (str): Mode of subscription (STREAM, ONCE)
subscription (list of dict): Subscription specification (path, interval, submode)
duration (int): timeout, to stop receiving
qos (int): DSCP marking that is used
updates_only (bool): Send only updates to initial state
allow_aggregation (bool): Aggregate elements marked as eligible for aggregation
Returns:
str: Updates received converted into JSON format
"""
# Remove all input parameters from kwargs that are not set
input = dict(filter(lambda x: x[1], kwargs.items()))
# Adjust input parameters to match specification for gNMI SubscribeRequest
if 'mode' in input:
input['mode'] = input['mode'].upper()
input['encoding'] = self._encoding_value
if 'prefix' in input:
input['prefix'] = self._encodeXpath(input['prefix'])
if 'subscription' in input:
for item in input['subscription']:
item['path'] = self._encodeXpath(item['path'])
# Extract duration from input attributes
if 'duration' in input:
duration = input['duration']
del input['duration']
else:
            duration = 20  # fall back to a 20-second subscription window when no duration is given
request = json_format.ParseDict({'subscribe': input}, gnmi_pb2.SubscribeRequest())
auth = self._login_credentials
try:
output = []
responses = self._stub.Subscribe(iter([request]), duration, metadata=auth)
if input['mode'] == 'ONCE':
responses = [json_format.MessageToDict(response) for response in responses]
output = self._mergeToSingleDict(responses)
else:
for update in self._simplifyUpdates(responses):
output.append(update)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED:
if input['mode'] == 'ONCE':
raise AnsibleConnectionFailure("gNMI ONCE Subscription timed out")
else:
# RPC timed out, which is okay
pass
else:
raise AnsibleConnectionFailure("%s" % e)
return json.dumps(output, indent=4).encode()
| 1.523438 | 2 |