max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---
model_compression_toolkit/keras/quantizer/gradient_ptq/utils.py | eladc-git/model_optimization | 0 | 3500 | <filename>model_compression_toolkit/keras/quantizer/gradient_ptq/utils.py<gh_stars>0
# Copyright 2021 Sony Semiconductors Israel, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
from model_compression_toolkit.common.constants import MIN_THRESHOLD, THRESHOLD
def ste_ceil(x: tf.Tensor) -> tf.Tensor:
"""
Return the ceil values of a tensor.
"""
error = tf.stop_gradient(tf.math.ceil(x) - x)
return error + x
def ste_round(x: tf.Tensor) -> tf.Tensor:
"""
Return the rounded values of a tensor.
"""
error = tf.stop_gradient(tf.math.round(x) - x)
return error + x
def log2(x: tf.Tensor) -> tf.Tensor:
"""
Compute log2 of a tensor.
"""
return tf.math.log(x) / tf.math.log(2.0)
def power_of_two_max(max_tensor: tf.Tensor) -> tf.Tensor:
"""
Compute the power of two threshold for a tensor.
"""
return tf.math.pow(2.0, ste_ceil(log2(tf.maximum(max_tensor, MIN_THRESHOLD))))
def calculate_delta(max_tensor: tf.Tensor,
num_bits: int,
signed: bool) -> tf.Tensor:
"""
Compute the step size for the quantization.
"""
return max_tensor / (2 ** (num_bits - int(signed)))
def adjustable_steps(x: tf.Variable, t: float) -> tf.Tensor:
"""
A function to gradually quantize a float variable to one of the integer values [-1, 0, 1]
Args:
x: input float variable
t: temperature to control quantization
Returns:
semi-quantized variable
"""
return tf.sigmoid(tf.add(x, 1) / t) + tf.sigmoid(tf.add(x, -1) / t) - 1
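# Note added for clarity (not in the original file): as t -> 0 the two sigmoids saturate,
# so inputs around -2, 0 and 2 map to approximately -1, 0 and 1 respectively, while a
# larger temperature t gives a softer, differentiable interpolation between those levels.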
def ste_clip(x: [tf.Tensor, tf.Variable], max_val=1, min_val=None) -> tf.Tensor:
"""
Clip a variable between fixed values such that min_val <= output <= max_val
Args:
x: input variable
max_val: maximum value for clipping
min_val: minimum value for clipping (defaults to -max_val)
Returns:
clipped variable
"""
min_val = -max_val if min_val is None else min_val
return tf.stop_gradient(tf.math.minimum(tf.math.maximum(x, min_val), max_val) - x) + x
def symmetric_quantizer(input_tensor: tf.Tensor,
max_tensor: tf.Tensor,
num_bits: int,
signed: bool,
power_of_two: bool) -> tf.Tensor:
"""
Quantize a tensor symmetrically.
Args:
input_tensor: Tensor to quantize.
max_tensor: Tensor with max values to compute the threshold.
num_bits: Num of bits to use.
signed: Signedness of the quantization range.
power_of_two: Whether the threshold should be constrained to a power of two or not.
Returns:
A quantized tensor.
"""
if power_of_two:
max_tensor = power_of_two_max(max_tensor)
delta = calculate_delta(max_tensor, num_bits, signed)
tensor_q = ste_round(input_tensor / delta)
min_int = -int(signed) * (2 ** (num_bits - int(signed)))
max_int = (2 ** (num_bits - int(signed))) - 1
return delta * tf.math.minimum(tf.math.maximum(tensor_q, min_int), max_int)
def symmetric_constrained_quantizer(input_tensor: tf.Tensor,
auxvar_tensor: tf.Variable,
max_tensor: tf.Tensor,
num_bits: int,
signed: bool,
power_of_two: bool,
max_lsbs_change: int = 1) -> tf.Tensor:
"""
Quantize a tensor symmetrically with maximum LSBs shift.
Args:
input_tensor: Tensor to quantize. Values of this tensor are not changed during gptq.
auxvar_tensor: Tensor that manifests the bit shift of the weight due to gptq.
max_tensor: Tensor with max values to compute the threshold.
num_bits: Num of bits to use.
signed: Signedness of the quantization range.
power_of_two: Whether the threshold should be constrained to a power of two or not.
max_lsbs_change: maximum number of LSBs that the auxvar is allowed to change
Returns:
A quantized tensor.
"""
if power_of_two:
max_tensor = power_of_two_max(max_tensor)
delta = calculate_delta(max_tensor, num_bits, signed)
tensor_q = ste_round(tf.stop_gradient(tf.round(input_tensor / delta)) + ste_clip(auxvar_tensor, max_val=max_lsbs_change))
min_int = -int(signed) * (2 ** (num_bits - int(signed)))
max_int = (2 ** (num_bits - int(signed))) - 1
return delta * ste_clip(tensor_q, max_val=max_int, min_val=min_int)
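# Minimal usage sketch (added for illustration, not part of the original file). The weight
# values are made up. With num_bits=8, signed=True and power_of_two=True, the max |w| of
# 2.4 is rounded up to the power of two 4.0, so the step size is 4 / 2**7 = 0.03125 and the
# outputs land on that grid.
if __name__ == '__main__':
    w = tf.constant([0.3, -1.2, 0.07, 2.4])
    t = tf.reduce_max(tf.abs(w))
    w_q = symmetric_quantizer(w, t, num_bits=8, signed=True, power_of_two=True)
    print(w_q)  # quantized values, clipped to the signed 8-bit integer range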
| 2.125 | 2 |
pygments/lexers/tnt.py | btashton/pygments | 1 | 3501 | <filename>pygments/lexers/tnt.py<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
pygments.lexers.tnt
~~~~~~~~~~~~~~~~~~~
Lexer for Typographic Number Theory.
:copyright: Copyright 2019-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer
from pygments.token import Text, Comment, Operator, Keyword, Name, Number, \
Punctuation, Error
__all__ = ['TNTLexer']
class TNTLexer(Lexer):
"""
Lexer for Typographic Number Theory, as described in the book
<NAME>, by <NAME>,
or as summarized here:
https://github.com/Kenny2github/language-tnt/blob/master/README.md#summary-of-tnt
.. versionadded:: 2.7
"""
name = 'Typographic Number Theory'
aliases = ['tnt']
filenames = ['*.tnt']
cur = []
LOGIC = set('⊃→]&∧^|∨Vv')
OPERATORS = set('+.⋅*')
VARIABLES = set('abcde')
PRIMES = set("'′")
NEGATORS = set('~!')
QUANTIFIERS = set('AE∀∃')
NUMBERS = set('0123456789')
WHITESPACE = set('\t \v\n')
RULES = re.compile('''(?xi)
joining | separation | double-tilde | fantasy\\ rule
| carry[- ]over(?:\\ of)?(?:\\ line)?\\ ([0-9]+) | detachment
| contrapositive | De\\ Morgan | switcheroo
| specification | generalization | interchange
| existence | symmetry | transitivity
| add\\ S | drop\\ S | induction
| axiom\\ ([1-5]) | premise | push | pop
''')
LINENOS = re.compile(r'(?:[0-9]+)(?:(?:, ?|,? and )(?:[0-9]+))*')
COMMENT = re.compile(r'\[[^\n\]]+\]')
def whitespace(self, start, text, required=False):
"""Tokenize whitespace."""
end = start
try:
while text[end] in self.WHITESPACE:
end += 1
except IndexError:
end = len(text)
if required:
assert end != start
if end != start:
self.cur.append((start, Text, text[start:end]))
return end
def variable(self, start, text):
"""Tokenize a variable."""
assert text[start] in self.VARIABLES
end = start+1
while text[end] in self.PRIMES:
end += 1
self.cur.append((start, Name.Variable, text[start:end]))
return end
def term(self, start, text):
"""Tokenize a term."""
if text[start] == 'S': # S...S(...) or S...0
end = start+1
while text[end] == 'S':
end += 1
self.cur.append((start, Number.Integer, text[start:end]))
return self.term(end, text)
if text[start] == '0': # the singleton 0
self.cur.append((start, Number.Integer, text[start]))
return start+1
if text[start] in self.VARIABLES: # a''...
return self.variable(start, text)
if text[start] == '(': # (...+...)
self.cur.append((start, Punctuation, text[start]))
start = self.term(start+1, text)
assert text[start] in self.OPERATORS
self.cur.append((start, Operator, text[start]))
start = self.term(start+1, text)
assert text[start] == ')'
self.cur.append((start, Punctuation, text[start]))
return start+1
raise AssertionError # no matches
def formula(self, start, text):
"""Tokenize a formula."""
if text[start] in '[]': # fantasy push or pop
self.cur.append((start, Keyword, text[start]))
return start+1
if text[start] in self.NEGATORS: # ~<...>
end = start+1
while text[end] in self.NEGATORS:
end += 1
self.cur.append((start, Operator, text[start:end]))
return self.formula(end, text)
if text[start] in self.QUANTIFIERS: # Aa:<...>
self.cur.append((start, Keyword.Declaration, text[start]))
start = self.variable(start+1, text)
assert text[start] == ':'
self.cur.append((start, Punctuation, text[start]))
return self.formula(start+1, text)
if text[start] == '<': # <...&...>
self.cur.append((start, Punctuation, text[start]))
start = self.formula(start+1, text)
assert text[start] in self.LOGIC
self.cur.append((start, Operator, text[start]))
start = self.formula(start+1, text)
assert text[start] == '>'
self.cur.append((start, Punctuation, text[start]))
return start+1
# ...=...
start = self.term(start, text)
assert text[start] == '='
self.cur.append((start, Operator, text[start]))
start = self.term(start+1, text)
return start
def rule(self, start, text):
"""Tokenize a rule."""
match = self.RULES.match(text, start)
assert match is not None
groups = sorted(match.regs[1:]) # exclude whole match
for group in groups:
if group[0] >= 0: # this group matched
self.cur.append((start, Keyword, text[start:group[0]]))
self.cur.append((group[0], Number.Integer,
text[group[0]:group[1]]))
if group[1] != match.end():
self.cur.append((group[1], Keyword,
text[group[1]:match.end()]))
break
else:
self.cur.append((start, Keyword, text[start:match.end()]))
return match.end()
def lineno(self, start, text):
"""Tokenize a line marker."""
end = start
while text[end] not in self.NUMBERS:
end += 1
self.cur.append((start, Punctuation, text[start]))
self.cur.append((start+1, Text, text[start+1:end]))
start = end
match = self.LINENOS.match(text, start)
assert match is not None
assert text[match.end()] == ')'
self.cur.append((match.start(), Number.Integer, match.group(0)))
self.cur.append((match.end(), Punctuation, text[match.end()]))
return match.end() + 1
def error_till_line_end(self, start, text):
"""Mark everything from ``start`` to the end of the line as Error."""
end = start
try:
while text[end] != '\n': # there's whitespace in rules
end += 1
except IndexError:
end = len(text)
if end != start:
self.cur.append((start, Error, text[start:end]))
end = self.whitespace(end, text)
return end
def get_tokens_unprocessed(self, text):
"""Returns a list of TNT tokens."""
self.cur = []
start = end = self.whitespace(0, text)
while start <= end < len(text):
# try line number
while text[end] in self.NUMBERS:
end += 1
if end != start: # actual number present
self.cur.append((start, Number.Integer, text[start:end]))
# whitespace is required after a line number
orig = len(self.cur)
try:
start = end = self.whitespace(end, text, True)
except AssertionError:
del self.cur[orig:]
start = end = self.error_till_line_end(end, text)
continue
# at this point it could be a comment
match = self.COMMENT.match(text, start)
if match is not None:
self.cur.append((start, Comment, text[start:match.end()]))
start = end = match.end()
# anything after the closing bracket is invalid
start = end = self.error_till_line_end(start, text)
# do not attempt to process the rest
continue
del match
# one formula, possibly containing subformulae
orig = len(self.cur)
try:
start = end = self.formula(start, text)
except AssertionError: # not well-formed
del self.cur[orig:]
while text[end] not in self.WHITESPACE:
end += 1
self.cur.append((start, Error, text[start:end]))
start = end
# skip whitespace after formula
orig = len(self.cur)
try:
start = end = self.whitespace(end, text, True)
except AssertionError:
del self.cur[orig:]
start = end = self.error_till_line_end(start, text)
continue
# rule proving this formula a theorem
orig = len(self.cur)
try:
start = end = self.rule(start, text)
except AssertionError:
del self.cur[orig:]
start = end = self.error_till_line_end(start, text)
continue
# skip whitespace after rule
start = end = self.whitespace(end, text)
# line marker
if text[start] == '(':
orig = len(self.cur)
try:
start = end = self.lineno(start, text)
except AssertionError:
del self.cur[orig:]
start = end = self.error_till_line_end(start, text)
continue
start = end = self.whitespace(start, text)
return self.cur
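# Illustrative usage sketch (not part of the original module). The lexer is normally driven
# through pygments' standard highlight() API; 'proof.tnt' below is a hypothetical input
# file, so the snippet is left commented out.
#
# from pygments import highlight
# from pygments.formatters import TerminalFormatter
# print(highlight(open('proof.tnt').read(), TNTLexer(), TerminalFormatter()))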
| 2.25 | 2 |
contacts/urls.py | cheradenine/Django-CRM | 2 | 3502 | <filename>contacts/urls.py
from django.urls import path
from contacts.views import (
ContactsListView, CreateContactView, ContactDetailView,
UpdateContactView, RemoveContactView,
GetContactsView, AddCommentView, UpdateCommentView,
DeleteCommentView, AddAttachmentsView, DeleteAttachmentsView)
app_name = 'contacts'
urlpatterns = [
path('list/', ContactsListView.as_view(), name='list'),
path('create/', CreateContactView.as_view(), name='add_contact'),
path('<int:pk>/view/', ContactDetailView.as_view(), name="view_contact"),
path('<int:pk>/edit/', UpdateContactView.as_view(), name="edit_contact"),
path('<int:pk>/delete/',
RemoveContactView.as_view(),
name="remove_contact"),
path('get/list/', GetContactsView.as_view(), name="get_contacts"),
path('comment/add/', AddCommentView.as_view(), name="add_comment"),
path('comment/edit/', UpdateCommentView.as_view(), name="edit_comment"),
path('comment/remove/',
DeleteCommentView.as_view(),
name="remove_comment"),
path('attachment/add/',
AddAttachmentsView.as_view(),
name="add_attachment"),
path('attachment/remove/', DeleteAttachmentsView.as_view(),
name="remove_attachment"),
]
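# Illustrative note (not part of the original file): with ``app_name`` set above, these
# routes are reversed through the "contacts" namespace, assuming the app is included under
# that namespace in the project's root URLconf:
#
# from django.urls import reverse
# reverse('contacts:view_contact', kwargs={'pk': 1})   # e.g. '/contacts/1/view/'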
| 2.234375 | 2 |
windows/winobject/network.py | marpie/PythonForWindows | 1 | 3503 | import windows
import ctypes
import socket
import struct
from windows import winproxy
import windows.generated_def as gdef
from windows.com import interfaces as cominterfaces
from windows.generated_def.winstructs import *
from windows.generated_def.windef import *
class TCP4Connection(MIB_TCPROW_OWNER_PID):
"""A TCP4 socket (connected or listening)"""
@property
def established(self):
"""``True`` if connection is established else it's a listening socket"""
return self.dwState == MIB_TCP_STATE_ESTAB
@property
def remote_port(self):
""":type: :class:`int`"""
if not self.established:
return None
return socket.ntohs(self.dwRemotePort)
@property
def local_port(self):
""":type: :class:`int`"""
return socket.ntohs(self.dwLocalPort)
@property
def local_addr(self):
"""Local address IP (x.x.x.x)
:type: :class:`str`"""
return socket.inet_ntoa(struct.pack("<I", self.dwLocalAddr))
@property
def remote_addr(self):
"""remote address IP (x.x.x.x)
:type: :class:`str`"""
if not self.established:
return None
return socket.inet_ntoa(struct.pack("<I", self.dwRemoteAddr))
@property
def remote_proto(self):
"""Identification of the protocol associated with the remote port.
Equals ``remote_port`` if no protocol is associated with it.
:type: :class:`str` or :class:`int`
"""
try:
return socket.getservbyport(self.remote_port, 'tcp')
except socket.error:
return self.remote_port
@property
def remote_host(self):
"""Identification of the remote hostname.
Equals ``remote_addr`` if the resolution fails
:type: :class:`str` or :class:`int`
"""
try:
return socket.gethostbyaddr(self.remote_addr)
except socket.error:
return self.remote_addr
def close(self):
"""Close the connection <require elevated process>"""
closing = MIB_TCPROW()
closing.dwState = MIB_TCP_STATE_DELETE_TCB
closing.dwLocalAddr = self.dwLocalAddr
closing.dwLocalPort = self.dwLocalPort
closing.dwRemoteAddr = self.dwRemoteAddr
closing.dwRemotePort = self.dwRemotePort
return winproxy.SetTcpEntry(ctypes.byref(closing))
def __repr__(self):
if not self.established:
return "<TCP IPV4 Listening socket on {0}:{1}>".format(self.local_addr, self.local_port)
return "<TCP IPV4 Connection {s.local_addr}:{s.local_port} -> {s.remote_addr}:{s.remote_port}>".format(s=self)
class TCP6Connection(MIB_TCP6ROW_OWNER_PID):
"""A TCP6 socket (connected or listening)"""
@staticmethod
def _str_ipv6_addr(addr):
return ":".join(c.encode('hex') for c in addr)
@property
def established(self):
"""``True`` if connection is established else it's a listening socket"""
return self.dwState == MIB_TCP_STATE_ESTAB
@property
def remote_port(self):
""":type: :class:`int`"""
if not self.established:
return None
return socket.ntohs(self.dwRemotePort)
@property
def local_port(self):
""":type: :class:`int`"""
return socket.ntohs(self.dwLocalPort)
@property
def local_addr(self):
"""Local address IP
:type: :class:`str`"""
return self._str_ipv6_addr(self.ucLocalAddr)
@property
def remote_addr(self):
"""remote address IP
:type: :class:`str`"""
if not self.established:
return None
return self._str_ipv6_addr(self.ucRemoteAddr)
@property
def remote_proto(self):
"""Equals to ``self.remote_port`` for Ipv6"""
return self.remote_port
@property
def remote_host(self):
"""Equals to ``self.remote_addr`` for Ipv6"""
return self.remote_addr
def close(self):
raise NotImplementedError("Closing IPV6 connection not implemented")
def __repr__(self):
if not self.established:
return "<TCP IPV6 Listening socket on {0}:{1}>".format(self.local_addr, self.local_port)
return "<TCP IPV6 Connection {0}:{1} -> {2}:{3}>".format(self.local_addr, self.local_port, self.remote_addr, self.remote_port)
def get_MIB_TCPTABLE_OWNER_PID_from_buffer(buffer):
x = windows.generated_def.winstructs.MIB_TCPTABLE_OWNER_PID.from_buffer(buffer)
nb_entry = x.dwNumEntries
class _GENERATED_MIB_TCPTABLE_OWNER_PID(ctypes.Structure):
_fields_ = [
("dwNumEntries", DWORD),
("table", TCP4Connection * nb_entry),
]
return _GENERATED_MIB_TCPTABLE_OWNER_PID.from_buffer(buffer)
def get_MIB_TCP6TABLE_OWNER_PID_from_buffer(buffer):
x = windows.generated_def.winstructs.MIB_TCP6TABLE_OWNER_PID.from_buffer(buffer)
nb_entry = x.dwNumEntries
# Struct _MIB_TCP6TABLE_OWNER_PID definitions
class _GENERATED_MIB_TCP6TABLE_OWNER_PID(Structure):
_fields_ = [
("dwNumEntries", DWORD),
("table", TCP6Connection * nb_entry),
]
return _GENERATED_MIB_TCP6TABLE_OWNER_PID.from_buffer(buffer)
class Firewall(cominterfaces.INetFwPolicy2):
"""The windows firewall"""
@property
def rules(self):
"""The rules of the firewall
:type: [:class:`FirewallRule`] -- A list of rule
"""
ifw_rules = cominterfaces.INetFwRules()
self.get_Rules(ifw_rules)
nb_rules = gdef.LONG()
ifw_rules.get_Count(nb_rules)
unknw = cominterfaces.IUnknown()
ifw_rules.get__NewEnum(unknw)
pVariant = cominterfaces.IEnumVARIANT()
unknw.QueryInterface(pVariant.IID, pVariant)
count = gdef.ULONG()
var = windows.com.ImprovedVariant()
rules = []
for i in range(nb_rules.value):
pVariant.Next(1, var, count)
if not count.value:
break
rule = FirewallRule()
idisp = var.asdispatch
idisp.QueryInterface(rule.IID, rule)
rules.append(rule)
return rules
@property
def current_profile_types(self):
"""Mask of the profiles currently enabled
:type: :class:`long`
"""
cpt = gdef.LONG()
self.get_CurrentProfileTypes(cpt)
return cpt.value
@property
def enabled(self):
"""A maping of the active firewall profiles
{
``NET_FW_PROFILE_TYPE2_.NET_FW_PROFILE2_DOMAIN(0x1L)``: ``True`` or ``False``,
``NET_FW_PROFILE_TYPE2_.NET_FW_PROFILE2_PRIVATE(0x2L)``: ``True`` or ``False``,
``NET_FW_PROFILE_TYPE2_.NET_FW_PROFILE2_PUBLIC(0x4L)``: ``True`` or ``False``,
}
:type: :class:`dict`
"""
profiles = [gdef.NET_FW_PROFILE2_DOMAIN, gdef.NET_FW_PROFILE2_PRIVATE, gdef.NET_FW_PROFILE2_PUBLIC]
return {prof: self.enabled_for_profile_type(prof) for prof in profiles}
def enabled_for_profile_type(self, profile_type):
enabled = gdef.VARIANT_BOOL()
self.get_FirewallEnabled(profile_type, enabled)
return enabled.value
class FirewallRule(cominterfaces.INetFwRule):
"""A rule of the firewall"""
@property
def name(self):
"""Name of the rule
:type: :class:`unicode`
"""
name = gdef.BSTR()
self.get_Name(name)
return name.value
@property
def description(self):
"""Description of the rule
:type: :class:`unicode`
"""
description = gdef.BSTR()
self.get_Description(description)
return description.value
@property
def application_name(self):
"""Name of the application to which apply the rule
:type: :class:`unicode`
"""
applicationname = gdef.BSTR()
self.get_ApplicationName(applicationname)
return applicationname.value
@property
def service_name(self):
"""Name of the service to which apply the rule
:type: :class:`unicode`
"""
servicename = gdef.BSTR()
self.get_ServiceName(servicename)
return servicename.value
@property
def protocol(self):
"""Protocol to which apply the rule
:type: :class:`long`
"""
protocol = gdef.LONG()
self.get_Protocol(protocol)
return protocol.value
@property
def local_address(self):
"""Local address of the rule
:type: :class:`unicode`
"""
local_address = gdef.BSTR()
self.get_LocalAddresses(local_address)
return local_address.value
@property
def remote_address(self):
"""Remote address of the rule
:type: :class:`unicode`
"""
remote_address = gdef.BSTR()
self.get_RemoteAddresses(remote_address)
return remote_address.value
@property
def direction(self):
"""Direction of the rule, values might be:
* ``NET_FW_RULE_DIRECTION_.NET_FW_RULE_DIR_IN(0x1L)``
* ``NET_FW_RULE_DIRECTION_.NET_FW_RULE_DIR_OUT(0x2L)``
subclass of :class:`long`
"""
direction = gdef.NET_FW_RULE_DIRECTION()
self.get_Direction(direction)
return direction.value
@property
def interface_types(self):
"""Types of interface of the rule
:type: :class:`unicode`
"""
interface_type = gdef.BSTR()
self.get_InterfaceTypes(interface_type)
return interface_type.value
@property
def local_port(self):
"""Local port of the rule
:type: :class:`unicode`
"""
local_port = gdef.BSTR()
self.get_LocalPorts(local_port)
return local_port.value
@property
def remote_port(self):
"""Remote port of the rule
:type: :class:`unicode`
"""
remote_port = gdef.BSTR()
self.get_RemotePorts(remote_port)
return remote_port.value
@property
def action(self):
"""Action of the rule, values might be:
* ``NET_FW_ACTION_.NET_FW_ACTION_BLOCK(0x0L)``
* ``NET_FW_ACTION_.NET_FW_ACTION_ALLOW(0x1L)``
subclass of :class:`long`
"""
action = gdef.NET_FW_ACTION()
self.get_Action(action)
return action.value
@property
def enabled(self):
"""``True`` if rule is enabled"""
enabled = gdef.VARIANT_BOOL()
self.get_Enabled(enabled)
return enabled.value
@property
def grouping(self):
"""Grouping of the rule
:type: :class:`unicode`
"""
grouping = gdef.BSTR()
self.get_Grouping(grouping)
return grouping.value
@property
def icmp_type_and_code(self):
icmp_type_and_code = gdef.BSTR()
self.get_IcmpTypesAndCodes(icmp_type_and_code)
return icmp_type_and_code.value
def __repr__(self):
return u'<{0} "{1}">'.format(type(self).__name__, self.name).encode("ascii", errors='backslashreplace')
class Network(object):
NetFwPolicy2 = windows.com.IID.from_string("E2B3C97F-6AE1-41AC-817A-F6F92166D7DD")
@property
def firewall(self):
"""The firewall of the system
:type: :class:`Firewall`
"""
windows.com.init()
firewall = Firewall()
windows.com.create_instance(self.NetFwPolicy2, firewall)
return firewall
@staticmethod
def _get_tcp_ipv4_sockets():
size = ctypes.c_uint(0)
try:
winproxy.GetExtendedTcpTable(None, ctypes.byref(size), ulAf=AF_INET)
except winproxy.IphlpapiError:
pass # Allow us to set size to the needed value
buffer = (ctypes.c_char * size.value)()
winproxy.GetExtendedTcpTable(buffer, ctypes.byref(size), ulAf=AF_INET)
t = get_MIB_TCPTABLE_OWNER_PID_from_buffer(buffer)
return list(t.table)
@staticmethod
def _get_tcp_ipv6_sockets():
size = ctypes.c_uint(0)
try:
winproxy.GetExtendedTcpTable(None, ctypes.byref(size), ulAf=AF_INET6)
except winproxy.IphlpapiError:
pass # Allow us to set size to the needed value
buffer = (ctypes.c_char * size.value)()
winproxy.GetExtendedTcpTable(buffer, ctypes.byref(size), ulAf=AF_INET6)
t = get_MIB_TCP6TABLE_OWNER_PID_from_buffer(buffer)
return list(t.table)
ipv4 = property(lambda self: self._get_tcp_ipv4_sockets())
"""List of TCP IPv4 socket (connection and listening)
:type: [:class:`TCP4Connection`]"""
ipv6 = property(lambda self: self._get_tcp_ipv6_sockets())
"""List of TCP IPv6 socket (connection and listening)
:type: [:class:`TCP6Connection`]
"""
| 2.4375 | 2 |
LIM_scripts/func_curry.py | Bhare8972/LOFAR-LIM | 3 | 3504 | #!/usr/bin/env python3
# Coded by <NAME>, 2012.
#
# - Thanks to b49P23TIvg for suggesting that I should use a set operation
# instead of repeated membership tests.
# - Thanks to <NAME> for pointing out that
# - "minArgs = None" is better than "minArgs = -1",
# - "if args" is better than "if len(args)", and
# - I should use "isdisjoint".
#
def genCur(func, unique = True, minArgs = None):
""" Generates a 'curried' version of a function. """
def g(*myArgs, **myKwArgs):
def f(*args, **kwArgs):
if args or kwArgs: # some more args!
# Allocates data to assign to the next 'f'.
newArgs = myArgs + args
newKwArgs = dict.copy(myKwArgs)
# If unique is True, we don't want repeated keyword arguments.
if unique and not kwArgs.keys().isdisjoint(newKwArgs):
raise ValueError("Repeated kw arg while unique = True")
# Adds/updates keyword arguments.
newKwArgs.update(kwArgs)
# Checks whether it's time to evaluate func.
if minArgs is not None and minArgs <= len(newArgs) + len(newKwArgs):
return func(*newArgs, **newKwArgs) # time to evaluate func
else:
return g(*newArgs, **newKwArgs) # returns a new 'f'
else: # the evaluation was forced
return func(*myArgs, **myKwArgs)
return f
return g
def cur(f, minArgs = None):
return genCur(f, True, minArgs)
def curr(f, minArgs = None):
return genCur(f, False, minArgs)
if __name__ == "__main__":
# Simple Function.
def func(a, b, c, d, e, f, g = 100):
print(a, b, c, d, e, f, g)
# NOTE: '<====' means "this line prints to the screen".
# Example 1.
f = cur(func) # f is a "curried" version of func
c1 = f(1)
c2 = c1(2, d = 4) # Note that c is still unbound
c3 = c2(3)(f = 6)(e = 5) # now c = 3
c3() # () forces the evaluation <====
# it prints "1 2 3 4 5 6 100"
c4 = c2(30)(f = 60)(e = 50) # now c = 30
c4() # () forces the evaluation <====
# it prints "1 2 30 4 50 60 100"
print("\n------\n")
# Example 2.
f = curr(func) # f is a "curried" version of func
# curr = cur with possibly repeated
# keyword args
c1 = f(1, 2)(3, 4)
c2 = c1(e = 5)(f = 6)(e = 10)() # oops... we repeated 'e' because we <====
# changed our mind about it!
# again, () forces the evaluation
# it prints "1 2 3 4 10 6 100"
print("\n------\n")
# Example 3.
f = cur(func, 6) # forces the evaluation after 6 arguments
c1 = f(1, 2, 3) # num args = 3
c2 = c1(4, f = 6) # num args = 5
c3 = c2(5) # num args = 6 ==> evalution <====
# it prints "1 2 3 4 5 6 100"
c4 = c2(5, g = -1) # num args = 7 ==> evaluation <====
# we can specify more than 6 arguments, but
# 6 are enough to force the evaluation
# it prints "1 2 3 4 5 6 -1"
print("\n------\n")
# Example 4.
def printTree(func, level = None):
if level is None:
printTree(cur(func), 0)
elif level == 6:
func(g = '')() # or just func('')()
else:
printTree(func(0), level + 1)
printTree(func(1), level + 1)
printTree(func)
print("\n------\n")
def f2(*args):
print(", ".join(["%3d"%(x) for x in args]))
def stress(f, n):
if n: stress(f(n), n - 1)
else: f() # enough is enough
stress(cur(f2), 100) | 2.859375 | 3 |
src/messages.py | Ewpratten/chat | 0 | 3505 | greeting = """
--------------- BEGIN SESSION ---------------
You have connected to a chat server. Welcome!
:: About
Chat is a small piece of server software
written by <NAME> to allow people to
talk to eachother from any computer as long
as it has an internet connection. (Even an
arduino!). Check out the project at:
https://github.com/Ewpratten/chat
:: Disclaimer
While chatting, keep in mind that, if there
is a rule or regulation about privacy, this
server does not follow it. All data is sent
to and from this server over a raw TCP socket
and data is temporarily stored in plaintext
while the server handles message broadcasting
Now that's out of the way so, happy chatting!
---------------------------------------------
""" | 2.546875 | 3 |
Prediction.py | khayam-hafezi/CRNN-keras-persian | 0 | 3506 | <reponame>khayam-hafezi/CRNN-keras-persian
import cv2
import itertools, os, time
import numpy as np
from Model import get_Model
from parameter import letters
import argparse
from keras import backend as K
K.set_learning_phase(0)
Region = {"A": "서울 ", "B": "경기 ", "C": "인천 ", "D": "강원 ", "E": "충남 ", "F": "대전 ",
"G": "충북 ", "H": "부산 ", "I": "울산 ", "J": "대구 ", "K": "경북 ", "L": "경남 ",
"M": "전남 ", "N": "광주 ", "O": "전북 ", "P": "제주 "}
Hangul = {"dk": "아", "dj": "어", "dh": "오", "dn": "우", "qk": "바", "qj": "버", "qh": "보", "qn": "부",
"ek": "다", "ej": "더", "eh": "도", "en": "두", "rk": "가", "rj": "거", "rh": "고", "rn": "구",
"wk": "자", "wj": "저", "wh": "조", "wn": "주", "ak": "마", "aj": "머", "ah": "모", "an": "무",
"sk": "나", "sj": "너", "sh": "노", "sn": "누", "fk": "라", "fj": "러", "fh": "로", "fn": "루",
"tk": "사", "tj": "서", "th": "소", "tn": "수", "gj": "허"}
def decode_label(out):
# out : (1, 32, 42)
out_best = list(np.argmax(out[0, 2:], axis=1)) # get max index -> len = 32
out_best = [k for k, g in itertools.groupby(out_best)] # remove overlap value
outstr = ''
for i in out_best:
if i < len(letters):
outstr += letters[i]
return outstr
def label_to_hangul(label): # eng -> hangul
region = label[0]
two_num = label[1:3]
hangul = label[3:5]
four_num = label[5:]
try:
region = Region[region] if region != 'Z' else ''
except:
pass
try:
hangul = Hangul[hangul]
except:
pass
return region + two_num + hangul + four_num
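# Illustrative example (not part of the original script), using only the lookup tables
# defined above: the raw network label "A12dk3456" decodes to "서울 12아3456", i.e.
# region code 'A', the two digits, the syllable encoded as 'dk', then the four digits.
#
# assert label_to_hangul('A12dk3456') == Region['A'] + '12' + Hangul['dk'] + '3456'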
parser = argparse.ArgumentParser()
parser.add_argument("-w", "--weight", help="weight file directory",
type=str, default="models/weights.best.hdf5")
parser.add_argument("-t", "--test_img", help="Test image directory",
type=str, default="./DB/test/")
args = parser.parse_args()
# Get CRNN model
model = get_Model(training=False)
try:
model.load_weights(args.weight)
print("...Previous weight data...")
except:
raise Exception("No weight file!")
test_dir =args.test_img
test_imgs = os.listdir(args.test_img)
total = 0
acc = 0
letter_total = 0
letter_acc = 0
start = time.time()
for test_img in test_imgs:
img = cv2.imread(test_dir + test_img, cv2.IMREAD_GRAYSCALE)
img_pred = img.astype(np.float32)
img_pred = cv2.resize(img_pred, (128, 64))
img_pred = (img_pred / 255.0) * 2.0 - 1.0
img_pred = img_pred.T
img_pred = np.expand_dims(img_pred, axis=-1)
img_pred = np.expand_dims(img_pred, axis=0)
net_out_value = model.predict(img_pred)
pred_texts = decode_label(net_out_value)
for i in range(min(len(pred_texts), len(test_img[0:-4]))):
if pred_texts[i] == test_img[i]:
letter_acc += 1
letter_total += max(len(pred_texts), len(test_img[0:-4]))
predOk = "True"
if pred_texts == test_img[0:-4]:
acc += 1
else:
predOk = "False"
total += 1
# print('Predicted: %s / True: %s / net_out_value: %s / ' % (label_to_hangul(pred_texts), label_to_hangul(test_img[0:-4])))
print('Predicted: %s / True: %s / predOk: %s ' % (pred_texts, test_img[0:-4], predOk ))
# cv2.rectangle(img, (0,0), (150, 30), (0,0,0), -1)
# cv2.putText(img, pred_texts, (5, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,255,255),2)
#cv2.imshow("q", img)
#if cv2.waitKey(0) == 27:
# break
#cv2.destroyAllWindows()
end = time.time()
total_time = (end - start)
print("Time : ",total_time / total)
print("ACC : ", acc / total)
print("letter ACC : ", letter_acc / letter_total)
| 2.0625 | 2 |
torcharrow/_interop.py | OswinC/torcharrow | 0 | 3507 | # Copyright (c) Facebook, Inc. and its affiliates.
from typing import List, Optional, cast
# Skipping analyzing 'numpy': found module but no type hints or library stubs
import numpy as np # type: ignore
import numpy.ma as ma # type: ignore
# Skipping analyzing 'pandas': found module but no type hints or library stubs
import pandas as pd # type: ignore
import pyarrow as pa # type: ignore
import torcharrow.dtypes as dt
from torcharrow import Scope
def from_arrow_table(
table,
dtype: Optional[dt.DType] = None,
columns: Optional[List[str]] = None,
scope=None,
device="",
):
""" "
Convert arrow table to a torcharrow dataframe.
"""
scope = scope or Scope.default
device = device or scope.device
assert isinstance(table, pa.Table)
if dtype is not None:
assert dt.is_struct(dtype)
dtype = cast(dt.Struct, dtype)
res = {}
for f in dtype.fields:
chunked_array = table.column(f.name)
pydata = chunked_array.to_pylist()
res[f.name] = scope.Column(pydata, f.dtype)
return scope.DataFrame(res, device=device)
else:
res = {}
table = table.select(columns) if columns is not None else table
for n in table.column_names:
chunked_array = table.column(n)
pydata = chunked_array.to_pylist()
res[n] = scope.Column(
pydata,
dtype=_arrowtype_to_dtype(
table.schema.field(n).type, table.column(n).null_count > 0
),
)
return scope.DataFrame(res, device=device)
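# Illustrative sketch (not part of the original module): converting a small pyarrow table;
# the column names and values are made up, and the import path simply mirrors this file's
# location (torcharrow/_interop.py).
#
# import pyarrow as pa
# from torcharrow._interop import from_arrow_table
# t = pa.table({'a': [1, 2, 3], 'b': ['x', 'y', None]})
# df = from_arrow_table(t)   # infers Int64 for 'a' and a nullable String for 'b'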
def from_pandas_dataframe(
df,
dtype: Optional[dt.DType] = None,
columns: Optional[List[str]] = None,
scope=None,
device="",
):
"""
Convert pandas dataframe to torcharrow dataframe (drops indices).
Parameters
----------
df : Pandas dataframe
dtype : dtype, default None
Data type to force, if None will automatically infer.
columns : array-like
List of column names to extract from df.
scope : Scope or None
Scope to use, or None for default scope.
device : str or ""
Device to use, or default if blank.
Examples
--------
>>> import pandas as pd
>>> import torcharrow as ta
>>> pdf = pd.DataFrame({'a': [0, 1, 2, 3],'b': [0.1, 0.2, None, 0.3]})
>>> gdf = ta.from_pandas_dataframe(pdf)
>>> gdf
index a b
------- --- ---
0 0 0.1
1 1 0.2
2 2
3 3 0.3
dtype: Struct([Field('a', int64), Field('b', Float64(nullable=True))]), count: 4, null_count: 0
"""
scope = scope or Scope.default
device = device or scope.device
if dtype is not None:
assert dt.is_struct(dtype)
dtype = cast(dt.Struct, dtype)
res = {}
for f in dtype.fields:
# this shows that Column should also construct Dataframes!
res[f.name] = from_pandas_series(
pd.Series(df[f.name]), f.dtype, scope=scope
)
return scope.Frame(res, dtype=dtype, device=device)
else:
res = {}
for n in df.columns:
if columns is None or n in columns:
res[n] = from_pandas_series(pd.Series(df[n]), scope=scope)
return scope.Frame(res, device=device)
def from_arrow_array(array, dtype=None, scope=None, device=""):
""" "
Convert arrow array to a torcharrow column.
"""
scope = scope or Scope.default
device = device or scope.device
assert isinstance(array, pa.Array)
pydata = _arrow_scalar_to_py(array)
if dtype is not None:
assert not dt.is_struct(dtype)
return scope.Column(pydata, dtype, device=device)
else:
return scope.Column(
pydata,
dtype=_arrowtype_to_dtype(array.type, array.null_count > 0),
device=device,
)
def from_pandas_series(series, dtype=None, scope=None, device=""):
""" "
Convert pandas series array to a torcharrow column (drops indices).
"""
scope = scope or Scope.default
device = device or scope.device
return from_numpy(series.to_numpy(), dtype, scope, device)
def from_numpy(array, dtype, scope=None, device=""):
"""
Convert 1dim numpy array to a torcharrow column (zero copy).
"""
scope = scope or Scope.default
device = device or scope.device
if isinstance(array, ma.core.MaskedArray) and array.ndim == 1:
return _from_numpy_ma(array.data, array.mask, dtype, scope, device)
elif isinstance(array, np.ndarray) and array.ndim == 1:
return _from_numpy_nd(array, dtype, scope, device)
else:
raise TypeError(f"cannot convert numpy array of type {array.dtype}")
def _is_not_str(s):
return not isinstance(s, str)
def _from_numpy_ma(data, mask, dtype, scope=None, device=""):
# adopt types
if dtype is None:
dtype = dt.typeof_np_dtype(data.dtype).with_null()
else:
assert dt.is_primitive_type(dtype)
assert dtype == dt.typeof_np_dtype(data.dtype).with_null()
# TODO if not, adopt the type or?
# Something like ma.array
# np.array([np.nan, np.nan, 3.]).astype(np.int64),
# mask = np.isnan([np.nan, np.nan, 3.]))
# create column, only zero copy supported
if dt.is_boolean_or_numerical(dtype):
assert not np.all(np.isnan(ma.array(data, mask).compressed()))
return scope._FullColumn(data, dtype=dtype, mask=mask)
elif dt.is_string(dtype) or dtype == "object":
assert np.all(np.vectorize(_is_not_str)(ma.array(data, mask).compressed()))
return scope._FullColumn(data, dtype=dtype, mask=mask)
else:
raise TypeError(f"cannot convert masked numpy array of type {data.dtype}")
def _from_numpy_nd(data, dtype, scope=None, device=""):
# adopt types
if dtype is None:
dtype = dt.typeof_np_dtype(data.dtype)
if dtype is None:
dtype = dt.string
else:
assert dt.is_primitive(dtype)
# TODO Check why the following assert isn't the case
# assert dtype == dt.typeof_np_dtype(data.dtype)
# create column, only zero copy supported
if dt.is_boolean_or_numerical(dtype):
mask = np.isnan(data)
return scope._FullColumn(data, dtype=dtype, mask=mask)
elif dt.is_string(dtype):
mask = np.vectorize(_is_not_str)(data)
if np.any(mask):
dtype = dtype.with_null()
return scope._FullColumn(data, dtype=dtype, mask=mask)
else:
raise TypeError("can not convert numpy array of type {data.dtype,}")
# def _column_without_nan(series, dtype):
# if dtype is None or is_floating(dtype):
# for i in series:
# if isinstance(i, float) and np.isnan(i):
# yield None
# else:
# yield i
# else:
# for i in series:
# yield i
def _arrow_scalar_to_py(array):
for i in array:
yield i.as_py()
def _pandatype_to_dtype(t, nullable):
return dt.typeof_nptype(t, nullable)
def _arrowtype_to_dtype(t, nullable):
if pa.types.is_boolean(t):
return dt.Boolean(nullable)
if pa.types.is_int8(t):
return dt.Int8(nullable)
if pa.types.is_int16(t):
return dt.Int16(nullable)
if pa.types.is_int32(t):
return dt.Int32(nullable)
if pa.types.is_int64(t):
return dt.Int64(nullable)
if pa.types.is_float32(t):
return dt.Float32(nullable)
if pa.types.is_float64(t):
return dt.Float64(nullable)
if pa.types.is_list(t):
return dt.List(t.value_type, nullable)
if pa.types.is_struct(t):
return _pandatype_to_dtype(t.to_pandas_dtype(), True)
if pa.types.is_null(t):
return dt.Void()
if pa.types.is_string(t):
return dt.String(nullable)
if pa.types.is_map(t):
return dt.Map(t.item_type, t.key_type, nullable)
raise NotImplementedError("unsupported case")
| 2.515625 | 3 |
research/gan/image_compression/eval.py | jdavidagudelo/tensorflow-models | 1 | 3508 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Evaluates a TFGAN trained compression model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
import tensorflow as tf
from research.gan.image_compression import data_provider
from research.gan.image_compression import networks
from research.gan.image_compression import summaries
FLAGS = tf.app.flags.FLAGS
flags = tf.app.flags
flags.DEFINE_string('master', '', 'Name of the TensorFlow master to use.')
flags.DEFINE_string('checkpoint_dir', '/tmp/compression/',
'Directory where the model was written to.')
flags.DEFINE_string('eval_dir', '/tmp/compression/',
'Directory where the results are saved to.')
flags.DEFINE_integer('max_number_of_evaluations', None,
'Number of times to run evaluation. If `None`, run '
'forever.')
flags.DEFINE_string('dataset_dir', 'testdata', 'Location of data.')
# Compression-specific flags.
flags.DEFINE_integer('batch_size', 32, 'The number of images in each batch.')
flags.DEFINE_integer('patch_size', 32, 'The size of the patches to train on.')
flags.DEFINE_integer('bits_per_patch', 1230,
'The number of bits to produce per patch.')
flags.DEFINE_integer('model_depth', 64,
'Number of filters for compression model')
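# Example invocation (illustrative only; the paths and values are placeholders):
#
#   python eval.py --checkpoint_dir=/tmp/compression/ --eval_dir=/tmp/compression/ \
#       --dataset_dir=testdata --max_number_of_evaluations=1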
def main(_, run_eval_loop=True):
with tf.name_scope('inputs'):
images = data_provider.provide_data(
'validation', FLAGS.batch_size, dataset_dir=FLAGS.dataset_dir,
patch_size=FLAGS.patch_size)
# In order for variables to load, use the same variable scope as in the
# train job.
with tf.variable_scope('generator'):
reconstructions, _, prebinary = networks.compression_model(
images,
num_bits=FLAGS.bits_per_patch,
depth=FLAGS.model_depth,
is_training=False)
summaries.add_reconstruction_summaries(images, reconstructions, prebinary)
# Visualize losses.
pixel_loss_per_example = tf.reduce_mean(
tf.abs(images - reconstructions), axis=[1, 2, 3])
pixel_loss = tf.reduce_mean(pixel_loss_per_example)
tf.summary.histogram('pixel_l1_loss_hist', pixel_loss_per_example)
tf.summary.scalar('pixel_l1_loss', pixel_loss)
# Create ops to write images to disk.
uint8_images = data_provider.float_image_to_uint8(images)
uint8_reconstructions = data_provider.float_image_to_uint8(reconstructions)
uint8_reshaped = summaries.stack_images(uint8_images, uint8_reconstructions)
image_write_ops = tf.write_file(
'%s/%s' % (FLAGS.eval_dir, 'compression.png'),
tf.image.encode_png(uint8_reshaped[0]))
# For unit testing, use `run_eval_loop=False`.
if not run_eval_loop: return
tf.contrib.training.evaluate_repeatedly(
FLAGS.checkpoint_dir,
master=FLAGS.master,
hooks=[tf.contrib.training.SummaryAtEndHook(FLAGS.eval_dir),
tf.contrib.training.StopAfterNEvalsHook(1)],
eval_ops=image_write_ops,
max_number_of_evaluations=FLAGS.max_number_of_evaluations)
if __name__ == '__main__':
app.run(main)
| 1.976563 | 2 |
source/dump_query_results.py | CheyenneNS/metrics | 0 | 3509 | <reponame>CheyenneNS/metrics
#!/usr/local/bin/python
import os
import mysql.connector as mysql
metrics_mysql_password = os.environ['METRICS_MYSQL_PWD']
sql_host = os.environ['SQL_HOST']
metrics = os.environ['QUERY_ON']
def dump_query_results():
"""
This is a simple SQL table dump of a given query so we can supply users with custom tables.
Note that the SQL query itself and column headers portion need to be changed if you want to change
the query/results. Otherwise it is good to go.
It can be called simply with the bin shell script.
Read the README at the top level for an example.
"""
#connect to mysql
db_connection = mysql.connect(
host = sql_host,#"mysql1", #"localhost",
user = "metrics", #"root",
passwd = metrics_mysql_password,
database = "metrics" #"datacamp"
)
cursor = db_connection.cursor()
query = "use "+metrics
cursor.execute(query)
#CHANGE QUERY HERE
query = "select username, display_name, email, orcid, kb_internal_user, institution, country, signup_date, last_signin_date from user_info order by signup_date"
#CHANGE COLUMN HEADERS HERE TO MATCH QUERY HEADERS
print("username\tdisplay_name\temail\torcid\tkb_internal_user\tinstitution\tcountry\tsignup_date\tlast_signin_date")
cursor.execute(query)
row_values = list()
for (row_values) in cursor:
temp_string = ""
for i in range(len(row_values) - 1):
if row_values[i] is not None:
temp_string += str(row_values[i])
temp_string += "\t"
if row_values[-1] is not None:
temp_string += str(row_values[-1])
print(temp_string)
return 1
dump_query_results()
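# Example invocation (illustrative only; the values are placeholders). The three
# environment variables are the ones read at the top of this script:
#
#   METRICS_MYSQL_PWD=... SQL_HOST=localhost QUERY_ON=metrics python dump_query_results.py > user_info.tsv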
| 2.671875 | 3 |
desktop_local_tests/windows/test_windows_packet_capture_disrupt_force_public_dns_servers.py | UAEKondaya1/expressvpn_leak_testing | 219 | 3510 | <reponame>UAEKondaya1/expressvpn_leak_testing
from desktop_local_tests.local_packet_capture_test_case_with_disrupter import LocalPacketCaptureTestCaseWithDisrupter
from desktop_local_tests.windows.windows_dns_force_public_dns_servers_disrupter import WindowsDNSForcePublicDNSServersDisrupter
class TestWindowsPacketCaptureDisruptForcePublicDNSServers(LocalPacketCaptureTestCaseWithDisrupter):
# TODO: Make the packet capture here DNS specific?
def __init__(self, devices, parameters):
super().__init__(WindowsDNSForcePublicDNSServersDisrupter, devices, parameters)
| 1.835938 | 2 |
kivy/loader.py | geojeff/kivy | 1 | 3511 | '''
Asynchronous data loader
========================
This is the Asynchronous Loader. You can use it to load an image
and use it, even if the data is not yet available. You must specify a default
loading image when using such a loader::
from kivy import *
image = Loader.image('mysprite.png')
You can also load an image from a url::
image = Loader.image('http://mysite.com/test.png')
If you want to change the default loading image, you can do::
Loader.loading_image = Image('another_loading.png')
Tweaking the asynchronous loader
--------------------------------
.. versionadded:: 1.6.0
You can now tweak the loader to have a better user experience or more
performance, depending on the images you're going to load. Take a look at the
parameters:
- :data:`Loader.num_workers` - defines the number of worker threads to start
  for loading images
- :data:`Loader.max_upload_per_frame` - defines the maximum number of image
  uploads to the GPU per frame, as shown in the example below
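
For example (the values here are only illustrative)::

    from kivy.loader import Loader
    Loader.num_workers = 4
    Loader.max_upload_per_frame = 1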
'''
__all__ = ('Loader', 'LoaderBase', 'ProxyImage')
from kivy import kivy_data_dir
from kivy.logger import Logger
from kivy.clock import Clock
from kivy.cache import Cache
from kivy.core.image import ImageLoader, Image
from kivy.compat import PY2
from collections import deque
from time import sleep
from os.path import join
from os import write, close, unlink, environ
import threading
# Register a cache for loader
Cache.register('kv.loader', limit=500, timeout=60)
class ProxyImage(Image):
'''Image returned by the Loader.image() function.
:Properties:
`loaded`: bool, default to False
It can be True if the image is already cached
:Events:
`on_load`
Fired when the image is loaded and changed
'''
__events__ = ('on_load', )
def __init__(self, arg, **kwargs):
kwargs.setdefault('loaded', False)
super(ProxyImage, self).__init__(arg, **kwargs)
self.loaded = kwargs.get('loaded')
def on_load(self):
pass
class LoaderBase(object):
'''Common base for Loader and specific implementation.
By default, Loader will be the best available loader implementation.
The _update() function is called every 1 / 25.s or each frame if we have
less than 25 FPS.
'''
def __init__(self):
self._loading_image = None
self._error_image = None
self._num_workers = 2
self._max_upload_per_frame = 2
self._paused = False
self._resume_cond = threading.Condition()
self._q_load = deque()
self._q_done = deque()
self._client = []
self._running = False
self._start_wanted = False
self._trigger_update = Clock.create_trigger(self._update)
def __del__(self):
try:
Clock.unschedule(self._update)
except Exception:
pass
def _set_num_workers(self, num):
if num < 2:
raise Exception('Must have at least 2 workers')
self._num_workers = num
def _get_num_workers(self):
return self._num_workers
num_workers = property(_get_num_workers, _set_num_workers)
'''Number of workers to use while loading (used only if the loader
implementation supports it). This setting impacts the loader only at the
beginning. Once the loader is started, the setting has no impact::
from kivy.loader import Loader
Loader.num_workers = 4
The default value is 2, which gives a smooth user experience. You could
increase the number of workers so that all the images load faster, but the
user will not be able to use the application while loading. Prior to 1.6.0,
the default number was 20, and loading many full-HD images completely
blocked the application.
.. versionadded:: 1.6.0
'''
def _set_max_upload_per_frame(self, num):
if num is not None and num < 1:
raise Exception('Must have at least 1 image processing per image')
self._max_upload_per_frame = num
def _get_max_upload_per_frame(self):
return self._max_upload_per_frame
max_upload_per_frame = property(_get_max_upload_per_frame,
_set_max_upload_per_frame)
'''Number of images to upload to the GPU per frame. By default, only 2
images are uploaded per frame. If you are uploading many tiny images, you can
easily increase this parameter to 10 or more.
If you are loading multiple full-HD images, the upload time can be
significant and can stall the application during the upload. If you want a
smooth experience, keep the default.
As a matter of fact, a full-HD RGB image takes ~6MB in memory, so uploading
it takes time. If you have also activated mipmap=True, the GPU must compute
the mipmaps of these big images in real time as well. It can then be smart
to reduce :data:`max_upload_per_frame` to 1 or 2. If you want to avoid this
cost entirely (or reduce it a lot), take a look at the DDS format.
.. versionadded:: 1.6.0
'''
def _get_loading_image(self):
if not self._loading_image:
loading_png_fn = join(kivy_data_dir, 'images', 'image-loading.gif')
self._loading_image = ImageLoader.load(filename=loading_png_fn)
return self._loading_image
def _set_loading_image(self, image):
if isinstance(image, basestring):
self._loading_image = ImageLoader.load(filename=image)
else:
self._loading_image = image
loading_image = property(_get_loading_image, _set_loading_image)
'''Image used for loading.
You can change it by doing::
Loader.loading_image = 'loading.png'
.. versionchanged:: 1.6.0
Not readonly anymore.
'''
def _get_error_image(self):
if not self._error_image:
error_png_fn = join(
'atlas://data/images/defaulttheme/image-missing')
self._error_image = ImageLoader.load(filename=error_png_fn)
return self._error_image
def _set_error_image(self, image):
if isinstance(image, basestring):
self._error_image = ImageLoader.load(filename=image)
else:
self._error_image = image
error_image = property(_get_error_image, _set_error_image)
'''Image used for error.
You can change it by doing::
Loader.error_image = 'error.png'
.. versionchanged:: 1.6.0
Not readonly anymore.
'''
def start(self):
'''Start the loader thread/process'''
self._running = True
def run(self, *largs):
'''Main loop for the loader.'''
pass
def stop(self):
'''Stop the loader thread/process'''
self._running = False
def pause(self):
'''Pause the loader, can be useful during interactions
.. versionadded:: 1.6.0
'''
self._paused = True
def resume(self):
'''Resume the loader, after a :meth:`pause`.
.. versionadded:: 1.6.0
'''
self._paused = False
self._resume_cond.acquire()
self._resume_cond.notify_all()
self._resume_cond.release()
def _wait_for_resume(self):
while self._running and self._paused:
self._resume_cond.acquire()
self._resume_cond.wait(0.25)
self._resume_cond.release()
def _load(self, kwargs):
'''(internal) Loading function, called by the thread.
Will call _load_local() if the file is local,
or _load_urllib() if the file is on Internet
'''
while len(self._q_done) >= (
self.max_upload_per_frame * self._num_workers):
sleep(0.1)
self._wait_for_resume()
filename = kwargs['filename']
load_callback = kwargs['load_callback']
post_callback = kwargs['post_callback']
try:
proto = filename.split(':', 1)[0]
except:
#if blank filename then return
return
if load_callback is not None:
data = load_callback(filename)
elif proto in ('http', 'https', 'ftp', 'smb'):
data = self._load_urllib(filename, kwargs['kwargs'])
else:
data = self._load_local(filename, kwargs['kwargs'])
if post_callback:
data = post_callback(data)
self._q_done.appendleft((filename, data))
self._trigger_update()
def _load_local(self, filename, kwargs):
'''(internal) Loading a local file'''
# With recent changes to CoreImage, we must keep data otherwise,
# we might be unable to recreate the texture afterwise.
return ImageLoader.load(filename, keep_data=True, **kwargs)
def _load_urllib(self, filename, kwargs):
'''(internal) Loading a network file. First download it, save it to a
temporary file, and pass it to _load_local()'''
if PY2:
import urllib2 as urllib_request
else:
import urllib.request as urllib_request
proto = filename.split(':', 1)[0]
if proto == 'smb':
try:
# note: it's important to load SMBHandler every time
# otherwise the data is occasionally not loaded
from smb.SMBHandler import SMBHandler
except ImportError:
Logger.warning(
'Loader: can not load PySMB: make sure it is installed')
return
import tempfile
data = fd = _out_osfd = None
try:
_out_filename = ''
suffix = '.%s' % (filename.split('.')[-1])
_out_osfd, _out_filename = tempfile.mkstemp(
prefix='kivyloader', suffix=suffix)
if proto == 'smb':
# read from samba shares
fd = urllib_request.build_opener(SMBHandler).open(filename)
else:
# read from internet
fd = urllib_request.urlopen(filename)
idata = fd.read()
fd.close()
fd = None
# write to local filename
write(_out_osfd, idata)
close(_out_osfd)
_out_osfd = None
# load data
data = self._load_local(_out_filename, kwargs)
# FIXME create a clean API for that
for imdata in data._data:
imdata.source = filename
except Exception:
Logger.exception('Failed to load image <%s>' % filename)
# close file when remote file not found or download error
try:
close(_out_osfd)
except OSError:
pass
return self.error_image
finally:
if fd:
fd.close()
if _out_osfd:
close(_out_osfd)
if _out_filename != '':
unlink(_out_filename)
return data
def _update(self, *largs):
'''(internal) Check if a data is loaded, and pass to the client'''
# want to start it ?
if self._start_wanted:
if not self._running:
self.start()
self._start_wanted = False
# in pause mode, don't unqueue anything.
if self._paused:
self._trigger_update()
return
for x in range(self.max_upload_per_frame):
try:
filename, data = self._q_done.pop()
except IndexError:
return
# create the image
image = data # ProxyImage(data)
if not image.nocache:
Cache.append('kv.loader', filename, image)
# update client
for c_filename, client in self._client[:]:
if filename != c_filename:
continue
# got one client to update
client.image = image
client.loaded = True
client.dispatch('on_load')
self._client.remove((c_filename, client))
self._trigger_update()
def image(self, filename, load_callback=None, post_callback=None, **kwargs):
'''Load a image using the Loader. A ProxyImage is returned with a
loading image. You can use it as follows::
from kivy.app import App
from kivy.uix.image import Image
from kivy.loader import Loader
class TestApp(App):
def _image_loaded(self, proxyImage):
if proxyImage.image.texture:
self.image.texture = proxyImage.image.texture
def build(self):
proxyImage = Loader.image("myPic.jpg")
proxyImage.bind(on_load=self._image_loaded)
self.image = Image()
return self.image
TestApp().run()
In order to cancel all background loading, call *Loader.stop()*.
'''
data = Cache.get('kv.loader', filename)
if data not in (None, False):
# found image, if data is not here, need to reload.
return ProxyImage(data,
loading_image=self.loading_image,
loaded=True, **kwargs)
client = ProxyImage(self.loading_image,
loading_image=self.loading_image, **kwargs)
self._client.append((filename, client))
if data is None:
# if data is None, this is really the first time
self._q_load.appendleft({
'filename': filename,
'load_callback': load_callback,
'post_callback': post_callback,
'kwargs': kwargs})
if not kwargs.get('nocache', False):
Cache.append('kv.loader', filename, False)
self._start_wanted = True
self._trigger_update()
else:
# already queued for loading
pass
return client
#
# Loader implementation
#
if 'KIVY_DOC' in environ:
Loader = None
else:
#
# Try to use pygame as our first choice for loader
#
from kivy.compat import queue
from threading import Thread
class _Worker(Thread):
'''Thread executing tasks from a given tasks queue
'''
def __init__(self, pool, tasks):
Thread.__init__(self)
self.tasks = tasks
self.daemon = True
self.pool = pool
self.start()
def run(self):
while self.pool.running:
func, args, kargs = self.tasks.get()
try:
func(*args, **kargs)
except Exception as e:
print(e)
self.tasks.task_done()
class _ThreadPool(object):
'''Pool of threads consuming tasks from a queue
'''
def __init__(self, num_threads):
super(_ThreadPool, self).__init__()
self.running = True
self.tasks = queue.Queue()
for _ in range(num_threads):
_Worker(self, self.tasks)
def add_task(self, func, *args, **kargs):
'''Add a task to the queue
'''
self.tasks.put((func, args, kargs))
def stop(self):
self.running = False
self.tasks.join()
class LoaderThreadPool(LoaderBase):
def __init__(self):
super(LoaderThreadPool, self).__init__()
self.pool = None
def start(self):
super(LoaderThreadPool, self).start()
self.pool = _ThreadPool(self._num_workers)
Clock.schedule_interval(self.run, 0)
def stop(self):
super(LoaderThreadPool, self).stop()
Clock.unschedule(self.run)
self.pool.stop()
def run(self, *largs):
while self._running:
try:
parameters = self._q_load.pop()
except:
return
self.pool.add_task(self._load, parameters)
Loader = LoaderThreadPool()
Logger.info('Loader: using a thread pool of {} workers'.format(
Loader.num_workers))
| 3.515625 | 4 |
Season 01 - Intro to Python/Episode 13 - Join.py | Pythobit/Python-tutorial | 3 | 3512 | # 13. Join
# it allows to print list a bit better
friends = ['Pythobit','boy','Pythoman']
print(f'My friends are {friends}.') # Output - My friends are ['Pythobit', 'boy', 'Pythoman'].
# So, the Output needs to be a bit clearer.
friends = ['Pythobit','boy','Pythoman']
friend = ', '.join(friends)
print(f'My friends are {friend}') # Output - My friends are Pythobit, boy, Pythoman
# Here (, ) comma n space is used as separator, but you can use anything.
| 4.25 | 4 |
buildsettings.py | randomizax/polygon-label | 0 | 3513 | # settings file for builds.
# if you want to have custom builds, copy this file to "localbuildsettings.py" and make changes there.
# possible fields:
# resourceUrlBase - optional - the URL base for external resources (all resources embedded in standard IITC)
# distUrlBase - optional - the base URL to use for update checks
# buildMobile - optional - if set, mobile builds are built with 'ant'. requires the Android SDK and appropriate mobile/local.properties file configured
# preBuild - optional - an array of strings to run as commands, via os.system, before building the scripts
# postBuild - optional - an array of strings to run as commands, via os.system, after all builds are complete
buildSettings = {
# local: use this build if you're not modifying external resources
# no external resources allowed - they're not needed any more
'randomizax': {
'resourceUrlBase': None,
'distUrlBase': 'https://randomizax.github.io/polygon-label',
},
# local8000: if you need to modify external resources, this build will load them from
# the web server at http://0.0.0.0:8000/dist
# (This shouldn't be required any more - all resources are embedded, but it remains just in case some new feature
# needs external resources)
'local8000': {
'resourceUrlBase': 'http://0.0.0.0:8000/dist',
'distUrlBase': None,
},
# mobile: default entry that also builds the mobile .apk
# you will need to have the android-sdk installed, and the file mobile/local.properties created as required
'mobile': {
'resourceUrlBase': None,
'distUrlBase': None,
'buildMobile': 'debug',
},
# if you want to publish your own fork of the project, and host it on your own web site
# create a localbuildsettings.py file containing something similar to this
# note: Firefox+Greasemonkey require the distUrlBase to be "https" - they won't check for updates on regular "http" URLs
#'example': {
# 'resourceUrlBase': 'http://www.example.com/iitc/dist',
# 'distUrlBase': 'https://secure.example.com/iitc/dist',
#},
}
# defaultBuild - the name of the default build to use if none is specified on the build.py command line
# (in here as an example - it only works in localbuildsettings.py)
#defaultBuild = 'local'
| 1.882813 | 2 |
osaka/storage/sftp.py | riverma/osaka | 2 | 3514 | <filename>osaka/storage/sftp.py
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import int
from future import standard_library
standard_library.install_aliases()
import os
import os.path
import stat
import urllib.parse
import paramiko
import traceback
import osaka.utils
"""
A backend used to handle SFTP using paramiko
@author starchmd
"""
class SFTP(object):
"""
SFTP handling for Osaka
"""
def __init__(self, params={}):
"""
Constructor
"""
self.keyfile = params["keyfile"] if "keyfile" in params else None
    def connect(self, host=None, port=None, user=None, password=None, secure=False):
"""
Connect to this storage medium. All data is parsed out of the url and may be None
scheme:
@param host - may be None, host to connect to
implementor must handle defaulting
@param port - may be None, port to connect to
implementor must handle a None port
@param user - may be None, user to connect as
implementor must handle a None user
@param password - may be None, password to connect with
implementor must handle a None password
"""
self.client = paramiko.client.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.client.connect(
host,
port=22 if port is None else int(port),
username=user,
password=password,
key_filename=self.keyfile,
timeout=15,
)
self.sftp = self.client.open_sftp()
@classmethod
def getSchemes(clazz):
"""
Returns a list of schemes this handler handles
Note: handling the scheme of another handler produces unknown results
@returns list of handled schemes
"""
return ["sftp"]
def put(self, path, url):
"""
Put the given path to the given url
@param path - local path of file/folder to put
@param url - url to put file/folder to
"""
rpath = urllib.parse.urlparse(url).path.lstrip("/")
print("\n\n\n\nUploading:", path)
if not os.path.isdir(path):
print("As file")
try:
self.sftp.mkdir(os.path.dirname(rpath))
except IOError:
pass
dest = rpath
try:
if stat.S_ISDIR(self.sftp.stat(rpath).st_mode) != 0:
dest = os.path.join(rpath, os.path.basename(path))
except:
pass
return self.upload(path, dest)
print("As Dir")
try:
self.sftp.mkdir(rpath)
except IOError:
pass
for dirpath, dirname, filenames in os.walk(path):
extra = os.path.relpath(dirpath, os.path.dirname(path))
try:
self.sftp.mkdir(os.path.join(rpath, extra))
except IOError:
pass
for filename in filenames:
self.upload(
os.path.join(dirpath, filename),
os.path.join(rpath, extra, filename),
)
def upload(self, path, rpath):
"""
Uploads a file to remote path
@param path - path to upload
@param rpath - remote path to upload to
"""
self.sftp.put(path, rpath)
return True
def get(self, url, path):
"""
Get the url (file/folder) to local path
@param url - url to get file/folder from
@param path - path to place fetched files
"""
rpath = urllib.parse.urlparse(url).path
try:
self.sftp.get(rpath, path)
except Exception as e:
osaka.utils.LOGGER.warning(
"Encountered exception: {}\n{}".format(e, traceback.format_exc())
)
raise osaka.utils.OsakaFileNotFound("File {} doesn't exist.".format(url))
def rm(self, url):
"""
Remove the item
@param url - url to remove
"""
rpath = urllib.parse.urlparse(url).path
self.sftp.remove(rpath)
def close(self):
"""
Close this connection
"""
self.client.close()
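# Illustrative usage sketch. The host, paths and credentials below are hypothetical;
# connect()/put()/get()/close() are the methods defined above.
if __name__ == "__main__":
    backend = SFTP(params={"keyfile": None})
    backend.connect(host="sftp.example.com", port=22, user="demo", password=None)
    backend.put("/tmp/report.txt", "sftp://sftp.example.com/uploads/report.txt")
    backend.get("sftp://sftp.example.com/uploads/report.txt", "/tmp/report_copy.txt")
    backend.close()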
| 2.71875 | 3 |
thesis/pettingzoo/butterfly/cooperative_pong/cake_paddle.py | heavenlysf/thesis | 0 | 3515 | import os
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"
import pygame
RENDER_RATIO = 2
class CakePaddle(pygame.sprite.Sprite):
def __init__(self, speed=12):
# surf is the right-most (largest) tier of the cake
self.surf = pygame.Surface((30 // RENDER_RATIO, 120 // RENDER_RATIO))
self.rect = self.surf.get_rect()
self.surf2 = pygame.Surface((30 // RENDER_RATIO, 80 // RENDER_RATIO))
self.rect2 = self.surf2.get_rect()
self.surf3 = pygame.Surface((30 // RENDER_RATIO, 40 // RENDER_RATIO))
self.rect3 = self.surf3.get_rect()
self.surf4 = pygame.Surface((30 // RENDER_RATIO, 10 // RENDER_RATIO))
self.rect4 = self.surf4.get_rect()
self.speed = speed
def reset(self):
# self.rect is set from envs class
self.rect2.midright = self.rect.midleft
self.rect3.midright = self.rect2.midleft
self.rect4.midright = self.rect3.midleft
def draw(self, screen):
pygame.draw.rect(screen, (255, 255, 255), self.rect)
pygame.draw.rect(screen, (255, 255, 255), self.rect2)
pygame.draw.rect(screen, (255, 255, 255), self.rect3)
pygame.draw.rect(screen, (255, 255, 255), self.rect4)
def update(self, area, action):
# action: 1 - up, 2 - down
movepos = [0, 0]
if action == 1:
movepos[1] = movepos[1] - self.speed
elif action == 2:
movepos[1] = movepos[1] + self.speed
newpos = self.rect.move(movepos)
if area.contains(newpos):
self.rect = newpos
# move other rects too
self.rect2 = self.rect2.move(movepos)
self.rect3 = self.rect3.move(movepos)
self.rect4 = self.rect4.move(movepos)
def process_collision(self, b_rect, dx, dy, b_speed, paddle_type):
"""
Parameters
----------
b_rect : Ball rect
dx, dy : Ball speed along single axis
b_speed : Ball speed
ignore paddle type
Returns
-------
is_collision: 1 if ball collides with paddle
b_rect: new ball rect
b_speed: new ball speed
"""
if self.rect4.colliderect(b_rect):
is_collision = True
if dx > 0:
b_rect.right = self.rect4.left
b_speed[0] = -b_speed[0]
# top or bottom edge
elif dy > 0:
b_rect.bottom = self.rect4.top
b_speed[1] = -b_speed[1]
elif dy < 0:
b_rect.top = self.rect4.bottom
b_speed[1] = -b_speed[1]
return is_collision, b_rect, b_speed
elif self.rect3.colliderect(b_rect):
is_collision = True
if dx > 0:
b_rect.right = self.rect3.left
b_speed[0] = -b_speed[0]
# top or bottom edge
elif dy > 0:
b_rect.bottom = self.rect3.top
b_speed[1] = -b_speed[1]
elif dy < 0:
b_rect.top = self.rect3.bottom
b_speed[1] = -b_speed[1]
return is_collision, b_rect, b_speed
elif self.rect2.colliderect(b_rect):
is_collision = True
if dx > 0:
b_rect.right = self.rect2.left
b_speed[0] = -b_speed[0]
# top or bottom edge
elif dy > 0:
b_rect.bottom = self.rect2.top
b_speed[1] = -b_speed[1]
elif dy < 0:
b_rect.top = self.rect2.bottom
b_speed[1] = -b_speed[1]
return is_collision, b_rect, b_speed
elif self.rect.colliderect(b_rect):
is_collision = True
if dx > 0:
b_rect.right = self.rect.left
b_speed[0] = -b_speed[0]
# top or bottom edge
elif dy > 0:
b_rect.bottom = self.rect.top
b_speed[1] = -b_speed[1]
elif dy < 0:
b_rect.top = self.rect.bottom
b_speed[1] = -b_speed[1]
return is_collision, b_rect, b_speed
return False, b_rect, b_speed
| 3.015625 | 3 |
src/internal_representation_analysis/decoder/StateDataset.py | aidkilda/understanding-drl-navigation | 0 | 3516 | <filename>src/internal_representation_analysis/decoder/StateDataset.py
import random
from internal_representation_analysis.network import ActorCriticFFNetwork
from internal_representation_analysis.scene_loader import THORDiscreteEnvironment as Environment
from internal_representation_analysis.constants import MINI_BATCH_SIZE
class StateDataset(object):
def __init__(self, states):
self.all_states = states
self.train_set = None
self.validation_set = None
self.test_set = None
def __eq__(self, other):
return self.all_states == other.all_states
def split_datasets(self, seed, all_targets=False, test_target_eq_obs=False):
all_states = self.all_states[:]
random.seed(seed)
random.shuffle(all_states)
if test_target_eq_obs:
for s in all_states:
s.embedding = s.target_eq_obs
if not all_targets:
self.train_set = all_states[0:int(0.6 * len(all_states))]
self.validation_set = all_states[int(0.6 * len(all_states)):int(
0.8 * len(all_states))]
self.test_set = all_states[int(0.8 * len(all_states)):]
else:
unique_state_ids = list(set([s.state_id for s in all_states]))
random.shuffle(unique_state_ids)
train_ids = set(unique_state_ids[0:int(0.6 * len(unique_state_ids))])
val_ids = set(unique_state_ids[int(0.6 * len(unique_state_ids)):int(
0.8 * len(unique_state_ids))])
test_ids = set(unique_state_ids[int(0.8 * len(unique_state_ids)):])
self.train_set = [s for s in all_states if s.state_id in train_ids]
self.validation_set = [s for s in all_states if s.state_id in val_ids]
self.test_set = [s for s in all_states if s.state_id in test_ids]
def shuffle_train_set(self):
random.shuffle(self.train_set)
def get_train_mini_batch(self, start_index):
return self.train_set[start_index:start_index + MINI_BATCH_SIZE]
def filter_by_indexes(self, indexList):
self.all_states = [self.all_states[i] for i in indexList]
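# Illustrative usage sketch: the SimpleNamespace stand-ins below are hypothetical; real state
# objects come from the THORDiscreteEnvironment imported above, and MINI_BATCH_SIZE from constants.
if __name__ == "__main__":
    from types import SimpleNamespace
    toy_states = [SimpleNamespace(state_id=i, embedding=None, target_eq_obs=None) for i in range(100)]
    dataset = StateDataset(toy_states)
    dataset.split_datasets(seed=42)          # 60/20/20 train/validation/test split
    dataset.shuffle_train_set()
    batch = dataset.get_train_mini_batch(0)  # first MINI_BATCH_SIZE training states
    print(len(dataset.train_set), len(dataset.validation_set), len(dataset.test_set))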
| 2.265625 | 2 |
test_dataset_model.py | ferrine/PerceptualSimilarity | 0 | 3517 | <filename>test_dataset_model.py
import numpy as np
from models import dist_model as dm
from data import data_loader as dl
import argparse
from IPython import embed
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_mode", type=str, default="2afc", help="[2afc,jnd]")
parser.add_argument(
"--datasets",
type=str,
nargs="+",
default=[
"val/traditional",
"val/cnn",
"val/superres",
"val/deblur",
"val/color",
"val/frameinterp",
],
help="datasets to test - for jnd mode: [val/traditional],[val/cnn]; for 2afc mode: [train/traditional],[train/cnn],[train/mix],[val/traditional],[val/cnn],[val/color],[val/deblur],[val/frameinterp],[val/superres]",
)
parser.add_argument(
"--model",
type=str,
default="net-lin",
help="distance model type [net-lin] for linearly calibrated net, [net] for off-the-shelf network, [l2] for euclidean distance, [ssim] for Structured Similarity Image Metric",
)
parser.add_argument(
"--net",
type=str,
default="alex",
help="[squeeze], [alex], or [vgg] for network architectures",
)
parser.add_argument(
"--colorspace",
type=str,
default="Lab",
help="[Lab] or [RGB] for colorspace to use for l2, ssim model types",
)
parser.add_argument(
"--batch_size", type=int, default=50, help="batch size to test image patches in"
)
parser.add_argument("--use_gpu", action="store_true", help="turn on flag to use GPU")
parser.add_argument(
"--model_path",
type=str,
default=None,
help="location of model, will default to ./weights/v[version]/[net_name].pth",
)
parser.add_argument(
"--from_scratch", action="store_true", help="model was initialized from scratch"
)
parser.add_argument(
"--train_trunk", action="store_true", help="model trunk was trained/tuned"
)
parser.add_argument(
"--version",
type=str,
default="0.1",
help="v0.1 is latest, v0.0 was original release",
)
opt = parser.parse_args()
if opt.model in ["l2", "ssim"]:
opt.batch_size = 1
# initialize model
model = dm.DistModel()
# model.initialize(model=opt.model,net=opt.net,colorspace=opt.colorspace,model_path=opt.model_path,use_gpu=opt.use_gpu)
model.initialize(
model=opt.model,
net=opt.net,
colorspace=opt.colorspace,
model_path=opt.model_path,
use_gpu=opt.use_gpu,
pnet_rand=opt.from_scratch,
pnet_tune=opt.train_trunk,
version=opt.version,
)
if opt.model in ["net-lin", "net"]:
print("Testing model [%s]-[%s]" % (opt.model, opt.net))
elif opt.model in ["l2", "ssim"]:
print("Testing model [%s]-[%s]" % (opt.model, opt.colorspace))
# embed()
# initialize data loader
for dataset in opt.datasets:
data_loader = dl.CreateDataLoader(
dataset, dataset_mode=opt.dataset_mode, batch_size=opt.batch_size
)
# evaluate model on data
if opt.dataset_mode == "2afc":
(score, results_verbose) = dm.score_2afc_dataset(data_loader, model.forward)
elif opt.dataset_mode == "jnd":
(score, results_verbose) = dm.score_jnd_dataset(data_loader, model.forward)
# print results
print(" Dataset [%s]: %.2f" % (dataset, 100.0 * score))
| 2.234375 | 2 |
plotter.py | StrangeTcy/pathnet-pytorch | 86 | 3518 | import argparse
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--mnist', action='store_true', default=False,
help='open mnist result')
args = parser.parse_args()
def subplot(subplot, data_first, data_second, title):
plt.subplot(subplot)
if args.mnist:
x = np.arange(0,100)
else:
x = np.arange(0,500)
y_first = np.mean(data_first, axis=0)
y_second = np.mean(data_second, axis=0)
y_first_err = np.std(data_first, axis=0) / 2.
y_second_err = np.std(data_second, axis=0) / 2.
plt.fill_between(x, y_first - y_first_err, y_first + y_first_err, color='m', alpha=0.3)
plt.fill_between(x, y_second - y_second_err, y_second + y_second_err, color='c', alpha=0.3)
plt.plot(x, y_first, color='r', label='Task A')
plt.plot(x, y_second, color='g', label='Task B (transfer learning)')
plt.legend(bbox_to_anchor=(0.8, 0.3), loc=2, ncol=1, fontsize=15)
axes = plt.gca()
if args.mnist:
axes.set_xlim([0, 100])
axes.set_ylim([0, 1.2])
else:
axes.set_xlim([0, 500])
axes.set_ylim([0, 0.6])
plt.title(title, fontsize=20, y = 0.9)
plt.ylabel('Accuracy',fontsize=15)
plt.xlabel('Generations',fontsize=15)
plt.grid(True)
try:
if args.mnist:
        f = open('./result/result_mnist.pickle', 'rb')  # pickle files should be opened in binary mode
result = pickle.load(f)
f.close()
pathnet_first = []
pathnet_second = []
for res in result:
pathnet_first.append(res[2])
pathnet_second.append(res[3])
subplot('111', pathnet_first, pathnet_second,'MNIST')
plt.show()
else:
        f = open('./result/result_cifar_svhn.pickle', 'rb')  # pickle files should be opened in binary mode
result = pickle.load(f)
f.close()
cifar_first = []
cifar_second = []
svhn_first = []
svhn_second = []
for res in result:
if res[0] == 'pathnet_cifar_first':
cifar_first.append(res[2])
svhn_second.append(res[3])
else:
svhn_first.append(res[2])
cifar_second.append(res[3])
subplot('211', cifar_first, cifar_second,'CIFAR-10')
subplot('212', svhn_first, svhn_second,'cSVHN')
plt.show()
except IOError:
print("Result file does not exist")
| 2.671875 | 3 |
kindler/solver/optimizer.py | mingruimingrui/kindler | 0 | 3519 | <reponame>mingruimingrui/kindler
import torch
def make_sgd_optimizer(
model,
base_lr=0.001,
bias_lr_factor=2.0,
momentum=0.9,
weight_decay=0.0005,
weight_decay_bias=0.0,
):
params = []
for key, value in model.named_parameters():
if not value.requires_grad:
continue
param_lr = base_lr
param_weight_decay = weight_decay
if "bias" in key:
param_lr = base_lr * bias_lr_factor
param_weight_decay = weight_decay_bias
params.append({
'params': [value],
'lr': param_lr,
'weight_decay': param_weight_decay
})
optimizer = torch.optim.SGD(params, base_lr, momentum=momentum)
return optimizer
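# Minimal usage sketch (the toy linear model is hypothetical): it shows the per-parameter
# lr / weight-decay groups built above driving a single SGD step.
if __name__ == "__main__":
    model = torch.nn.Linear(10, 2)
    optimizer = make_sgd_optimizer(model, base_lr=0.01)
    inputs, targets = torch.randn(4, 10), torch.randn(4, 2)
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()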
| 2.46875 | 2 |
platypus/tests/test_operators.py | sctiwari/EZFF_ASE | 2 | 3520 | <gh_stars>1-10
# Copyright 2015-2018 <NAME>
#
# This file is part of Platypus, a Python module for designing and using
# evolutionary algorithms (EAs) and multiobjective evolutionary algorithms
# (MOEAs).
#
# Platypus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Platypus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Platypus. If not, see <http://www.gnu.org/licenses/>.
import unittest
from mock import patch
from ..core import Problem, Solution
from ..types import Permutation
from ..operators import Swap
class TestSwap(unittest.TestCase):
def test_swap10(self):
problem = Problem(1, 0)
problem.types[0] = Permutation(range(10))
solution = Solution(problem)
solution.variables[0] = list(range(10))
with patch('random.randrange', side_effect=[2, 4]):
result = Swap(1.0).mutate(solution)
self.assertEqual(result.variables[0][2], 4)
self.assertEqual(result.variables[0][4], 2)
self.assertEqual(solution.variables[0][2], 2)
self.assertEqual(solution.variables[0][4], 4)
def test_swap2a(self):
problem = Problem(1, 0)
problem.types[0] = Permutation(range(2))
solution = Solution(problem)
solution.variables[0] = list(range(2))
with patch('random.randrange', side_effect=[0, 1]):
result = Swap(1.0).mutate(solution)
self.assertEqual(result.variables[0][0], 1)
self.assertEqual(result.variables[0][1], 0)
def test_swap2b(self):
problem = Problem(1, 0)
problem.types[0] = Permutation(range(2))
solution = Solution(problem)
solution.variables[0] = list(range(2))
with patch('random.randrange', side_effect=[1, 1, 0]):
result = Swap(1.0).mutate(solution)
self.assertEqual(result.variables[0][0], 1)
self.assertEqual(result.variables[0][1], 0)
def test_swap1(self):
problem = Problem(1, 0)
problem.types[0] = Permutation(range(1))
solution = Solution(problem)
solution.variables[0] = list(range(1))
with patch('random.randrange', side_effect=[0, 0]):
result = Swap(1.0).mutate(solution)
self.assertEqual(result.variables[0][0], 0) | 2.796875 | 3 |
fusion_net/bilinear_sampler.py | ClovisChen/LearningCNN | 0 | 3521 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import tensorflow as tf
def bilinear_sampler_1d_h(input_images, x_offset, wrap_mode='border', name='bilinear_sampler', **kwargs):
    '''
    1-D bilinear sampling along x: x_offset is a map of horizontal offsets for the input.
    The _repeat helper adds a trailing dimension to a 1-D tensor, copies the values along that
    dimension, then flattens back to 1-D, e.g. [1,2,3] --> [1,1,2,2,3,3].
    '''
def _repeat(x, n_repeats):
with tf.variable_scope('_repeat'):
rep = tf.tile(tf.expand_dims(x, 1), [1, n_repeats])
return tf.reshape(rep, [-1])
    def _interpolate(im, x, y):  # interpolation helper
with tf.variable_scope('_interpolate'):
# handle both texture border types
_edge_size = 0
            # if the wrap mode is 'border', the edge size is 1: pad one row/column of zeros on each side of the h and w dimensions
if _wrap_mode == 'border':
_edge_size = 1
im = tf.pad(im, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='CONSTANT')
x = x + _edge_size
y = y + _edge_size
elif _wrap_mode == 'edge':
_edge_size = 0
else:
return None
            # clip the offset x to [0, width - 1 + 2*edge_size] (it must stay within the padded width)
x = tf.clip_by_value(x, 0.0, _width_f - 1 + 2 * _edge_size)
            # floor x and y, then add 1 to the floored x to get its ceiling
x0_f = tf.floor(x)
y0_f = tf.floor(y)
x1_f = x0_f + 1
            # cast the floored x, y to integers; the ceiled x may not exceed the padded width minus 1
            # cast: type conversion
x0 = tf.cast(x0_f, tf.int32)
y0 = tf.cast(y0_f, tf.int32)
x1 = tf.cast(tf.minimum(x1_f, _width_f - 1 + 2 * _edge_size), tf.int32)
            # the second dimension (width) is the padded width
dim2 = (_width + 2 * _edge_size)
            # the first dimension (one whole image) spans the padded resolution
dim1 = (_width + 2 * _edge_size) * (_height + 2 * _edge_size)
            # compute the base index for each offset: start with [0, 1, 2, ..., batch-1], multiply by dim1 to get
            # [0, dim1, 2*dim1, ..., (batch-1)*dim1], then repeat each entry height*width times to get
            # [0, 0, ..., 0, dim1, dim1, ..., dim1, 2*dim1, 2*dim1, ..., 2*dim1, ..., (batch-1)*dim1, ..., (batch-1)*dim1]
            # this is the per-image base offset for a batch of images
base = _repeat(tf.range(_num_batch) * dim1, _height * _width)
            # multiply the y offset by dim2 (the padded width) to fold the row index into the base
            # y0 is [0, 0, ..., 0, 1, 1, ..., 1, ..., h + 2*e, h + 2*e, ..., h + 2*e]
            # after multiplying by dim2 it becomes
            # [0, 0, ..., 0, w+2*e, w+2*e, ..., w+2*e, ..., (h + 2*e) * (w + 2*e), ..., (h + 2*e) * (w + 2*e)]
            # adding base gives an index that accounts for both the batch and the row
base_y0 = base + y0 * dim2
            # adding the floored and ceiled x indices to this gives the left and right neighbours of the current point
idx_l = base_y0 + x0
idx_r = base_y0 + x1
            # reshape the image to [batch * h * w, channels]
im_flat = tf.reshape(im, tf.stack([-1, _num_channels]))
            # use tf.gather with the left/right indices to pick out the left and right pixels
pix_l = tf.gather(im_flat, idx_l)
pix_r = tf.gather(im_flat, idx_r)
            # compute the bilinear interpolation weights x1 - x and x - x0
weight_l = tf.expand_dims(x1_f - x, 1)
weight_r = tf.expand_dims(x - x0_f, 1)
            # interpolate the pixel value from the two neighbours
return weight_l * pix_l + weight_r * pix_r
    # after get_disp produces the disparity map, this interpolation is called to warp the image.
def _transform(input_images, x_offset):
'''
        The transform first calls meshgrid to build index grids for the X and Y axes.
        Examples:
        Assume _width = 3; linspace(0.0, _width_f - 1.0, _width) gives [0., 1., 2.]. Height works the same way.
>>> x = tf.linspace(0.0, 2.0, 3)
>>> sess.run(x)
array([0., 1., 2. ], dtype = float32)
>>> x = tf.linspace(0.0, 2.0, 3)
>>> y = tf.linspace(0.0, 4.0, 5)
>>> x_t, y_t = tf.meshgrid(x, y)
>>> sess.run(x_t)
array([0., 1., 2.],
[0., 1., 2.],
[0., 1., 2.],
[0., 1., 2.],
[0., 1., 2.]], dtype=float32)
>>> sess.run(y_t)
array([0., 0., 0.],
[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.],
[4., 4., 4.]], dtype=float32)
>>> x_t_flat = tf.reshape(x_t, (1, -1))
>>> y_t_flat = tf.reshape(y_t, (1, -1))
>>> sess.run(x_t_flat)
array([[0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2.]], dtype=float32)
>>> sess.run(y_t_flat)
array([[0., 0., 0., 1., 1., 1., 2., 2., 2., 3., 3., 3., 4., 4., 4.]], dtype=float32)
>>> x_t_flat = tf.tile(x_t_flat, tf.stack([2,1]))
>>> sess.run(x_t_flat)
        array([[0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2.], [0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2.]], dtype=float32)
>>> x_t_flat = tf.reshape(x_t_flat, (1, -1))
>>> sess.run(x_t_flat)
array([[0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2., 0., 1., 2.]], dtype=float32)
'''
with tf.variable_scope('transform'):
# grid of (x_t, y_t, 1), eq (1) in ref [1]
x_t, y_t = tf.meshgrid(tf.linspace(0.0, _width_f - 1.0, _width),
tf.linspace(0.0 , _height_f - 1.0 , _height))
x_t_flat = tf.reshape(x_t, (1, -1))
y_t_flat = tf.reshape(y_t, (1, -1))
x_t_flat = tf.tile(x_t_flat, tf.stack([_num_batch, 1]))
y_t_flat = tf.tile(y_t_flat, tf.stack([_num_batch, 1]))
x_t_flat = tf.reshape(x_t_flat, [-1])
y_t_flat = tf.reshape(y_t_flat, [-1])
x_t_flat = x_t_flat + tf.reshape(x_offset, [-1]) * _width_f
input_transformed = _interpolate(input_images, x_t_flat, y_t_flat)
output = tf.reshape(
input_transformed, tf.stack([_num_batch, _height, _width, _num_channels]))
return output
with tf.variable_scope(name):
'''
[num_batch, height, width, num_channels]
'''
_num_batch = tf.shape(input_images)[0]
_height = tf.shape(input_images)[1]
_width = tf.shape(input_images)[2]
_num_channels = tf.shape(input_images)[3]
_height_f = tf.cast(_height, tf.float32)
_width_f = tf.cast(_width, tf.float32)
_wrap_mode = wrap_mode
output = _transform(input_images, x_offset)
return output
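# Illustrative usage sketch (assumes TensorFlow 1.x, matching the tf.variable_scope style above):
# warp a random image to the left by a constant disparity of 10 pixels. Note that x_offset is
# expressed as a fraction of the image width, since it is multiplied by _width_f internally.
if __name__ == '__main__':
    import numpy as np
    images = tf.placeholder(tf.float32, [None, None, None, 3])
    disparity = tf.placeholder(tf.float32, [None, None, None, 1])
    warped = bilinear_sampler_1d_h(images, disparity)
    with tf.Session() as sess:
        img = np.random.rand(1, 8, 16, 3).astype(np.float32)
        disp = np.full((1, 8, 16, 1), 10.0 / 16.0, dtype=np.float32)
        print(sess.run(warped, {images: img, disparity: disp}).shape)  # (1, 8, 16, 3)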
| 1.96875 | 2 |
Supernovae.py | adamamiller/iptf16hvw-1 | 0 | 3522 | <filename>Supernovae.py
#import relevant libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from astropy.io import ascii
import json
from IPython.display import display, Image
from specutils import Spectrum1D
from astropy import units
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
import scipy.integrate as integrate
from astropy.time import Time
from Supernovae import *
#speed of light (km/s)
c = 3e5
#Define class to hold relevant information for spectra data
class Spectra:
#Initialization function
def __init__(self, Spectra, epoch, z , MJD_max):
'''
Spectra (string) - path to JSON formatted spectra file
epoch (float) - MJD date
z (float) - redshift of corresponding SN
MJD_max (float) - date of B band maximum brightness for SN in MJD
'''
#correct flux for redshift, change wavelength to SN restframe, Normalize flux and store in Spectra
self.data= Unpack_Spectra(Spectra, z)
        #store epoch of observation
self.epoch = float(epoch)
#store phase of observation
self.phase = float(epoch) - float(MJD_max)
class Lightcurve():
def __init__(self, times, fluxes, error, band):
self.band = band
self.data = pd.DataFrame(list(zip(times, fluxes, error)), columns = ['times', 'flux', 'err'])
#Create Supernovae class to store Spectral objects
class Supernovae(object):
#Initialization function
def __init__(self, name, redshift, maximum):
'''
name (str) - String of SN name
redshift (float) - redshift of SN
maximum (float) - date of B band maximum in MJD
'''
#Store name of SN
self.name = name
#Store redshift of SN
self.redshift = redshift
#Store date of B band maximum brightness
self.maximum = maximum
#initiate empty list to hold Spectra objects
self.spectra = []
self.lightcurves = []
#define function to return spectra closest to given phase
def find_spectra(self, phase1):
'''
Args:
phase1 (float )- phase of interest
Returns:
Spectra object - Spectra object with phase closest to phase1
'''
index = np.argmin([ abs(x.phase - phase1) for x in self.spectra])
return self.spectra[index]
#define function to store new spectra
def store_spectra(self, spectra_object):
'''
Args:
spectra_object (Spectra) - Spectra object to store
'''
#Make sure there are no duplicates and that spectra are sorted by date
if spectra_object in self.spectra:
self.spectra.sort(key= lambda x: x.phase)
print('already exists')
elif spectra_object.epoch in [x.epoch for x in self.spectra]:
self.spectra.sort(key= lambda x: x.phase)
pass
else:
self.spectra.append(spectra_object)
self.spectra.sort(key= lambda x: x.phase)
#define function to store lightcurve
def store_lightcurve(self, lightcurve_object):
if lightcurve_object in self.lightcurves:
print('already exists')
else:
self.lightcurves.append(lightcurve_object)
#define function that converts wavelengths to restframe, corrects flux for redshift, and normalizes flux
def Unpack_Spectra(Spectra, z, normalization = [5000,6000]):
'''
Args:
Spectra - one epoch of spectral data in JSON format from OSN
z (float) - redshift of SN
        normalization (list) - 2 item list containing boundaries of region used for normalization
Returns:
Pandas DataFrame - 2 column dataframe: wavelength and flux
Flux is corrected for redshift and normalized
Wavelength is converted to SN restframe
'''
#Extract Wavelengths
wavelengths = [float(x[0]) for x in Spectra]
#Extract Fluxes
fluxes = [float(x[1]) for x in Spectra]
#correct fluxes for redshift
fluxes = [correct_flux(flux, z) for flux in fluxes]
#Extract fluxes in normalization range
rel_flux_range = [x for x in Spectra if (float(x[0])>normalization[0]) & (float(x[0])<normalization[1])]
    #Make sure rel_flux_range isn't empty
if len(rel_flux_range) == 0:
#print('No wavelengths in normalization region, not including spectra')
return None
#Calculate average flux in this range
flux_sum = 0
for x in rel_flux_range:
flux_sum += float(x[1])
average_flux = flux_sum / float(len(rel_flux_range))
#Normalize flux
fluxes = [float(flux) / average_flux for flux in fluxes]
#convert wavelength to restframe
wavelengths = [wavelength / float(1 + z) for wavelength in wavelengths]
#store in pandas dataframe
df = pd.DataFrame()
df['Flux'] = fluxes
df['Wavelength'] = wavelengths
return df
def correct_flux(flux_obs, z):
'''
Args:
flux_obs (int) - observed flux
z (int) - redshift
Returns:
int - redshift corrected flux
'''
flux_emit = (z * flux_obs) + flux_obs
return flux_emit
#Define function to get relevant spectra from OSN JSON data file
def create_SN_object(JSON, MJD_max, z):
'''
Function to create Supernovae object for given JSON data file from OSN
Args:
        JSON (str) - path to OSN JSON file of interest
        MJD_max (float) - date of B band maximum brightness in MJD
        z (float) - redshift of the SN
Returns:
Supernovae - Supernovae object with spectra list filled
'''
supernovae = Supernovae(str(JSON[0:-5]), z, MJD_max)
#Load OSN json data
file = open('../Data/OSN_data/' + str(JSON))
json_data = json.load(file)
spectra_data = json_data[JSON[0:-5]]['spectra']
spectra_data = np.array(spectra_data)
for i in range(len(spectra_data)):
spectra = Spectra(spectra_data[i]['data'], float(spectra_data[i]['time']) / (1+z), z, MJD_max)
if spectra.data is None:
continue
else:
supernovae.store_spectra(spectra)
return supernovae
#Define function to convert calendar date to MJD
def convert_date_toMJD(date):
'''
Args:
date (str) - string of calendar date (e.g. '2002-8-17')
Returns:
float - MJD value of given calendar date
'''
t = Time(date)
t.format = 'mjd'
return t.value
#Define function to calculate absorption velocities
def calc_abs_velc(restframe, dopplershifted):
'''
Args:
restframe (float) - restframe wavelength of absorption
dopplershifted (float) - dopplershifted wavelength of absorption
Returns:
float - corresponding absorption velocity
'''
    velocity = ((restframe - dopplershifted) / float(restframe)) * c
return velocity
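#Worked example: a feature with rest wavelength 6355 Angstroms (the Si II line identification is illustrative)
#observed blueshifted to 6100 Angstroms corresponds to an absorption velocity of roughly 12,000 km/s
if __name__ == '__main__':
    print(calc_abs_velc(6355.0, 6100.0)) # ~12037.8 km/s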
| 2.765625 | 3 |
userbot/plugins/alive.py | iraqis1/irqis | 0 | 3523 | """Check if userbot alive. If you change these, you become the gayest gay such that even the gay world will disown you."""
import asyncio
from telethon import events
from telethon.tl.types import ChannelParticipantsAdmins
from platform import uname
from userbot import ALIVE_NAME
from userbot.utils import admin_cmd
DEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else "No name set yet nibba, check pinned in @XtraTgBot"
@command(outgoing=True, pattern="^.lk$")
async def amireallyalive(alive):
""" For .alive command, check if the bot is running. """
await alive.edit("اهلا بك في سورس التليثيون العراقي\n"
"➖➖➖➖➖➖➖➖➖\n"
"استخدم امر .alive اذا اعتقدت ان البوت توقف!\n"
"➖➖➖➖➖➖➖➖➖\n"
"اشترك في قناة السورس لانها تطرح ملفات وشروحات مفيده\n"
"➖➖➖➖➖➖➖➖➖\n"
"يمكنك مراسلتنا لاي خلل حاصل\n"
"➖➖➖➖➖➖➖➖➖\n"
"لتنصيب السورس راسلني احد مطورين السورس\n"
"➖➖➖➖➖➖➖➖➖\n"
"مطورين السورس : \n"
"➖➖➖➖➖➖➖➖➖\n"
"احمد || @HHMHHH \n"
"➖➖➖➖➖➖➖➖➖\n"
"حسن || @VHHHHH \n"
"➖➖➖➖➖➖➖➖➖\n"
"حارث || @cCcYo \n"
"➖➖➖➖➖➖➖➖➖\n"
"قناة السورس الرسميه : @cqccqq\n"
"➖➖➖➖➖➖➖➖➖\n"
"اوامر السورس هي :\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.ytv` + رابط فيديو من اي موقع للتحميل\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.purge` تحذف رسائل بالرد\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.song` + اسم اغنيه \n"
"➖➖➖➖➖➖➖➖➖\n"
"`.spam`+ كلمه + عدد \n"
"➖➖➖➖➖➖➖➖➖\n"
"`.smoon` لعرض ٤ اسطر اقمار \n"
"➖➖➖➖➖➖➖➖➖\n"
"`.moon` لعرض سطر واحد اقمار \n"
"➖➖➖➖➖➖➖➖➖\n"
"`.solarsystem` كواكب تتحرك \n"
"➖➖➖➖➖➖➖➖➖\n"
"`.snake` افعى تتحرك\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.clock` ساعات سطر واحد \n"
"➖➖➖➖➖➖➖➖➖\n"
"`.gmute` كتم\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.ungmute` الغاء كتم \n"
"➖➖➖➖➖➖➖➖➖\n"
"`.kick` طرد \n"
"➖➖➖➖➖➖➖➖➖\n"
"`.ban` حظر \n"
"➖➖➖➖➖➖➖➖➖\n"
"`.tss` + رمز اللغه\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.rnupload` رد ع الملف وسم ملف لتغير اسم الملف\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.send` + اسم النلف يدز الملف\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.unload` + اسم الملف للغاء التثببت\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.scha` يطلع يكتب حتى لو مدز رساله\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.get_bot` معرفه عدد البوتات الموجوده\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.iffuci` كتابه كود الملف\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.savefilter` اضف رد\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.clearfilter` حذف رد \n"
"➖➖➖➖➖➖➖➖➖\n"
"`.purge` حذف كل الرسائل\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.figlet` كتابه نصوص شخوط\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.savewelcome` + ترحيب لوضع ترحيب\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.clearwelcome` لحذف الترحيب \n"
"➖➖➖➖➖➖➖➖➖\n"
"`.whois` + ايدي شخص\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.fuk` فاكيو\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.get_id` ايدي اي شخص دزه بمحادثته\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.admins` المشرفين الي بالكروب\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.pin` تثبيت رساله بالكروب\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.mmf` اسم انكلش رد ع الصوره\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.bye` مغادره من المجموعه\n"
"➖➖➖➖➖➖➖➖➖\n"
"`.decide` يدز صورتين متحركات\n"
"➖➖➖➖➖➖➖➖➖\n"
"يوجد الكثير من الاوامر لكن ثبتنا الاساسيات\n") | 2.640625 | 3 |
connector/ADBConnector.py | qiutongxue/ArknightsAutoHelper | 1 | 3524 | import os
import logging.config
from random import randint
import zlib
import struct
import socket
import time
from PIL import Image
import config
# from config import ADB_ROOT, ADB_HOST, SCREEN_SHOOT_SAVE_PATH, ShellColor, CONFIG_PATH,enable_adb_host_auto_detect, ADB_SERVER
from .ADBClientSession import ADBClientSession
from util.socketutil import recvall
from . import revconn
# from numpy import average, dot, linalg
logger = logging.getLogger(__name__)
def _screencap_to_image(cap):
w, h, pixels = cap
return Image.frombytes('RGBA', (w, h), pixels)
def _ensure_pil_image(imgorfile):
if isinstance(imgorfile, Image.Image):
return imgorfile
return Image.open(imgorfile)
def check_adb_alive():
try:
sess = ADBClientSession(config.ADB_SERVER)
version = int(sess.service('host:version').read_response().decode(), 16)
logger.debug('ADB server version %d', version)
return True
except ConnectionRefusedError:
return False
except RuntimeError:
return False
def ensure_adb_alive():
if check_adb_alive():
return
logger.info('尝试启动 adb server')
import subprocess
adbbin = config.get('device/adb_binary', None)
if adbbin is None:
adb_binaries = ['adb', os.path.join(config.ADB_ROOT, 'adb')]
else:
adb_binaries = [adbbin]
for adbbin in adb_binaries:
try:
logger.debug('trying %r', adbbin)
subprocess.run([adbbin, 'start-server'], check=True)
return True
except FileNotFoundError:
pass
except subprocess.CalledProcessError:
pass
raise OSError("can't start adb server")
class ADBConnector:
def __init__(self, adb_serial=None):
# os.chdir(ADB_ROOT)
self.ADB_ROOT = config.ADB_ROOT
self.adb_serial = adb_serial
self.host_session_factory = lambda: ADBClientSession(config.ADB_SERVER)
self.rch = None
if self.adb_serial is None:
self.adb_serial = self.__adb_device_name_detector()
self.device_session_factory = lambda: self.host_session_factory().device(self.adb_serial)
self.cache_screenshot = config.get('device/cache_screenshot', True)
self.last_screenshot_timestamp = 0
self.last_screenshot_duration = 0
self.last_screenshot = None
if config.get('device/try_emulator_enhanced_mode', True):
loopbacks = self._detect_loopbacks()
if len(loopbacks):
logger.debug('possible loopback addresses: %s', repr(loopbacks))
self.rch = revconn.ReverseConnectionHost()
self.rch.start()
if self._test_reverse_connection(loopbacks):
logger.info('正在使用模拟器优化模式')
self.screencap = self._reverse_connection_screencap
else:
self.rch.stop()
else:
self.loopback = None
def __del__(self):
if self.rch and self.rch.is_alive():
self.rch.stop()
def __adb_device_name_detector(self):
devices = [x for x in self.host_session_factory().devices() if x[1] != 'offline']
if len(devices) == 0:
auto_connect = config.get('device/adb_auto_connect', None)
if auto_connect is not None:
logger.info('没有已连接设备,尝试连接 %s', auto_connect)
try:
self.host_session_factory().disconnect(auto_connect)
except:
pass
self.host_session_factory().connect(auto_connect)
else:
raise RuntimeError('找不到可用设备')
devices = [x for x in self.host_session_factory().devices() if x[1] != 'offline']
always_use_device = config.get('device/adb_always_use_device', None)
if always_use_device is not None:
if always_use_device not in (x[0] for x in devices):
raise RuntimeError('设备 %s 未连接' % always_use_device)
return always_use_device
if len(devices) == 1:
device_name = devices[0][0]
elif len(devices) > 1:
logger.info("检测到多台设备")
num = 0
while True:
try:
num = int(input("请输入序号选择设备: "))
if not 0 <= num < len(devices):
raise ValueError()
break
except ValueError:
logger.error("输入不合法,请重新输入")
device_name = devices[num][0]
else:
raise RuntimeError('找不到可用设备')
logger.info("确认设备名称:" + device_name)
return device_name
def run_device_cmd(self, cmd, DEBUG_LEVEL=2):
output = self.device_session_factory().exec(cmd)
logger.debug("command: %s", cmd)
logger.debug("output: %s", repr(output))
return output
def get_sub_screen(self, image, screen_range):
return image.crop(
(
screen_range[0][0],
screen_range[0][1],
screen_range[0][0] + screen_range[1][0],
screen_range[0][1] + screen_range[1][1]
)
)
def _detect_loopbacks(self):
board = self.device_session_factory().exec('getprop ro.product.board')
if b'goldfish' in board:
return ['10.0.2.2']
modules = self.device_session_factory().exec('grep -o vboxguest /proc/modules')
if b'vboxguest' in modules:
arp = self.device_session_factory().exec('cat /proc/net/arp')
return [x[:x.find(b' ')].decode() for x in arp.splitlines()[1:]]
return []
def _test_reverse_connection(self, loopbacks):
for addr in loopbacks:
logger.debug('testing loopback address %s', addr)
future = self.rch.register_cookie()
with future:
cmd = 'echo -n %sOKAY | nc -w 1 %s %d' % (future.cookie.decode(), addr, self.rch.port)
logger.debug(cmd)
control_sock = self.device_session_factory().exec_stream(cmd)
with control_sock:
conn = future.get(2)
if conn is not None:
data = recvall(conn)
conn.close()
if data == b'OKAY':
self.loopback = addr
logger.debug('found loopback address %s', addr)
return True
return False
def screencap_png(self):
"""returns PNG bytes"""
s = self.device_session_factory().exec_stream('screencap -p')
data = recvall(s, 4194304)
return data
def screencap(self):
"""returns (width, height, pixels)
pixels in RGBA/RGBX format"""
s = self.device_session_factory().exec_stream('screencap|gzip -1')
data = recvall(s, 4194304)
s.close()
data = zlib.decompress(data, zlib.MAX_WBITS | 16, 8388608)
w, h, f = struct.unpack_from('III', data, 0)
assert (f == 1)
return (w, h, data[12:])
def _reverse_connection_screencap(self):
"""returns (width, height, pixels)
pixels in RGBA/RGBX format"""
future = self.rch.register_cookie()
with future:
control_sock = self.device_session_factory().exec_stream('(echo -n %s; screencap) | nc %s %d' % (future.cookie.decode(), self.loopback, self.rch.port))
with control_sock:
with future.get() as conn:
data = recvall(conn, 8388608, True)
w, h, f = struct.unpack_from('III', data, 0)
assert (f == 1)
return (w, h, data[12:].tobytes())
def screenshot(self, cached=True):
t0 = time.monotonic()
if cached and self.cache_screenshot:
if self.last_screenshot is not None and t0 - self.last_screenshot_timestamp < self.last_screenshot_duration:
return self.last_screenshot
rawcap = self.screencap()
img = _screencap_to_image(rawcap)
t1 = time.monotonic()
self.last_screenshot_timestamp = t1
self.last_screenshot_duration = t1 - t0
self.last_screenshot = img
return img
def touch_swipe2(self, origin, movement, duration=None):
# sleep(1)
x1, y1, x2, y2 = origin[0], origin[1], origin[0] + movement[0], origin[1] + movement[1]
logger.debug("滑动初始坐标:({},{}); 移动距离dX:{}, dy:{}".format(*origin, *movement))
command = "input swipe {} {} {} {} ".format(x1, y1, x2, y2)
if duration is not None:
command += str(int(duration))
self.run_device_cmd(command)
def touch_tap(self, XY=None, offsets=None):
# sleep(10)
# sleep(0.5)
if offsets is not None:
final_X = XY[0] + randint(-offsets[0], offsets[0])
final_Y = XY[1] + randint(-offsets[1], offsets[1])
else:
final_X = XY[0] + randint(-1, 1)
final_Y = XY[1] + randint(-1, 1)
        # if you run into problems, you can share this output together with the log in the group chat.
logger.debug("点击坐标:({},{})".format(final_X, final_Y))
command = "input tap {} {}".format(final_X,
final_Y)
self.run_device_cmd(command)
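# Illustrative usage sketch (hypothetical): assumes the adb server and this project's config
# (ADB_ROOT / ADB_SERVER) are already set up and a device or emulator is connected.
if __name__ == '__main__':
    ensure_adb_alive()
    connector = ADBConnector()            # or ADBConnector(adb_serial='127.0.0.1:5555')
    screenshot = connector.screenshot()   # PIL.Image of the current screen
    screenshot.save('screen.png')
    connector.touch_tap((100, 200), offsets=(5, 5))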
| 2.140625 | 2 |
redshift_upload/base_utilities.py | douglassimonsen/redshift_upload | 0 | 3525 | import inspect
import os
from pathlib import Path
class change_directory:
"""
A class for changing the working directory using a "with" statement.
It takes the directory to change to as an argument. If no directory is given,
it takes the directory of the file from which this function was called.
"""
def __init__(self, directory: str = None) -> None:
self.old_dir = os.getcwd()
if directory is None:
self.new_dir = Path(inspect.getabsfile(inspect.stack()[1][0])).parent # type: ignore
else:
self.new_dir = directory
def __enter__(self, *_) -> None:
os.chdir(self.new_dir)
def __exit__(self, *_) -> None:
os.chdir(self.old_dir)
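# Usage sketch: temporarily switch into the system temp directory, then restore the old one.
if __name__ == "__main__":
    import tempfile
    print("before:", os.getcwd())
    with change_directory(tempfile.gettempdir()):
        print("inside:", os.getcwd())
    print("after:", os.getcwd())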
| 3.6875 | 4 |
main.py | Gloriel621/MgallManager | 9 | 3526 | import sys
from PyQt5.QtWidgets import QApplication
from gui import MgallManager
def main():
app = QApplication(sys.argv)
ex = MgallManager()
app.aboutToQuit.connect(ex.ExitHandler)
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| 2.390625 | 2 |
utils/src/adventofcode/utils/Point3D.py | dh256/adventofcode | 0 | 3527 | <gh_stars>0
class Point3D:
def __init__(self,x,y,z):
self.x = x
self.y = y
self.z = z
'''
    Returns the Manhattan (L1) distance between two 3D points
'''
def distance(self, value):
return abs(self.x - value.x) + abs(self.y - value.y) + abs(self.z - value.z)
def __eq__(self, value):
return self.x == value.x and self.y == value.y and self.z == value.z
def __hash__(self):
return hash((self.x,self.y,self.z))
def __repr__(self):
return f'({self.x},{self.y},{self.z})'
def __add__(self,value):
return Point3D(self.x + value.x, self.y + value.y, self.z + value.z) | 3.5 | 4 |
pysc2/lib/actions.py | javierrcc522/starcraft2_api_machineLear | 2 | 3528 | <filename>pysc2/lib/actions.py
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the static list of types and actions for SC2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numbers
import six
from pysc2.lib import point
from s2clientprotocol import spatial_pb2 as sc_spatial
from s2clientprotocol import ui_pb2 as sc_ui
def no_op(action):
del action
def move_camera(action, minimap):
"""Move the camera."""
minimap.assign_to(action.action_feature_layer.camera_move.center_minimap)
def select_point(action, select_point_act, screen):
"""Select a unit at a point."""
select = action.action_feature_layer.unit_selection_point
screen.assign_to(select.selection_screen_coord)
select.type = select_point_act
def select_rect(action, select_add, screen, screen2):
"""Select units within a rectangle."""
select = action.action_feature_layer.unit_selection_rect
out_rect = select.selection_screen_coord.add()
screen_rect = point.Rect(screen, screen2)
screen_rect.tl.assign_to(out_rect.p0)
screen_rect.br.assign_to(out_rect.p1)
select.selection_add = bool(select_add)
def select_idle_worker(action, select_worker):
"""Select an idle worker."""
action.action_ui.select_idle_worker.type = select_worker
def select_army(action, select_add):
"""Select the entire army."""
action.action_ui.select_army.selection_add = select_add
def select_warp_gates(action, select_add):
"""Select all warp gates."""
action.action_ui.select_warp_gates.selection_add = select_add
def select_larva(action):
"""Select all larva."""
action.action_ui.select_larva.SetInParent() # Adds the empty proto field.
def select_unit(action, select_unit_act, select_unit_id):
"""Select a specific unit from the multi-unit selection."""
select = action.action_ui.multi_panel
select.type = select_unit_act
select.unit_index = select_unit_id
def control_group(action, control_group_act, control_group_id):
"""Act on a control group, selecting, setting, etc."""
select = action.action_ui.control_group
select.action = control_group_act
select.control_group_index = control_group_id
def unload(action, unload_id):
"""Unload a unit from a transport/bunker/nydus/etc."""
action.action_ui.cargo_panel.unit_index = unload_id
def build_queue(action, build_queue_id):
"""Cancel a unit in the build queue."""
action.action_ui.production_panel.unit_index = build_queue_id
def cmd_quick(action, ability_id, queued):
"""Do a quick command like 'Stop' or 'Stim'."""
action_cmd = action.action_feature_layer.unit_command
action_cmd.ability_id = ability_id
action_cmd.queue_command = queued
def cmd_screen(action, ability_id, queued, screen):
"""Do a command that needs a point on the screen."""
action_cmd = action.action_feature_layer.unit_command
action_cmd.ability_id = ability_id
action_cmd.queue_command = queued
screen.assign_to(action_cmd.target_screen_coord)
def cmd_minimap(action, ability_id, queued, minimap):
"""Do a command that needs a point on the minimap."""
action_cmd = action.action_feature_layer.unit_command
action_cmd.ability_id = ability_id
action_cmd.queue_command = queued
minimap.assign_to(action_cmd.target_minimap_coord)
def autocast(action, ability_id):
"""Toggle autocast."""
action.action_ui.toggle_autocast.ability_id = ability_id
class ArgumentType(collections.namedtuple(
"ArgumentType", ["id", "name", "sizes", "fn"])):
"""Represents a single argument type.
Attributes:
id: The argument id. This is unique.
name: The name of the argument, also unique.
sizes: The max+1 of each of the dimensions this argument takes.
fn: The function to convert the list of integers into something more
meaningful to be set in the protos to send to the game.
"""
__slots__ = ()
def __str__(self):
return "%s/%s %s" % (self.id, self.name, list(self.sizes))
@classmethod
def enum(cls, options):
"""Create an ArgumentType where you choose one of a set of known values."""
return cls(-1, "<none>", (len(options),), lambda a: options[a[0]])
@classmethod
def scalar(cls, value):
"""Create an ArgumentType with a single scalar in range(value)."""
return cls(-1, "<none>", (value,), lambda a: a[0])
@classmethod
def point(cls): # No range because it's unknown at this time.
"""Create an ArgumentType that is represented by a point.Point."""
return cls(-1, "<none>", (0, 0), lambda a: point.Point(*a).floor())
@classmethod
def spec(cls, id_, name, sizes):
"""Create an ArgumentType to be used in ValidActions."""
return cls(id_, name, sizes, None)
class Arguments(collections.namedtuple("Arguments", [
"screen", "minimap", "screen2", "queued", "control_group_act",
"control_group_id", "select_point_act", "select_add", "select_unit_act",
"select_unit_id", "select_worker", "build_queue_id", "unload_id"])):
"""The full list of argument types.
Take a look at TYPES and FUNCTION_TYPES for more details.
Attributes:
screen: A point on the screen.
minimap: A point on the minimap.
screen2: The second point for a rectangle. This is needed so that no
function takes the same type twice.
queued: Whether the action should be done now or later.
control_group_act: What to do with the control group.
control_group_id: Which control group to do it with.
select_point_act: What to do with the unit at the point.
select_add: Whether to add the unit to the selection or replace it.
select_unit_act: What to do when selecting a unit by id.
select_unit_id: Which unit to select by id.
select_worker: What to do when selecting a worker.
build_queue_id: Which build queue index to target.
unload_id: Which unit to target in a transport/nydus/command center.
"""
___slots__ = ()
@classmethod
def types(cls, **kwargs):
"""Create an Arguments of the possible Types."""
named = {name: type_._replace(id=Arguments._fields.index(name), name=name)
for name, type_ in six.iteritems(kwargs)}
return cls(**named)
# The list of known types.
TYPES = Arguments.types(
screen=ArgumentType.point(),
minimap=ArgumentType.point(),
screen2=ArgumentType.point(),
queued=ArgumentType.enum([False, True]), # (now vs add to queue)
control_group_act=ArgumentType.enum([
sc_ui.ActionControlGroup.Recall,
sc_ui.ActionControlGroup.Set,
sc_ui.ActionControlGroup.Append,
sc_ui.ActionControlGroup.SetAndSteal,
sc_ui.ActionControlGroup.AppendAndSteal,
]),
control_group_id=ArgumentType.scalar(10),
select_point_act=ArgumentType.enum([
sc_spatial.ActionSpatialUnitSelectionPoint.Select,
sc_spatial.ActionSpatialUnitSelectionPoint.Toggle,
sc_spatial.ActionSpatialUnitSelectionPoint.AllType,
sc_spatial.ActionSpatialUnitSelectionPoint.AddAllType,
]),
select_add=ArgumentType.enum([False, True]), # (select vs select_add)
select_unit_act=ArgumentType.enum([
sc_ui.ActionMultiPanel.SingleSelect,
sc_ui.ActionMultiPanel.DeselectUnit,
sc_ui.ActionMultiPanel.SelectAllOfType,
sc_ui.ActionMultiPanel.DeselectAllOfType,
]),
select_unit_id=ArgumentType.scalar(500), # Depends on current selection.
select_worker=ArgumentType.enum([
sc_ui.ActionSelectIdleWorker.Set,
sc_ui.ActionSelectIdleWorker.Add,
sc_ui.ActionSelectIdleWorker.All,
sc_ui.ActionSelectIdleWorker.AddAll,
]),
build_queue_id=ArgumentType.scalar(10), # Depends on current build queue.
unload_id=ArgumentType.scalar(500), # Depends on the current loaded units.
)
# Which argument types do each function need?
FUNCTION_TYPES = {
no_op: [],
move_camera: [TYPES.minimap],
select_point: [TYPES.select_point_act, TYPES.screen],
select_rect: [TYPES.select_add, TYPES.screen, TYPES.screen2],
select_unit: [TYPES.select_unit_act, TYPES.select_unit_id],
control_group: [TYPES.control_group_act, TYPES.control_group_id],
select_idle_worker: [TYPES.select_worker],
select_army: [TYPES.select_add],
select_warp_gates: [TYPES.select_add],
select_larva: [],
unload: [TYPES.unload_id],
build_queue: [TYPES.build_queue_id],
cmd_quick: [TYPES.queued],
cmd_screen: [TYPES.queued, TYPES.screen],
cmd_minimap: [TYPES.queued, TYPES.minimap],
autocast: [],
}
# Which ones need an ability?
ABILITY_FUNCTIONS = {cmd_quick, cmd_screen, cmd_minimap, autocast}
# Which ones require a point?
POINT_REQUIRED_FUNCS = {
False: {cmd_quick, autocast},
True: {cmd_screen, cmd_minimap, autocast}}
always = lambda _: True
class Function(collections.namedtuple(
"Function", ["id", "name", "ability_id", "general_id", "function_type",
"args", "avail_fn"])):
"""Represents a function action.
Attributes:
id: The function id, which is what the agent will use.
name: The name of the function. Should be unique.
ability_id: The ability id to pass to sc2.
general_id: 0 for normal abilities, and the ability_id of another ability if
it can be represented by a more general action.
function_type: One of the functions in FUNCTION_TYPES for how to construct
the sc2 action proto out of python types.
args: A list of the types of args passed to function_type.
avail_fn: For non-abilities, this function returns whether the function is
valid.
"""
__slots__ = ()
@classmethod
def ui_func(cls, id_, name, function_type, avail_fn=always):
"""Define a function representing a ui action."""
return cls(id_, name, 0, 0, function_type, FUNCTION_TYPES[function_type],
avail_fn)
@classmethod
def ability(cls, id_, name, function_type, ability_id, general_id=0):
"""Define a function represented as a game ability."""
assert function_type in ABILITY_FUNCTIONS
return cls(id_, name, ability_id, general_id, function_type,
FUNCTION_TYPES[function_type], None)
@classmethod
def spec(cls, id_, name, args):
"""Create a Function to be used in ValidActions."""
return cls(id_, name, None, None, None, args, None)
def __hash__(self): # So it can go in a set().
return self.id
def __str__(self):
return self.str()
def str(self, space=False):
"""String version. Set space=True to line them all up nicely."""
return "%s/%s (%s)" % (str(self.id).rjust(space and 4),
self.name.ljust(space and 50),
"; ".join(str(a) for a in self.args))
class Functions(object):
"""Represents the full set of functions.
Can't use namedtuple since python3 has a limit of 255 function arguments, so
build something similar.
"""
def __init__(self, functions):
self._func_list = functions
self._func_dict = {f.name: f for f in functions}
if len(self._func_dict) != len(self._func_list):
raise ValueError("Function names must be unique.")
def __getattr__(self, name):
return self._func_dict[name]
def __getitem__(self, key):
if isinstance(key, numbers.Number):
return self._func_list[key]
return self._func_dict[key]
def __iter__(self):
return iter(self._func_list)
def __len__(self):
return len(self._func_list)
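# Illustrative sketch only: a hypothetical mini registry, not part of the real FUNCTIONS list
# below. It shows how Function.spec/ArgumentType.spec combine and how Functions supports lookup
# by index, name, or attribute.
def _functions_usage_example():
  queued = ArgumentType.spec(3, "queued", (2,))
  demo = Functions([
      Function.spec(0, "demo_no_op", []),
      Function.spec(1, "demo_quick", [queued]),
  ])
  assert demo[1] is demo["demo_quick"] is demo.demo_quick
  return len(demo)  # 2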
# pylint: disable=line-too-long
FUNCTIONS = Functions([
Function.ui_func(0, "no_op", no_op),
Function.ui_func(1, "move_camera", move_camera),
Function.ui_func(2, "select_point", select_point),
Function.ui_func(3, "select_rect", select_rect),
Function.ui_func(4, "select_control_group", control_group),
Function.ui_func(5, "select_unit", select_unit,
lambda obs: obs.ui_data.HasField("multi")),
Function.ui_func(6, "select_idle_worker", select_idle_worker,
lambda obs: obs.player_common.idle_worker_count > 0),
Function.ui_func(7, "select_army", select_army,
lambda obs: obs.player_common.army_count > 0),
Function.ui_func(8, "select_warp_gates", select_warp_gates,
lambda obs: obs.player_common.warp_gate_count > 0),
Function.ui_func(9, "select_larva", select_larva,
lambda obs: obs.player_common.larva_count > 0),
Function.ui_func(10, "unload", unload,
lambda obs: obs.ui_data.HasField("cargo")),
Function.ui_func(11, "build_queue", build_queue,
lambda obs: obs.ui_data.HasField("production")),
# Everything below here is generated with gen_actions.py
Function.ability(12, "Attack_screen", cmd_screen, 3674),
Function.ability(13, "Attack_minimap", cmd_minimap, 3674),
Function.ability(14, "Attack_Attack_screen", cmd_screen, 23, 3674),
Function.ability(15, "Attack_Attack_minimap", cmd_minimap, 23, 3674),
Function.ability(16, "Attack_AttackBuilding_screen", cmd_screen, 2048, 3674),
Function.ability(17, "Attack_AttackBuilding_minimap", cmd_minimap, 2048, 3674),
Function.ability(18, "Attack_Redirect_screen", cmd_screen, 1682, 3674),
Function.ability(19, "Scan_Move_screen", cmd_screen, 19, 3674),
Function.ability(20, "Scan_Move_minimap", cmd_minimap, 19, 3674),
Function.ability(21, "Behavior_BuildingAttackOff_quick", cmd_quick, 2082),
Function.ability(22, "Behavior_BuildingAttackOn_quick", cmd_quick, 2081),
Function.ability(23, "Behavior_CloakOff_quick", cmd_quick, 3677),
Function.ability(24, "Behavior_CloakOff_Banshee_quick", cmd_quick, 393, 3677),
Function.ability(25, "Behavior_CloakOff_Ghost_quick", cmd_quick, 383, 3677),
Function.ability(26, "Behavior_CloakOn_quick", cmd_quick, 3676),
Function.ability(27, "Behavior_CloakOn_Banshee_quick", cmd_quick, 392, 3676),
Function.ability(28, "Behavior_CloakOn_Ghost_quick", cmd_quick, 382, 3676),
Function.ability(29, "Behavior_GenerateCreepOff_quick", cmd_quick, 1693),
Function.ability(30, "Behavior_GenerateCreepOn_quick", cmd_quick, 1692),
Function.ability(31, "Behavior_HoldFireOff_quick", cmd_quick, 3689),
Function.ability(32, "Behavior_HoldFireOff_Ghost_quick", cmd_quick, 38, 3689),
Function.ability(33, "Behavior_HoldFireOff_Lurker_quick", cmd_quick, 2552, 3689),
Function.ability(34, "Behavior_HoldFireOn_quick", cmd_quick, 3688),
Function.ability(35, "Behavior_HoldFireOn_Ghost_quick", cmd_quick, 36, 3688),
Function.ability(36, "Behavior_HoldFireOn_Lurker_quick", cmd_quick, 2550, 3688),
Function.ability(37, "Behavior_PulsarBeamOff_quick", cmd_quick, 2376),
Function.ability(38, "Behavior_PulsarBeamOn_quick", cmd_quick, 2375),
Function.ability(39, "Build_Armory_screen", cmd_screen, 331),
Function.ability(40, "Build_Assimilator_screen", cmd_screen, 882),
Function.ability(41, "Build_BanelingNest_screen", cmd_screen, 1162),
Function.ability(42, "Build_Barracks_screen", cmd_screen, 321),
Function.ability(43, "Build_Bunker_screen", cmd_screen, 324),
Function.ability(44, "Build_CommandCenter_screen", cmd_screen, 318),
Function.ability(45, "Build_CreepTumor_screen", cmd_screen, 3691),
Function.ability(46, "Build_CreepTumor_Queen_screen", cmd_screen, 1694, 3691),
Function.ability(47, "Build_CreepTumor_Tumor_screen", cmd_screen, 1733, 3691),
Function.ability(48, "Build_CyberneticsCore_screen", cmd_screen, 894),
Function.ability(49, "Build_DarkShrine_screen", cmd_screen, 891),
Function.ability(50, "Build_EngineeringBay_screen", cmd_screen, 322),
Function.ability(51, "Build_EvolutionChamber_screen", cmd_screen, 1156),
Function.ability(52, "Build_Extractor_screen", cmd_screen, 1154),
Function.ability(53, "Build_Factory_screen", cmd_screen, 328),
Function.ability(54, "Build_FleetBeacon_screen", cmd_screen, 885),
Function.ability(55, "Build_Forge_screen", cmd_screen, 884),
Function.ability(56, "Build_FusionCore_screen", cmd_screen, 333),
Function.ability(57, "Build_Gateway_screen", cmd_screen, 883),
Function.ability(58, "Build_GhostAcademy_screen", cmd_screen, 327),
Function.ability(59, "Build_Hatchery_screen", cmd_screen, 1152),
Function.ability(60, "Build_HydraliskDen_screen", cmd_screen, 1157),
Function.ability(61, "Build_InfestationPit_screen", cmd_screen, 1160),
Function.ability(62, "Build_Interceptors_quick", cmd_quick, 1042),
Function.ability(63, "Build_Interceptors_autocast", autocast, 1042),
Function.ability(64, "Build_MissileTurret_screen", cmd_screen, 323),
Function.ability(65, "Build_Nexus_screen", cmd_screen, 880),
Function.ability(66, "Build_Nuke_quick", cmd_quick, 710),
Function.ability(67, "Build_NydusNetwork_screen", cmd_screen, 1161),
Function.ability(68, "Build_NydusWorm_screen", cmd_screen, 1768),
Function.ability(69, "Build_PhotonCannon_screen", cmd_screen, 887),
Function.ability(70, "Build_Pylon_screen", cmd_screen, 881),
Function.ability(71, "Build_Reactor_quick", cmd_quick, 3683),
Function.ability(72, "Build_Reactor_screen", cmd_screen, 3683),
Function.ability(73, "Build_Reactor_Barracks_quick", cmd_quick, 422, 3683),
Function.ability(74, "Build_Reactor_Barracks_screen", cmd_screen, 422, 3683),
Function.ability(75, "Build_Reactor_Factory_quick", cmd_quick, 455, 3683),
Function.ability(76, "Build_Reactor_Factory_screen", cmd_screen, 455, 3683),
Function.ability(77, "Build_Reactor_Starport_quick", cmd_quick, 488, 3683),
Function.ability(78, "Build_Reactor_Starport_screen", cmd_screen, 488, 3683),
Function.ability(79, "Build_Refinery_screen", cmd_screen, 320),
Function.ability(80, "Build_RoachWarren_screen", cmd_screen, 1165),
Function.ability(81, "Build_RoboticsBay_screen", cmd_screen, 892),
Function.ability(82, "Build_RoboticsFacility_screen", cmd_screen, 893),
Function.ability(83, "Build_SensorTower_screen", cmd_screen, 326),
Function.ability(84, "Build_SpawningPool_screen", cmd_screen, 1155),
Function.ability(85, "Build_SpineCrawler_screen", cmd_screen, 1166),
Function.ability(86, "Build_Spire_screen", cmd_screen, 1158),
Function.ability(87, "Build_SporeCrawler_screen", cmd_screen, 1167),
Function.ability(88, "Build_Stargate_screen", cmd_screen, 889),
Function.ability(89, "Build_Starport_screen", cmd_screen, 329),
Function.ability(90, "Build_StasisTrap_screen", cmd_screen, 2505),
Function.ability(91, "Build_SupplyDepot_screen", cmd_screen, 319),
Function.ability(92, "Build_TechLab_quick", cmd_quick, 3682),
Function.ability(93, "Build_TechLab_screen", cmd_screen, 3682),
Function.ability(94, "Build_TechLab_Barracks_quick", cmd_quick, 421, 3682),
Function.ability(95, "Build_TechLab_Barracks_screen", cmd_screen, 421, 3682),
Function.ability(96, "Build_TechLab_Factory_quick", cmd_quick, 454, 3682),
Function.ability(97, "Build_TechLab_Factory_screen", cmd_screen, 454, 3682),
Function.ability(98, "Build_TechLab_Starport_quick", cmd_quick, 487, 3682),
Function.ability(99, "Build_TechLab_Starport_screen", cmd_screen, 487, 3682),
Function.ability(100, "Build_TemplarArchive_screen", cmd_screen, 890),
Function.ability(101, "Build_TwilightCouncil_screen", cmd_screen, 886),
Function.ability(102, "Build_UltraliskCavern_screen", cmd_screen, 1159),
Function.ability(103, "BurrowDown_quick", cmd_quick, 3661),
Function.ability(104, "BurrowDown_Baneling_quick", cmd_quick, 1374, 3661),
Function.ability(105, "BurrowDown_Drone_quick", cmd_quick, 1378, 3661),
Function.ability(106, "BurrowDown_Hydralisk_quick", cmd_quick, 1382, 3661),
Function.ability(107, "BurrowDown_Infestor_quick", cmd_quick, 1444, 3661),
Function.ability(108, "BurrowDown_InfestorTerran_quick", cmd_quick, 1394, 3661),
Function.ability(109, "BurrowDown_Lurker_quick", cmd_quick, 2108, 3661),
Function.ability(110, "BurrowDown_Queen_quick", cmd_quick, 1433, 3661),
Function.ability(111, "BurrowDown_Ravager_quick", cmd_quick, 2340, 3661),
Function.ability(112, "BurrowDown_Roach_quick", cmd_quick, 1386, 3661),
Function.ability(113, "BurrowDown_SwarmHost_quick", cmd_quick, 2014, 3661),
Function.ability(114, "BurrowDown_Ultralisk_quick", cmd_quick, 1512, 3661),
Function.ability(115, "BurrowDown_WidowMine_quick", cmd_quick, 2095, 3661),
Function.ability(116, "BurrowDown_Zergling_quick", cmd_quick, 1390, 3661),
Function.ability(117, "BurrowUp_quick", cmd_quick, 3662),
Function.ability(118, "BurrowUp_autocast", autocast, 3662),
Function.ability(119, "BurrowUp_Baneling_quick", cmd_quick, 1376, 3662),
Function.ability(120, "BurrowUp_Baneling_autocast", autocast, 1376, 3662),
Function.ability(121, "BurrowUp_Drone_quick", cmd_quick, 1380, 3662),
Function.ability(122, "BurrowUp_Hydralisk_quick", cmd_quick, 1384, 3662),
Function.ability(123, "BurrowUp_Hydralisk_autocast", autocast, 1384, 3662),
Function.ability(124, "BurrowUp_Infestor_quick", cmd_quick, 1446, 3662),
Function.ability(125, "BurrowUp_InfestorTerran_quick", cmd_quick, 1396, 3662),
Function.ability(126, "BurrowUp_InfestorTerran_autocast", autocast, 1396, 3662),
Function.ability(127, "BurrowUp_Lurker_quick", cmd_quick, 2110, 3662),
Function.ability(128, "BurrowUp_Queen_quick", cmd_quick, 1435, 3662),
Function.ability(129, "BurrowUp_Queen_autocast", autocast, 1435, 3662),
Function.ability(130, "BurrowUp_Ravager_quick", cmd_quick, 2342, 3662),
Function.ability(131, "BurrowUp_Ravager_autocast", autocast, 2342, 3662),
Function.ability(132, "BurrowUp_Roach_quick", cmd_quick, 1388, 3662),
Function.ability(133, "BurrowUp_Roach_autocast", autocast, 1388, 3662),
Function.ability(134, "BurrowUp_SwarmHost_quick", cmd_quick, 2016, 3662),
Function.ability(135, "BurrowUp_Ultralisk_quick", cmd_quick, 1514, 3662),
Function.ability(136, "BurrowUp_Ultralisk_autocast", autocast, 1514, 3662),
Function.ability(137, "BurrowUp_WidowMine_quick", cmd_quick, 2097, 3662),
Function.ability(138, "BurrowUp_Zergling_quick", cmd_quick, 1392, 3662),
Function.ability(139, "BurrowUp_Zergling_autocast", autocast, 1392, 3662),
Function.ability(140, "Cancel_quick", cmd_quick, 3659),
Function.ability(141, "Cancel_AdeptPhaseShift_quick", cmd_quick, 2594, 3659),
Function.ability(142, "Cancel_AdeptShadePhaseShift_quick", cmd_quick, 2596, 3659),
Function.ability(143, "Cancel_BarracksAddOn_quick", cmd_quick, 451, 3659),
Function.ability(144, "Cancel_BuildInProgress_quick", cmd_quick, 314, 3659),
Function.ability(145, "Cancel_CreepTumor_quick", cmd_quick, 1763, 3659),
Function.ability(146, "Cancel_FactoryAddOn_quick", cmd_quick, 484, 3659),
Function.ability(147, "Cancel_GravitonBeam_quick", cmd_quick, 174, 3659),
Function.ability(148, "Cancel_LockOn_quick", cmd_quick, 2354, 3659),
Function.ability(149, "Cancel_MorphBroodlord_quick", cmd_quick, 1373, 3659),
Function.ability(150, "Cancel_MorphGreaterSpire_quick", cmd_quick, 1221, 3659),
Function.ability(151, "Cancel_MorphHive_quick", cmd_quick, 1219, 3659),
Function.ability(152, "Cancel_MorphLair_quick", cmd_quick, 1217, 3659),
Function.ability(153, "Cancel_MorphLurker_quick", cmd_quick, 2333, 3659),
Function.ability(154, "Cancel_MorphLurkerDen_quick", cmd_quick, 2113, 3659),
Function.ability(155, "Cancel_MorphMothership_quick", cmd_quick, 1848, 3659),
Function.ability(156, "Cancel_MorphOrbital_quick", cmd_quick, 1517, 3659),
Function.ability(157, "Cancel_MorphOverlordTransport_quick", cmd_quick, 2709, 3659),
Function.ability(158, "Cancel_MorphOverseer_quick", cmd_quick, 1449, 3659),
Function.ability(159, "Cancel_MorphPlanetaryFortress_quick", cmd_quick, 1451, 3659),
Function.ability(160, "Cancel_MorphRavager_quick", cmd_quick, 2331, 3659),
Function.ability(161, "Cancel_MorphThorExplosiveMode_quick", cmd_quick, 2365, 3659),
Function.ability(162, "Cancel_NeuralParasite_quick", cmd_quick, 250, 3659),
Function.ability(163, "Cancel_Nuke_quick", cmd_quick, 1623, 3659),
Function.ability(164, "Cancel_SpineCrawlerRoot_quick", cmd_quick, 1730, 3659),
Function.ability(165, "Cancel_SporeCrawlerRoot_quick", cmd_quick, 1732, 3659),
Function.ability(166, "Cancel_StarportAddOn_quick", cmd_quick, 517, 3659),
Function.ability(167, "Cancel_StasisTrap_quick", cmd_quick, 2535, 3659),
Function.ability(168, "Cancel_Last_quick", cmd_quick, 3671),
Function.ability(169, "Cancel_HangarQueue5_quick", cmd_quick, 1038, 3671),
Function.ability(170, "Cancel_Queue1_quick", cmd_quick, 304, 3671),
Function.ability(171, "Cancel_Queue5_quick", cmd_quick, 306, 3671),
Function.ability(172, "Cancel_QueueAddOn_quick", cmd_quick, 312, 3671),
Function.ability(173, "Cancel_QueueCancelToSelection_quick", cmd_quick, 308, 3671),
Function.ability(174, "Cancel_QueuePasive_quick", cmd_quick, 1831, 3671),
Function.ability(175, "Cancel_QueuePassiveCancelToSelection_quick", cmd_quick, 1833, 3671),
Function.ability(176, "Effect_Abduct_screen", cmd_screen, 2067),
Function.ability(177, "Effect_AdeptPhaseShift_screen", cmd_screen, 2544),
Function.ability(178, "Effect_AutoTurret_screen", cmd_screen, 1764),
Function.ability(179, "Effect_BlindingCloud_screen", cmd_screen, 2063),
Function.ability(180, "Effect_Blink_screen", cmd_screen, 3687),
Function.ability(181, "Effect_Blink_Stalker_screen", cmd_screen, 1442, 3687),
Function.ability(182, "Effect_ShadowStride_screen", cmd_screen, 2700, 3687),
Function.ability(183, "Effect_CalldownMULE_screen", cmd_screen, 171),
Function.ability(184, "Effect_CausticSpray_screen", cmd_screen, 2324),
Function.ability(185, "Effect_Charge_screen", cmd_screen, 1819),
Function.ability(186, "Effect_Charge_autocast", autocast, 1819),
Function.ability(187, "Effect_ChronoBoost_screen", cmd_screen, 261),
Function.ability(188, "Effect_Contaminate_screen", cmd_screen, 1825),
Function.ability(189, "Effect_CorrosiveBile_screen", cmd_screen, 2338),
Function.ability(190, "Effect_EMP_screen", cmd_screen, 1628),
Function.ability(191, "Effect_Explode_quick", cmd_quick, 42),
Function.ability(192, "Effect_Feedback_screen", cmd_screen, 140),
Function.ability(193, "Effect_ForceField_screen", cmd_screen, 1526),
Function.ability(194, "Effect_FungalGrowth_screen", cmd_screen, 74),
Function.ability(195, "Effect_GhostSnipe_screen", cmd_screen, 2714),
Function.ability(196, "Effect_GravitonBeam_screen", cmd_screen, 173),
Function.ability(197, "Effect_GuardianShield_quick", cmd_quick, 76),
Function.ability(198, "Effect_Heal_screen", cmd_screen, 386),
Function.ability(199, "Effect_Heal_autocast", autocast, 386),
Function.ability(200, "Effect_HunterSeekerMissile_screen", cmd_screen, 169),
Function.ability(201, "Effect_ImmortalBarrier_quick", cmd_quick, 2328),
Function.ability(202, "Effect_ImmortalBarrier_autocast", autocast, 2328),
Function.ability(203, "Effect_InfestedTerrans_screen", cmd_screen, 247),
Function.ability(204, "Effect_InjectLarva_screen", cmd_screen, 251),
Function.ability(205, "Effect_KD8Charge_screen", cmd_screen, 2588),
Function.ability(206, "Effect_LockOn_screen", cmd_screen, 2350),
Function.ability(207, "Effect_LocustSwoop_screen", cmd_screen, 2387),
Function.ability(208, "Effect_MassRecall_screen", cmd_screen, 3686),
Function.ability(209, "Effect_MassRecall_Mothership_screen", cmd_screen, 2368, 3686),
Function.ability(210, "Effect_MassRecall_MothershipCore_screen", cmd_screen, 1974, 3686),
Function.ability(211, "Effect_MedivacIgniteAfterburners_quick", cmd_quick, 2116),
Function.ability(212, "Effect_NeuralParasite_screen", cmd_screen, 249),
Function.ability(213, "Effect_NukeCalldown_screen", cmd_screen, 1622),
Function.ability(214, "Effect_OracleRevelation_screen", cmd_screen, 2146),
Function.ability(215, "Effect_ParasiticBomb_screen", cmd_screen, 2542),
Function.ability(216, "Effect_PhotonOvercharge_screen", cmd_screen, 2162),
Function.ability(217, "Effect_PointDefenseDrone_screen", cmd_screen, 144),
Function.ability(218, "Effect_PsiStorm_screen", cmd_screen, 1036),
Function.ability(219, "Effect_PurificationNova_screen", cmd_screen, 2346),
Function.ability(220, "Effect_Repair_screen", cmd_screen, 3685),
Function.ability(221, "Effect_Repair_autocast", autocast, 3685),
Function.ability(222, "Effect_Repair_Mule_screen", cmd_screen, 78, 3685),
Function.ability(223, "Effect_Repair_Mule_autocast", autocast, 78, 3685),
Function.ability(224, "Effect_Repair_SCV_screen", cmd_screen, 316, 3685),
Function.ability(225, "Effect_Repair_SCV_autocast", autocast, 316, 3685),
Function.ability(226, "Effect_Salvage_quick", cmd_quick, 32),
Function.ability(227, "Effect_Scan_screen", cmd_screen, 399),
Function.ability(228, "Effect_SpawnChangeling_quick", cmd_quick, 181),
Function.ability(229, "Effect_SpawnLocusts_screen", cmd_screen, 2704),
Function.ability(230, "Effect_Spray_screen", cmd_screen, 3684),
Function.ability(231, "Effect_Spray_Protoss_screen", cmd_screen, 30, 3684),
Function.ability(232, "Effect_Spray_Terran_screen", cmd_screen, 26, 3684),
Function.ability(233, "Effect_Spray_Zerg_screen", cmd_screen, 28, 3684),
Function.ability(234, "Effect_Stim_quick", cmd_quick, 3675),
Function.ability(235, "Effect_Stim_Marauder_quick", cmd_quick, 253, 3675),
Function.ability(236, "Effect_Stim_Marauder_Redirect_quick", cmd_quick, 1684, 3675),
Function.ability(237, "Effect_Stim_Marine_quick", cmd_quick, 380, 3675),
Function.ability(238, "Effect_Stim_Marine_Redirect_quick", cmd_quick, 1683, 3675),
Function.ability(239, "Effect_SupplyDrop_screen", cmd_screen, 255),
Function.ability(240, "Effect_TacticalJump_screen", cmd_screen, 2358),
Function.ability(241, "Effect_TimeWarp_screen", cmd_screen, 2244),
Function.ability(242, "Effect_Transfusion_screen", cmd_screen, 1664),
Function.ability(243, "Effect_ViperConsume_screen", cmd_screen, 2073),
Function.ability(244, "Effect_VoidRayPrismaticAlignment_quick", cmd_quick, 2393),
Function.ability(245, "Effect_WidowMineAttack_screen", cmd_screen, 2099),
Function.ability(246, "Effect_WidowMineAttack_autocast", autocast, 2099),
Function.ability(247, "Effect_YamatoGun_screen", cmd_screen, 401),
Function.ability(248, "Hallucination_Adept_quick", cmd_quick, 2391),
Function.ability(249, "Hallucination_Archon_quick", cmd_quick, 146),
Function.ability(250, "Hallucination_Colossus_quick", cmd_quick, 148),
Function.ability(251, "Hallucination_Disruptor_quick", cmd_quick, 2389),
Function.ability(252, "Hallucination_HighTemplar_quick", cmd_quick, 150),
Function.ability(253, "Hallucination_Immortal_quick", cmd_quick, 152),
Function.ability(254, "Hallucination_Oracle_quick", cmd_quick, 2114),
Function.ability(255, "Hallucination_Phoenix_quick", cmd_quick, 154),
Function.ability(256, "Hallucination_Probe_quick", cmd_quick, 156),
Function.ability(257, "Hallucination_Stalker_quick", cmd_quick, 158),
Function.ability(258, "Hallucination_VoidRay_quick", cmd_quick, 160),
Function.ability(259, "Hallucination_WarpPrism_quick", cmd_quick, 162),
Function.ability(260, "Hallucination_Zealot_quick", cmd_quick, 164),
Function.ability(261, "Halt_quick", cmd_quick, 3660),
Function.ability(262, "Halt_Building_quick", cmd_quick, 315, 3660),
Function.ability(263, "Halt_TerranBuild_quick", cmd_quick, 348, 3660),
Function.ability(264, "Harvest_Gather_screen", cmd_screen, 3666),
Function.ability(265, "Harvest_Gather_Drone_screen", cmd_screen, 1183, 3666),
Function.ability(266, "Harvest_Gather_Mule_screen", cmd_screen, 166, 3666),
Function.ability(267, "Harvest_Gather_Probe_screen", cmd_screen, 298, 3666),
Function.ability(268, "Harvest_Gather_SCV_screen", cmd_screen, 295, 3666),
Function.ability(269, "Harvest_Return_quick", cmd_quick, 3667),
Function.ability(270, "Harvest_Return_Drone_quick", cmd_quick, 1184, 3667),
Function.ability(271, "Harvest_Return_Mule_quick", cmd_quick, 167, 3667),
Function.ability(272, "Harvest_Return_Probe_quick", cmd_quick, 299, 3667),
Function.ability(273, "Harvest_Return_SCV_quick", cmd_quick, 296, 3667),
Function.ability(274, "HoldPosition_quick", cmd_quick, 18),
Function.ability(275, "Land_screen", cmd_screen, 3678),
Function.ability(276, "Land_Barracks_screen", cmd_screen, 554, 3678),
Function.ability(277, "Land_CommandCenter_screen", cmd_screen, 419, 3678),
Function.ability(278, "Land_Factory_screen", cmd_screen, 520, 3678),
Function.ability(279, "Land_OrbitalCommand_screen", cmd_screen, 1524, 3678),
Function.ability(280, "Land_Starport_screen", cmd_screen, 522, 3678),
Function.ability(281, "Lift_quick", cmd_quick, 3679),
Function.ability(282, "Lift_Barracks_quick", cmd_quick, 452, 3679),
Function.ability(283, "Lift_CommandCenter_quick", cmd_quick, 417, 3679),
Function.ability(284, "Lift_Factory_quick", cmd_quick, 485, 3679),
Function.ability(285, "Lift_OrbitalCommand_quick", cmd_quick, 1522, 3679),
Function.ability(286, "Lift_Starport_quick", cmd_quick, 518, 3679),
Function.ability(287, "Load_screen", cmd_screen, 3668),
Function.ability(288, "Load_Bunker_screen", cmd_screen, 407, 3668),
Function.ability(289, "Load_Medivac_screen", cmd_screen, 394, 3668),
Function.ability(290, "Load_NydusNetwork_screen", cmd_screen, 1437, 3668),
Function.ability(291, "Load_NydusWorm_screen", cmd_screen, 2370, 3668),
Function.ability(292, "Load_Overlord_screen", cmd_screen, 1406, 3668),
Function.ability(293, "Load_WarpPrism_screen", cmd_screen, 911, 3668),
Function.ability(294, "LoadAll_quick", cmd_quick, 3663),
Function.ability(295, "LoadAll_CommandCenter_quick", cmd_quick, 416, 3663),
Function.ability(296, "Morph_Archon_quick", cmd_quick, 1766),
Function.ability(297, "Morph_BroodLord_quick", cmd_quick, 1372),
Function.ability(298, "Morph_Gateway_quick", cmd_quick, 1520),
Function.ability(299, "Morph_GreaterSpire_quick", cmd_quick, 1220),
Function.ability(300, "Morph_Hellbat_quick", cmd_quick, 1998),
Function.ability(301, "Morph_Hellion_quick", cmd_quick, 1978),
Function.ability(302, "Morph_Hive_quick", cmd_quick, 1218),
Function.ability(303, "Morph_Lair_quick", cmd_quick, 1216),
Function.ability(304, "Morph_LiberatorAAMode_quick", cmd_quick, 2560),
Function.ability(305, "Morph_LiberatorAGMode_screen", cmd_screen, 2558),
Function.ability(306, "Morph_Lurker_quick", cmd_quick, 2332),
Function.ability(307, "Morph_LurkerDen_quick", cmd_quick, 2112),
Function.ability(308, "Morph_Mothership_quick", cmd_quick, 1847),
Function.ability(309, "Morph_OrbitalCommand_quick", cmd_quick, 1516),
Function.ability(310, "Morph_OverlordTransport_quick", cmd_quick, 2708),
Function.ability(311, "Morph_Overseer_quick", cmd_quick, 1448),
Function.ability(312, "Morph_PlanetaryFortress_quick", cmd_quick, 1450),
Function.ability(313, "Morph_Ravager_quick", cmd_quick, 2330),
Function.ability(314, "Morph_Root_screen", cmd_screen, 3680),
Function.ability(315, "Morph_SpineCrawlerRoot_screen", cmd_screen, 1729, 3680),
Function.ability(316, "Morph_SporeCrawlerRoot_screen", cmd_screen, 1731, 3680),
Function.ability(317, "Morph_SiegeMode_quick", cmd_quick, 388),
Function.ability(318, "Morph_SupplyDepot_Lower_quick", cmd_quick, 556),
Function.ability(319, "Morph_SupplyDepot_Raise_quick", cmd_quick, 558),
Function.ability(320, "Morph_ThorExplosiveMode_quick", cmd_quick, 2364),
Function.ability(321, "Morph_ThorHighImpactMode_quick", cmd_quick, 2362),
Function.ability(322, "Morph_Unsiege_quick", cmd_quick, 390),
Function.ability(323, "Morph_Uproot_quick", cmd_quick, 3681),
Function.ability(324, "Morph_SpineCrawlerUproot_quick", cmd_quick, 1725, 3681),
Function.ability(325, "Morph_SporeCrawlerUproot_quick", cmd_quick, 1727, 3681),
Function.ability(326, "Morph_VikingAssaultMode_quick", cmd_quick, 403),
Function.ability(327, "Morph_VikingFighterMode_quick", cmd_quick, 405),
Function.ability(328, "Morph_WarpGate_quick", cmd_quick, 1518),
Function.ability(329, "Morph_WarpPrismPhasingMode_quick", cmd_quick, 1528),
Function.ability(330, "Morph_WarpPrismTransportMode_quick", cmd_quick, 1530),
Function.ability(331, "Move_screen", cmd_screen, 16),
Function.ability(332, "Move_minimap", cmd_minimap, 16),
Function.ability(333, "Patrol_screen", cmd_screen, 17),
Function.ability(334, "Patrol_minimap", cmd_minimap, 17),
Function.ability(335, "Rally_Units_screen", cmd_screen, 3673),
Function.ability(336, "Rally_Units_minimap", cmd_minimap, 3673),
Function.ability(337, "Rally_Building_screen", cmd_screen, 195, 3673),
Function.ability(338, "Rally_Building_minimap", cmd_minimap, 195, 3673),
Function.ability(339, "Rally_Hatchery_Units_screen", cmd_screen, 212, 3673),
Function.ability(340, "Rally_Hatchery_Units_minimap", cmd_minimap, 212, 3673),
Function.ability(341, "Rally_Morphing_Unit_screen", cmd_screen, 199, 3673),
Function.ability(342, "Rally_Morphing_Unit_minimap", cmd_minimap, 199, 3673),
Function.ability(343, "Rally_Workers_screen", cmd_screen, 3690),
Function.ability(344, "Rally_Workers_minimap", cmd_minimap, 3690),
Function.ability(345, "Rally_CommandCenter_screen", cmd_screen, 203, 3690),
Function.ability(346, "Rally_CommandCenter_minimap", cmd_minimap, 203, 3690),
Function.ability(347, "Rally_Hatchery_Workers_screen", cmd_screen, 211, 3690),
Function.ability(348, "Rally_Hatchery_Workers_minimap", cmd_minimap, 211, 3690),
Function.ability(349, "Rally_Nexus_screen", cmd_screen, 207, 3690),
Function.ability(350, "Rally_Nexus_minimap", cmd_minimap, 207, 3690),
Function.ability(351, "Research_AdeptResonatingGlaives_quick", cmd_quick, 1594),
Function.ability(352, "Research_AdvancedBallistics_quick", cmd_quick, 805),
Function.ability(353, "Research_BansheeCloakingField_quick", cmd_quick, 790),
Function.ability(354, "Research_BansheeHyperflightRotors_quick", cmd_quick, 799),
Function.ability(355, "Research_BattlecruiserWeaponRefit_quick", cmd_quick, 1532),
Function.ability(356, "Research_Blink_quick", cmd_quick, 1593),
Function.ability(357, "Research_Burrow_quick", cmd_quick, 1225),
Function.ability(358, "Research_CentrifugalHooks_quick", cmd_quick, 1482),
Function.ability(359, "Research_Charge_quick", cmd_quick, 1592),
Function.ability(360, "Research_ChitinousPlating_quick", cmd_quick, 265),
Function.ability(361, "Research_CombatShield_quick", cmd_quick, 731),
Function.ability(362, "Research_ConcussiveShells_quick", cmd_quick, 732),
Function.ability(363, "Research_DrillingClaws_quick", cmd_quick, 764),
Function.ability(364, "Research_ExtendedThermalLance_quick", cmd_quick, 1097),
Function.ability(365, "Research_GlialRegeneration_quick", cmd_quick, 216),
Function.ability(366, "Research_GraviticBooster_quick", cmd_quick, 1093),
Function.ability(367, "Research_GraviticDrive_quick", cmd_quick, 1094),
Function.ability(368, "Research_GroovedSpines_quick", cmd_quick, 1282),
Function.ability(369, "Research_HiSecAutoTracking_quick", cmd_quick, 650),
Function.ability(370, "Research_HighCapacityFuelTanks_quick", cmd_quick, 804),
Function.ability(371, "Research_InfernalPreigniter_quick", cmd_quick, 761),
Function.ability(372, "Research_InterceptorGravitonCatapult_quick", cmd_quick, 44),
Function.ability(373, "Research_MagFieldLaunchers_quick", cmd_quick, 766),
Function.ability(374, "Research_MuscularAugments_quick", cmd_quick, 1283),
Function.ability(375, "Research_NeosteelFrame_quick", cmd_quick, 655),
Function.ability(376, "Research_NeuralParasite_quick", cmd_quick, 1455),
Function.ability(377, "Research_PathogenGlands_quick", cmd_quick, 1454),
Function.ability(378, "Research_PersonalCloaking_quick", cmd_quick, 820),
Function.ability(379, "Research_PhoenixAnionPulseCrystals_quick", cmd_quick, 46),
Function.ability(380, "Research_PneumatizedCarapace_quick", cmd_quick, 1223),
Function.ability(381, "Research_ProtossAirArmor_quick", cmd_quick, 3692),
Function.ability(382, "Research_ProtossAirArmorLevel1_quick", cmd_quick, 1565, 3692),
Function.ability(383, "Research_ProtossAirArmorLevel2_quick", cmd_quick, 1566, 3692),
Function.ability(384, "Research_ProtossAirArmorLevel3_quick", cmd_quick, 1567, 3692),
Function.ability(385, "Research_ProtossAirWeapons_quick", cmd_quick, 3693),
Function.ability(386, "Research_ProtossAirWeaponsLevel1_quick", cmd_quick, 1562, 3693),
Function.ability(387, "Research_ProtossAirWeaponsLevel2_quick", cmd_quick, 1563, 3693),
Function.ability(388, "Research_ProtossAirWeaponsLevel3_quick", cmd_quick, 1564, 3693),
Function.ability(389, "Research_ProtossGroundArmor_quick", cmd_quick, 3694),
Function.ability(390, "Research_ProtossGroundArmorLevel1_quick", cmd_quick, 1065, 3694),
Function.ability(391, "Research_ProtossGroundArmorLevel2_quick", cmd_quick, 1066, 3694),
Function.ability(392, "Research_ProtossGroundArmorLevel3_quick", cmd_quick, 1067, 3694),
Function.ability(393, "Research_ProtossGroundWeapons_quick", cmd_quick, 3695),
Function.ability(394, "Research_ProtossGroundWeaponsLevel1_quick", cmd_quick, 1062, 3695),
Function.ability(395, "Research_ProtossGroundWeaponsLevel2_quick", cmd_quick, 1063, 3695),
Function.ability(396, "Research_ProtossGroundWeaponsLevel3_quick", cmd_quick, 1064, 3695),
Function.ability(397, "Research_ProtossShields_quick", cmd_quick, 3696),
Function.ability(398, "Research_ProtossShieldsLevel1_quick", cmd_quick, 1068, 3696),
Function.ability(399, "Research_ProtossShieldsLevel2_quick", cmd_quick, 1069, 3696),
Function.ability(400, "Research_ProtossShieldsLevel3_quick", cmd_quick, 1070, 3696),
Function.ability(401, "Research_PsiStorm_quick", cmd_quick, 1126),
Function.ability(402, "Research_RavenCorvidReactor_quick", cmd_quick, 793),
Function.ability(403, "Research_RavenRecalibratedExplosives_quick", cmd_quick, 803),
Function.ability(404, "Research_ShadowStrike_quick", cmd_quick, 2720),
Function.ability(405, "Research_Stimpack_quick", cmd_quick, 730),
Function.ability(406, "Research_TerranInfantryArmor_quick", cmd_quick, 3697),
Function.ability(407, "Research_TerranInfantryArmorLevel1_quick", cmd_quick, 656, 3697),
Function.ability(408, "Research_TerranInfantryArmorLevel2_quick", cmd_quick, 657, 3697),
Function.ability(409, "Research_TerranInfantryArmorLevel3_quick", cmd_quick, 658, 3697),
Function.ability(410, "Research_TerranInfantryWeapons_quick", cmd_quick, 3698),
Function.ability(411, "Research_TerranInfantryWeaponsLevel1_quick", cmd_quick, 652, 3698),
Function.ability(412, "Research_TerranInfantryWeaponsLevel2_quick", cmd_quick, 653, 3698),
Function.ability(413, "Research_TerranInfantryWeaponsLevel3_quick", cmd_quick, 654, 3698),
Function.ability(414, "Research_TerranShipWeapons_quick", cmd_quick, 3699),
Function.ability(415, "Research_TerranShipWeaponsLevel1_quick", cmd_quick, 861, 3699),
Function.ability(416, "Research_TerranShipWeaponsLevel2_quick", cmd_quick, 862, 3699),
Function.ability(417, "Research_TerranShipWeaponsLevel3_quick", cmd_quick, 863, 3699),
Function.ability(418, "Research_TerranStructureArmorUpgrade_quick", cmd_quick, 651),
Function.ability(419, "Research_TerranVehicleAndShipPlating_quick", cmd_quick, 3700),
Function.ability(420, "Research_TerranVehicleAndShipPlatingLevel1_quick", cmd_quick, 864, 3700),
Function.ability(421, "Research_TerranVehicleAndShipPlatingLevel2_quick", cmd_quick, 865, 3700),
Function.ability(422, "Research_TerranVehicleAndShipPlatingLevel3_quick", cmd_quick, 866, 3700),
Function.ability(423, "Research_TerranVehicleWeapons_quick", cmd_quick, 3701),
Function.ability(424, "Research_TerranVehicleWeaponsLevel1_quick", cmd_quick, 855, 3701),
Function.ability(425, "Research_TerranVehicleWeaponsLevel2_quick", cmd_quick, 856, 3701),
Function.ability(426, "Research_TerranVehicleWeaponsLevel3_quick", cmd_quick, 857, 3701),
Function.ability(427, "Research_TunnelingClaws_quick", cmd_quick, 217),
Function.ability(428, "Research_WarpGate_quick", cmd_quick, 1568),
Function.ability(429, "Research_ZergFlyerArmor_quick", cmd_quick, 3702),
Function.ability(430, "Research_ZergFlyerArmorLevel1_quick", cmd_quick, 1315, 3702),
Function.ability(431, "Research_ZergFlyerArmorLevel2_quick", cmd_quick, 1316, 3702),
Function.ability(432, "Research_ZergFlyerArmorLevel3_quick", cmd_quick, 1317, 3702),
Function.ability(433, "Research_ZergFlyerAttack_quick", cmd_quick, 3703),
Function.ability(434, "Research_ZergFlyerAttackLevel1_quick", cmd_quick, 1312, 3703),
Function.ability(435, "Research_ZergFlyerAttackLevel2_quick", cmd_quick, 1313, 3703),
Function.ability(436, "Research_ZergFlyerAttackLevel3_quick", cmd_quick, 1314, 3703),
Function.ability(437, "Research_ZergGroundArmor_quick", cmd_quick, 3704),
Function.ability(438, "Research_ZergGroundArmorLevel1_quick", cmd_quick, 1189, 3704),
Function.ability(439, "Research_ZergGroundArmorLevel2_quick", cmd_quick, 1190, 3704),
Function.ability(440, "Research_ZergGroundArmorLevel3_quick", cmd_quick, 1191, 3704),
Function.ability(441, "Research_ZergMeleeWeapons_quick", cmd_quick, 3705),
Function.ability(442, "Research_ZergMeleeWeaponsLevel1_quick", cmd_quick, 1186, 3705),
Function.ability(443, "Research_ZergMeleeWeaponsLevel2_quick", cmd_quick, 1187, 3705),
Function.ability(444, "Research_ZergMeleeWeaponsLevel3_quick", cmd_quick, 1188, 3705),
Function.ability(445, "Research_ZergMissileWeapons_quick", cmd_quick, 3706),
Function.ability(446, "Research_ZergMissileWeaponsLevel1_quick", cmd_quick, 1192, 3706),
Function.ability(447, "Research_ZergMissileWeaponsLevel2_quick", cmd_quick, 1193, 3706),
Function.ability(448, "Research_ZergMissileWeaponsLevel3_quick", cmd_quick, 1194, 3706),
Function.ability(449, "Research_ZerglingAdrenalGlands_quick", cmd_quick, 1252),
Function.ability(450, "Research_ZerglingMetabolicBoost_quick", cmd_quick, 1253),
Function.ability(451, "Smart_screen", cmd_screen, 1),
Function.ability(452, "Smart_minimap", cmd_minimap, 1),
Function.ability(453, "Stop_quick", cmd_quick, 3665),
Function.ability(454, "Stop_Building_quick", cmd_quick, 2057, 3665),
Function.ability(455, "Stop_Redirect_quick", cmd_quick, 1691, 3665),
Function.ability(456, "Stop_Stop_quick", cmd_quick, 4, 3665),
Function.ability(457, "Train_Adept_quick", cmd_quick, 922),
Function.ability(458, "Train_Baneling_quick", cmd_quick, 80),
Function.ability(459, "Train_Banshee_quick", cmd_quick, 621),
Function.ability(460, "Train_Battlecruiser_quick", cmd_quick, 623),
Function.ability(461, "Train_Carrier_quick", cmd_quick, 948),
Function.ability(462, "Train_Colossus_quick", cmd_quick, 978),
Function.ability(463, "Train_Corruptor_quick", cmd_quick, 1353),
Function.ability(464, "Train_Cyclone_quick", cmd_quick, 597),
Function.ability(465, "Train_DarkTemplar_quick", cmd_quick, 920),
Function.ability(466, "Train_Disruptor_quick", cmd_quick, 994),
Function.ability(467, "Train_Drone_quick", cmd_quick, 1342),
Function.ability(468, "Train_Ghost_quick", cmd_quick, 562),
Function.ability(469, "Train_Hellbat_quick", cmd_quick, 596),
Function.ability(470, "Train_Hellion_quick", cmd_quick, 595),
Function.ability(471, "Train_HighTemplar_quick", cmd_quick, 919),
Function.ability(472, "Train_Hydralisk_quick", cmd_quick, 1345),
Function.ability(473, "Train_Immortal_quick", cmd_quick, 979),
Function.ability(474, "Train_Infestor_quick", cmd_quick, 1352),
Function.ability(475, "Train_Liberator_quick", cmd_quick, 626),
Function.ability(476, "Train_Marauder_quick", cmd_quick, 563),
Function.ability(477, "Train_Marine_quick", cmd_quick, 560),
Function.ability(478, "Train_Medivac_quick", cmd_quick, 620),
Function.ability(479, "Train_MothershipCore_quick", cmd_quick, 1853),
Function.ability(480, "Train_Mutalisk_quick", cmd_quick, 1346),
Function.ability(481, "Train_Observer_quick", cmd_quick, 977),
Function.ability(482, "Train_Oracle_quick", cmd_quick, 954),
Function.ability(483, "Train_Overlord_quick", cmd_quick, 1344),
Function.ability(484, "Train_Phoenix_quick", cmd_quick, 946),
Function.ability(485, "Train_Probe_quick", cmd_quick, 1006),
Function.ability(486, "Train_Queen_quick", cmd_quick, 1632),
Function.ability(487, "Train_Raven_quick", cmd_quick, 622),
Function.ability(488, "Train_Reaper_quick", cmd_quick, 561),
Function.ability(489, "Train_Roach_quick", cmd_quick, 1351),
Function.ability(490, "Train_SCV_quick", cmd_quick, 524),
Function.ability(491, "Train_Sentry_quick", cmd_quick, 921),
Function.ability(492, "Train_SiegeTank_quick", cmd_quick, 591),
Function.ability(493, "Train_Stalker_quick", cmd_quick, 917),
Function.ability(494, "Train_SwarmHost_quick", cmd_quick, 1356),
Function.ability(495, "Train_Tempest_quick", cmd_quick, 955),
Function.ability(496, "Train_Thor_quick", cmd_quick, 594),
Function.ability(497, "Train_Ultralisk_quick", cmd_quick, 1348),
Function.ability(498, "Train_VikingFighter_quick", cmd_quick, 624),
Function.ability(499, "Train_Viper_quick", cmd_quick, 1354),
Function.ability(500, "Train_VoidRay_quick", cmd_quick, 950),
Function.ability(501, "Train_WarpPrism_quick", cmd_quick, 976),
Function.ability(502, "Train_WidowMine_quick", cmd_quick, 614),
Function.ability(503, "Train_Zealot_quick", cmd_quick, 916),
Function.ability(504, "Train_Zergling_quick", cmd_quick, 1343),
Function.ability(505, "TrainWarp_Adept_screen", cmd_screen, 1419),
Function.ability(506, "TrainWarp_DarkTemplar_screen", cmd_screen, 1417),
Function.ability(507, "TrainWarp_HighTemplar_screen", cmd_screen, 1416),
Function.ability(508, "TrainWarp_Sentry_screen", cmd_screen, 1418),
Function.ability(509, "TrainWarp_Stalker_screen", cmd_screen, 1414),
Function.ability(510, "TrainWarp_Zealot_screen", cmd_screen, 1413),
Function.ability(511, "UnloadAll_quick", cmd_quick, 3664),
Function.ability(512, "UnloadAll_Bunker_quick", cmd_quick, 408, 3664),
Function.ability(513, "UnloadAll_CommandCenter_quick", cmd_quick, 413, 3664),
Function.ability(514, "UnloadAll_NydasNetwork_quick", cmd_quick, 1438, 3664),
Function.ability(515, "UnloadAll_NydusWorm_quick", cmd_quick, 2371, 3664),
Function.ability(516, "UnloadAllAt_screen", cmd_screen, 3669),
Function.ability(517, "UnloadAllAt_minimap", cmd_minimap, 3669),
Function.ability(518, "UnloadAllAt_Medivac_screen", cmd_screen, 396, 3669),
Function.ability(519, "UnloadAllAt_Medivac_minimap", cmd_minimap, 396, 3669),
Function.ability(520, "UnloadAllAt_Overlord_screen", cmd_screen, 1408, 3669),
Function.ability(521, "UnloadAllAt_Overlord_minimap", cmd_minimap, 1408, 3669),
Function.ability(522, "UnloadAllAt_WarpPrism_screen", cmd_screen, 913, 3669),
Function.ability(523, "UnloadAllAt_WarpPrism_minimap", cmd_minimap, 913, 3669),
])
# pylint: enable=line-too-long
# Some indexes to support features.py and action conversion.
ABILITY_IDS = collections.defaultdict(set) # {ability_id: {funcs}}
for func in FUNCTIONS:
if func.ability_id >= 0:
ABILITY_IDS[func.ability_id].add(func)
ABILITY_IDS = {k: frozenset(v) for k, v in six.iteritems(ABILITY_IDS)}
FUNCTIONS_AVAILABLE = {f.id: f for f in FUNCTIONS if f.avail_fn}
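# Illustrative sketch added by the editor, not part of the original pysc2 source:
# the ABILITY_IDS index built above maps a raw ability id to every Function that
# exposes it, so the generic Attack ability id (3674, shared by many entries in the
# table above) can be resolved like this.
def _example_attack_functions():
    """Return the frozenset of Function entries registered under ability id 3674."""
    return ABILITY_IDS.get(3674, frozenset())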
class FunctionCall(collections.namedtuple(
"FunctionCall", ["function", "arguments"])):
"""Represents a function call action.
Attributes:
function: Store the function id, eg 2 for select_point.
arguments: The list of arguments for that function, each being a list of
ints. For select_point this could be: [[0], [23, 38]].
"""
__slots__ = ()
@classmethod
def all_arguments(cls, function, arguments):
"""Helper function for creating `FunctionCall`s with `Arguments`.
Args:
function: The value to store for the action function.
arguments: The values to store for the arguments of the action. Can either
be an `Arguments` object, a `dict`, or an iterable. If a `dict` or an
iterable is provided, the values will be unpacked into an `Arguments`
object.
Returns:
A new `FunctionCall` instance.
"""
if isinstance(arguments, dict):
arguments = Arguments(**arguments)
elif not isinstance(arguments, Arguments):
arguments = Arguments(*arguments)
return cls(function, arguments)
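# Illustrative sketch added by the editor, not part of the original pysc2 source:
# FunctionCall is a plain (function, arguments) namedtuple, so the select_point
# example given in the class docstring above can be constructed directly.
def _example_select_point_call():
    """Build the select_point call described in the FunctionCall docstring."""
    return FunctionCall(2, [[0], [23, 38]])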
class ValidActions(collections.namedtuple(
"ValidActions", ["types", "functions"])):
"""The set of types and functions that are valid for an agent to use.
Attributes:
types: A namedtuple of the types that the functions require. Unlike TYPES
above, this includes the sizes for screen and minimap.
functions: A namedtuple of all the functions.
"""
__slots__ = ()
| 2.109375 | 2 |
pywick/meters/aucmeter.py | ashishpatel26/pywick | 2 | 3529 | <reponame>ashishpatel26/pywick
import numbers
from . import meter
import numpy as np
import torch
class AUCMeter(meter.Meter):
"""
The AUCMeter measures the area under the receiver-operating characteristic
(ROC) curve for binary classification problems. The area under the curve (AUC)
can be interpreted as the probability that, given a randomly selected positive
example and a randomly selected negative example, the positive example is
assigned a higher score by the classification model than the negative example.
The AUCMeter is designed to operate on one-dimensional Tensors `output`
and `target`, where (1) the `output` contains model output scores that ought to
be higher when the model is more convinced that the example should be positively
labeled, and smaller when the model believes the example should be negatively
labeled (for instance, the output of a sigmoid function); and (2) the `target`
contains only values 0 (for negative examples) and 1 (for positive examples).
"""
def __init__(self):
super(AUCMeter, self).__init__()
self.reset()
def reset(self):
self.scores = torch.DoubleTensor(torch.DoubleStorage()).numpy()
self.targets = torch.LongTensor(torch.LongStorage()).numpy()
def add(self, output, target):
if torch.is_tensor(output):
output = output.cpu().squeeze().numpy()
if torch.is_tensor(target):
target = target.cpu().squeeze().numpy()
elif isinstance(target, numbers.Number):
target = np.asarray([target])
assert np.ndim(output) == 1, \
'wrong output size (1D expected)'
assert np.ndim(target) == 1, \
'wrong target size (1D expected)'
assert output.shape[0] == target.shape[0], \
'number of outputs and targets does not match'
assert np.all(np.add(np.equal(target, 1), np.equal(target, 0))), \
'targets should be binary (0, 1)'
self.scores = np.append(self.scores, output)
self.targets = np.append(self.targets, target)
def value(self):
# case when number of elements added are 0
if self.scores.shape[0] == 0:
return 0.5
# sorting the arrays
scores, sortind = torch.sort(torch.from_numpy(self.scores), dim=0, descending=True)
scores = scores.numpy()
sortind = sortind.numpy()
# creating the roc curve
tpr = np.zeros(shape=(scores.size + 1), dtype=np.float64)
fpr = np.zeros(shape=(scores.size + 1), dtype=np.float64)
for i in range(1, scores.size + 1):
if self.targets[sortind[i - 1]] == 1:
tpr[i] = tpr[i - 1] + 1
fpr[i] = fpr[i - 1]
else:
tpr[i] = tpr[i - 1]
fpr[i] = fpr[i - 1] + 1
tpr /= (self.targets.sum() * 1.0)
fpr /= ((self.targets - 1.0).sum() * -1.0)
# calculating area under curve using trapezoidal rule
n = tpr.shape[0]
h = fpr[1:n] - fpr[0:n - 1]
sum_h = np.zeros(fpr.shape)
sum_h[0:n - 1] = h
sum_h[1:n] += h
area = (sum_h * tpr).sum() / 2.0
return (area, tpr, fpr)
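# Illustrative sketch added by the editor, not part of the original pywick source:
# feeding one small batch of scores/targets through AUCMeter and reading back the
# (auc, tpr, fpr) triple. The numbers are made up; perfectly separated scores give
# an AUC of 1.0.
def _example_auc():
    meter = AUCMeter()
    meter.add(torch.tensor([0.9, 0.2, 0.7, 0.4]), torch.tensor([1, 0, 1, 0]))
    area, tpr, fpr = meter.value()
    return area  # == 1.0 for this toy batch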
| 3.15625 | 3 |
homeassistant/components/zha/core/channels/lighting.py | liangleslie/core | 30,023 | 3530 | """Lighting channels module for Zigbee Home Automation."""
from __future__ import annotations
from contextlib import suppress
from zigpy.zcl.clusters import lighting
from .. import registries
from ..const import REPORT_CONFIG_DEFAULT
from .base import ClientChannel, ZigbeeChannel
@registries.ZIGBEE_CHANNEL_REGISTRY.register(lighting.Ballast.cluster_id)
class Ballast(ZigbeeChannel):
"""Ballast channel."""
@registries.CLIENT_CHANNELS_REGISTRY.register(lighting.Color.cluster_id)
class ColorClientChannel(ClientChannel):
"""Color client channel."""
@registries.BINDABLE_CLUSTERS.register(lighting.Color.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(lighting.Color.cluster_id)
class ColorChannel(ZigbeeChannel):
"""Color channel."""
CAPABILITIES_COLOR_XY = 0x08
CAPABILITIES_COLOR_TEMP = 0x10
UNSUPPORTED_ATTRIBUTE = 0x86
REPORT_CONFIG = (
{"attr": "current_x", "config": REPORT_CONFIG_DEFAULT},
{"attr": "current_y", "config": REPORT_CONFIG_DEFAULT},
{"attr": "color_temperature", "config": REPORT_CONFIG_DEFAULT},
)
MAX_MIREDS: int = 500
MIN_MIREDS: int = 153
ZCL_INIT_ATTRS = {
"color_mode": False,
"color_temp_physical_min": True,
"color_temp_physical_max": True,
"color_capabilities": True,
"color_loop_active": False,
}
@property
def color_capabilities(self) -> int:
"""Return color capabilities of the light."""
with suppress(KeyError):
return self.cluster["color_capabilities"]
if self.cluster.get("color_temperature") is not None:
return self.CAPABILITIES_COLOR_XY | self.CAPABILITIES_COLOR_TEMP
return self.CAPABILITIES_COLOR_XY
@property
def color_mode(self) -> int | None:
"""Return cached value of the color_mode attribute."""
return self.cluster.get("color_mode")
@property
def color_loop_active(self) -> int | None:
"""Return cached value of the color_loop_active attribute."""
return self.cluster.get("color_loop_active")
@property
def color_temperature(self) -> int | None:
"""Return cached value of color temperature."""
return self.cluster.get("color_temperature")
@property
def current_x(self) -> int | None:
"""Return cached value of the current_x attribute."""
return self.cluster.get("current_x")
@property
def current_y(self) -> int | None:
"""Return cached value of the current_y attribute."""
return self.cluster.get("current_y")
@property
def min_mireds(self) -> int:
"""Return the coldest color_temp that this channel supports."""
return self.cluster.get("color_temp_physical_min", self.MIN_MIREDS)
@property
def max_mireds(self) -> int:
"""Return the warmest color_temp that this channel supports."""
return self.cluster.get("color_temp_physical_max", self.MAX_MIREDS)
| 2.15625 | 2 |
pf/queue.py | PiRAT4/py-pf | 0 | 3531 | """Classes to represent Packet Filter's queueing schedulers and statistics."""
import pf._struct
from pf._base import PFObject
from pf.constants import *
from pf._utils import rate2str
__all__ = ["ServiceCurve",
"FlowQueue",
"PFQueue",
"PFQueueStats"]
class ServiceCurve(PFObject):
""" """
_struct_type = pf._struct.pf_queue_scspec
def __init__(self, bandwidth, burst=0, time=0):
""" """
if isinstance(bandwidth, pf._struct.pf_queue_scspec):
self._from_struct(bandwidth)
else:
self.bandwidth = bandwidth
self.burst = burst
self.time = time
def _from_struct(self, sc):
""" """
self.bandwidth = self._get_bandwidth(sc.m2)
self.burst = self._get_bandwidth(sc.m1)
self.time = sc.d
def _to_struct(self):
""" """
sc = pf._struct.pf_queue_scspec()
if (isinstance(self.bandwidth, basestring) and
self.bandwidth.endswith("%")):
sc.m2.percent = int(self.bandwidth[:-1])
else:
sc.m2.absolute = self.bandwidth
if (isinstance(self.burst, basestring) and
self.burst.endswith("%")):
sc.m1.percent = int(self.burst[:-1])
else:
sc.m1.absolute = self.burst
sc.d = self.time
return sc
def _get_bandwidth(self, bw):
""" """
return "{}%".format(bw.percent) if bw.percent else bw.absolute
def _str_bandwidth(self, bw):
""" """
return bw if isinstance(bw, basestring) else rate2str(bw)
def _to_string(self):
""" """
s = self._str_bandwidth(self.bandwidth)
if self.time:
s += " burst {}".format(self._str_bandwidth(self.burst))
s += " for {.time}ms".format(self)
return s
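# Illustrative sketch added by the editor, not part of the original py-pf source:
# the two bandwidth notations ServiceCurve accepts -- an absolute rate in bits per
# second, or a percentage string stored as a percent of the parent queue.
def _example_service_curves():
    absolute = ServiceCurve(10000000)                       # 10 Mbit/s
    relative = ServiceCurve("50%", burst="75%", time=200)   # percent-based curve, 200ms burst
    return absolute, relative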
class FlowQueue(PFObject):
""" """
_struct_type = pf._struct.pf_queue_fqspec
def __init__(self, flows, quantum=0, target=0, interval=0):
""" """
if isinstance(flows, pf._struct.pf_queue_fqspec):
self._from_struct(flows)
else:
self.flows = flows
self.quantum = quantum
self.target = target * 1000000
self.interval = interval * 1000000
def _from_struct(self, fq):
""" """
self.flows = fq.flows
self.quantum = fq.quantum
self.target = fq.target
self.interval = fq.interval
def _to_struct(self):
""" """
fq = pf._struct.pf_queue_fqspec()
fq.flows = self.flows
fq.quantum = self.quantum
fq.target = self.target
fq.interval = self.interval
return fq
def _to_string(self):
""" """
s = "flows {.flows}".format(self)
if self.quantum:
s += " quantum {.quantum}".format(self)
if self.interval:
s += " interval {}ms".format(self.interval / 1000000)
if self.target:
s += " target {}ms".format(self.target / 1000000)
return s
class PFQueue(PFObject):
""" """
_struct_type = pf._struct.pf_queuespec
def __init__(self, queue=None, **kw):
""" """
if isinstance(queue, basestring):
queue = pf._struct.pf_queuespec(qname=queue, qlimit=DEFAULT_QLIMIT)
elif queue is None:
queue = pf._struct.pf_queuespec()
super(PFQueue, self).__init__(queue, **kw)
self.stats = PFQueueStats()
def _from_struct(self, q):
""" """
self.qname = q.qname
self.parent = q.parent
self.ifname = q.ifname
self.flags = q.flags
self.qlimit = q.qlimit
self.qid = q.qid
self.parent_qid = q.parent_qid
self.realtime = ServiceCurve(q.realtime)
self.linkshare = ServiceCurve(q.linkshare)
self.upperlimit = ServiceCurve(q.upperlimit)
self.flowqueue = FlowQueue(q.flowqueue)
def _to_struct(self):
""" """
q = pf._struct.pf_queuespec()
q.qname = self.qname
q.parent = self.parent
q.ifname = self.ifname
q.flags = self.flags
q.qlimit = self.qlimit
q.qid = self.qid
q.parent_qid = self.parent_qid
q.realtime = self.realtime._to_struct()
q.linkshare = self.linkshare._to_struct()
q.upperlimit = self.upperlimit._to_struct()
q.flowqueue = self.flowqueue._to_struct()
return q
def _to_string(self):
""" """
s = "queue {.qname}".format(self)
if self.parent and not self.parent.startswith("_"):
s += " parent {.parent}".format(self)
elif self.ifname:
s += " on {.ifname}".format(self)
if self.flags & PFQS_FLOWQUEUE:
s += " {.flowqueue}".format(self)
if self.linkshare.bandwidth or self.linkshare.burst:
s += " bandwidth {}".format(self.linkshare)
if self.realtime.bandwidth:
s += ", min {}".format(self.realtime)
if self.upperlimit.bandwidth:
s += ", max {}".format(self.upperlimit)
if self.flags & PFQS_DEFAULT:
s += " default"
if self.qlimit:
s += " qlimit {.qlimit}".format(self)
return s
class PFQueueStats(PFObject):
""" """
_struct_type = pf._struct.hfsc_class_stats
def __init__(self, stats=None):
""" """
if stats is None:
stats = pf._struct.hfsc_class_stats()
super(PFQueueStats, self).__init__(stats)
def _from_struct(self, s):
""" """
self.qlength = s.qlength
self.qlimit = s.qlimit
self.packets = (s.xmit_cnt.packets, s.drop_cnt.packets)
self.bytes = (s.xmit_cnt.bytes, s.drop_cnt.bytes)
def _to_string(self):
""" """
s = " [ pkts: {0.packets[0]:10} bytes: {0.bytes[0]:10} " + \
"dropped pkts: {0.packets[1]:6} bytes: {0.bytes[1]:6} ]\n" + \
" [ qlength: {0.qlength:3}/{0.qlimit:3} ]"
return s.format(self)
| 2.65625 | 3 |
sdk/python/pulumi_azure_native/labservices/v20181015/__init__.py | pulumi-bot/pulumi-azure-native | 0 | 3532 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .environment import *
from .environment_setting import *
from .gallery_image import *
from .get_environment import *
from .get_environment_setting import *
from .get_gallery_image import *
from .get_global_user_environment import *
from .get_global_user_operation_batch_status import *
from .get_global_user_operation_status import *
from .get_global_user_personal_preferences import *
from .get_lab import *
from .get_lab_account import *
from .get_lab_account_regional_availability import *
from .get_user import *
from .lab import *
from .lab_account import *
from .list_global_user_environments import *
from .list_global_user_labs import *
from .user import *
from ._inputs import *
from . import outputs
def _register_module():
import pulumi
from ... import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:labservices/v20181015:Environment":
return Environment(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:labservices/v20181015:EnvironmentSetting":
return EnvironmentSetting(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:labservices/v20181015:GalleryImage":
return GalleryImage(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:labservices/v20181015:Lab":
return Lab(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:labservices/v20181015:LabAccount":
return LabAccount(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:labservices/v20181015:User":
return User(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "labservices/v20181015", _module_instance)
_register_module()
| 1.570313 | 2 |
servicedirectory/src/sd-api/users/tests/tests_serializers.py | ealogar/servicedirectory | 0 | 3533 | '''
(c) Copyright 2013 Telefonica, I+D. Printed in Spain (Europe). All Rights
Reserved.
The copyright to the software program(s) is property of Telefonica I+D.
The program(s) may be used and or copied only with the express written
consent of Telefonica I+D or in accordance with the terms and conditions
stipulated in the agreement/contract under which the program(s) have
been supplied.
'''
from unittest import TestCase
from mock import MagicMock, patch
from commons.json_schema_validator.schema_reader import SchemaField
from commons.json_schema_validator.schema_reader import SchemaReader
from users.serializers import UserCollectionSerializer
class UserSerializerTests(TestCase):
def setUp(self):
super(UserSerializerTests, self).setUp()
mock_schema_instance = MagicMock(name='mock_schema_instance')
mock_schema_instance.return_value = [
SchemaField(name='username', field_type='string', required=True),
SchemaField(name='password', field_type='string', required=True),
SchemaField(name='is_admin', field_type='boolean', required=True, default=False)
]
mock_get_schema_fields = MagicMock(name='mock_get_schema')
mock_get_schema_fields.return_value = mock_schema_instance
# mock schema instance
schema_reader = SchemaReader()
self.patcher_validate = patch.object(schema_reader, 'validate_object') # @UndefinedVariable
self.patcher_schema = patch.object(schema_reader, # @UndefinedVariable
'get_schema_fields', mock_schema_instance)
self.patcher_schema.start()
self.patcher_validate.start()
def tearDown(self):
self.patcher_schema.stop()
self.patcher_validate.stop()
def test_deserialize_user_should_work(self):
# We need to do the import here in order for the generic patches to work
serializer = UserCollectionSerializer(data={'username': 'user', 'password': '<PASSWORD>'})
self.assertEquals(True, serializer.is_valid(), "Serialization invalid")
def test_deserialize_user_invalid_is_admin_should_work(self):
# We need to do the import here in order for the generic patches to work
serializer = UserCollectionSerializer(data={'username': 'user', 'password': '<PASSWORD>', 'is_admin': 'si'})
self.assertEquals(False, serializer.is_valid(), "Serialization invalid")
def test_deserialize_user_empty_user_should_give_error_invalid(self):
# We need to do the import here in order for the generic patches to work
serializer = UserCollectionSerializer(data={'username': '', 'password': '<PASSWORD>'})
self.assertEquals(False, serializer.is_valid(), "Serialization invalid")
self.assertEquals(u"invalid",
serializer.errors['username'][0],
'Invalid error message')
def test_deserialize_user_null_user_should_give_required_error(self):
# We need to do the import here in order for the generic patches to work
serializer = UserCollectionSerializer(data={'password': '<PASSWORD>'})
self.assertEquals(False, serializer.is_valid(), "Serialization invalid")
self.assertEquals(u"required",
serializer.errors['username'][0],
'Invalid error message')
def test_deserialize_user_large_user_ne_should_give_invalid_error(self):
# We need to do the import here in order for the generic patches to work
serializer = UserCollectionSerializer(data={'username': 'a' * 600, 'password': '<PASSWORD>'})
self.assertEquals(False, serializer.is_valid(), "Serialization invalid")
self.assertEquals(u"invalid",
serializer.errors['username'][0],
'Invalid error message')
def test_deserialize_user_with_invalid_origins_should_give_error(self):
serializer = UserCollectionSerializer(data={'username': 'user', 'password': '<PASSWORD>', 'origins': ["????"]})
self.assertEquals(False, serializer.is_valid())
self.assertEquals(u"invalid",
serializer.errors['origins'][0],
'Invalid error message')
serializer = UserCollectionSerializer(data={'username': 'user', 'password': '<PASSWORD>', 'origins': [" tugo"]})
self.assertEquals(False, serializer.is_valid())
self.assertEquals(u"invalid",
serializer.errors['origins'][0],
'Invalid error message')
def test_deserialize_user_with_invalid_classes_should_give_error(self):
serializer = UserCollectionSerializer(data={'username': 'user', 'password': '<PASSWORD>', 'classes': ["????"]})
self.assertEquals(False, serializer.is_valid())
self.assertEquals(u"invalid",
serializer.errors['classes'][0],
'Invalid error message')
serializer = UserCollectionSerializer(data={'username': 'user', 'password': '<PASSWORD>', 'classes': [" sms"]})
self.assertEquals(False, serializer.is_valid())
self.assertEquals(u"invalid",
serializer.errors['classes'][0],
'Invalid error message')
def test_deserialize_user_invalid_username_should_give_error(self):
# We need to do the import here in order for the generic patches to work
serializer = UserCollectionSerializer(data={'username': 'User.user', 'password': '<PASSWORD>'})
self.assertEquals(False, serializer.is_valid(), "Serialization invalid")
self.assertEquals(u"invalid",
serializer.errors['username'][0],
'Invalid error message')
def test_deserialize_user_invalid_is_admin_should_give_error(self):
# We need to do the import here in order for the generic patches to work
serializer = UserCollectionSerializer(data={'username': 'usera', 'password': '<PASSWORD>', 'is_admin': 0})
self.assertEquals(False, serializer.is_valid(), "Serialization invalid")
self.assertEquals(u"invalid",
serializer.errors['is_admin'][0],
'Invalid error message')
| 1.992188 | 2 |
Web/notifyXAPI/app/src/users/views.py | abs0lut3pwn4g3/RootersCTF2019-challenges | 14 | 3534 | ''' User views '''
from datetime import timedelta
from flask import request, jsonify, make_response, redirect, json, render_template
from flask_jwt_extended import (create_access_token, jwt_required)
from flask_restful import Resource
from flask_login import login_user, current_user
from sqlalchemy.exc import IntegrityError, InvalidRequestError
from src import db, api
from .models import User
from .schemas import UserSchema
class UserLoginResource(Resource):
model = User
schema = UserSchema
def get(self):
return make_response(render_template('login.html'))
def post(self):
if request.json:
data = request.json
user = self.model.query.filter(self.model.email == data['email']).first()
if user and self.model.check_password(user, data['password']):
expires = timedelta(days=365)
user = UserSchema(only=('id', 'email', 'is_admin')).dump(user).data
return make_response(
jsonify({'id': user,
'authentication_token': create_access_token(identity=user['id'], expires_delta=expires)}), 200)
else:
return make_response(jsonify({"error": {"code": 400, "msg": "No such user/wrong password."}}), 400)
else:
data = request.form
user = self.model.query.filter(self.model.email == data['email']).first()
if user and self.model.check_password(user, data['password']) and login_user(user):
return make_response(redirect('/admin/', 302))
else:
return make_response(redirect('/api/v1/login', 403))
class UserRegisterResource(Resource):
model = User
schema = UserSchema
def post(self):
data = request.json
if not data:
return make_response(jsonify({'error': 'No data'}), 400)
user = User.query.filter(User.email == data['email']).first()
if user:
return make_response(jsonify({'error': 'User already exists'}), 403)
user, errors = self.schema().load(data)
if errors:
return make_response(jsonify(errors), 400)
try:
user.set_password(data['password'])
db.session.add(user)
db.session.commit()
except (IntegrityError, InvalidRequestError) as e:
print(e)
db.session.rollback()
return make_response(jsonify(error={'code': 400 }), 400)
expires = timedelta(days=365)
return make_response(
jsonify(created_user={'id': user.id,
'user': self.schema(only=('id', 'email', 'is_admin')).dump(user).data,
'authentication_token': create_access_token(identity=user.id,
expires_delta=expires)}), 200)
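# Illustrative sketch added by the editor, not part of the original challenge code:
# exercising the register endpoint with Flask's test client. The "/api/v1" prefix is
# inferred from the redirect in UserLoginResource above and may differ; `app` is the
# Flask application object created elsewhere in the project.
def _example_register(app):
    client = app.test_client()
    return client.post(
        "/api/v1/register/",
        json={"email": "player@example.com", "password": "not-a-real-secret"},
    )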
api.add_resource(UserLoginResource, '/login/', endpoint='login')
api.add_resource(UserRegisterResource, '/register/', endpoint='register') | 2.53125 | 3 |
querybuilder/tests/window_tests.py | wesokes/django-query-builder | 110 | 3535 | <reponame>wesokes/django-query-builder<filename>querybuilder/tests/window_tests.py<gh_stars>100-1000
from querybuilder.fields import (
RankField, RowNumberField, DenseRankField, PercentRankField, CumeDistField, NTileField, LagField,
LeadField, FirstValueField, LastValueField, NthValueField, NumStdDevField
)
from querybuilder.query import QueryWindow, Query
from querybuilder.tests.models import Order
from querybuilder.tests.query_tests import QueryTestCase, get_comparison_str
class QueryWindowTest(QueryTestCase):
def test_query_window(self):
query_window = QueryWindow()
query_str = query_window.get_sql()
expected_query = 'OVER ()'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_query_window_partition(self):
query_window = QueryWindow().partition_by('field_one')
query_str = query_window.get_sql()
expected_query = 'OVER (PARTITION BY field_one)'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_query_window_order(self):
query_window = QueryWindow().order_by('field_one')
query_str = query_window.get_sql()
expected_query = 'OVER (ORDER BY field_one ASC)'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_query_window_partition_order(self):
query_window = QueryWindow().partition_by(
'field_one'
).order_by(
'field_one'
)
query_str = query_window.get_sql()
expected_query = 'OVER (PARTITION BY field_one ORDER BY field_one ASC)'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_query_window_partition_order_many(self):
query_window = QueryWindow().partition_by(
'field_one'
).partition_by(
'field_two'
).order_by(
'field_one'
).order_by(
'-field_two'
)
query_str = query_window.get_sql()
expected_query = 'OVER (PARTITION BY field_one, field_two ORDER BY field_one ASC, field_two DESC)'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
class WindowFunctionTest(QueryTestCase):
def test_rank_no_over(self):
query = Query().from_table(
table=Order,
fields=[
RankField()
]
)
query_str = query.get_sql()
expected_query = 'SELECT RANK() AS "rank" FROM querybuilder_tests_order'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_rank_over(self):
query = Query().from_table(
table=Order,
fields=[
RankField(
over=QueryWindow()
)
]
)
query_str = query.get_sql()
expected_query = 'SELECT RANK() OVER () AS "rank" FROM querybuilder_tests_order'
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_rank_over_order(self):
query = Query().from_table(
table=Order,
fields=[
'id',
RankField(
over=QueryWindow().order_by(
'id'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.id, RANK() OVER (ORDER BY id ASC) AS "rank" FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_rank_over_partition(self):
query = Query().from_table(
table=Order,
fields=[
'id',
RankField(
over=QueryWindow().partition_by(
'account_id'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.id, RANK() OVER (PARTITION BY account_id) AS "rank" FROM '
'querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_row_number(self):
query = Query().from_table(
table=Order,
fields=[
'*',
RowNumberField(
over=QueryWindow().order_by(
'-margin'
)
)
]
).order_by(
'row_number'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'ROW_NUMBER() OVER (ORDER BY margin DESC) AS "row_number" '
'FROM querybuilder_tests_order '
'ORDER BY row_number '
'ASC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_rank(self):
query = Query().from_table(
table=Order,
fields=[
'id',
RankField(
over=QueryWindow().partition_by(
'account_id'
).order_by(
'id'
)
)
]
).order_by(
'-rank'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.id, '
'RANK() OVER (PARTITION BY account_id ORDER BY id ASC) AS "rank" '
'FROM querybuilder_tests_order '
'ORDER BY rank '
'DESC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_dense_rank(self):
query = Query().from_table(
table=Order,
fields=[
'*',
DenseRankField(
over=QueryWindow().order_by(
'-margin'
)
)
]
).order_by(
'dense_rank'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'DENSE_RANK() OVER (ORDER BY margin DESC) AS "dense_rank" '
'FROM querybuilder_tests_order '
'ORDER BY dense_rank '
'ASC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_rank_percent(self):
query = Query().from_table(
table=Order,
fields=[
'*',
PercentRankField(
over=QueryWindow().order_by(
'-margin'
)
)
]
).order_by(
'percent_rank'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'PERCENT_RANK() OVER (ORDER BY margin DESC) AS "percent_rank" '
'FROM querybuilder_tests_order '
'ORDER BY percent_rank '
'ASC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_cume_dist(self):
query = Query().from_table(
table=Order,
fields=[
'*',
CumeDistField(
over=QueryWindow().order_by(
'-margin'
)
)
]
).order_by(
'cume_dist'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'CUME_DIST() OVER (ORDER BY margin DESC) AS "cume_dist" '
'FROM querybuilder_tests_order '
'ORDER BY cume_dist '
'ASC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_ntile(self):
query = Query().from_table(
table=Order,
fields=[
'*',
NTileField(
num_buckets=2,
over=QueryWindow().order_by(
'-margin'
)
)
]
).order_by(
'ntile'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'NTILE(2) OVER (ORDER BY margin DESC) AS "ntile" '
'FROM querybuilder_tests_order '
'ORDER BY ntile '
'ASC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_lag(self):
query = Query().from_table(
table=Order,
fields=[
'*',
LagField(
'margin',
over=QueryWindow().order_by(
'-margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'LAG(querybuilder_tests_order.margin, 1) OVER (ORDER BY margin DESC) AS "margin_lag" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_lag_default(self):
query = Query().from_table(
table=Order,
fields=[
'*',
LagField(
'margin',
default=0,
over=QueryWindow().order_by(
'-margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'LAG(querybuilder_tests_order.margin, 1, \'0\') OVER (ORDER BY margin DESC) AS "margin_lag" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_lead(self):
query = Query().from_table(
table=Order,
fields=[
'*',
LeadField(
'margin',
over=QueryWindow().order_by(
'-margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'LEAD(querybuilder_tests_order.margin, 1) OVER (ORDER BY margin DESC) AS "margin_lead" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_first_value(self):
query = Query().from_table(
table=Order,
fields=[
'*',
FirstValueField(
'margin',
over=QueryWindow().order_by(
'-margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'FIRST_VALUE(querybuilder_tests_order.margin) OVER (ORDER BY margin DESC) AS "margin_first_value" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_last_value(self):
query = Query().from_table(
table=Order,
fields=[
'*',
LastValueField(
'margin',
over=QueryWindow().order_by(
'margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'LAST_VALUE(querybuilder_tests_order.margin) OVER (ORDER BY margin ASC) AS "margin_last_value" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_nth_value(self):
query = Query().from_table(
table=Order,
fields=[
'*',
NthValueField(
'margin',
n=2,
over=QueryWindow().order_by(
'-margin'
)
)
]
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'NTH_VALUE(querybuilder_tests_order.margin, 2) OVER (ORDER BY margin DESC) AS "margin_nth_value" '
'FROM querybuilder_tests_order'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
def test_num_stddev(self):
query = Query().from_table(
table=Order,
fields=[
'*',
NumStdDevField(
'margin',
over=QueryWindow()
)
]
).order_by(
'-margin_num_stddev'
)
query_str = query.get_sql()
expected_query = (
'SELECT querybuilder_tests_order.*, '
'(CASE WHEN (STDDEV(querybuilder_tests_order.margin) OVER ()) <> 0 '
'THEN ((querybuilder_tests_order.margin - ('
'AVG(querybuilder_tests_order.margin) OVER ())) / (STDDEV(querybuilder_tests_order.margin) OVER ())) '
'ELSE 0 '
'END) '
'AS "margin_num_stddev" '
'FROM querybuilder_tests_order '
'ORDER BY margin_num_stddev '
'DESC'
)
self.assertEqual(query_str, expected_query, get_comparison_str(query_str, expected_query))
| 2.1875 | 2 |
emoji/coffee.py | wbprice/ojimoji | 0 | 3536 | <reponame>wbprice/ojimoji
import numpy
h = .25
s = 1
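# 16x16 binary pixel grid for the coffee-cup emoji (1 = foreground pixel, 0 = background)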
bitmap = numpy.array([
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,1,1,1,1,1,1,1,1,1,0,0,0,0,0],
[0,0,1,1,1,1,1,1,1,1,1,1,1,1,0,0],
[0,0,1,1,1,0,1,0,1,1,1,0,0,1,0,0],
[0,0,1,1,0,1,0,1,0,1,1,0,0,1,0,0],
[0,0,1,1,1,0,1,0,1,1,1,0,1,0,0,0],
[0,0,1,1,0,1,0,1,0,1,1,1,0,0,0,0],
[0,0,1,1,1,0,1,0,1,1,1,0,0,0,0,0],
[0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0],
[0,0,0,0,1,1,1,1,1,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]])
| 2.625 | 3 |
dumpcode/npzbdt.py | gkfthddk/keras | 0 | 3537 | import numpy as np
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
import scipy.stats as sts
import xgboost as xgb
from xiter import *
import pandas as pd
import argparse
from datetime import datetime
def timer(start_time=None):
if not start_time:
start_time = datetime.now()
return start_time
elif start_time:
thour, temp_sec = divmod((datetime.now() - start_time).total_seconds(), 3600)
tmin, tsec = divmod(temp_sec, 60)
print('\n Time taken: %i hours %i minutes and %s seconds.' % (thour, tmin, round(tsec, 2)))
parser=argparse.ArgumentParser()
parser.add_argument("--end",type=float,default=100000.,help='end ratio')
parser.add_argument("--save",type=str,default="test_",help='save name')
parser.add_argument("--network",type=str,default="rnn",help='network name on symbols/')
parser.add_argument("--right",type=str,default="/scratch/yjdata/gluon100_img",help='which train sample (qq,gg,zq,zg)')
parser.add_argument("--pt",type=int,default=200,help='pt range pt~pt*1.1')
parser.add_argument("--ptmin",type=float,default=0.,help='pt range pt~pt*1.1')
parser.add_argument("--ptmax",type=float,default=2.,help='pt range pt~pt*1.1')
parser.add_argument("--epochs",type=int,default=10,help='num epochs')
parser.add_argument("--batch_size",type=int,default=100000,help='batch_size')
parser.add_argument("--loss",type=str,default="categorical_crossentropy",help='network name on symbols/')
parser.add_argument("--gpu",type=int,default=0,help='gpu number')
parser.add_argument("--isz",type=int,default=0,help='0 or z or not')
parser.add_argument("--eta",type=float,default=0.,help='end ratio')
parser.add_argument("--etabin",type=float,default=1,help='end ratio')
parser.add_argument("--unscale",type=int,default=0,help='end ratio')
args=parser.parse_args()
import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)
batch_size=args.batch_size
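# Hyperparameter search space for the XGBoost classifier, sampled by RandomizedSearchCV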
params = {
'max_depth': sts.randint(1,6),
'learning_rate': sts.uniform(0.0010,0.500),
'n_estimators': sts.randint(10,101)
}
model=xgb.XGBClassifier(objective='binary:logistic',tree_method="gpu_hist")
if(args.isz==1):
if(args.etabin==1):
loaded=np.load("zqmixed{}pteta.npz".format(args.pt))
print("zqmixed{}pteta.npz".format(args.pt))
else:
loaded=np.load("zqmixed{}pt.npz".format(args.pt))
print("zqmixed{}pt.npz".format(args.pt))
elif(args.isz==-1):
if(args.etabin==1):
loaded=np.load("qqmixed{}pteta.npz".format(args.pt))
print("qqmixed{}pteta.npz".format(args.pt))
else:
loaded=np.load("qqmixed{}pt.npz".format(args.pt))
print("qqmixed{}pt.npz".format(args.pt))
elif(args.isz==0):
if(args.etabin==1):
if(args.unscale==1):
loaded=np.load("unscalemixed{}pteta.npz".format(args.pt))
else:
loaded=np.load("mixed{}pteta.npz".format(args.pt))
print("etabin 1")
else:
if(args.unscale==1):
loaded=np.load("unscalemixed{}pt.npz".format(args.pt))
else:
loaded=np.load("mixed{}pt.npz".format(args.pt))
print("etabin 2.4")
data=loaded["bdtset"][:,:5]
label=loaded["label"]
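# Use roughly the first 3/4 of the samples (capped at 30k) for the search and the next chunk (capped at 10k) for validation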
line=int(30000)
endline=int(40000)
if(len(label)<40000):
line=int(len(label)*3./4.)
endline=len(label)
X=data[0:line]
vx=data[line:endline]
Y=label[0:line]
vy=label[line:endline]
Y=np.array(Y)[:,0]
folds = 3
param_comb = 100
skf = KFold(n_splits=folds, shuffle = True, random_state = 173)
#skf = StratifiedKFold(n_splits=folds, shuffle = True, random_state = 1001)
random_search = RandomizedSearchCV(model, param_distributions=params, n_iter=param_comb, scoring='log_loss', n_jobs=6, cv=skf.split(X,Y), verbose=3, random_state=173 )
# Here we go
start_time = timer(None) # timing starts from this point for "start_time" variable
random_search.fit(X, Y)
timer(start_time)
#print(random_search.predict(X[:10]))
#print('\n All results:')
#print(random_search.cv_results_)
#print('\n Best estimator:')
#print(random_search.best_estimator_)
print('\n Best normalized gini score for %d-fold search with %d parameter combinations:' % (folds, param_comb))
print(random_search.best_score_ * 2 - 1)
#print('\n Best hyperparameters:')
#print(random_search.best_params_)
results = pd.DataFrame(random_search.cv_results_)
results.to_csv('xgb/{}-{}.csv'.format(args.save,args.pt), index=False)
#random_search.best_estimator_.save_model("bdt-{}.dat".format(args.pt))
| 2.09375 | 2 |
rlenv/StockTradingEnv0.py | watchsea/RL-Stock | 0 | 3538 | import random
import json
import gym
from gym import spaces
import pandas as pd
import numpy as np
MAX_ACCOUNT_BALANCE = 2147483647
MAX_NUM_SHARES = 2147483647
MAX_SHARE_PRICE = 5000
MAX_VOLUME = 1000e8
MAX_AMOUNT = 3e10
MAX_OPEN_POSITIONS = 5
MAX_STEPS = 20000
MAX_DAY_CHANGE = 1
INITIAL_ACCOUNT_BALANCE = 10000
DATA_HIS_PERIOD = 5
# position constant
FLAT = 0 # no position
LONG = 1 # buy position
SHORT = 2 # sell position
# action constant
HOLD = 0
BUY = 1
SELL = 2
class StockTradingEnv(gym.Env):
"""A stock trading environment for OpenAI gym"""
metadata = {'render.modes': ['human']}
def __init__(self, df,show_trade=True):
super(StockTradingEnv, self).__init__()
# show the trade info
self.show_trade = show_trade
self.actions=["FLAT","LONG","SHORT"]
self.fee = 0.0005 # brokage commission
self.df = df
self.closeprices = self.df['close'].values
self.reward_range = (0, MAX_ACCOUNT_BALANCE)
# Actions of the format Buy x%, Sell x%, Hold, etc.
self.action_space = spaces.Discrete(len(self.actions))
# self.action_space = spaces.Box(
# low=np.array([0, 0]), high=np.array([3, 1]), dtype=np.float16)
# Prices contains the OHCL values for the last five prices
self.observation_space = spaces.Box(
low=0, high=1, shape=(DATA_HIS_PERIOD+1,6), dtype=np.float16)
self.history = []
def _next_observation(self):
obs = np.array([
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'open'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'high'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'low'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'close'].values / MAX_SHARE_PRICE,
self.df.loc[self.current_step-DATA_HIS_PERIOD:self.current_step, 'volume'].values / MAX_NUM_SHARES,
])
# Append additional data and scale each value to between 0-1
obs = np.append(obs,[[self.balance / MAX_ACCOUNT_BALANCE,
self.max_net_worth / MAX_ACCOUNT_BALANCE,
self.shares_held / MAX_NUM_SHARES,
self.cost_basis / MAX_SHARE_PRICE,
self.total_shares_sold / MAX_NUM_SHARES,
self.total_sales_value / (MAX_NUM_SHARES * MAX_SHARE_PRICE)]],axis=0)
return obs
def _take_action(self, action):
# Set the current price to a random price within the time step
# current_price = random.uniform(
# self.df.loc[self.current_step, "open"], self.df.loc[self.current_step, "close"])
# Set the current price to the last close price
self.close_price = self.df.loc[self.current_step,"close"]
amount = 0.5 #the old version has this variable, so reserve
# action comes from the agent
# 1 buy, 2 sell, 0 hold
# single position can be opened per trade
# valid action sequence would be
# LONG : buy - hold - hold - sell
# SHORT : sell - hold - hold - buy
# invalid action sequence is just considered hold
# (e.g.) "buy - buy" would be considred "buy - hold"
self.action = HOLD #hold
if action == BUY: #buy
if self.position == FLAT: # if previous position was flat
self.position = LONG #update position to long
self.action = BUY # record action as buy
self.entry_price = self.close_price
# Buy amount % of balance in shares
total_possible = int(self.balance / self.close_price)
shares_bought = int(total_possible * amount)//100 *100
self.krw_balance = shares_bought * self.entry_price # buy balance
commission = round(self.fee * self.krw_balance,2) # commission fee
self.shares_held = shares_bought
self.balance -= self.krw_balance-commission
#self.cost_basis = (prev_cost + additional_cost) / (self.shares_held + shares_bought)
elif self.position == SHORT: # if previous position was short
self.position = FLAT # update position to flat
self.action = BUY # record action as buy
self.exit_price = self.close_price
self.reward += ((self.entry_price - self.exit_price) / self.exit_price + 1) * (
1 - self.fee) ** 2 - 1 # calculate reward
#self.krw_balance = self.krw_balance * (1.0 + self.reward) # evaluate cumulative return in krw-won
self.balance += round(self.krw_balance * (1.0 + self.reward),2) # calcuate the total balance
self.n_short += 1 # record number of short
self.total_shares_sold += self.shares_held
self.total_sales_value += self.shares_held * self.close_price
self.entry_price = 0 # clear entry price
self.shares_held = 0 # clear the shares_
elif action == SELL:
if self.position == FLAT:
self.position = SHORT
self.action = SELL
self.entry_price = self.close_price
# Sell amount % of shares held
total_possible = int(self.balance / self.close_price)
self.shares_held = int(total_possible * amount)//100 *100
self.krw_balance = self.shares_held * self.entry_price # buy balance
commission = round(self.fee * self.krw_balance,2) # commission fee
self.balance -= self.krw_balance-commission
elif self.position == LONG:
self.position = FLAT
self.action = SELL
self.exit_price = self.close_price
self.reward += ((self.exit_price - self.entry_price) / self.entry_price + 1) * (1 - self.fee) ** 2 - 1
#self.krw_balance = self.krw_balance * (1.0 + self.reward)
self.balance += round(self.krw_balance*(1.0+self.reward),2)
self.n_long += 1
self.total_shares_buy += self.shares_held
self.total_buys_value += self.shares_held * self.close_price
self.shares_held = 0
self.entry_price = 0
# [coin + krw_won] total value evaluated in krw won
if (self.position == LONG):
temp_reward = ((self.close_price - self.entry_price) / self.entry_price + 1) * (
1 - self.fee) ** 2 - 1
new_portfolio = self.krw_balance * (1.0 + temp_reward)
elif (self.position == SHORT):
temp_reward = ((self.entry_price - self.close_price) / self.close_price + 1) * (
1 - self.fee) ** 2 - 1
new_portfolio = self.krw_balance * (1.0 + temp_reward)
else:
temp_reward = 0
new_portfolio = 0
self.net_worth = self.balance + new_portfolio
if self.net_worth > self.max_net_worth:
self.max_net_worth = self.net_worth
if self.shares_held == 0:
self.cost_basis = 0
self.portfolio = round(new_portfolio,2)
def step(self, action):
# Execute one time step within the environment
self._take_action(action)
done = False
self.current_step += 1
delay_modifier = (self.current_step / MAX_STEPS)
# profits
#reward = self.net_worth - INITIAL_ACCOUNT_BALANCE
#reward = 1 if reward > 0 else -100
if self.net_worth <= 0:
done = True
if self.current_step > len(self.df.loc[:, 'open'].values) - 1:
self.current_step = DATA_HIS_PERIOD # loop training
# when loop training, then clear the history
self.action = HOLD
self.position = FLAT
self.balance = INITIAL_ACCOUNT_BALANCE
self.net_worth = INITIAL_ACCOUNT_BALANCE
self.max_net_worth = INITIAL_ACCOUNT_BALANCE
self.krw_balance = 0
self.reward = 0
self.portfolio = 0
self.shares_held = 0
self.cost_basis = 0
self.total_shares_buy = 0
self.total_buys_value = 0
self.total_shares_sold = 0
self.total_sales_value = 0
self.n_long = 0
self.n_short = 0
self.history=[]
# done = True
if (self.show_trade and self.current_step % 1 == 0):
print("Tick: {0}/ Portfolio (krw-won): {1}, balance: {2}".format(self.current_step, self.portfolio,self.net_worth))
print("Long: {0}/ Short: {1}".format(self.n_long, self.n_short))
# save the history data
self.history.append([
self.action,
self.position,
self.current_step,
self.close_price,
self.krw_balance,
self.balance,
self.max_net_worth,
self.shares_held,
self.portfolio,
self.total_shares_buy,
self.total_buys_value,
self.total_shares_sold,
self.total_sales_value])
#self.history.append((self.action, self.current_step, self.closingPrice, self.portfolio, self.reward))
obs = self._next_observation()
if (self.current_step > (self.df.shape[0]) - 1):
self.done = True
self.reward = self.get_profit() # return reward at end of the game
return obs, self.net_worth, done, {'portfolio': np.array([self.portfolio]),
"history": self.history,
"n_trades": {'long': self.n_long, 'short': self.n_short}}
#return obs, reward, done, {}
def get_profit(self):
if(self.position == LONG):
            profit = ((self.close_price - self.entry_price)/self.entry_price + 1)*(1-self.fee)**2 - 1
elif(self.position == SHORT):
            profit = ((self.entry_price - self.close_price)/self.close_price + 1)*(1-self.fee)**2 - 1
else:
profit = 0
return profit
def reset(self, new_df=None):
# Reset the state of the environment to an initial state
self.action = HOLD
self.position = FLAT
self.balance = INITIAL_ACCOUNT_BALANCE
self.net_worth = INITIAL_ACCOUNT_BALANCE
self.max_net_worth = INITIAL_ACCOUNT_BALANCE
self.krw_balance = 0
self.reward =0
self.portfolio =0
self.shares_held = 0
self.cost_basis = 0
self.total_shares_buy =0
self.total_buys_value=0
self.total_shares_sold = 0
self.total_sales_value = 0
self.n_long=0
self.n_short=0
self.history=[]
# pass test dataset to environment
        if new_df is not None:
self.df = new_df
# Set the current step to a random point within the data frame
# self.current_step = random.randint(
# 0, len(self.df.loc[:, 'open'].values) - 6)
# the observation include the given period history data
self.current_step = DATA_HIS_PERIOD #random.randint(DATA_HIS_PERIOD,len(self.df.loc[:,'open'].values)-1)
# for i in range(DATA_HIS_PERIOD):
# self.history.append([0.0,0.0,0.0,0.0,0.0,0.0])
return self._next_observation()
def render(self, mode='human', close=False):
# Render the environment to the screen
profit = self.net_worth - INITIAL_ACCOUNT_BALANCE
print('-'*30)
print(f'Step: {self.current_step}')
print(f'Balance: {self.balance}')
print(f'Shares held: {self.shares_held} (Total sold: {self.total_shares_sold})')
print(f'Avg cost for held shares: {self.cost_basis} (Total sales value: {self.total_sales_value})')
print(f'Net worth: {self.net_worth} (Max net worth: {self.max_net_worth})')
print(f'Profit: {profit}')
return profit
| 2.890625 | 3 |
processing/manager.py | mrfleap/us-population-heatmap | 0 | 3539 | import json
import os
import pathlib
import time
from tqdm import tqdm
from aggregator import aggregate
from download import DOWNLOAD_PATH, download_files, unzip_files
from tqdm.contrib.concurrent import process_map
def main():
start = time.time()
# print("Downloading files...")
# download_files()
# print("Unzipping shapefiles...")
# unzip_files()
state_ids = []
for file in os.listdir(DOWNLOAD_PATH):
file_path = os.path.join(DOWNLOAD_PATH, file)
if os.path.isfile(file_path) and pathlib.Path(file_path).suffix == ".txt":
state_ids.append(file[file.index("BG") + 2 : file.index(".")])
# print("Computing population JSON heatmaps...")
# compute_json_heatmaps(state_ids)
print("Aggregating JSON files into one...")
aggegrate_json_files(state_ids)
end = time.time()
print(f"Done in {(end - start):0.2f}s")
def compute_json_heatmaps(state_ids):
data_files = []
for state_id in state_ids:
data_files.append(
(
state_id,
os.path.join(DOWNLOAD_PATH, f"CenPop2020_Mean_BG{state_id}.txt"),
os.path.join(DOWNLOAD_PATH, f"tl_2020_{state_id}_bg", f"tl_2020_{state_id}_bg.shp"),
)
)
process_map(create_json_for_state, data_files, max_workers=4)
def aggegrate_json_files(state_ids):
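    # Stream each state's GeoJSON features into a single FeatureCollection file without loading them all at once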
with open("public/data/pop.json", "w") as f:
f.write("""{"type": "FeatureCollection", "features": [""")
# state_ids = state_ids[:2]
features = []
for state_id in tqdm(state_ids):
geojson = None
with open(os.path.join(DOWNLOAD_PATH, f"{state_id}.json")) as f:
geojson = json.load(f)
with open("public/data/pop.json", "a") as f:
f.write(json.dumps(geojson["features"])[1:-1] + ("," if state_id != state_ids[-1] else ""))
with open("public/data/pop.json", "a") as f:
f.write("]}")
def create_json_for_state(args):
return aggregate(*args, hide_output=True)
if __name__ == "__main__":
main()
| 2.46875 | 2 |
gen_data.py | kshoji6011/vehicleai | 0 | 3540 | from PIL import Image
import os, glob
import numpy as np
from sklearn import model_selection
classes = ["car", "bycycle", "motorcycle", "pedestrian"]
num_class = len(classes)
image_size = 50
# Load the images for each class
X = []
Y = []
for index, classlabel in enumerate(classes):
photos_dir = "./" + classlabel
files = glob.glob(photos_dir + "/*.jpg")
for i, file in enumerate(files):
if i >=237: break
image = Image.open(file)
image = image.convert("RGB")
image = image.resize((image_size, image_size))
data = np.asarray(image) / 255
X.append(data)
Y.append(index)
X = np.array(X)
Y = np.array(Y)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, Y)
xy = (X_train, X_test, y_train, y_test)
np.save("./vehicle.npy", xy) | 2.96875 | 3 |
app/main.py | immortel32/Sword_Sorcery_Story_Generator | 2 | 3541 | <reponame>immortel32/Sword_Sorcery_Story_Generator
from services import waypoint_scenarios, quest_scenarios
from services.build_campaign import Campaign
from log_setup import log
if __name__ == "__main__":
number_waypoint_scenario = waypoint_scenarios.get_number_of_waypoint_scenarios()
log.info(f"We have {number_waypoint_scenario} waypoint available")
number_quests_available = quest_scenarios.get_number_of_quest_scenarios()
log.info(f"We have {number_quests_available} quests available")
random_waypoint_scenario = waypoint_scenarios.get_random_scenario(10)
random_quest = quest_scenarios.get_random_scenario(1)
campaign = Campaign()
campaign.build_campaign(
waypoint_list=random_waypoint_scenario, quest_list=random_quest
)
| 2.328125 | 2 |
tests/test-recipes/metadata/ignore_some_prefix_files/run_test.py | mbargull/conda-build | 0 | 3542 | <filename>tests/test-recipes/metadata/ignore_some_prefix_files/run_test.py
import os
pkgs = os.path.join(os.environ["ROOT"], "pkgs")
info_dir = os.path.join(pkgs, "conda-build-test-ignore-some-prefix-files-1.0-0", "info")
has_prefix_file = os.path.join(info_dir, "has_prefix")
print(info_dir)
assert os.path.isfile(has_prefix_file)
with open(has_prefix_file) as f:
assert "test2" not in f.read()
| 2.546875 | 3 |
distill.py | Lukeming-tsinghua/Interpretable-NN-for-IBD-diagnosis | 0 | 3543 | <filename>distill.py
import os
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import classification_report
from torch.optim import Adam
from tqdm import tqdm
from data import DataIteratorDistill
from loss import FocalLoss
from model import CNN
from torchtext import data, vocab
from args import get_args, print_args
from config import ConfigBinaryClassification
from config import ConfigBinaryClassificationDistill
from config import ConfigTripleClassification
if __name__ == "__main__":
args = get_args()
print_args(args)
if args.class_num == 2:
cfg = ConfigBinaryClassificationDistill()
elif args.class_num == 3:
cfg = ConfigTripleClassification()
else:
raise ValueError("wrong class num")
device = torch.device("cuda:%d" % args.cuda)
Data = DataIteratorDistill(config=cfg, train_batchsize=args.batch_size)
model = torch.load("checkpoints/CNN-29", map_location=device)
optimizer = Adam(model.parameters(), lr=args.lr)
criterion = FocalLoss(classes=args.class_num, device=device).to(device)
criterion_kv = nn.KLDivLoss().to(device)
alpha = 0.2
T = 2
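    # Distillation hyper-parameters: alpha weights the hard-label focal loss against the soft-label KL term, T is the softmax temperature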
for epoch in range(args.epoch_num):
print(epoch)
for sample in Data.train_iter:
model.train()
optimizer.zero_grad()
output = model(sample.text.permute(1, 0).to(device))
loss_f = criterion(output, sample.label.to(device))
output = F.log_softmax(output/T, 1)
score = torch.cat((sample.pred0.unsqueeze(1).to(device),
sample.pred1.unsqueeze(1).to(device)), dim=1)
score = F.softmax(score/T,1)
loss_kv = criterion_kv(output, score.to(device)) * T * T
loss = alpha * loss_f + (1 - alpha) * loss_kv
#print(loss_f.item(), loss_kv.item())
loss.backward()
optimizer.step()
with torch.no_grad():
model.eval()
preds = []
labels = []
for sample in Data.valid_iter:
output = model(sample.text.permute(1, 0).to(device))
p = output.argmax(1).cpu().tolist()
l = sample.label.tolist()
preds += p
labels += l
report = classification_report(preds, labels)
print(report)
torch.save(model, os.path.join(args.save_dir, args.save_config + str(epoch)))
| 2.34375 | 2 |
tests/TALTests/HTMLTests/TALAttributesTestCases.py | janbrohl/SimpleTAL | 5 | 3544 | #!/usr/bin/python
# -*- coding: iso-8859-1 -*-
# Copyright (c) 2016, <NAME> <<EMAIL>>
# All rights reserved.
# See LICENSE.txt
# Copyright (c) 2004 <NAME> (http://www.owlfish.com/)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# If you make any bug fixes or feature enhancements please let me know!
"""
Unit test cases.
"""
from __future__ import unicode_literals
import unittest
import os
import io
import logging
import logging.config
from simpletal import simpleTAL, simpleTALES
if (os.path.exists("logging.ini")):
logging.config.fileConfig("logging.ini")
else:
logging.basicConfig()
class TALAttributesTestCases(unittest.TestCase):
def setUp(self):
self.context = simpleTALES.Context()
self.context.addGlobal('test', 'testing')
self.context.addGlobal('link', 'www.owlfish.com')
self.context.addGlobal('needsQuoting', """Does "this" work?""")
self.context.addGlobal('number', 5)
self.context.addGlobal('uniQuote', 'Does "this" work?')
self.context.addGlobal('anotherdefault', {
'inhere': simpleTALES.DEFAULTVALUE
})
def _runTest_(self, txt, result, errMsg="Error"):
template = simpleTAL.compileHTMLTemplate(txt)
file = io.StringIO()
template.expand(self.context, file)
realResult = file.getvalue()
self.assertEqual(
realResult, result,
"%s - \npassed in: %s \ngot back %s \nexpected %s\n\nTemplate: %s"
% (errMsg, txt, realResult, result, template))
def testAddingAnAttribute(self):
self._runTest_(
'<html tal:attributes="link link" href="owlfish.com">Hello</html>',
'<html link="www.owlfish.com" href="owlfish.com">Hello</html>',
"Addition of attribute 'link' failed.")
def testRemovingAnAttribute(self):
self._runTest_(
'<html class="test" tal:attributes="href nothing" href="owlfish.com">Hello</html>',
'<html class="test">Hello</html>',
"Removal of attribute 'href' failed.")
def testDefaultAttribute(self):
self._runTest_(
'<html class="test" tal:attributes="href default" href="owlfish.com">Hello</html>',
'<html class="test" href="owlfish.com">Hello</html>',
"Defaulting of attribute 'href' failed.")
def testAnotherDefaultAttribute(self):
self._runTest_(
'<html class="test" tal:attributes="href anotherdefault/inhere" href="owlfish.com">Hello</html>',
'<html class="test" href="owlfish.com">Hello</html>',
"Defaulting of attribute 'href' failed.")
def testMultipleAttributes(self):
self._runTest_(
'<html old="still here" class="test" tal:attributes="href default;class nothing;new test" href="owlfish.com">Hello</html>',
'<html new="testing" old="still here" href="owlfish.com">Hello</html>',
"Setting multiple attributes at once failed.")
def testMultipleAttributesSpace(self):
self._runTest_(
'<html old="still here" class="test" tal:attributes="href default ; class string:Hello there; new test" href="owlfish.com">Hello</html>',
'<html class="Hello there" new="testing" old="still here" href="owlfish.com">Hello</html>',
"Setting multiple attributes at once, with spaces between semi-colons, failed."
)
def testMultipleAttributesEscaped(self):
self._runTest_(
'<html old="still " here" class="test" tal:attributes="href default ; class string: Semi-colon;;test;new test " href="owlfish.com">Hello</html>',
'''<html class="Semi-colon;test" new="testing" old='still " here' href="owlfish.com">Hello</html>''',
"Setting multiple attributes at once, with spaces between semi-colons, failed."
)
def testAttributeEscaping(self):
self._runTest_(
'<html existingAtt=""Testing"" tal:attributes="href needsQuoting">Hello</html>',
"""<html href='Does "this" work?' existingatt='"Testing"'>Hello</html>""",
"Escaping of new attributes failed.")
def testNumberAttributeEscaping(self):
self._runTest_(
'<html existingAtt=""Testing"" tal:attributes="href number">Hello</html>',
"""<html href="5" existingatt='"Testing"'>Hello</html>""",
"Escaping of new attributes failed.")
    def testUniQuoteAttributeEscaping(self):
self._runTest_(
'<html existingAtt=""Testing"" tal:attributes="href uniQuote">Hello</html>',
"""<html href='Does "this" work?' existingatt='"Testing"'>Hello</html>""",
"Escaping of new attributes failed.")
def testOriginalAttributes(self):
self._runTest_(
'<html existingAtt=""Testing"" tal:attributes="newAtt attrs/existingatt" tal:content="attrs/existingatt">Hello</html>',
"""<html newAtt='"Testing"' existingatt='"Testing"'>"Testing"</html>""",
"Accessing existing attributes failed.")
def testMultipleOriginalAttributes(self):
self._runTest_(
'<html one="Value One" two="Value two" three="Value three" tal:attributes="four attrs/three" tal:content="attrs/one">Hello</html>',
"""<html four="Value three" one="Value One" two="Value two" three="Value three">Value One</html>""",
"Accessing multiple existing attributes failed.")
def testAmpersandEscapeInAttributes(self):
self._runTest_(
'<html existingAtt="&Testing&" tal:attributes="newAtt attrs/existingatt" tal:content="attrs/existingatt">Hello</html>',
"""<html newAtt="&Testing&" existingatt="&Testing&">&Testing&</html>""",
"Accessing existing attributes failed.")
#~ def testAttributeCase (self):
#~ self._runTest_ ('<html HREF="Testing" tal:attributes="HREF test">Hello</html>'
#~ ,"""<html href="testing">Hello</html>"""
#~ ,"HTML Attributes not treated as case insensitive.")
if __name__ == '__main__':
unittest.main()
| 1.523438 | 2 |
iseq_prof/fasta.py | EBI-Metagenomics/iseq-prof | 0 | 3545 | from pathlib import Path
from typing import List
from fasta_reader import FASTAItem, FASTAWriter, read_fasta
__all__ = ["downsample"]
def downsample(infile: Path, outfile: Path, size: int, random):
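    # Randomly select 'size' records from the input FASTA and write them to the output file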
targets: List[FASTAItem] = list(read_fasta(infile))
if size > len(targets):
raise ValueError("Size is greater than the number of targets.")
targets = random.choice(targets, size, replace=False).tolist()
with FASTAWriter(outfile) as writer:
for target in targets:
writer.write_item(target.defline, target.sequence)
| 3.28125 | 3 |
src/data_settings.py | DhruvSrikanth/TSLA-React | 0 | 3546 | # API keys
# YF_API_KEY = "<KEY>" # yahoo finance api key
TICKER = "TSLA"
INTERVAL = "1m"
PERIOD = "1d"
LOOK_BACK = 30 # hard limit to not reach rate limit of 100 per day | 1.4375 | 1 |
ymir/backend/src/ymir_controller/controller/utils/invoker_mapping.py | phoenix-xhuang/ymir | 64 | 3547 | from controller.invoker import (
invoker_cmd_branch_checkout,
invoker_cmd_branch_commit,
invoker_cmd_branch_create,
invoker_cmd_branch_delete,
invoker_cmd_branch_list,
invoker_cmd_evaluate,
invoker_cmd_filter,
invoker_cmd_gpu_info,
invoker_cmd_inference,
invoker_cmd_init,
invoker_cmd_label_add,
invoker_cmd_label_get,
invoker_cmd_log,
invoker_cmd_merge,
invoker_cmd_pull_image,
invoker_cmd_repo_check,
invoker_cmd_repo_clear,
invoker_cmd_sampling,
invoker_cmd_terminate,
invoker_cmd_user_create,
invoker_task_factory,
)
from proto import backend_pb2
RequestTypeToInvoker = {
backend_pb2.CMD_BRANCH_CHECKOUT: invoker_cmd_branch_checkout.BranchCheckoutInvoker,
backend_pb2.CMD_BRANCH_CREATE: invoker_cmd_branch_create.BranchCreateInvoker,
backend_pb2.CMD_BRANCH_DEL: invoker_cmd_branch_delete.BranchDeleteInvoker,
backend_pb2.CMD_BRANCH_LIST: invoker_cmd_branch_list.BranchListInvoker,
backend_pb2.CMD_COMMIT: invoker_cmd_branch_commit.BranchCommitInvoker,
backend_pb2.CMD_EVALUATE: invoker_cmd_evaluate.EvaluateInvoker,
backend_pb2.CMD_FILTER: invoker_cmd_filter.FilterBranchInvoker,
backend_pb2.CMD_GPU_INFO_GET: invoker_cmd_gpu_info.GPUInfoInvoker,
backend_pb2.CMD_INFERENCE: invoker_cmd_inference.InferenceCMDInvoker,
backend_pb2.CMD_INIT: invoker_cmd_init.InitInvoker,
backend_pb2.CMD_LABEL_ADD: invoker_cmd_label_add.LabelAddInvoker,
backend_pb2.CMD_LABEL_GET: invoker_cmd_label_get.LabelGetInvoker,
backend_pb2.CMD_LOG: invoker_cmd_log.LogInvoker,
backend_pb2.CMD_MERGE: invoker_cmd_merge.MergeInvoker,
backend_pb2.CMD_PULL_IMAGE: invoker_cmd_pull_image.ImageHandler,
backend_pb2.CMD_TERMINATE: invoker_cmd_terminate.CMDTerminateInvoker,
backend_pb2.CMD_REPO_CHECK: invoker_cmd_repo_check.RepoCheckInvoker,
backend_pb2.CMD_REPO_CLEAR: invoker_cmd_repo_clear.RepoClearInvoker,
backend_pb2.REPO_CREATE: invoker_cmd_init.InitInvoker,
backend_pb2.TASK_CREATE: invoker_task_factory.CreateTaskInvokerFactory,
backend_pb2.USER_CREATE: invoker_cmd_user_create.UserCreateInvoker,
backend_pb2.CMD_SAMPLING: invoker_cmd_sampling.SamplingInvoker,
}
| 1.546875 | 2 |
tests/utils/date_utils.py | asuol/worky | 0 | 3548 | <filename>tests/utils/date_utils.py
from datetime import datetime, timedelta
due_date_format = '%Y-%m-%d'
datepicker_date_format = '%m%d%Y'
def current_date():
return datetime.utcnow().strftime(due_date_format)
def datepicker_current_date():
return datetime.utcnow().strftime(datepicker_date_format)
def _date_from_today(days_to_add):
return datetime.utcnow() + timedelta(days=days_to_add)
def date_from_today(days_to_add):
return _date_from_today(days_to_add).strftime(due_date_format)
def datepicker_date_from_today(days_to_add):
return _date_from_today(days_to_add).strftime(datepicker_date_format)
def datepicker_to_due_date_format(datepicker_date):
return datetime.strptime(datepicker_date,
datepicker_date_format).strftime(due_date_format)
| 2.671875 | 3 |
numba/roc/tests/hsapy/test_gufuncbuilding.py | luk-f-a/numba | 76 | 3549 | <reponame>luk-f-a/numba
import numpy as np
from numba.roc.vectorizers import HsaGUFuncVectorize
from numba.roc.dispatch import HSAGenerializedUFunc
from numba import guvectorize
import unittest
def ufunc_add_core(a, b, c):
for i in range(c.size):
c[i] = a[i] + b[i]
class TestGUFuncBuilding(unittest.TestCase):
def test_gufunc_building(self):
ufbldr = HsaGUFuncVectorize(ufunc_add_core, "(x),(x)->(x)")
ufbldr.add("(float32[:], float32[:], float32[:])")
ufbldr.add("(intp[:], intp[:], intp[:])")
ufunc = ufbldr.build_ufunc()
self.assertIsInstance(ufunc, HSAGenerializedUFunc)
# Test integer version
A = np.arange(100, dtype=np.intp)
B = np.arange(100, dtype=np.intp) + 1
expected = A + B
got = ufunc(A, B)
np.testing.assert_equal(expected, got)
self.assertEqual(expected.dtype, got.dtype)
self.assertEqual(np.dtype(np.intp), got.dtype)
# Test integer version with 2D inputs
A = A.reshape(50, 2)
B = B.reshape(50, 2)
expected = A + B
got = ufunc(A, B)
np.testing.assert_equal(expected, got)
self.assertEqual(expected.dtype, got.dtype)
self.assertEqual(np.dtype(np.intp), got.dtype)
# Test integer version with 3D inputs
A = A.reshape(5, 10, 2)
B = B.reshape(5, 10, 2)
expected = A + B
got = ufunc(A, B)
np.testing.assert_equal(expected, got)
self.assertEqual(expected.dtype, got.dtype)
self.assertEqual(np.dtype(np.intp), got.dtype)
# Test real version
A = np.arange(100, dtype=np.float32)
B = np.arange(100, dtype=np.float32) + 1
expected = A + B
got = ufunc(A, B)
np.testing.assert_allclose(expected, got)
self.assertEqual(expected.dtype, got.dtype)
self.assertEqual(np.dtype(np.float32), got.dtype)
# Test real version with 2D inputs
A = A.reshape(50, 2)
B = B.reshape(50, 2)
expected = A + B
got = ufunc(A, B)
np.testing.assert_allclose(expected, got)
self.assertEqual(expected.dtype, got.dtype)
self.assertEqual(np.dtype(np.float32), got.dtype)
def test_gufunc_building_scalar_output(self):
def sum_row(inp, out):
tmp = 0.
for i in range(inp.shape[0]):
tmp += inp[i]
out[0] = tmp
ufbldr = HsaGUFuncVectorize(sum_row, "(n)->()")
ufbldr.add("void(int32[:], int32[:])")
ufunc = ufbldr.build_ufunc()
inp = np.arange(300, dtype=np.int32).reshape(100, 3)
out = ufunc(inp)
for i in range(inp.shape[0]):
np.testing.assert_equal(inp[i].sum(), out[i])
def test_gufunc_scalar_input_saxpy(self):
def axpy(a, x, y, out):
for i in range(out.shape[0]):
out[i] = a * x[i] + y[i]
ufbldr = HsaGUFuncVectorize(axpy, '(),(t),(t)->(t)')
ufbldr.add("void(float32, float32[:], float32[:], float32[:])")
saxpy = ufbldr.build_ufunc()
A = np.float32(2)
X = np.arange(10, dtype=np.float32).reshape(5, 2)
Y = np.arange(10, dtype=np.float32).reshape(5, 2)
out = saxpy(A, X, Y)
for j in range(5):
for i in range(2):
exp = A * X[j, i] + Y[j, i]
self.assertTrue(exp == out[j, i])
X = np.arange(10, dtype=np.float32)
Y = np.arange(10, dtype=np.float32)
out = saxpy(A, X, Y)
for j in range(10):
exp = A * X[j] + Y[j]
self.assertTrue(exp == out[j], (exp, out[j]))
A = np.arange(5, dtype=np.float32)
X = np.arange(10, dtype=np.float32).reshape(5, 2)
Y = np.arange(10, dtype=np.float32).reshape(5, 2)
out = saxpy(A, X, Y)
for j in range(5):
for i in range(2):
exp = A[j] * X[j, i] + Y[j, i]
self.assertTrue(exp == out[j, i], (exp, out[j, i]))
class TestGUFuncDecor(unittest.TestCase):
def test_gufunc_decorator(self):
@guvectorize(["void(float32, float32[:], float32[:], float32[:])"],
'(),(t),(t)->(t)', target='roc')
def saxpy(a, x, y, out):
for i in range(out.shape[0]):
out[i] = a * x[i] + y[i]
A = np.float32(2)
X = np.arange(10, dtype=np.float32).reshape(5, 2)
Y = np.arange(10, dtype=np.float32).reshape(5, 2)
out = saxpy(A, X, Y)
for j in range(5):
for i in range(2):
exp = A * X[j, i] + Y[j, i]
self.assertTrue(exp == out[j, i])
X = np.arange(10, dtype=np.float32)
Y = np.arange(10, dtype=np.float32)
out = saxpy(A, X, Y)
for j in range(10):
exp = A * X[j] + Y[j]
self.assertTrue(exp == out[j], (exp, out[j]))
A = np.arange(5, dtype=np.float32)
X = np.arange(10, dtype=np.float32).reshape(5, 2)
Y = np.arange(10, dtype=np.float32).reshape(5, 2)
out = saxpy(A, X, Y)
for j in range(5):
for i in range(2):
exp = A[j] * X[j, i] + Y[j, i]
self.assertTrue(exp == out[j, i], (exp, out[j, i]))
if __name__ == '__main__':
unittest.main()
| 2.171875 | 2 |
server/form/mongo.py | SRM-IST-KTR/ossmosis | 6 | 3550 | import os
from pymongo import MongoClient
from dotenv import load_dotenv
def database_entry(data):
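    # Insert a single document into the 'users' collection of the configured MongoDB database; returns True on success, False on error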
try:
load_dotenv()
mongo_string = os.getenv('MONGODB_AUTH_URI')
client = MongoClient(mongo_string)
database = client[os.getenv('MONGODB_DB')]
col = database['users']
col.insert_one(data)
return True
except Exception as e:
print(e)
return False
if __name__ == "__main__":
pass
| 2.34375 | 2 |
control-flow/solution/file_hosts.py | giserh/book-python | 1 | 3551 | FILE = r'../src/etc-hosts.txt'
hostnames = []
content = []
try:
with open(FILE, encoding='utf-8') as file:
content = file.readlines()
except FileNotFoundError:
print('File does not exist')
except PermissionError:
print('Permission denied')
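# Parse each non-comment, non-blank entry and merge host names that share the same IP address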
for line in content:
if line.startswith('#'):
continue
if line.isspace():
continue
line = line.strip().split()
ip = line[0]
hosts = line[1:]
for record in hostnames:
if record['ip'] == ip:
record['hostnames'].update(hosts)
break
else:
hostnames.append({
'hostnames': set(hosts),
'protocol': 'IPv4' if '.' in ip else 'IPv6',
'ip': ip,
})
print(hostnames)
| 3.0625 | 3 |
zindi/docs/utils/n_subimissions_per_day.py | eaedk/testing-zindi-package | 6 | 3552 | def n_subimissions_per_day( url, headers ):
"""Get the number of submissions we can make per day for the selected challenge.
Parameters
----------
    url : string ,
        The URL of the selected challenge.
headers : dictionary ,
The headers of the request.
Returns
-------
n_sub : int, default=0 : Means error during info retrieval.
The number of submissions we can make per day.
""" | 3.28125 | 3 |
algo/test/test_maximum_cut.py | ssavinash1/Algorithm_stanford | 24 | 3553 | # -*- coding: utf-8 -*-
import unittest
from src.graph import Graph
from src.maximum_cut import maximum_cut, maximum_cut_for_bipartite_graph
class MaximumCut(unittest.TestCase):
def test_maximum_cut_for_bipartite_graphs(self):
""" Given the following bipartite graph.
(a)-----(b)
\
\----(c)
(d)-----(e)
/
(f)----/
\
\----(g)
"""
g = Graph.build(edges=[('a', 'b'), ('a', 'c'),
('d', 'e'), ('f', 'e'), ('f', 'g')],
directed=False)
(left, right) = maximum_cut_for_bipartite_graph(g)
self.assertIn(len(left), [3,4], 'either 3 or 4')
        self.assertIn(len(right), [3,4], 'either 3 or 4')
self.assertEqual(7, len(left)+len(right), 'no vertex counted twice')
def test_maximum_cut_for_larger_bipartite_graphs(self):
""" A sligthly larger graph:
(a) (c)
| \ /|
| x |
| / \ |
(b) (d)
| \ /|
| x |
| / \ |
(e) (f)
"""
g = Graph.build(edges=[('a', 'b'), ('a', 'd'), ('c', 'b'), ('c', 'd'),
('b', 'e'), ('b', 'f'), ('d', 'e'), ('d', 'f')],
directed=False)
(left, right) = maximum_cut_for_bipartite_graph(g)
self.assertIn(set(left), [set(['a', 'c', 'e', 'f']), set(['b', 'd'])])
self.assertIn(set(right), [set(['a', 'c', 'e', 'f']), set(['b', 'd'])])
self.assertNotEqual(left, right, 'not the same subsets')
def test_maximum_cut(self):
""" Given a graph:
(u)----(v)
| \ / |
| \/ |
| /\ |
| / \ |
(w)---(x)
"""
g = Graph.build(edges=[
('u', 'v'), ('u', 'w'), ('u', 'x'), ('v', 'x'),('w', 'x')],
directed=False)
(left, right) = maximum_cut(g)
expected = [{'u', 'v'}, {'w', 'x'}, {'x', 'u'}, {'w', 'v'}]
self.assertNotEqual(left, right, 'no common vertices between cuts')
self.assertIn(set(left), expected, 'should correctly split the graph')
self.assertIn(set(right), expected, 'should correctly split the graph')
def test_weighted_maximum_cut(self):
""" Given the following weighted graph.
(u)-3-(v)
| \ / |
| 5\/1 4
2 /\ |
| / \ |
(w)-6-(x)
"""
g = Graph.build(edges=[
('u', 'v', 3), ('u', 'w', 2), ('u', 'x', 5),
('v', 'x', 4),('w', 'x', 6)],
directed=False)
(left, right) = maximum_cut(g)
self.assertEqual(2, len(left), 'left should contain 2 vertices')
self.assertEqual(2, len(right), 'right should contain 2 vertices')
| 3.671875 | 4 |
gdb/print-avs-rbtree.py | kemonats/avs_commons | 4 | 3554 | <filename>gdb/print-avs-rbtree.py
# -*- coding: utf-8 -*-
#
# Copyright 2021 AVSystem <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# installation: append "source PATH_TO_THIS_SCRIPT" to ~/.gdbinit
import gdb
class PrintAvsRbtreeBase(gdb.Command):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.intptr_type = gdb.lookup_type('unsigned long long')
self.int_type = gdb.lookup_type('int')
self.output_format = '%%s 0x%%0%dx = %%s' % (self.intptr_type.sizeof * 2,)
# TODO
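        # Byte offsets (relative to the user-visible node pointer) of the intrusive red-black tree header fields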
self.color_offset = -32
self.parent_offset = -24
self.left_offset = -16
self.right_offset = -8
def _print_tree(self, ptr, path='', depth=0, visited_addrs=set()):
left_ptr_value = ptr.cast(self.intptr_type) + self.left_offset
left_ptr = left_ptr_value.cast(ptr.type.pointer()).dereference()
right_ptr_value = ptr.cast(self.intptr_type) + self.right_offset
right_ptr = right_ptr_value.cast(ptr.type.pointer()).dereference()
prefix = ''.join(' |' if x == 'L' else ' ' for x in path)
if path:
if path[-1] == 'L':
prefix += '- '
elif path[-1] == 'R':
prefix = prefix[:-1] + "'- "
print(prefix + self.output_format % (path[-1] if path else ' ', int(ptr), str(ptr.dereference())))
if int(left_ptr) in visited_addrs or int(right_ptr) in visited_addrs:
print('circular tree detected, stopping')
return
visited_addrs.add(left_ptr)
visited_addrs.add(right_ptr)
if int(left_ptr) != 0:
self._print_tree(left_ptr, path + 'L', depth+1, visited_addrs)
if int(right_ptr) != 0:
self._print_tree(right_ptr, path + 'R', depth+1, visited_addrs)
class PrintAvsRbtreeSubtree(PrintAvsRbtreeBase):
def __init__(self):
super().__init__('print-avs-rbtree-subtree',
gdb.COMMAND_DATA,
gdb.COMPLETE_EXPRESSION)
def invoke(self, argv_str, _from_tty):
args = gdb.string_to_argv(argv_str)
if len(args) != 1:
print('usage: print-avs-rbtree-subtree expr\n'
                  ' expr - an expression that evaluates to a valid AVS_RBTREE_NODE pointer\n')
return
expr = args[0]
val = gdb.parse_and_eval(expr)
if val is None:
print('cannot evaluate expression: ' + expr)
return
if val == 0:
print('(null)')
else:
self._print_tree(val)
class PrintAvsRbtree(PrintAvsRbtreeBase):
def __init__(self):
super().__init__('print-avs-rbtree',
gdb.COMMAND_DATA,
gdb.COMPLETE_EXPRESSION)
def invoke(self, argv_str, _from_tty):
args = gdb.string_to_argv(argv_str)
if len(args) != 1:
print('usage: print-avs-rbtree expr\n'
                  ' expr - an expression that evaluates to a valid AVS_RBTREE pointer\n')
return
expr = args[0]
val = gdb.parse_and_eval('*(' + expr + ')')
if val is None:
print('cannot evaluate expression: ' + expr)
return
if val == 0:
print('(null)')
else:
self._print_tree(val)
class PrintAvsRbtreeNode(PrintAvsRbtreeBase):
def __init__(self):
super().__init__('print-avs-rbtree-node',
gdb.COMMAND_DATA,
gdb.COMPLETE_EXPRESSION)
def invoke(self, argv_str, _from_tty):
args = gdb.string_to_argv(argv_str)
if len(args) not in (1, 2):
            print('usage: print-avs-rbtree-node expr [with_magic]\n'
                  ' expr - an expression that evaluates to a valid AVS_RBTREE_NODE pointer\n'
' with_magic - if present, "magic" fields are displayed\n')
return
expr = args[0]
with_magic = len(args) > 1
ptr = gdb.parse_and_eval(expr)
if ptr is None:
print('cannot evaluate expression: ' + expr)
return
if ptr == 0:
print('(null)')
else:
intptr_ptr = ptr.cast(self.intptr_type)
if with_magic:
print((intptr_ptr + self.rb_magic_offset))
print((intptr_ptr + self.rb_magic_offset).cast(self.int_type.pointer()))
print('rb magic: %s' % ((intptr_ptr + self.rb_magic_offset).cast(self.int_type.pointer()).dereference()))
print('tree magic: %s' % ((intptr_ptr + self.tree_magic_offset).cast(self.int_type.pointer()).dereference()))
print('color: %s' % ((intptr_ptr + self.color_offset ).cast(self.int_type.pointer()).dereference()))
print('parent: 0x%%0%dx' % (self.intptr_type.sizeof * 2) % ((intptr_ptr + self.parent_offset).cast(ptr.type.pointer()).dereference()))
print('left: 0x%%0%dx' % (self.intptr_type.sizeof * 2) % ((intptr_ptr + self.left_offset ).cast(ptr.type.pointer()).dereference()))
print('right: 0x%%0%dx' % (self.intptr_type.sizeof * 2) % ((intptr_ptr + self.right_offset ).cast(ptr.type.pointer()).dereference()))
PrintAvsRbtreeSubtree()
PrintAvsRbtree()
PrintAvsRbtreeNode()
| 2.34375 | 2 |
hour17/PythonGroup.py | sampx/mongodb-practice | 0 | 3555 | from pymongo import MongoClient
def displayGroup(results):
for result in results:
print (result)
def firstIsALastIsVowel(collection):
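    # Group words starting with 'a' and ending in a vowel, counting matches per (first, last) letter pair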
key = {'first' : True, "last" : True}
cond = {'first' : 'a', 'last' :
{'$in' : ["a","e","i","o","u"]}}
initial = {'count' : 0}
reduce = "function (obj, prev) { prev.count++; }"
results = collection.group(key, cond, initial, reduce)
print ("\n\n'A' words grouped by first and last" + \
" letter that end with a vowel:")
displayGroup(results)
def firstLetterTotals(collection):
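    # Aggregate vowel/consonant counts per first letter; the finalizer adds a combined total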
key = {'first' : True}
cond = {}
initial = {'vowels' : 0, 'cons' : 0}
reduce = "function (obj, prev) { " + \
"prev.vowels += obj.stats.vowels; " + \
"prev.cons += obj.stats.consonants; " + \
"}"
finalize = "function (obj) { " + \
"obj.total = obj.vowels + obj.cons; " + \
"}"
results = collection.group(key, cond, initial, reduce, finalize)
print ("\n\nWords grouped by first letter " + \
"with totals:")
displayGroup(results)
if __name__=="__main__":
mongo = MongoClient('mongodb://localhost:27017/')
db = mongo['words']
collection = db['word_stats']
firstIsALastIsVowel(collection)
firstLetterTotals(collection) | 3.140625 | 3 |
packnet_sfm/models/model_utils.py | pection/packnet-sfm | 1 | 3556 | # Copyright 2020 Toyota Research Institute. All rights reserved.
from packnet_sfm.utils.image import flip_lr, interpolate_scales
from packnet_sfm.utils.misc import filter_dict
from packnet_sfm.utils.types import is_tensor, is_list, is_numpy
def flip(tensor, flip_fn):
"""
Flip tensors or list of tensors based on a function
Parameters
----------
tensor : torch.Tensor or list[torch.Tensor] or list[list[torch.Tensor]]
Tensor to be flipped
flip_fn : Function
Flip function
Returns
-------
tensor : torch.Tensor or list[torch.Tensor] or list[list[torch.Tensor]]
Flipped tensor or list of tensors
"""
if not is_list(tensor):
return flip_fn(tensor)
else:
if not is_list(tensor[0]):
return [flip_fn(val) for val in tensor]
else:
return [[flip_fn(v) for v in val] for val in tensor]
def merge_outputs(*outputs):
"""
Merges model outputs for logging
Parameters
----------
outputs : tuple of dict
Outputs to be merged
Returns
-------
output : dict
Dictionary with a "metrics" key containing a dictionary with various metrics and
all other keys that are not "loss" (it is handled differently).
"""
ignore = ['loss'] # Keys to ignore
combine = ['metrics'] # Keys to combine
merge = {key: {} for key in combine}
for output in outputs:
# Iterate over all keys
for key, val in output.items():
# Combine these keys
if key in combine:
for sub_key, sub_val in output[key].items():
assert sub_key not in merge[key].keys(), \
'Combining duplicated key {} to {}'.format(sub_key, key)
merge[key][sub_key] = sub_val
# Ignore these keys
elif key not in ignore:
assert key not in merge.keys(), \
'Adding duplicated key {}'.format(key)
merge[key] = val
return merge
def stack_batch(batch):
"""
Stack multi-camera batches (B,N,C,H,W becomes BN,C,H,W)
Parameters
----------
batch : dict
Batch
Returns
-------
batch : dict
Stacked batch
"""
# If there is multi-camera information
if len(batch['rgb'].shape) == 5:
assert batch['rgb'].shape[0] == 1, 'Only batch size 1 is supported for multi-cameras'
# Loop over all keys
for key in batch.keys():
# If list, stack every item
if is_list(batch[key]):
if is_tensor(batch[key][0]) or is_numpy(batch[key][0]):
batch[key] = [sample[0] for sample in batch[key]]
# Else, stack single item
else:
batch[key] = batch[key][0]
return batch
def flip_batch_input(batch):
"""
Flip batch input information (copies data first)
Parameters
----------
batch : dict
Batch information
Returns
-------
batch : dict
Flipped batch
"""
# Flip tensors
for key in filter_dict(batch, [
'rgb', 'rgb_context',
'input_depth', 'input_depth_context',
]):
batch[key] = flip(batch[key], flip_lr)
# Flip intrinsics
for key in filter_dict(batch, [
'intrinsics'
]):
batch[key] = batch[key].clone()
batch[key][:, 0, 2] = batch['rgb'].shape[3] - batch[key][:, 0, 2]
# Return flipped batch
return batch
def flip_output(output):
"""
Flip output information
Parameters
----------
output : dict
Dictionary of model outputs (e.g. with keys like 'inv_depths' and 'uncertainty')
Returns
-------
output : dict
Flipped output
"""
# Flip tensors
for key in filter_dict(output, [
'uncertainty', 'logits_semantic', 'ord_probability',
'inv_depths', 'inv_depths_context', 'inv_depths1', 'inv_depths2',
'pred_depth', 'pred_depth_context', 'pred_depth1', 'pred_depth2',
'pred_inv_depth', 'pred_inv_depth_context', 'pred_inv_depth1', 'pred_inv_depth2',
]):
output[key] = flip(output[key], flip_lr)
return output
def upsample_output(output, mode='nearest', align_corners=None):
"""
Upsample multi-scale outputs to full resolution.
Parameters
----------
output : dict
Dictionary of model outputs (e.g. with keys like 'inv_depths' and 'uncertainty')
mode : str
Which interpolation mode is used
align_corners: bool or None
Whether corners will be aligned during interpolation
Returns
-------
output : dict
Upsampled output
"""
for key in filter_dict(output, [
'inv_depths', 'uncertainty'
]):
output[key] = interpolate_scales(
output[key], mode=mode, align_corners=align_corners)
for key in filter_dict(output, [
'inv_depths_context'
]):
output[key] = [interpolate_scales(
val, mode=mode, align_corners=align_corners) for val in output[key]]
return output
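# Illustrative usage sketch (not part of the original module): merging two
# hypothetical model outputs. Keys under 'metrics' are combined into a single
# dictionary, while other non-'loss' keys are copied over unchanged.
if __name__ == '__main__':
    merged = merge_outputs({'loss': 0.5, 'metrics': {'abs_rel': 0.1}},
                           {'metrics': {'rmse': 4.2}, 'depth': 'dummy'})
    print(merged)  # {'metrics': {'abs_rel': 0.1, 'rmse': 4.2}, 'depth': 'dummy'}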
| 2.234375 | 2 |
utils/stg/min_jerk_traj.py | dgerod/more-dmps | 7 | 3557 | '''
Created on 25.07.2012
@author: karl
'''
def trajectory(start, goal, duration, delta_t):
traj = []
# inital values
t, td, tdd = start, 0, 0
for i in range(int(2 * duration / delta_t)):
try:
t, td, tdd = _min_jerk_step(t, td, tdd, goal, duration - i * delta_t, delta_t)
except:
break
traj.append([t, td, tdd])
return traj
def _min_jerk_step(x, xd, xdd, goal, tau, dt):
#function [x,xd,xdd] = min_jerk_step(x,xd,xdd,goal,tau, dt) computes
# the update of x,xd,xdd for the next time step dt given that we are
# currently at x,xd,xdd, and that we have tau until we want to reach
# the goal
# ported from matlab dmp toolbox
if tau < dt:
        raise Exception("time left (tau) is smaller than current time (dt) - end of traj reached!")
dist = goal - x
a1 = 0
a0 = xdd * tau ** 2
v1 = 0
v0 = xd * tau
t1 = dt
t2 = dt ** 2
t3 = dt ** 3
t4 = dt ** 4
t5 = dt ** 5
c1 = (6.*dist + (a1 - a0) / 2. - 3.*(v0 + v1)) / tau ** 5
c2 = (-15.*dist + (3.*a0 - 2.*a1) / 2. + 8.*v0 + 7.*v1) / tau ** 4
c3 = (10.*dist + (a1 - 3.*a0) / 2. - 6.*v0 - 4.*v1) / tau ** 3
c4 = xdd / 2.
c5 = xd
c6 = x
x = c1 * t5 + c2 * t4 + c3 * t3 + c4 * t2 + c5 * t1 + c6
xd = 5.*c1 * t4 + 4 * c2 * t3 + 3 * c3 * t2 + 2 * c4 * t1 + c5
xdd = 20.*c1 * t3 + 12.*c2 * t2 + 6.*c3 * t1 + 2.*c4
return (x, xd, xdd)
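# Illustrative usage sketch (not part of the original module): generate a
# minimum-jerk trajectory from 0.0 to 1.0 over one second with a 10 ms step;
# the start/goal/duration values are arbitrary demo choices.
if __name__ == '__main__':
    demo_traj = trajectory(start=0.0, goal=1.0, duration=1.0, delta_t=0.01)
    # Each entry is [position, velocity, acceleration] for one time step.
    print(len(demo_traj), demo_traj[-1])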
| 2.953125 | 3 |
tests/__init__.py | karanrampal/triplet-loss | 0 | 3558 | <gh_stars>0
# Make this directory a Python package
| 1.046875 | 1 |
orthoexon/tests/test_util.py | jessicalettes/orthoexon | 0 | 3559 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_orthoexon
----------------------------------
Tests for `orthoexon` module.
"""
import os
import pytest
@pytest.fixture
def exon_id_with_quotes():
return "'ENSE00001229068.1'"
@pytest.fixture
def exon_id():
return "ENSE00001229068.1"
def test_separate_with_quotes(exon_id_with_quotes):
from orthoexon.util import separate
test = separate(exon_id_with_quotes)
true = "ENSE00001229068"
assert test == true
def test_separate(exon_id):
from orthoexon.util import separate
test = separate(exon_id)
true = "ENSE00001229068"
assert test == true
@pytest.fixture
def location():
return "chr20:10256140-10256211:+:0"
def test_splitstart(location):
from orthoexon.util import splitstart
test = splitstart(location)
true = '10256140'
assert test == true
def test_splitend(location):
from orthoexon.util import splitend
test = splitend(location)
true = '10256211'
assert test == true
@pytest.fixture
def human_gtf_filename(table_folder):
return os.path.join(table_folder, 'humanrbfox2andfmr1andsnap25.gtf')
@pytest.fixture
def human_gtf_database(table_folder):
return os.path.join(table_folder, 'humanrbfox2andfmr1andsnap25.gtf.db')
@pytest.fixture
def human_fasta(table_folder):
return os.path.join(table_folder, 'GRCm38.p3.genome.fa')
def test_translate(exon_id, human_fasta, human_gtf_database):
from orthoexon.util import translate
from orthoexon.util import separate
for index, species1gene in enumerate(human_gtf_database.features_of_type('gene')):
species1gffutilsgeneid = str(species1gene['gene_id'])
species1geneid = separate(species1gffutilsgeneid)
for exon in human_gtf_database.children(species1geneid,
featuretype='CDS',
order_by='start'):
if exon_id == exon:
test = translate(exon, human_fasta)
break
break
true = 'MAEDADMRNELEEMQRRADQLADE'
assert test == true
# def test_getsequence(exon, human_gtf_database):
# from orthoexon.util import getsequence
#
# test = getsequence(exon, human_gtf_database)
# true = 'ATGGCCGAAGACGCAGACATGCGCAATGAGCTGGAGGAGATGCAGCGAAGGGCTGACCAGTT' \
# 'GGCTGATGAG'
#
# assert test == true
# def test_make_sequence_array(finalsequencedf):
# from orthoexon.util import make_sequence_array
#
# test = make_sequence_array(finalsequencedf)
# true = ......
#
# assert test == true | 2.21875 | 2 |
predict_recognition.py | yeyupiaoling/Kersa-Speaker-Recognition | 42 | 3560 | <filename>predict_recognition.py
import argparse
import os
import shutil
import time
import numpy as np
from utils import model, utils
from utils.record import RecordAudio
parser = argparse.ArgumentParser()
parser.add_argument('--audio_db', default='audio_db/', type=str, help='Path to the audio library')
parser.add_argument('--threshold', default=0.7, type=float, help='Threshold for deciding whether two voices belong to the same person')
parser.add_argument('--model_path', default=r'models/resnet34-56.h5', type=str, help='Path to the model')
args = parser.parse_args()
person_feature = []
person_name = []
# Build the model
network_eval = model.vggvox_resnet2d_icassp(input_dim=(257, None, 1), mode='eval')
# Load the pre-trained weights
network_eval.load_weights(os.path.join(args.model_path), by_name=True)
print('==> successfully loading model {}.'.format(args.model_path))
# Predict the voiceprint feature of an audio file
def predict(path):
specs = utils.load_data(path, mode='eval')
specs = np.expand_dims(np.expand_dims(specs, 0), -1)
feature = network_eval.predict(specs)[0]
return feature
# Load the audio library used for recognition
def load_audio_db(audio_db_path):
start = time.time()
audios = os.listdir(audio_db_path)
for audio in audios:
path = os.path.join(audio_db_path, audio)
name = audio[:-4]
feature = predict(path)
person_name.append(name)
person_feature.append(feature)
print("Loaded %s audio." % name)
end = time.time()
    print('Finished loading the audio library, elapsed time: %fms' % (round((end - start) * 1000)))
# Recognize a voiceprint
def recognition(path):
name = ''
pro = 0
feature = predict(path)
for i, person_f in enumerate(person_feature):
        # Compute the similarity
dist = np.dot(feature, person_f.T)
if dist > pro:
pro = dist
name = person_name[i]
return name, pro
# Register a new voiceprint
def register(path, user_name):
save_path = os.path.join(args.audio_db, user_name + os.path.basename(path)[-4:])
shutil.move(path, save_path)
feature = predict(save_path)
person_name.append(user_name)
person_feature.append(feature)
if __name__ == '__main__':
load_audio_db(args.audio_db)
record_audio = RecordAudio()
while True:
        select_fun = int(input("Select a function: 0 to register audio into the voiceprint library, 1 to run voiceprint recognition: "))
if select_fun == 0:
audio_path = record_audio.record()
name = input("请输入该音频用户的名称:")
if name == '': continue
register(audio_path, name)
elif select_fun == 1:
audio_path = record_audio.record()
name, p = recognition(audio_path)
if p > args.threshold:
print("识别说话的为:%s,相似度为:%f" % (name, p))
else:
print("音频库没有该用户的语音")
else:
            print('Please select a valid function')
| 2.59375 | 3 |
cubedash/_product.py | vconrado/datacube-explorer | 0 | 3561 | <reponame>vconrado/datacube-explorer
import logging
from datetime import timedelta
from flask import Blueprint, Response, abort, redirect, url_for
from cubedash import _model, _utils, _utils as utils
_LOG = logging.getLogger(__name__)
bp = Blueprint("product", __name__)
@bp.route("/about.csv")
def legacy_about_csv():
return redirect(".storage_csv")
@bp.route("/audit/storage.csv")
def storage_csv():
"""Get the product storage table as a CSV"""
product_locations = _model.STORE.products_location_samples_all()
return utils.as_csv(
filename_prefix="product-information",
headers=(
"name",
"count",
"locations",
"license",
"definition",
"summary_time",
"metadata_type",
),
rows=(
(
product.name,
summary.dataset_count,
[
location.common_prefix
for location in (product_locations.get(product.name) or [])
],
_utils.product_license(product),
url_for("product.raw_product_doc", name=product.name, _external=True),
summary.last_refresh_time,
product.metadata_type.name,
)
for product, summary in _model.get_products_with_summaries()
),
)
@bp.route("/products.txt")
def product_list_text():
# This is useful for bash scripts when we want to loop products :)
return Response(
"\n".join(t.name for t in _model.STORE.all_dataset_types()),
content_type="text/plain",
)
@bp.route("/metadata-types.txt")
def metadata_type_list_text():
# This is useful for bash scripts when we want to loop them :)
return Response(
"\n".join(t.name for t in _model.STORE.all_metadata_types()),
content_type="text/plain",
)
@bp.route("/audit/storage")
def storage_page():
product_locations = _model.STORE.products_location_samples_all()
return utils.render(
"storage.html",
product_summary_and_location=[
(product, summary, (product_locations.get(product.name) or []))
for product, summary in _model.get_products_with_summaries()
],
)
@bp.route("/product")
def product_redirect():
"""
If people remove the name from a "/product/<name>" url, take them somewhere useful
"""
return redirect(url_for(".products_page"))
@bp.route("/products")
def products_page():
return utils.render(
"products.html",
)
@bp.route("/metadata-types")
def metadata_types_page():
return utils.render(
"metadata-types.html",
)
@bp.route("/product/<name>.odc-product.yaml")
def legacy_raw_product_doc(name):
return redirect(url_for(".raw_product_doc", name=name))
@bp.route("/products/<name>.odc-product.yaml")
def raw_product_doc(name):
product = _model.STORE.index.products.get_by_name(name)
if not product:
abort(404, f"Unknown product {name!r}")
ordered_metadata = utils.prepare_document_formatting(
product.definition, "Product", include_source_url=True
)
return utils.as_yaml(ordered_metadata)
@bp.route("/metadata-type/<name>")
def legacy_metadata_type_page(name):
return redirect(url_for(".metadata_type_page", name=name))
@bp.route("/metadata-types/<name>")
def metadata_type_page(name):
metadata_type = _model.STORE.index.metadata_types.get_by_name(name)
if not metadata_type:
abort(404, f"Unknown metadata type {name!r}")
ordered_metadata = utils.prepare_document_formatting(metadata_type.definition)
products_using_it = sorted(
(
p
for p in _model.STORE.index.products.get_all()
if p.metadata_type.name == name
),
key=lambda p: p.name,
)
return utils.render(
"metadata-type.html",
metadata_type=metadata_type,
metadata_doc=ordered_metadata,
products_using_it=products_using_it,
)
@bp.route("/metadata-type/<name>.odc-type.yaml")
def legacy_metadata_type_doc(name):
return redirect(url_for(".raw_metadata_type_doc", name=name))
@bp.route("/metadata-types/<name>.odc-type.yaml")
def raw_metadata_type_doc(name):
metadata_type = _model.STORE.index.metadata_types.get_by_name(name)
if not metadata_type:
abort(404, f"Unknown metadata type {name!r}")
ordered_metadata = utils.prepare_document_formatting(
metadata_type.definition, "Metadata Type", include_source_url=True
)
return utils.as_yaml(ordered_metadata)
@bp.route("/products.odc-product.yaml")
def raw_all_products_doc():
resp = utils.as_yaml(
*(
utils.prepare_document_formatting(
product.definition,
f"Product {product.name}",
include_source_url=url_for(
".raw_product_doc", name=product.name, _external=True
),
)
for product in _model.STORE.all_dataset_types()
)
)
# Add Explorer ID to the download filename if they have one.
utils.suggest_download_filename(
resp,
prefix="products",
suffix=".odc-product.yaml",
)
return resp
@bp.route("/metadata-types.odc-type.yaml")
def raw_all_metadata_types_doc():
resp = utils.as_yaml(
*(
utils.prepare_document_formatting(
type_.definition,
f"Metadata Type {type_.name}",
include_source_url=url_for(
".raw_metadata_type_doc", name=type_.name, _external=True
),
)
for type_ in _model.STORE.all_metadata_types()
),
)
# Add Explorer ID to the download filename if they have one.
utils.suggest_download_filename(
resp,
prefix="metadata-types",
suffix=".odc-type.yaml",
)
return resp
def _iso8601_duration(tdelta: timedelta):
"""
Format a timedelta as an iso8601 duration
>>> _iso8601_duration(timedelta(seconds=0))
'PT0S'
>>> _iso8601_duration(timedelta(seconds=1))
'PT1S'
>>> _iso8601_duration(timedelta(seconds=23423))
'PT6H30M23S'
>>> _iso8601_duration(timedelta(seconds=4564564556))
'P52830DT14H35M56S'
"""
all_secs = tdelta.total_seconds()
secs = int(all_secs % 60)
h_m_s = (
int(all_secs // 3600 % 24),
int(all_secs // 60 % 60),
secs if secs % 1 != 0 else int(secs),
)
parts = ["P"]
days = int(all_secs // 86400)
if days:
parts.append(f"{days}D")
if any(h_m_s):
parts.append("T")
if all_secs:
for val, name in zip(h_m_s, ["H", "M", "S"]):
if val:
parts.append(f"{val}{name}")
else:
parts.append("T0S")
return "".join(parts)
| 2.09375 | 2 |
litex_boards/platforms/sipeed_tang_nano.py | ozbenh/litex-boards | 0 | 3562 | #
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
# Board diagram/pinout:
# https://user-images.githubusercontent.com/1450143/133655492-532d5e9a-0635-4889-85c9-68683d06cae0.png
# http://dl.sipeed.com/TANG/Nano/HDK/Tang-NANO-2704(Schematic).pdf
from migen import *
from litex.build.generic_platform import *
from litex.build.gowin.platform import GowinPlatform
from litex.build.openfpgaloader import OpenFPGALoader
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk24", 0, Pins("35"), IOStandard("LVCMOS33")),
# Leds
("user_led", 0, Pins("16"), IOStandard("LVCMOS33")),
("user_led", 1, Pins("17"), IOStandard("LVCMOS33")),
("user_led", 2, Pins("18"), IOStandard("LVCMOS33")),
# Buttons.
("user_btn", 0, Pins("15"), IOStandard("LVCMOS33")),
("user_btn", 0, Pins("14"), IOStandard("LVCMOS33")),
# Serial
("serial", 0,
Subsignal("tx", Pins("8")),
Subsignal("rx", Pins("9")),
IOStandard("LVCMOS33")
),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = []
# Platform -----------------------------------------------------------------------------------------
class Platform(GowinPlatform):
default_clk_name = "clk24"
default_clk_period = 1e9/24e6
def __init__(self):
GowinPlatform.__init__(self, "GW1N-LV1QN48C6/I5", _io, _connectors, toolchain="gowin", devicename="GW1N-1")
self.toolchain.options["use_done_as_gpio"] = 1
def create_programmer(self):
return OpenFPGALoader("tangnano")
def do_finalize(self, fragment):
GowinPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk24", loose=True), 1e9/24e6)
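# Illustrative usage sketch (not part of the original file; assumes LiteX with
# the Gowin toolchain is installed). It instantiates the platform, drives one
# LED and runs the build flow:
#
#   from migen import Module
#   platform = Platform()
#   module = Module()
#   module.comb += platform.request("user_led", 0).eq(1)
#   platform.build(module)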
| 1.546875 | 2 |
nm_cavia/rl/metalearner.py | anon-6994/nm-metarl | 0 | 3563 | import torch
from torch.distributions.kl import kl_divergence
from torch.nn.utils.convert_parameters import (vector_to_parameters,
parameters_to_vector)
from rl_utils.optimization import conjugate_gradient
from rl_utils.torch_utils import (weighted_mean, detach_distribution, weighted_normalize)
class MetaLearner(object):
"""Meta-learner
    The meta-learner is responsible for sampling the trajectories/episodes
    (before and after the one-step adaptation), computing the inner loss,
    computing the updated parameters based on the inner loss, and performing
    the meta-update.
[1] <NAME>, <NAME>, <NAME>, "Model-Agnostic
Meta-Learning for Fast Adaptation of Deep Networks", 2017
(https://arxiv.org/abs/1703.03400)
[2] <NAME>, <NAME>, "Reinforcement learning: An introduction",
2018 (http://incompleteideas.net/book/the-book-2nd.html)
[3] <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, "High-Dimensional Continuous Control Using Generalized
Advantage Estimation", 2016 (https://arxiv.org/abs/1506.02438)
[4] <NAME>, <NAME>, <NAME>, <NAME>,
<NAME>, "Trust Region Policy Optimization", 2015
(https://arxiv.org/abs/1502.05477)
"""
def __init__(self, sampler, policy, baseline, gamma=0.95,
fast_lr=0.5, tau=1.0, device='cpu'):
self.sampler = sampler
self.policy = policy
self.baseline = baseline
self.gamma = gamma
self.fast_lr = fast_lr
self.tau = tau
self.to(device)
def inner_loss(self, episodes, params=None):
"""Compute the inner loss for the one-step gradient update. The inner
loss is REINFORCE with baseline [2], computed on advantages estimated
with Generalized Advantage Estimation (GAE, [3]).
"""
values = self.baseline(episodes)
advantages = episodes.gae(values, tau=self.tau)
advantages = weighted_normalize(advantages, weights=episodes.mask)
pi = self.policy(episodes.observations, params=params)
log_probs = pi.log_prob(episodes.actions)
if log_probs.dim() > 2:
log_probs = torch.sum(log_probs, dim=2)
loss = -weighted_mean(log_probs * advantages, dim=0, weights=episodes.mask)
return loss
def adapt(self, episodes, first_order=False, params=None, lr=None):
"""Adapt the parameters of the policy network to a new task, from
sampled trajectories `episodes`, with a one-step gradient update [1].
"""
if lr is None:
lr = self.fast_lr
# Fit the baseline to the training episodes
self.baseline.fit(episodes)
# Get the loss on the training episodes
loss = self.inner_loss(episodes, params=params)
# Get the new parameters after a one-step gradient update
params = self.policy.update_params(loss, step_size=lr, first_order=first_order, params=params)
return params, loss
def sample(self, tasks, first_order=False):
"""Sample trajectories (before and after the update of the parameters)
for all the tasks `tasks`.
"""
episodes = []
losses = []
for task in tasks:
self.sampler.reset_task(task)
self.policy.reset_context()
train_episodes = self.sampler.sample(self.policy, gamma=self.gamma)
# inner loop (for CAVIA, this only updates the context parameters)
params, loss = self.adapt(train_episodes, first_order=first_order)
# rollouts after inner loop update
valid_episodes = self.sampler.sample(self.policy, params=params, gamma=self.gamma)
episodes.append((train_episodes, valid_episodes))
losses.append(loss.item())
return episodes, losses
def test(self, tasks, num_steps, batch_size, halve_lr):
"""Sample trajectories (before and after the update of the parameters)
        for all the tasks `tasks`.
"""
episodes_per_task = []
for task in tasks:
# reset context params (for cavia) and task
self.policy.reset_context()
self.sampler.reset_task(task)
# start with blank params
params = None
# gather some initial experience and log performance
test_episodes = self.sampler.sample(self.policy, gamma=self.gamma, params=params, batch_size=batch_size)
# initialise list which will log all rollouts for the current task
curr_episodes = [test_episodes]
for i in range(1, num_steps + 1):
# lower learning rate after first update (for MAML, as described in their paper)
if i == 1 and halve_lr:
lr = self.fast_lr / 2
else:
lr = self.fast_lr
# inner-loop update
params, loss = self.adapt(test_episodes, first_order=True, params=params, lr=lr)
# get new rollouts
test_episodes = self.sampler.sample(self.policy, gamma=self.gamma, params=params, batch_size=batch_size)
curr_episodes.append(test_episodes)
episodes_per_task.append(curr_episodes)
self.policy.reset_context()
return episodes_per_task
def kl_divergence(self, episodes, old_pis=None):
kls = []
if old_pis is None:
old_pis = [None] * len(episodes)
for (train_episodes, valid_episodes), old_pi in zip(episodes, old_pis):
# this is the inner-loop update
self.policy.reset_context()
params, _ = self.adapt(train_episodes)
pi = self.policy(valid_episodes.observations, params=params)
if old_pi is None:
old_pi = detach_distribution(pi)
mask = valid_episodes.mask
if valid_episodes.actions.dim() > 2:
mask = mask.unsqueeze(2)
kl = weighted_mean(kl_divergence(pi, old_pi), dim=0, weights=mask)
kls.append(kl)
return torch.mean(torch.stack(kls, dim=0))
def hessian_vector_product(self, episodes, damping=1e-2):
"""Hessian-vector product, based on the Perlmutter method."""
def _product(vector):
kl = self.kl_divergence(episodes)
grads = torch.autograd.grad(kl, self.policy.parameters(), create_graph=True)
flat_grad_kl = parameters_to_vector(grads)
grad_kl_v = torch.dot(flat_grad_kl, vector)
grad2s = torch.autograd.grad(grad_kl_v, self.policy.parameters())
flat_grad2_kl = parameters_to_vector(grad2s)
return flat_grad2_kl + damping * vector
return _product
def surrogate_loss(self, episodes, old_pis=None):
losses, kls, pis = [], [], []
if old_pis is None:
old_pis = [None] * len(episodes)
for (train_episodes, valid_episodes), old_pi in zip(episodes, old_pis):
# do inner-loop update
self.policy.reset_context()
params, _ = self.adapt(train_episodes)
with torch.set_grad_enabled(old_pi is None):
# get action values after inner-loop update
pi = self.policy(valid_episodes.observations, params=params)
pis.append(detach_distribution(pi))
if old_pi is None:
old_pi = detach_distribution(pi)
values = self.baseline(valid_episodes)
advantages = valid_episodes.gae(values, tau=self.tau)
advantages = weighted_normalize(advantages, weights=valid_episodes.mask)
log_ratio = (pi.log_prob(valid_episodes.actions)
- old_pi.log_prob(valid_episodes.actions))
if log_ratio.dim() > 2:
log_ratio = torch.sum(log_ratio, dim=2)
ratio = torch.exp(log_ratio)
loss = -weighted_mean(ratio * advantages, dim=0, weights=valid_episodes.mask)
losses.append(loss)
mask = valid_episodes.mask
if valid_episodes.actions.dim() > 2:
mask = mask.unsqueeze(2)
kl = weighted_mean(kl_divergence(pi, old_pi), dim=0, weights=mask)
kls.append(kl)
return torch.mean(torch.stack(losses, dim=0)), torch.mean(torch.stack(kls, dim=0)), pis
def step(self, episodes, max_kl=1e-3, cg_iters=10, cg_damping=1e-2,
ls_max_steps=10, ls_backtrack_ratio=0.5):
"""Meta-optimization step (ie. update of the initial parameters), based
on Trust Region Policy Optimization (TRPO, [4]).
"""
old_loss, _, old_pis = self.surrogate_loss(episodes)
# this part will take higher order gradients through the inner loop:
grads = torch.autograd.grad(old_loss, self.policy.parameters())
grads = parameters_to_vector(grads)
# Compute the step direction with Conjugate Gradient
hessian_vector_product = self.hessian_vector_product(episodes, damping=cg_damping)
stepdir = conjugate_gradient(hessian_vector_product, grads, cg_iters=cg_iters)
# Compute the Lagrange multiplier
shs = 0.5 * torch.dot(stepdir, hessian_vector_product(stepdir))
lagrange_multiplier = torch.sqrt(shs / max_kl)
step = stepdir / lagrange_multiplier
# Save the old parameters
old_params = parameters_to_vector(self.policy.parameters())
print()
# Line search
step_size = 1.0
for _ in range(ls_max_steps):
vector_to_parameters(old_params - step_size * step, self.policy.parameters())
loss, kl, _ = self.surrogate_loss(episodes, old_pis=old_pis)
improve = loss - old_loss
if (improve.item() < 0.0) and (kl.item() < max_kl):
break
step_size *= ls_backtrack_ratio
else:
print('no update?')
vector_to_parameters(old_params, self.policy.parameters())
print('improve:', improve.item())
print('kl:', kl.item())
print('step_size:', step_size)
return loss
def to(self, device, **kwargs):
self.policy.to(device, **kwargs)
self.baseline.to(device, **kwargs)
self.device = device
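# Illustrative training-loop sketch (not part of the original module). The
# sampler, policy and baseline are assumed to be built elsewhere, and
# `sampler.sample_tasks` is a hypothetical helper that returns a batch of tasks:
#
#   learner = MetaLearner(sampler, policy, baseline, gamma=0.95, fast_lr=0.5, tau=1.0)
#   for _ in range(num_meta_iterations):
#       tasks = sampler.sample_tasks(num_tasks)
#       episodes, losses = learner.sample(tasks, first_order=False)
#       learner.step(episodes, max_kl=1e-2)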
| 2.484375 | 2 |
request/management/commands/purgerequests.py | hramezani/django-request | 373 | 3564 | from datetime import timedelta
from dateutil.relativedelta import relativedelta
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from ...models import Request
DURATION_OPTIONS = {
'hours': lambda amount: timezone.now() - timedelta(hours=amount),
'days': lambda amount: timezone.now() - timedelta(days=amount),
'weeks': lambda amount: timezone.now() - timedelta(weeks=amount),
'months': lambda amount: timezone.now() + relativedelta(months=-amount),
'years': lambda amount: timezone.now() + relativedelta(years=-amount),
}
try:
    # keep backward compatibility with Python 2
input = raw_input
except NameError:
pass
class Command(BaseCommand):
help = 'Purge old requests.'
def add_arguments(self, parser):
parser.add_argument(
'amount',
type=int,
)
parser.add_argument('duration')
parser.add_argument(
'--noinput',
action='store_false',
dest='interactive',
default=True,
help='Tells Django to NOT prompt the user for input of any kind.'
)
def handle(self, *args, **options):
amount = options['amount']
duration = options['duration']
# Check we have the correct values
        if duration[-1] != 's':  # If it's not plural, make it plural
duration_plural = '{0}s'.format(duration)
else:
duration_plural = duration
if duration_plural not in DURATION_OPTIONS:
            raise CommandError('Duration must be one of: {0}'.format(', '.join(DURATION_OPTIONS)))
qs = Request.objects.filter(time__lte=DURATION_OPTIONS[duration_plural](amount))
count = qs.count()
if count == 0:
print('There are no requests to delete.')
return
if options.get('interactive'):
confirm = input('''
    You have requested a purge of old requests.
This will IRREVERSIBLY DESTROY any
requests created before {0} {1} ago.
That is a total of {2} requests.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel:'''.format(amount, duration, count))
else:
confirm = 'yes'
if confirm == 'yes':
qs.delete()
else:
print('Purge cancelled')
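# Illustrative command-line usage (not part of the original module); the
# amounts and durations below are arbitrary examples:
#
#   python manage.py purgerequests 2 weeks
#   python manage.py purgerequests 6 months --noinput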
| 2.234375 | 2 |
tests/test_crypto.py | kimtaehong/PythonForWindows | 0 | 3565 | import pytest
import windows.crypto
import windows.generated_def as gdef
import windows.crypto.generation
from .pfwtest import *
pytestmark = pytest.mark.usefixtures('check_for_gc_garbage')
TEST_CERT = b"""
<KEY>"""
## Cert info:
# Name: PythonForWindowsTest
# Serial: '1b 8e 94 cb 0b 3e eb b6 41 39 f3 c9 09 b1 6b 46'
TEST_PFX_PASSWORD = "<PASSWORD>"
TEST_PFX = b"""
<KEY>
"""
@pytest.fixture()
def rawcert():
return b64decode(TEST_CERT)
@pytest.fixture()
def rawpfx():
return b64decode(TEST_PFX)
PFW_TEST_TMP_KEY_CONTAINER = "PythonForWindowsTMPContainerTest"
RANDOM_CERTIF_NAME = b"PythonForWindowsGeneratedRandomCertifTest"
RANDOM_PFX_PASSWORD = "<PASSWORD>"
@pytest.fixture()
def randomkeypair(keysize=1024):
r"""Generate a cert / pfx. Based on samples\crypto\encryption_demo.py"""
cert_store = windows.crypto.CertificateStore.new_in_memory()
# Create a TMP context that will hold our newly generated key-pair
with windows.crypto.CryptContext(PFW_TEST_TMP_KEY_CONTAINER, None, gdef.PROV_RSA_FULL, 0, retrycreate=True) as ctx:
key = gdef.HCRYPTKEY()
keysize_flags = keysize << 16
# Generate a key-pair that is exportable
windows.winproxy.CryptGenKey(ctx, gdef.AT_KEYEXCHANGE, gdef.CRYPT_EXPORTABLE | keysize_flags, key)
# It does NOT destroy the key-pair from the container,
        # it only releases the key handle
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa379918(v=vs.85).aspx
windows.winproxy.CryptDestroyKey(key)
    # Description of the key-container that will be used to generate the certificate
KeyProvInfo = gdef.CRYPT_KEY_PROV_INFO()
KeyProvInfo.pwszContainerName = PFW_TEST_TMP_KEY_CONTAINER
KeyProvInfo.pwszProvName = None
KeyProvInfo.dwProvType = gdef.PROV_RSA_FULL
KeyProvInfo.dwFlags = 0
KeyProvInfo.cProvParam = 0
KeyProvInfo.rgProvParam = None
#KeyProvInfo.dwKeySpec = AT_SIGNATURE
KeyProvInfo.dwKeySpec = gdef.AT_KEYEXCHANGE
crypt_algo = gdef.CRYPT_ALGORITHM_IDENTIFIER()
crypt_algo.pszObjId = gdef.szOID_RSA_SHA256RSA.encode("ascii") # do something else (bytes in generated ctypes ?)
    # bytes objects have no .format(), so build the name by concatenating byte strings.
certif_name = b"".join((b"CN=", RANDOM_CERTIF_NAME))
# Generate a self-signed certificate based on the given key-container and signature algorithme
certif = windows.crypto.generation.generate_selfsigned_certificate(certif_name, key_info=KeyProvInfo, signature_algo=crypt_algo)
# Add the newly created certificate to our TMP cert-store
cert_store.add_certificate(certif)
# Generate a pfx from the TMP cert-store
pfx = windows.crypto.generation.generate_pfx(cert_store, RANDOM_PFX_PASSWORD)
yield certif, pfx
# Destroy the TMP key container
prov = gdef.HCRYPTPROV()
windows.winproxy.CryptAcquireContextW(prov, PFW_TEST_TMP_KEY_CONTAINER, None, gdef.PROV_RSA_FULL, gdef.CRYPT_DELETEKEYSET)
def test_certificate(rawcert):
cert = windows.crypto.Certificate.from_buffer(rawcert)
assert cert.serial == '1b 8e 94 cb 0b 3e eb b6 41 39 f3 c9 09 b1 6b 46'
assert cert.name == b'PythonForWindowsTest'
assert cert.issuer == b'PythonForWindowsTest'
assert cert.thumbprint == 'EF 0C A8 C9 F9 E0 96 AF 74 18 56 8B C1 C9 57 27 A0 89 29 6A'
assert cert.encoded == rawcert
assert cert.version == 2
assert cert == cert
assert cert is cert.duplicate()
cert.chains # TODO: craft a certificate with a chain for test purpose
cert.store.certs
cert.properties
def test_pfx(rawcert, rawpfx):
pfx = windows.crypto.import_pfx(rawpfx, TEST_PFX_PASSWORD)
orig_cert = windows.crypto.Certificate.from_buffer(rawcert)
certs = pfx.certs
assert len(certs) == 1
    # Test cert comparison
assert certs[0] == orig_cert
def test_open_pfx_bad_password(rawpfx):
with pytest.raises(WindowsError) as ar:
pfx = windows.crypto.import_pfx(rawpfx, "BadPassword")
def test_encrypt_decrypt(rawcert, rawpfx):
message_to_encrypt = b"Testing message \xff\x01"
cert = windows.crypto.Certificate.from_buffer(rawcert)
# encrypt should accept a cert or iterable of cert
res = windows.crypto.encrypt(cert, message_to_encrypt)
res2 = windows.crypto.encrypt([cert, cert], message_to_encrypt)
del cert
assert message_to_encrypt not in res
# Open pfx and decrypt
pfx = windows.crypto.import_pfx(rawpfx, TEST_PFX_PASSWORD)
decrypt = windows.crypto.decrypt(pfx, res)
decrypt2 = windows.crypto.decrypt(pfx, res2)
assert message_to_encrypt == decrypt
assert decrypt == decrypt2
def test_randomkeypair(randomkeypair):
randcert, randrawpfx = randomkeypair
assert randcert.name == RANDOM_CERTIF_NAME
randpfx = windows.crypto.import_pfx(randrawpfx, RANDOM_PFX_PASSWORD) # Check password is good too
def test_encrypt_decrypt_multiple_receivers(rawcert, rawpfx, randomkeypair):
message_to_encrypt = b"\xff\x00 Testing message \xff\x01"
# Receiver 1: random key pair
randcert, randrawpfx = randomkeypair
randpfx = windows.crypto.import_pfx(randrawpfx, RANDOM_PFX_PASSWORD)
# Receiver 1: PFW-test-keypair
pfx = windows.crypto.import_pfx(rawpfx, TEST_PFX_PASSWORD)
cert = windows.crypto.Certificate.from_buffer(rawcert)
assert cert.name != randcert.name
assert cert.encoded != randcert.encoded
# Encrypt the message with 2 differents certificates
encrypted = windows.crypto.encrypt([cert, randcert], message_to_encrypt)
# Decrypt with each PFX and check the result is valid/the same
decrypted = windows.crypto.decrypt(pfx, encrypted)
decrypted2 = windows.crypto.decrypt(randpfx, encrypted)
assert decrypted == decrypted2 == message_to_encrypt
def test_crypt_obj():
path = r"C:\windows\system32\kernel32.dll"
x = windows.crypto.CryptObject(path)
x.crypt_msg.certs
x.crypt_msg.signers
x.signers_and_certs
# TODO: Need some better ideas
def test_certificate_from_store():
return windows.crypto.CertificateStore.from_system_store("Root")
def test_sign_verify(rawcert, rawpfx):
message_to_sign = b"Testing message \xff\x01"
# Load PFX (priv+pub key) & certif (pubkey only)
pfx = windows.crypto.import_pfx(rawpfx, TEST_PFX_PASSWORD)
cert = windows.crypto.Certificate.from_buffer(rawcert)
signed_blob = windows.crypto.sign(pfx.certs[0], message_to_sign)
assert message_to_sign in signed_blob
decoded_blob = windows.crypto.verify_signature(cert, signed_blob)
assert decoded_blob == message_to_sign
def test_sign_verify_fail(rawcert, rawpfx):
message_to_sign = b"Testing message \xff\x01"
# Load PFX (priv+pub key) & certif (pubkey only)
pfx = windows.crypto.import_pfx(rawpfx, TEST_PFX_PASSWORD)
cert = windows.crypto.Certificate.from_buffer(rawcert)
signed_blob = windows.crypto.sign(pfx.certs[0], message_to_sign)
assert message_to_sign in signed_blob
# Tamper the signed mesasge content
signed_blob = signed_blob.replace(b"message", b"massage")
with pytest.raises(windows.winproxy.WinproxyError) as excinfo:
decoded_blob = windows.crypto.verify_signature(cert, signed_blob)
assert excinfo.value.winerror == gdef.STATUS_INVALID_SIGNATURE
# str(windows.crypto.encrypt(TEST_CERT, "Hello crypto")).encode("base64")
# Target serial == TEST_CERT.Serial == 1b 8e 94 cb 0b 3e eb b6 41 39 f3 c9 09 b1 6b 46
TEST_CRYPTMSG = b"""<KEY>"""
def test_cryptmsg_from_data():
rawdata = b64decode(TEST_CRYPTMSG)
cryptmsg = windows.crypto.CryptMessage.from_buffer(rawdata)
rawtarget = b"\x1b\x8e\x94\xcb\x0b>\xeb\xb6A9\xf3\xc9\t\xb1kF"
assert cryptmsg.get_recipient_data(0).SerialNumber.data[::-1] == rawtarget
| 2.109375 | 2 |
cdci_data_analysis/analysis/plot_tools.py | andreatramacere/cdci_data_analysis | 0 | 3566 | from __future__ import absolute_import, division, print_function
from builtins import (bytes, str, open, super, range,
                      zip, round, input, int, pow, object, map)
__author__ = "<NAME>"
import numpy as np
from astropy import wcs
from bokeh.layouts import row, widgetbox,gridplot
from bokeh.models import CustomJS, Slider,HoverTool,ColorBar,LinearColorMapper,LabelSet,ColumnDataSource
from bokeh.embed import components
from bokeh.plotting import figure
from bokeh.palettes import Plasma256
class Image(object):
def __init__(self,data,header):
self.data=data
self.header=header
def change_image_contrast(self, attr, old, new):
# print attr,old,new
self.fig_im.glyph.color_mapper.update(low=self.graph_min_slider.value, high=self.graph_max_slider.value)
def get_html_draw(self,w=None,h=None, catalog=None, plot=False, vmin=None, vmax=None):
#import plotly
#import plotly.graph_objs as go
#from plotly.graph_objs import Layout
# print('vmin,vmax',vmin,vmax)
msk = ~np.isnan(self.data)
if vmin is None:
vmin = self.data[msk].min()
if vmax is None:
vmax = self.data[msk].max()
min_s = self.data.min()
max_s = self.data.max()
r = self.data.shape[0] * 2
c = self.data.shape[1] * 2
fig = figure(plot_width=w, plot_height=h, x_range=(0, c * 0.5), y_range=(0, r * 0.5),
tools=['pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair'])
w = wcs.WCS(self.header)
color_mapper = LinearColorMapper(low=min_s, high=max_s, palette=Plasma256)
fig_im = fig.image(image=[self.data], x=[0], y=[0], dw=[c * 0.5], dh=[r * 0.5],
color_mapper=color_mapper)
hover = HoverTool(tooltips=[("x", "$x"), ("y", "$y"), ("value", "@image")],
renderers=[fig_im])
fig.add_tools(hover)
#fig, (ax) = plt.subplots(1, 1, figsize=(4, 3), subplot_kw={'projection': WCS(self.header)})
#im = ax.imshow(self.data,
# origin='lower',
# zorder=1,
# interpolation='none',
# aspect='equal',
# cmap=plt.get_cmap('jet'),
# vmin=vmin,
# vmax=vmax)
if catalog is not None:
lon = catalog.ra
lat = catalog.dec
if len(lat) > 0.:
pixcrd = w.wcs_world2pix(np.column_stack((lon, lat)), 0)
msk = ~np.isnan(pixcrd[:, 0])
#ax.plot(pixcrd[:, 0][msk], pixcrd[:, 1][msk], 'o', mfc='none')
source = ColumnDataSource(data=dict(lon=pixcrd[:, 0][msk]+0.5,
lat=pixcrd[:, 1][msk]+0.5,
names=catalog.name[msk]))
#for ID, (x, y) in enumerate(pixcrd):
# if msk[ID]:
# # print ('xy',(pixcrd[:, 0][ID], pixcrd[:, 1][ID]))
# ax.annotate('%s' % catalog.name[ID], xy=(x, y), color='white')
#print(pixcrd[:][msk])
fig.scatter(x='lon', y='lat', marker='circle', size=15,
line_color="white", fill_color=None, alpha=1.0, source=source)
labels = LabelSet(x='lon', y='lat', text='names', level='glyph',
x_offset=5, y_offset=5, render_mode='canvas', source=source, text_color='white')
fig.add_layout(labels)
#print'cat', catalog[msk]
color_bar = ColorBar(color_mapper=color_mapper,
label_standoff=12, border_line_color=None, location=(0, 0))
JS_code_slider = """
var vmin = low_slider.value;
var vmax = high_slider.value;
fig_im.glyph.color_mapper.high = vmax;
fig_im.glyph.color_mapper.low = vmin;
"""
callback = CustomJS(args=dict(fig_im=fig_im), code=JS_code_slider)
self.graph_min_slider = Slider(title="Sig. Min", start=min_s, end=max_s, step=1, value=min_s, callback=callback)
self.graph_max_slider = Slider(title="Sig. Max", start=min_s, end=max_s, step=1, value=max_s * 0.8,
callback=callback)
self.graph_min_slider.on_change('value', self.change_image_contrast)
self.graph_max_slider.on_change('value', self.change_image_contrast)
callback.args["low_slider"] = self.graph_min_slider
callback.args["high_slider"] = self.graph_max_slider
#ax.set_xlabel('RA')
#ax.set_ylabel('DEC')
#ax.grid(True, color='white')
#fig.colorbar(im, ax=ax)
#plugins.connect(fig, plugins.MousePosition(fontsize=14))
#if plot == True:
# print('plot', plot)
# mpld3.show()
fig.add_layout(color_bar, 'right')
layout = row(
fig, widgetbox(self.graph_min_slider, self.graph_max_slider),
)
#curdoc().add_root(layout)
#output_file("slider.html", title="slider.py example")
#from bokeh.io import show
#show(layout)
script, div = components(layout)
html_dict = {}
html_dict['script'] = script
html_dict['div'] = div
return html_dict
class ScatterPlot(object):
def __init__(self,w,h,x_label=None,y_label=None,x_range=None,y_range=None,title=None,y_axis_type='linear',x_axis_type='linear'):
hover = HoverTool(tooltips=[("x", "$x"), ("y", "$y")])
self.fig = figure(title=title, width=w, height=h,x_range=x_range,y_range=y_range,
y_axis_type=y_axis_type,
x_axis_type=x_axis_type,
tools=[hover, 'pan,box_zoom,box_select,wheel_zoom,reset,save,crosshair']
)
if x_label is not None:
self.fig.xaxis.axis_label = x_label
if y_label is not None:
self.fig.yaxis.axis_label = y_label
def add_errorbar(self, x, y, xerr=None, yerr=None, color='red',
point_kwargs={}, error_kwargs={}):
self.fig.circle(x, y, color=color, **point_kwargs)
if xerr is not None:
x_err_x = []
x_err_y = []
for px, py, err in zip(x, y, xerr):
x_err_x.append((px - err, px + err))
x_err_y.append((py, py))
self.fig.multi_line(x_err_x, x_err_y, color=color, **error_kwargs)
if yerr is not None:
y_err_x = []
y_err_y = []
for px, py, err in zip(x, y, yerr):
y_err_x.append((px, px))
y_err_y.append((py - err, py + err))
self.fig.multi_line(y_err_x, y_err_y, color=color, **error_kwargs)
def add_step_line(self,x,y,legend=None):
#print('a')
self.fig.step(x,y,name=legend, mode="center")
#print('b')
def add_line(self,x,y,legend=None,color=None):
self.fig.line(x,y,legend=legend,line_color=color)
def get_html_draw(self):
layout = row(
self.fig
)
#curdoc().add_root(layout)
#show(layout)
script, div = components(layout)
#print ('script',script)
#print ('div',div)
html_dict = {}
html_dict['script'] = script
html_dict['div'] = div
return html_dict
class GridPlot(object):
def __init__(self,f1,f2,w=None,h=None):
self.f1=f1
self.f2=f2
def get_html_draw(self,w=None,h=None):
#l = layout([self.f1.fig],[self.f2.fig])
grid = gridplot([self.f1.fig,self.f2.fig],ncols=1,plot_width=w, plot_height=h)
#curdoc().add_root(grid)
#show(grid)
#output_file("test.html")
script, div = components(grid)
html_dict={}
html_dict['script']=script
html_dict['div'] = div
return html_dict
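# Illustrative usage sketch (not part of the original module; assumes a bokeh
# version compatible with the classes above). It builds a small scatter plot
# with error bars and renders the script/div components; the data values are
# arbitrary.
if __name__ == '__main__':
    sp = ScatterPlot(w=400, h=300, x_label='time', y_label='rate')
    sp.add_errorbar([1, 2, 3], [10, 20, 15], yerr=[1, 2, 1])
    sp.add_line([1, 2, 3], [10, 20, 15], legend='demo', color='blue')
    html = sp.get_html_draw()
    print(sorted(html.keys()))  # ['div', 'script']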
| 2.015625 | 2 |
test/unit/test_testaid_unit_pathlist.py | RebelCodeBase/testaid | 17 | 3567 | from pathlib import Path
from testaid.pathlist import PathList
def test_testaid_unit_pathlist_roles_blacklist(testvars_roles_blacklist):
assert testvars_roles_blacklist is not None
def test_testaid_unit_pathlist_roles_whitelist(testvars_roles_whitelist):
assert testvars_roles_whitelist is not None
def test_testaid_unit_pathlist_get(tmp_path):
msd = tmp_path / 'molecule_scenario_directory'
dir1 = msd / 'dir1'
dir1.mkdir(parents=True)
dir2 = tmp_path / 'dir2'
dir2.mkdir()
file1 = dir1 / 'file1.yml'
file1.touch()
file2 = dir1 / 'file2.yml'
file2.touch()
file3 = dir2 / 'file3.yml'
file3.touch()
my_pathlist = [Path(file3), Path(file1), Path(file2)]
my_pathstring = 'dir1:../dir2/file3.yml'
pathlist = PathList(my_pathstring, msd)
assert pathlist.get() == my_pathlist
| 2.40625 | 2 |
tests/unit/zhmcclient/test_hba.py | vkpro-forks/python-zhmcclient | 0 | 3568 | <filename>tests/unit/zhmcclient/test_hba.py<gh_stars>0
# Copyright 2016-2017 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for _hba module.
"""
from __future__ import absolute_import, print_function
import pytest
import re
import copy
from zhmcclient import Client, Hba, HTTPError, NotFound
from zhmcclient_mock import FakedSession
from tests.common.utils import assert_resources
# Object IDs and names of our faked HBAs:
HBA1_OID = 'hba 1-oid'
HBA1_NAME = 'hba 1'
HBA2_OID = 'hba 2-oid'
HBA2_NAME = 'hba 2'
# URIs and Object IDs of elements referenced in HBA properties:
FCP1_OID = 'fake-fcp1-oid'
PORT11_OID = 'fake-port11-oid'
PORT11_URI = '/api/adapters/{}/storage-ports/{}'.format(FCP1_OID, PORT11_OID)
class TestHba(object):
"""All tests for Hba and HbaManager classes."""
def setup_method(self):
"""
Set up a faked session, and add a faked CPC in DPM mode with one
partition that has no HBAs.
Add one FCP adapter and port.
"""
self.session = FakedSession('fake-host', 'fake-hmc', '2.13.1', '1.8')
self.client = Client(self.session)
# Add a CPC in DPM mode
self.faked_cpc = self.session.hmc.cpcs.add({
'element-id': 'fake-cpc1-oid',
# element-uri is set up automatically
'parent': None,
'class': 'cpc',
'name': 'fake-cpc1-name',
'description': 'CPC #1 (DPM mode)',
'status': 'active',
'dpm-enabled': True,
'is-ensemble-member': False,
'iml-mode': 'dpm',
})
self.cpc = self.client.cpcs.find(name='fake-cpc1-name')
# Add a partition to the CPC
self.faked_partition = self.faked_cpc.partitions.add({
'element-id': 'fake-part1-oid',
# element-uri will be automatically set
'parent': self.faked_cpc.uri,
'class': 'partition',
'name': 'fake-part1-name',
'description': 'Partition #1',
'status': 'active',
'initial-memory': 1024,
'maximum-memory': 2048,
})
self.partition = self.cpc.partitions.find(name='fake-part1-name')
# Add an FCP adapter and port to the CPC
self.faked_fcp1 = self.faked_cpc.adapters.add({
'object-id': FCP1_OID,
'parent': self.faked_cpc.uri,
'class': 'adapter',
'name': 'fcp1',
'description': 'FCP #1',
'status': 'active',
'type': 'fcp',
'adapter-id': '123',
'detected-card-type': '10gbe-roce-express',
'card-location': '1234-5678-J.01',
'port-count': 1,
'network-port-uris': [],
'state': 'online',
'configured-capacity': 80,
'used-capacity': 0,
'allowed-capacity': 80,
'maximum-total-capacity': 80,
'physical-channel-status': 'operating',
})
self.faked_port11 = self.faked_fcp1.ports.add({
'element-id': PORT11_OID,
'parent': self.faked_fcp1.uri,
'class': 'storage-port',
'index': 1,
'name': 'fake-port11-name',
'description': 'FCP #1 Port #1',
})
assert PORT11_URI == self.faked_port11.uri
def add_hba1(self):
"""Add a faked HBA 1 to the faked partition."""
faked_hba = self.faked_partition.hbas.add({
'element-id': HBA1_OID,
# element-uri will be automatically set
'parent': self.faked_partition.uri,
'class': 'hba',
'name': HBA1_NAME,
'description': 'HBA ' + HBA1_NAME,
'adapter-port-uri': PORT11_URI,
'wwpn': 'AABBCCDDEEFF0011',
'device-number': '1111',
})
return faked_hba
def add_hba2(self):
"""Add a faked HBA 2 to the faked partition."""
faked_hba = self.faked_partition.hbas.add({
'element-id': HBA2_OID,
# element-uri will be automatically set
'parent': self.faked_partition.uri,
'class': 'hba',
'name': HBA2_NAME,
'description': 'HBA ' + HBA2_NAME,
'adapter-port-uri': PORT11_URI,
'wwpn': 'AABBCCDDEEFF0012',
'device-number': '1112',
})
return faked_hba
def test_hbamanager_initial_attrs(self):
"""Test initial attributes of HbaManager."""
hba_mgr = self.partition.hbas
# Verify all public properties of the manager object
assert hba_mgr.resource_class == Hba
assert hba_mgr.session == self.session
assert hba_mgr.parent == self.partition
assert hba_mgr.partition == self.partition
# TODO: Test for HbaManager.__repr__()
@pytest.mark.parametrize(
"full_properties_kwargs, prop_names", [
(dict(),
['element-uri']),
(dict(full_properties=False),
['element-uri']),
(dict(full_properties=True),
None),
]
)
def test_hbamanager_list_full_properties(
self, full_properties_kwargs, prop_names):
"""Test HbaManager.list() with full_properties."""
# Add two faked HBAs
faked_hba1 = self.add_hba1()
faked_hba2 = self.add_hba2()
exp_faked_hbas = [faked_hba1, faked_hba2]
hba_mgr = self.partition.hbas
# Execute the code to be tested
hbas = hba_mgr.list(**full_properties_kwargs)
assert_resources(hbas, exp_faked_hbas, prop_names)
@pytest.mark.parametrize(
"filter_args, exp_oids", [
({'element-id': HBA1_OID},
[HBA1_OID]),
({'element-id': HBA2_OID},
[HBA2_OID]),
({'element-id': [HBA1_OID, HBA2_OID]},
[HBA1_OID, HBA2_OID]),
({'element-id': [HBA1_OID, HBA1_OID]},
[HBA1_OID]),
({'element-id': HBA1_OID + 'foo'},
[]),
({'element-id': [HBA1_OID, HBA2_OID + 'foo']},
[HBA1_OID]),
({'element-id': [HBA2_OID + 'foo', HBA1_OID]},
[HBA1_OID]),
({'name': HBA1_NAME},
[HBA1_OID]),
({'name': HBA2_NAME},
[HBA2_OID]),
({'name': [HBA1_NAME, HBA2_NAME]},
[HBA1_OID, HBA2_OID]),
({'name': HBA1_NAME + 'foo'},
[]),
({'name': [HBA1_NAME, HBA2_NAME + 'foo']},
[HBA1_OID]),
({'name': [HBA2_NAME + 'foo', HBA1_NAME]},
[HBA1_OID]),
({'name': [HBA1_NAME, HBA1_NAME]},
[HBA1_OID]),
({'name': '.*hba 1'},
[HBA1_OID]),
({'name': 'hba 1.*'},
[HBA1_OID]),
({'name': 'hba .'},
[HBA1_OID, HBA2_OID]),
({'name': '.ba 1'},
[HBA1_OID]),
({'name': '.+'},
[HBA1_OID, HBA2_OID]),
({'name': 'hba 1.+'},
[]),
({'name': '.+hba 1'},
[]),
({'name': HBA1_NAME,
'element-id': HBA1_OID},
[HBA1_OID]),
({'name': HBA1_NAME,
'element-id': HBA1_OID + 'foo'},
[]),
({'name': HBA1_NAME + 'foo',
'element-id': HBA1_OID},
[]),
({'name': HBA1_NAME + 'foo',
'element-id': HBA1_OID + 'foo'},
[]),
]
)
def test_hbamanager_list_filter_args(self, filter_args, exp_oids):
"""Test HbaManager.list() with filter_args."""
# Add two faked HBAs
self.add_hba1()
self.add_hba2()
hba_mgr = self.partition.hbas
# Execute the code to be tested
hbas = hba_mgr.list(filter_args=filter_args)
assert len(hbas) == len(exp_oids)
if exp_oids:
oids = [hba.properties['element-id'] for hba in hbas]
assert set(oids) == set(exp_oids)
@pytest.mark.parametrize(
"initial_partition_status, exp_status_exc", [
('stopped', None),
('terminated', None),
('starting', HTTPError({'http-status': 409, 'reason': 1})),
('active', None),
('stopping', HTTPError({'http-status': 409, 'reason': 1})),
('degraded', None),
('reservation-error', None),
('paused', None),
]
)
@pytest.mark.parametrize(
"input_props, exp_prop_names, exp_prop_exc", [
({},
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'name': 'fake-hba-x'},
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'adapter-port-uri': PORT11_URI},
None,
HTTPError({'http-status': 400, 'reason': 5})),
({'name': 'fake-hba-x',
'adapter-port-uri': PORT11_URI},
['element-uri', 'name', 'adapter-port-uri'],
None),
]
)
def test_hbamanager_create(
self, input_props, exp_prop_names, exp_prop_exc,
initial_partition_status, exp_status_exc):
"""Test HbaManager.create()."""
# Set the status of the faked partition
self.faked_partition.properties['status'] = initial_partition_status
hba_mgr = self.partition.hbas
if exp_status_exc:
exp_exc = exp_status_exc
elif exp_prop_exc:
exp_exc = exp_prop_exc
else:
exp_exc = None
if exp_exc:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
hba = hba_mgr.create(properties=input_props)
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
else:
# Execute the code to be tested.
# Note: the Hba object returned by Hba.create() has
# the input properties plus 'element-uri' plus 'element-id'.
hba = hba_mgr.create(properties=input_props)
# Check the resource for consistency within itself
assert isinstance(hba, Hba)
hba_name = hba.name
exp_hba_name = hba.properties['name']
assert hba_name == exp_hba_name
hba_uri = hba.uri
exp_hba_uri = hba.properties['element-uri']
assert hba_uri == exp_hba_uri
# Check the properties against the expected names and values
for prop_name in exp_prop_names:
assert prop_name in hba.properties
if prop_name in input_props:
value = hba.properties[prop_name]
exp_value = input_props[prop_name]
assert value == exp_value
def test_hba_repr(self):
"""Test Hba.__repr__()."""
# Add a faked hba
faked_hba = self.add_hba1()
hba_mgr = self.partition.hbas
hba = hba_mgr.find(name=faked_hba.name)
# Execute the code to be tested
repr_str = repr(hba)
repr_str = repr_str.replace('\n', '\\n')
# We check just the begin of the string:
assert re.match(r'^{classname}\s+at\s+0x{id:08x}\s+\(\\n.*'.
format(classname=hba.__class__.__name__,
id=id(hba)),
repr_str)
@pytest.mark.parametrize(
"initial_partition_status, exp_exc", [
('stopped', None),
('terminated', None),
('starting', HTTPError({'http-status': 409, 'reason': 1})),
('active', None),
('stopping', HTTPError({'http-status': 409, 'reason': 1})),
('degraded', None),
('reservation-error', None),
('paused', None),
]
)
def test_hba_delete(self, initial_partition_status, exp_exc):
"""Test Hba.delete()."""
# Add a faked HBA to be tested and another one
faked_hba = self.add_hba1()
self.add_hba2()
# Set the status of the faked partition
self.faked_partition.properties['status'] = initial_partition_status
hba_mgr = self.partition.hbas
hba = hba_mgr.find(name=faked_hba.name)
if exp_exc:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
hba.delete()
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
# Check that the HBA still exists
hba_mgr.find(name=faked_hba.name)
else:
# Execute the code to be tested.
hba.delete()
# Check that the HBA no longer exists
with pytest.raises(NotFound) as exc_info:
hba_mgr.find(name=faked_hba.name)
def test_hba_delete_create_same_name(self):
"""Test Hba.delete() followed by Hba.create() with same name."""
# Add a faked HBA to be tested and another one
faked_hba = self.add_hba1()
hba_name = faked_hba.name
self.add_hba2()
# Construct the input properties for a third HBA with same name
part3_props = copy.deepcopy(faked_hba.properties)
part3_props['description'] = 'Third HBA'
# Set the status of the faked partition
self.faked_partition.properties['status'] = 'stopped' # deletable
hba_mgr = self.partition.hbas
hba = hba_mgr.find(name=hba_name)
# Execute the deletion code to be tested.
hba.delete()
# Check that the HBA no longer exists
with pytest.raises(NotFound):
hba_mgr.find(name=hba_name)
# Execute the creation code to be tested.
hba_mgr.create(part3_props)
# Check that the HBA exists again under that name
hba3 = hba_mgr.find(name=hba_name)
description = hba3.get_property('description')
assert description == 'Third HBA'
@pytest.mark.parametrize(
"input_props", [
{},
{'description': 'New HBA description'},
{'device-number': 'FEDC',
'description': 'New HBA description'},
]
)
def test_hba_update_properties(self, input_props):
"""Test Hba.update_properties()."""
# Add a faked HBA
faked_hba = self.add_hba1()
# Set the status of the faked partition
self.faked_partition.properties['status'] = 'stopped' # updatable
hba_mgr = self.partition.hbas
hba = hba_mgr.find(name=faked_hba.name)
hba.pull_full_properties()
saved_properties = copy.deepcopy(hba.properties)
# Execute the code to be tested
hba.update_properties(properties=input_props)
# Verify that the resource object already reflects the property
# updates.
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in hba.properties
prop_value = hba.properties[prop_name]
assert prop_value == exp_prop_value
# Refresh the resource object and verify that the resource object
# still reflects the property updates.
hba.pull_full_properties()
for prop_name in saved_properties:
if prop_name in input_props:
exp_prop_value = input_props[prop_name]
else:
exp_prop_value = saved_properties[prop_name]
assert prop_name in hba.properties
prop_value = hba.properties[prop_name]
assert prop_value == exp_prop_value
def test_hba_update_name(self):
"""Test Hba.update_properties() with 'name' property."""
# Add a faked HBA
faked_hba = self.add_hba1()
hba_name = faked_hba.name
# Set the status of the faked partition
self.faked_partition.properties['status'] = 'stopped' # updatable
hba_mgr = self.partition.hbas
hba = hba_mgr.find(name=hba_name)
new_hba_name = "new-" + hba_name
# Execute the code to be tested
hba.update_properties(properties={'name': new_hba_name})
# Verify that the resource is no longer found by its old name, using
# list() (this does not use the name-to-URI cache).
hbas_list = hba_mgr.list(
filter_args=dict(name=hba_name))
assert len(hbas_list) == 0
# Verify that the resource is no longer found by its old name, using
# find() (this uses the name-to-URI cache).
with pytest.raises(NotFound):
hba_mgr.find(name=hba_name)
# Verify that the resource object already reflects the update, even
# though it has not been refreshed yet.
assert hba.properties['name'] == new_hba_name
# Refresh the resource object and verify that it still reflects the
# update.
hba.pull_full_properties()
assert hba.properties['name'] == new_hba_name
# Verify that the resource can be found by its new name, using find()
new_hba_find = hba_mgr.find(name=new_hba_name)
assert new_hba_find.properties['name'] == new_hba_name
# Verify that the resource can be found by its new name, using list()
new_hbas_list = hba_mgr.list(
filter_args=dict(name=new_hba_name))
assert len(new_hbas_list) == 1
new_hba_list = new_hbas_list[0]
assert new_hba_list.properties['name'] == new_hba_name
@pytest.mark.parametrize(
"initial_partition_status, exp_exc", [
('stopped', None),
('terminated', None),
('starting', HTTPError({'http-status': 409, 'reason': 1})),
('active', None),
('stopping', HTTPError({'http-status': 409, 'reason': 1})),
('degraded', None),
('reservation-error', None),
('paused', None),
]
)
def test_hba_reassign_port(self, initial_partition_status, exp_exc):
"""Test Hba.reassign_port()."""
# Add a faked HBA to be tested.
# Its port points to a faked URI.
faked_hba = self.add_hba1()
# Add a faked FCP with one port that the HBA will be reassigned to
faked_adapter = self.faked_cpc.adapters.add({
'object-id': 'fake-fcp1-oid',
# object-uri is auto-set based upon object-id
'parent': self.faked_cpc.uri,
'class': 'adapter',
'name': 'fake-fcp1',
'description': 'FCP #1',
'status': 'active',
'type': 'fcp',
# adapter-family is auto-set based upon type
'adapter-id': '123',
'detected-card-type': 'ficon-express-16s',
'card-location': '1234-5678-J.01',
'port-count': 1,
'storage-port-uris': [],
'state': 'online',
'configured-capacity': 80,
'used-capacity': 0,
'allowed-capacity': 80,
'maximum-total-capacity': 80,
'channel-path-id': '1B',
'physical-channel-status': 'operating',
})
adapter = self.cpc.adapters.find(name='fake-fcp1')
faked_adapter.ports.add({
'element-id': 'fake-port1-oid',
# element-uri is auto-set based upon object-id
'parent': faked_adapter.uri,
'class': 'storage-port',
'name': 'fake-port1',
'description': 'FCP #1 Port 1',
'index': 0,
'fabric-id': None,
})
port = adapter.ports.find(name='fake-port1')
# Set the status of the faked partition
self.faked_partition.properties['status'] = initial_partition_status
# The HBA object we will perform the test on
hba = self.partition.hbas.find(name=faked_hba.name)
# Save the HBA properties for later comparison
hba.pull_full_properties()
saved_properties = copy.deepcopy(hba.properties)
if exp_exc:
with pytest.raises(exp_exc.__class__) as exc_info:
# Execute the code to be tested
hba.reassign_port(port)
exc = exc_info.value
if isinstance(exp_exc, HTTPError):
assert exc.http_status == exp_exc.http_status
assert exc.reason == exp_exc.reason
# Check that the port of the HBA is unchanged ...
prop_name = 'adapter-port-uri'
# ... in the resource object:
assert hba.properties[prop_name] == saved_properties[prop_name]
# ... and again when refreshed from the mock state:
hba.pull_full_properties()
assert hba.properties[prop_name] == saved_properties[prop_name]
else:
# Execute the code to be tested.
hba.reassign_port(port)
# Check that the port of the HBA has been set ...
# ... in the resource object:
prop_name = 'adapter-port-uri'
assert hba.properties[prop_name] == port.uri
# ... and again when refreshed from the mock state:
hba.pull_full_properties()
assert hba.properties[prop_name] == port.uri
| 2.046875 | 2 |
compiler-rt/test/asan/TestCases/Windows/lit.local.cfg.py | medismailben/llvm-project | 2,338 | 3569 | def getRoot(config):
if not config.parent:
return config
return getRoot(config.parent)
root = getRoot(config)
# We only run a small set of tests on Windows for now.
# Override the parent directory's "unsupported" decision until we can handle
# all of its tests.
if root.host_os in ['Windows']:
config.unsupported = False
else:
config.unsupported = True
| 2.546875 | 3 |
Lib/test/libregrtest/utils.py | oskomorokhov/cpython | 5 | 3570 | <filename>Lib/test/libregrtest/utils.py
import math
import os.path
import sys
import textwrap
from test import support
def format_duration(seconds):
ms = math.ceil(seconds * 1e3)
seconds, ms = divmod(ms, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
parts = []
if hours:
parts.append('%s hour' % hours)
if minutes:
parts.append('%s min' % minutes)
if seconds:
if parts:
# 2 min 1 sec
parts.append('%s sec' % seconds)
else:
# 1.0 sec
parts.append('%.1f sec' % (seconds + ms / 1000))
if not parts:
return '%s ms' % ms
parts = parts[:2]
return ' '.join(parts)
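# Illustrative examples (added; not part of the original module), following the
# rounding and two-part truncation logic above:
#   format_duration(3725.5) -> '1 hour 2 min'
#   format_duration(0.0004) -> '1 ms'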
def removepy(names):
if not names:
return
for idx, name in enumerate(names):
basename, ext = os.path.splitext(name)
if ext == '.py':
names[idx] = basename
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
def printlist(x, width=70, indent=4, file=None):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
blanks = ' ' * indent
# Print the sorted list: 'x' may be a '--random' list or a set()
print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks),
file=file)
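# Example sketch (added): printlist({'test_os', 'test_io', 'test_sys'}, width=40)
# prints the three names sorted alphabetically, indented by four blanks and
# wrapped at 40 columns.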
def print_warning(msg):
support.print_warning(msg)
orig_unraisablehook = None
def regrtest_unraisable_hook(unraisable):
global orig_unraisablehook
support.environment_altered = True
print_warning("Unraisable exception")
old_stderr = sys.stderr
try:
sys.stderr = sys.__stderr__
orig_unraisablehook(unraisable)
finally:
sys.stderr = old_stderr
def setup_unraisable_hook():
global orig_unraisablehook
orig_unraisablehook = sys.unraisablehook
sys.unraisablehook = regrtest_unraisable_hook
def clear_caches():
# Clear the warnings registry, so they can be displayed again
for mod in sys.modules.values():
if hasattr(mod, '__warningregistry__'):
del mod.__warningregistry__
# Flush standard output, so that buffered data is sent to the OS and
# associated Python objects are reclaimed.
for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
if stream is not None:
stream.flush()
# Clear assorted module caches.
# Don't worry about resetting the cache if the module is not loaded
try:
distutils_dir_util = sys.modules['distutils.dir_util']
except KeyError:
pass
else:
distutils_dir_util._path_created.clear()
try:
re = sys.modules['re']
except KeyError:
pass
else:
re.purge()
try:
_strptime = sys.modules['_strptime']
except KeyError:
pass
else:
_strptime._regex_cache.clear()
try:
urllib_parse = sys.modules['urllib.parse']
except KeyError:
pass
else:
urllib_parse.clear_cache()
try:
urllib_request = sys.modules['urllib.request']
except KeyError:
pass
else:
urllib_request.urlcleanup()
try:
linecache = sys.modules['linecache']
except KeyError:
pass
else:
linecache.clearcache()
try:
mimetypes = sys.modules['mimetypes']
except KeyError:
pass
else:
mimetypes._default_mime_types()
try:
filecmp = sys.modules['filecmp']
except KeyError:
pass
else:
filecmp._cache.clear()
try:
struct = sys.modules['struct']
except KeyError:
pass
else:
struct._clearcache()
try:
doctest = sys.modules['doctest']
except KeyError:
pass
else:
doctest.master = None
try:
ctypes = sys.modules['ctypes']
except KeyError:
pass
else:
ctypes._reset_cache()
try:
typing = sys.modules['typing']
except KeyError:
pass
else:
for f in typing._cleanups:
f()
support.gc_collect()
| 2.671875 | 3 |
efetch_server/plugins/fa_sqlite/fa_sqlite_ajax.py | Syrkadian/efetch | 38 | 3571 | """
AJAX for SQLite Viewer plugin
"""
from yapsy.IPlugin import IPlugin
from flask import Response, jsonify
import json
import logging
import sqlite3
class FaSqliteAjax(IPlugin):
def __init__(self):
self.display_name = 'SQLite Ajax'
self.popularity = 0
self.cache = True
self.fast = False
self.action = False
IPlugin.__init__(self)
def activate(self):
IPlugin.activate(self)
return
def deactivate(self):
IPlugin.deactivate(self)
return
def check(self, evidence, path_on_disk):
"""Checks if the file is compatible with this plugin"""
return True
def mimetype(self, mimetype):
"""Returns the mimetype of this plugins get command"""
return "application/json"
def get(self, evidence, helper, path_on_disk, request):
"""Returns the result of this plugin to be displayed in a browser"""
method = helper.get_request_value(request, 'method', raise_key_error=True)
if method == "base":
return self.base_tree(path_on_disk)
elif method == "children":
return self.get_children(request, helper, path_on_disk)
elif method == "values":
return self.values(request, helper, path_on_disk)
logging.error('Unknown method "' + method + '" provided')
raise ValueError('Method "' + method + '" is not valid')
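# Illustrative request flow (added sketch; the request/helper objects come from
# the surrounding efetch framework and are assumed here):
#   ?method=base -> JSON array describing master table/tables/views/indexes/triggers
#   ?method=children&key=table -> JSON array of table nodes
#   ?method=values&key=<table name> -> {'table': '<table>...</table>'} HTML payload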
def base_tree(self, path_on_disk):
connection = sqlite3.connect(path_on_disk)
cursor = connection.cursor()
base_tree = []
cursor.execute("SELECT * FROM sqlite_master WHERE type='table';")
cursor.fetchone()
# Master Table
base_tree.append({'title': u'Master Table (1)',
'key': u'master',
'folder': True,
'lazy': True
})
# Tables
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = cursor.fetchall()
base_tree.append({'title': u'Tables (' + unicode(len(tables)) + u')',
'key': u'table',
'folder': True,
'lazy': True
})
# Views
cursor.execute("SELECT name FROM sqlite_master WHERE type='view';")
views = cursor.fetchall()
base_tree.append({'title': u'Views (' + unicode(len(views)) + u')',
'key': u'view',
'folder': True,
'lazy': True
})
# Indexes
cursor.execute("SELECT name FROM sqlite_master WHERE type='index';")
indexes = cursor.fetchall()
base_tree.append({'title': u'Indexes (' + unicode(len(indexes)) + u')',
'key': u'index',
'folder': True,
'lazy': True
})
# Triggers
cursor.execute("SELECT name FROM sqlite_master WHERE type='trigger';")
triggers = cursor.fetchall()
base_tree.append({'title': u'Triggers (' + unicode(len(triggers)) + u')',
'key': u'trigger',
'folder': True,
'lazy': True
})
connection.close()
# TODO REPLACE WITH DICTIONARY AND JSONIFY, SEE: http://stackoverflow.com/questions/12435297/how-do-i-jsonify-a-list-in-flask
return Response(json.dumps(base_tree), mimetype='application/json')
def get_children(self, request, helper, path_on_disk):
key = unicode(helper.get_request_value(request, 'key'))
children = []
if key == u'master':
children.append({'title': u'Master Table (1)',
'key': u'sqlite_master',
'folder': False,
'lazy': False
})
else:
for child in self.get_tables(key, path_on_disk):
children.append({'title': child,
'key': child,
'folder': False,
'lazy': False
})
# TODO REPLACE WITH DICTIONARY AND JSONIFY, SEE: http://stackoverflow.com/questions/12435297/how-do-i-jsonify-a-list-in-flask
return Response(json.dumps(children), mimetype='application/json')
def get_tables(self, key, path_on_disk):
connection = sqlite3.connect(path_on_disk)
cursor = connection.cursor()
tables = []
table_list = cursor.execute("SELECT name FROM sqlite_master WHERE type='" + key + "';")
for table in table_list:
tables.append(unicode(table[0]))
connection.close()
return tables
def values(self, request, helper, path_on_disk):
key = unicode(helper.get_request_value(request, 'key'))
connection = sqlite3.connect(path_on_disk)
cursor = connection.cursor()
cursor.execute("pragma table_info('" + key + "')")
rows = cursor.fetchall()
table = [ u'<table id="sqlitet01" class="display">', u' <thead><tr>' ]
for row in rows:
table.append(u' <th>' + unicode(row[1]) + u'</th>')
table.append(u' </tr> </thead>')
cursor.execute('SELECT * FROM ' + key)
rows = cursor.fetchall()
for row in rows:
table.append(u' <tr>')
for item in row:
try:
table.append(u' <td>' + unicode(item) + u'</td>')
except:
table.append(u' <td>' + unicode(type(item)) + u'</td>')
table.append(u' </tr>')
table.append(u'</table>')
connection.close()
return jsonify({'table': '\n'.join(table)}) | 2.390625 | 2 |
raspagem/random/lista_cidades.py | sslppractice/propython | 0 | 3572 | <filename>raspagem/random/lista_cidades.py
import requests, json
url = 'http://educacao.dadosabertosbr.com/api/cidades/ce'
cidades = requests.get(url).content
cidades = cidades.decode('utf-8')
cidades = json.loads(cidades)
for cidade in cidades:
codigo, nome = cidade.split(':')
print(nome)
| 2.84375 | 3 |
mezzanine/__init__.py | startupgrind/mezzanine | 0 | 3573 | <reponame>startupgrind/mezzanine
__version__ = "4.3.1.post1"
| 1.078125 | 1 |
src/autonomous/purepursuit.py | Sloomey/DeepSpace2019 | 0 | 3574 | import math
from constants import Constants
from utils import vector2d
from wpilib import SmartDashboard as Dash
from autonomous import pursuitpoint
class PurePursuit():
"""An implementation of the Pure Pursuit path tracking algorithm."""
def __init__(self, path):
self.path = path
self.pursuit_points = [pursuitpoint.PursuitPoint(p, c) for p, c in zip(
self.path.getPoints(), self.path.getCurvatures())]
self.last_lookahead_index = 0
self.cur_curvature = 0
self.target_velocities = vector2d.Vector2D()
self.closest_point_index = 0
def computeVelocities(self):
"""Compute the velocities along the path."""
# Compute the velocities along the path using the curvature and Constants.CURVE_VELOCITY
for ppoint in self.pursuit_points:
if abs(ppoint.curvature) <= Constants.CURVATURE_THRESHOLD:
velocity = Constants.MAX_VELOCITY
else:
velocity = min(Constants.MAX_VELOCITY,
Constants.CURVE_VELOCITY/ppoint.curvature)
ppoint.velocity = velocity
# Limit the acceleration of the velocities
for i in reversed(range(0, len(self.pursuit_points)-1)):
distance = self.pursuit_points[i].point.getDistance(
self.pursuit_points[i+1].point)
new_velocity = math.sqrt(
self.pursuit_points[i+1].velocity**2 + (2 * Constants.MAX_ACCELERATION * distance))
new_velocity = min(self.pursuit_points[i].velocity, new_velocity)
self.pursuit_points[i].velocity = new_velocity
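# Added note: the backward pass above enforces the kinematic limit
# v_i <= sqrt(v_{i+1}**2 + 2 * MAX_ACCELERATION * d_i), so a point can only be
# faster than its successor by what MAX_ACCELERATION allows over the distance
# d_i separating them.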
def updateLookaheadPointIndex2(self, state):
"""Update the lookahead point given the current robot state.
Uses the minimum-distance point if the state is more than
Constants.LOOKAHEAD_DIST away from all points, otherwise uses the
point whose distance is closest to Constants.LOOKAHEAD_DIST."""
# Compute point distances to state and differences from those distances to Constants.LOOKAHEAD_DIST
distances = [math.hypot(state.x - ppoint.point.x,
state.y - ppoint.point.y) for ppoint in self.pursuit_points]
differences = [abs(d-Constants.LOOKAHEAD_DIST) for d in distances]
min_distance = min(distances)
# Get new lookahead index
if min_distance <= Constants.LOOKAHEAD_DIST:
self.last_lookahead_index = differences.index(min(differences))
else:
self.last_lookahead_index = distances.index(min_distance)
def updateLookaheadPointIndex(self, state):
"""Loop over the points in the path to get the lookahead point given the current robot state."""
for i in range(self.last_lookahead_index, len(self.pursuit_points)-1):
lookahead = self.computeLookaheadPoint(
self.pursuit_points[i].point, self.pursuit_points[i+1].point, state)
if lookahead != None:
self.last_lookahead_index = i
def computeLookaheadPoint(self, start, end, state):
"""Compute the lookahead point given the current robot state.
Returns the point on the segment from start to end that lies at
Constants.LOOKAHEAD_DIST from the current state, otherwise returns None."""
# Algorithm for circle line segment intersection found here: https://stackoverflow.com/questions/1073336/circle-line-segment-collision-detection-algorithm/1084899#1084899
segment_direction = end - start
center_to_start = start - state
a = segment_direction * segment_direction
b = 2 * (center_to_start * segment_direction)
c = (center_to_start * center_to_start) - Constants.LOOKAHEAD_DIST ** 2
discriminant = b**2 - (4 * a * c)
if discriminant < 0:
return None
else:
discriminant = math.sqrt(discriminant)
t0 = (-b - discriminant) / (2 * a)
t1 = (-b + discriminant) / (2 * a)
if t0 >= 0 and t0 <= 1:
return start + t0 * segment_direction
if t1 >= 0 and t1 <= 1:
return start + t1 * segment_direction
return None
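# Worked sketch (added): with start=(0, 0), end=(2, 0), state=(0, 0) and
# Constants.LOOKAHEAD_DIST=1, the quadratic gives t0=-0.5 and t1=0.5, so the
# returned lookahead point is start + 0.5 * segment_direction = (1, 0).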
def updateCurvature(self, state):
"""Update the curvature from the current lookahead point to the current robot position."""
lookahead = self.pursuit_points[self.last_lookahead_index].point
# Transform the lookahead and state.pos to get an aligned vector
transform = lookahead - state.pos
transform = transform.getRotated(-state.angle)
# Use the transformed vector to calculate the curvature (derived from https://www.ri.cmu.edu/pub_files/pub3/coulter_r_craig_1992_1/coulter_r_craig_1992_1.pdf#page=12)
self.cur_curvature = (2 * transform.x) / Constants.LOOKAHEAD_DIST**2
def updateClosestPointIndex(self, state):
"""Update the index of the closest point to the current robot position."""
index = self.closest_point_index
smallest_distance = self.pursuit_points[index].point.getDistance(state)
for i in range(0, len(self.pursuit_points)):
distance = self.pursuit_points[i].point.getDistance(state)
if smallest_distance > distance:
smallest_distance = distance
index = i
self.closest_point_index = index
def updateTargetVelocities(self, state):
"""Update the target velocities of the left and right wheels."""
robot_velocity = self.pursuit_points[self.closest_point_index].velocity
# Use kinematics (http://robotsforroboticists.com/drive-kinematics/) and algebra to find wheel target velocties
l_velocity = robot_velocity * \
(2 + self.cur_curvature * Constants.TRACK_WIDTH) / \
2 / Constants.PURE_PURSUIT_KV
r_velocity = robot_velocity * \
(2 - self.cur_curvature * Constants.TRACK_WIDTH) / \
2 / Constants.PURE_PURSUIT_KV
scale = max(abs(l_velocity), abs(r_velocity))
if scale > 1:
l_velocity /= scale
r_velocity /= scale
self.target_velocities = vector2d.Vector2D(l_velocity, r_velocity)
def update(self, state):
"""Update the pure pursuit follower(runs all update functions)."""
# TODO which lookahead function to use
self.updateLookaheadPointIndex(state.pos)
# self.updateLookaheadPointIndex2(state.pos)
self.updateCurvature(state)
self.updateClosestPointIndex(state.pos)
self.updateTargetVelocities(state.pos)
def outputToSmartDashboard(self):
"""Output values to the smart dashboard."""
lookahead = self.pursuit_points[self.last_lookahead_index].point
closest = self.pursuit_points[self.closest_point_index].point
Dash.putNumberArray("Lookahead Point", [lookahead.x, lookahead.y])
Dash.putNumber("Curvature", self.cur_curvature)
Dash.putNumberArray("Closes Point", [closest.x, closest.y])
Dash.putNumberArray("Target Velocities", [
self.target_velocities.x, self.target_velocities.y])
#print("Lookahead Point - {}".format(lookahead))
#print("Curvature - {}".format(self.cur_curvature))
#print("Closes Point - {}".format(closest))
#print("Target Velocities - {}".format(self.target_velocities))
# print("------------------------------")
def isDone(self):
"""Check if the path is done being followed."""
return (len(self.pursuit_points) - self.closest_point_index) <= 1
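# Usage sketch (added; `path` and `state` are assumed to come from the robot
# code: path exposes getPoints()/getCurvatures(), state has .pos and .angle):
#   follower = PurePursuit(path)
#   follower.computeVelocities()
#   while not follower.isDone():
#       follower.update(state) # refresh lookahead, curvature and targets
#       left, right = follower.target_velocities.x, follower.target_velocities.y
#       # feed left/right to the drivetrain velocity controllers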
| 3.09375 | 3 |
esphome/voluptuous_schema.py | TheEggi/esphomeyaml | 0 | 3575 | <gh_stars>0
import difflib
import itertools
import voluptuous as vol
from esphome.py_compat import string_types
class ExtraKeysInvalid(vol.Invalid):
def __init__(self, *arg, **kwargs):
self.candidates = kwargs.pop('candidates')
vol.Invalid.__init__(self, *arg, **kwargs)
def ensure_multiple_invalid(err):
if isinstance(err, vol.MultipleInvalid):
return err
return vol.MultipleInvalid(err)
# pylint: disable=protected-access, unidiomatic-typecheck
class _Schema(vol.Schema):
"""Custom cv.Schema that prints similar keys on error."""
def __init__(self, schema, extra=vol.PREVENT_EXTRA, extra_schemas=None):
super(_Schema, self).__init__(schema, extra=extra)
# List of extra schemas to apply after validation
# Should be used sparingly, as it's not a very clean, voluptuous-style way of
# doing things.
self._extra_schemas = extra_schemas or []
def __call__(self, data):
res = super(_Schema, self).__call__(data)
for extra in self._extra_schemas:
try:
res = extra(res)
except vol.Invalid as err:
raise ensure_multiple_invalid(err)
return res
def _compile_mapping(self, schema, invalid_msg=None):
invalid_msg = invalid_msg or 'mapping value'
# Check some things that ESPHome's schemas do not allow
# mostly to keep the logic in this method sane (so these may be re-added if needed).
for key in schema:
if key is vol.Extra:
raise ValueError("ESPHome does not allow vol.Extra")
if isinstance(key, vol.Remove):
raise ValueError("ESPHome does not allow vol.Remove")
if isinstance(key, vol.primitive_types):
raise ValueError("All schema keys must be wrapped in cv.Required or cv.Optional")
# Keys that may be required
all_required_keys = set(key for key in schema if isinstance(key, vol.Required))
# Keys that may have defaults
all_default_keys = set(key for key in schema if isinstance(key, vol.Optional))
# Recursively compile schema
_compiled_schema = {}
for skey, svalue in vol.iteritems(schema):
new_key = self._compile(skey)
new_value = self._compile(svalue)
_compiled_schema[skey] = (new_key, new_value)
# Sort compiled schema (probably not necessary for esphome, but leave it here just in case)
candidates = list(vol.schema_builder._iterate_mapping_candidates(_compiled_schema))
# After we have the list of candidates in the correct order, we want to apply some
# optimization so that each
# key in the data being validated will be matched against the relevant schema keys only.
# No point in matching against different keys
additional_candidates = []
candidates_by_key = {}
for skey, (ckey, cvalue) in candidates:
if type(skey) in vol.primitive_types:
candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
elif isinstance(skey, vol.Marker) and type(skey.schema) in vol.primitive_types:
candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
else:
# These are wildcards such as 'int', 'str', 'Remove' and others which should be
# applied to all keys
additional_candidates.append((skey, (ckey, cvalue)))
key_names = []
for skey in schema:
if isinstance(skey, string_types):
key_names.append(skey)
elif isinstance(skey, vol.Marker) and isinstance(skey.schema, string_types):
key_names.append(skey.schema)
def validate_mapping(path, iterable, out):
required_keys = all_required_keys.copy()
# Build a map of all provided key-value pairs.
# The type(out) is used to retain ordering in case an ordered
# map type is provided as input.
key_value_map = type(out)()
for key, value in iterable:
key_value_map[key] = value
# Insert default values for non-existing keys.
for key in all_default_keys:
if not isinstance(key.default, vol.Undefined) and key.schema not in key_value_map:
# A default value has been specified for this missing key, insert it.
key_value_map[key.schema] = key.default()
error = None
errors = []
for key, value in key_value_map.items():
key_path = path + [key]
# Optimization. Validate against the matching key first, then fallback to the rest
relevant_candidates = itertools.chain(candidates_by_key.get(key, []),
additional_candidates)
# compare each given key/value against all compiled key/values
# schema key, (compiled key, compiled value)
for skey, (ckey, cvalue) in relevant_candidates:
try:
new_key = ckey(key_path, key)
except vol.Invalid as e:
if len(e.path) > len(key_path):
raise
if not error or len(e.path) > len(error.path):
error = e
continue
# Backtracking is not performed once a key is selected, so if
# the value is invalid we immediately throw an exception.
exception_errors = []
try:
cval = cvalue(key_path, value)
out[new_key] = cval
except vol.MultipleInvalid as e:
exception_errors.extend(e.errors)
except vol.Invalid as e:
exception_errors.append(e)
if exception_errors:
for err in exception_errors:
if len(err.path) <= len(key_path):
err.error_type = invalid_msg
errors.append(err)
# If there is a validation error for a required
# key, this means that the key was provided.
# Discard the required key so it does not
# create an additional, noisy exception.
required_keys.discard(skey)
break
# Key and value okay, mark as found in case it was
# a Required() field.
required_keys.discard(skey)
break
else:
if self.extra == vol.ALLOW_EXTRA:
out[key] = value
elif self.extra != vol.REMOVE_EXTRA:
if isinstance(key, string_types) and key_names:
matches = difflib.get_close_matches(key, key_names)
errors.append(ExtraKeysInvalid('extra keys not allowed', key_path,
candidates=matches))
else:
errors.append(vol.Invalid('extra keys not allowed', key_path))
# for any required keys left that weren't found and don't have defaults:
for key in required_keys:
msg = getattr(key, 'msg', None) or 'required key not provided'
errors.append(vol.RequiredFieldInvalid(msg, path + [key]))
if errors:
raise vol.MultipleInvalid(errors)
return out
return validate_mapping
def add_extra(self, validator):
validator = _Schema(validator)
self._extra_schemas.append(validator)
return self
# pylint: disable=arguments-differ
def extend(self, *schemas, **kwargs):
extra = kwargs.pop('extra', None)
if kwargs:
raise ValueError
if not schemas:
return self.extend({})
if len(schemas) != 1:
ret = self
for schema in schemas:
ret = ret.extend(schema)
return ret
schema = schemas[0]
if isinstance(schema, vol.Schema):
schema = schema.schema
ret = super(_Schema, self).extend(schema, extra=extra)
return _Schema(ret.schema, extra=ret.extra, extra_schemas=self._extra_schemas)
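# Usage sketch (added; not part of the original module): wrapping a plain dict
# in _Schema yields "did you mean ...?" candidates for unknown keys.
#   schema = _Schema({vol.Required('name'): str, vol.Optional('id', default=0): int})
#   schema({'name': 'x'}) # -> {'name': 'x', 'id': 0}
#   schema({'nane': 'x'}) # -> raises vol.MultipleInvalid that includes an
#                         # ExtraKeysInvalid with candidates=['name']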
| 2.296875 | 2 |
semisupervised/DensityPeaks.py | dpr1005/Semisupervised-learning-and-instance-selection-methods | 3 | 3576 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Filename: DensityPeaks.py
# @Author: <NAME>
# @Time: 5/3/22 09:55
# @Version: 4.0
import math
from collections import defaultdict
import numpy as np
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors
from sklearn.preprocessing import LabelEncoder
from sklearn.semi_supervised import SelfTrainingClassifier
from sklearn.svm import SVC
from instance_selection import ENN
from .utils import split
class STDPNF:
"""
<NAME>., <NAME>., & <NAME>. (2019). A self-training method based on density
peaks and an extended parameter-free local noise filter for k nearest
neighbor. Knowledge-Based Systems, 184, 104895.
<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2018).
Self-training semi-supervised classification based on density peaks of
data. Neurocomputing, 275, 180-191.
"""
def __init__(
self,
dc=None,
distance_metric="euclidean",
k=3,
gauss_cutoff=True,
percent=2.0,
density_threshold=None,
distance_threshold=None,
anormal=True,
filtering=False,
classifier=None,
classifier_params=None,
filter_method=None,
):
"""Semi Supervised Algorithm based on Density Peaks."""
self.dc = dc
self.distance_metric = distance_metric
self.k = k
self.gauss_cutoff = gauss_cutoff
self.percent = percent
self.density_threshold = density_threshold
self.distance_threshold = distance_threshold
self.anormal = anormal
self.filtering = filtering
if classifier is not None:
if isinstance(classifier_params, dict):
self.classifier = classifier(**classifier_params)
else:
self.classifier = classifier()
else:
self.classifier = None
if filter_method is not None and filter_method != "ENANE":
self.filter = filter_method()
elif isinstance(filter_method, str) and filter_method == "ENANE":
self.filter = filter_method
else:
self.filter = None
self.y = None
self.low = None
self.u = None
self.classifier_stdpnf = None
self.order = None
self.structure = None
self.structure_stdnpf = None
self.n_id = None
self.distances = None
self.max_dis = None
self.min_dis = None
self.rho = None
self.delta = None
self.nneigh = None
self.data = None
def __build_distance(self):
"""
Calculate distance dict.
:return: distance dict, max distance, min distance
"""
from scipy.spatial.distance import pdist, squareform
distance_matrix = pdist(self.data, metric=self.distance_metric)
distance_matrix = squareform(distance_matrix)
triangle_upper = np.triu_indices(self.data.shape[0], 1)
triangle_upper = distance_matrix[triangle_upper]
distance = {}
for i in range(self.n_id):
for j in range(i + 1, self.n_id):
distance[(i, j)] = distance_matrix[i, j]
distance[(j, i)] = distance_matrix[i, j]
max_dis, min_dis = np.max(triangle_upper), np.min(triangle_upper)
return distance, max_dis, min_dis
def __auto_select_dc(self):
"""
Auto-select the local density threshold so that the average number of
neighbors is 1-2 percent of all nodes.
:return: dc that local density threshold
"""
max_dis, min_dis = self.max_dis, self.min_dis
dc = (max_dis + min_dis) / 2
while True:
nneighs = (
sum([1 for v in self.distances.values() if v < dc]) / self.n_id**2
)
if 0.01 <= nneighs <= 0.02:
break
# binary search
if nneighs < 0.01:
min_dis = dc
else:
max_dis = dc
dc = (max_dis + min_dis) / 2
if max_dis - min_dis < 0.0001:
break
return dc
def __select_dc(self):
"""
Select the local density threshold. The default is the percent-based method
used in the paper; 'auto' triggers the automatic binary search.
:return: dc that local density threshold
"""
if self.dc == "auto":
dc = self.__auto_select_dc()
else:
position = int(self.n_id * (self.n_id + 1) /
2 * self.percent / 100)
dc = np.sort(list(self.distances.values()))[
position * 2 + self.n_id]
return dc
def __local_density(self):
"""
Compute all points' local density.
:return: local density vector that index is the point index
"""
def gauss_func(dij, dc):
"""
> The function takes in a distance value and a cutoff value, and
returns the value of the Gaussian function at that point
:param dij: distance between two nodes
:param dc: The cutoff distance
:return: the value of the gaussian function.
"""
return math.exp(-((dij / dc) ** 2))
def cutoff_func(dij, dc):
"""
If the distance between two atoms is less than the cutoff distance,
return 1, otherwise return 0
:param dij: distance between atoms i and j
:param dc: cutoff distance
:return: 1 if dij < dc, else 0
"""
return 1 if dij < dc else 0
func = gauss_func if self.gauss_cutoff else cutoff_func
rho = [0] * self.n_id
for i in range(self.n_id):
for j in range(i + 1, self.n_id):
temp = func(self.distances[(i, j)], self.dc)
rho[i] += temp
rho[j] += temp
return np.array(rho, np.float32)
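# Added note: with the Gaussian cutoff a pair at distance dij adds
# exp(-(dij / dc) ** 2) to the density of both points, while the hard cutoff
# adds 1 only when dij < dc; e.g. dij == dc contributes about 0.37 under the
# Gaussian kernel and 0 under the hard cutoff.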
def __min_neighbor_and_distance(self):
"""
Compute, for every point, the minimum distance to a point of higher local
density (that point is recorded as its nearest higher-density neighbor).
:return: distance vector, nearest neighbor vector
"""
if self.rho is None:
raise ValueError("Encountered rho as None.")
sort_rho_idx = np.argsort(-self.rho)
delta, nneigh = [float(self.max_dis)] * self.n_id, [0] * self.n_id
delta[sort_rho_idx[0]] = -1.0
for i in range(self.n_id):
for j in range(0, i):
old_i, old_j = sort_rho_idx[i], sort_rho_idx[j]
if self.distances[(old_i, old_j)] < delta[old_i]:
delta[old_i] = self.distances[(old_i, old_j)]
nneigh[old_i] = old_j
delta[sort_rho_idx[0]] = max(delta)
return np.array(delta, np.float32), np.array(nneigh, np.float32)
def __structure(self):
"""
The function takes the data and the nearest neighbor indices and creates
a dataframe with the following columns:
- sample: the data point
- next: the index of the nearest neighbor
- previous: the index of the first point whose nearest higher-density neighbor is this point (None if no such point)
- label: the label of the data point
The function also creates a copy of the dataframe called
structure_stdnpf
"""
self.structure = dict.fromkeys(range(self.n_id))
for index, sample in enumerate(self.data):
self.structure[index] = [
sample,
int(self.nneigh[index]),
None,
self.y[index] if index < len(self.y) else -1,
]
for index in range(self.n_id):
if self.structure[self.structure[index][1]][2] is None:
self.structure[self.structure[index][1]][2] = index
self.structure = pd.DataFrame(
self.structure, index=["sample", "next", "previous", "label"]
).transpose()
self.structure_stdnpf = self.structure.copy(deep=True)
def __step_a(self):
"""
> The function takes the labeled samples and trains the classifier on
them
:return: The samples that have been labeled.
"""
samples_labeled = self.structure.loc[self.structure["label"] != -1]
sam_lab = samples_labeled["sample"].to_list()
y_without = samples_labeled["label"].to_list()
self.classifier.fit(sam_lab, y_without)
return samples_labeled
def __discover_structure(self):
"""Discovers the under laying structure."""
self._fit_without()
def __nan_search(self):
"""
For each point, grow the neighborhood size r and collect its k-nearest
neighbors (knn) and reverse nearest neighbors (rnn). The natural neighbors
of a point are the intersection knn ∩ rnn. The search stops once the number
of points without any reverse neighbor no longer decreases between rounds.
:return: nan, r
"""
r = 1
nan = defaultdict(set)
nb = dict.fromkeys(range(self.n_id), 0)
knn = defaultdict(set)
rnn = defaultdict(set)
cnt = defaultdict(int)
while True:
search = NearestNeighbors(n_neighbors=r + 1, algorithm="kd_tree")
search.fit(self.data)
for index, sample in enumerate(self.data):
r_neighs = search.kneighbors(
[sample], return_distance=False)[0][1:]
knn[index].update(list(r_neighs))
for neigh in r_neighs:
nb[neigh] += 1
rnn[neigh].add(index)
cnt[r] = np.count_nonzero((np.array(list(nb.values())) == 0))
if r > 2 and cnt[r] == cnt[r - 1]:
r -= 1
break
r += 1
for index in range(self.n_id):
nan[index] = knn[index].intersection(rnn[index])
return nan, r
def __enane(self, fx, nan, r):
"""
Takes the indices of the unlabeled samples, their natural neighbor sets and
the neighborhood size, trains a self-training KNN classifier on the labeled
samples plus those unlabeled samples, and predicts labels for the unlabeled
ones. A prediction is kept only when it agrees with at least as many natural
neighbors as it disagrees with.
:param fx: the indexes of the unlabeled data
:param nan: a list of lists, where each list contains the indices of the
neighbors of a sample
:param r: the number of neighbors to consider
:return: The indexes of the samples that are going to be labeled and the
labels that are going to be assigned to them.
"""
es = []
es_pred = []
local_structure = self.structure_stdnpf.copy(deep=True)
base_estimator = KNeighborsClassifier(
n_neighbors=r, metric=self.distance_metric
)
labeled_data = local_structure.loc[local_structure["label"] != -1]
nan_unlabeled = local_structure.loc[fx]
data = pd.concat([labeled_data, nan_unlabeled], join="inner")
enane_model = SelfTrainingClassifier(base_estimator)
enane_model.fit(data["sample"].tolist(), data["label"].tolist())
enane_pred = enane_model.predict(nan_unlabeled["sample"].tolist())
for (row_index, _), pred in zip(nan_unlabeled.iterrows(), enane_pred):
usefulness = 0
harmfulness = 0
for neigh in nan[row_index]:
if local_structure.loc[neigh, "label"] == pred:
usefulness += 1
else:
harmfulness += 1
if usefulness >= harmfulness:
es.append(row_index)
es_pred.append(pred)
return es, es_pred
def __init_values(self, low, u, y):
"""
Takes the labeled samples, the unlabeled samples and the labels,
concatenates the data, and then calculates the pairwise distances,
the maximum and minimum distance, the dc value, the rho and delta
values, the nearest neighbors, and the structure of the data.
:param low: labeled samples
:param u: unlabeled samples
:param y: the labels of the labeled samples
"""
self.y = y
self.low = low
self.u = u
self.data = np.concatenate((low, u), axis=0)
self.n_id = self.data.shape[0]
self.distances, self.max_dis, self.min_dis = self.__build_distance()
self.dc = self.__select_dc()
self.rho = self.__local_density()
self.delta, self.nneigh = self.__min_neighbor_and_distance()
self.__structure()
def _fit_without(self):
"""
The function takes in a classifier, and then labels the next point,
and then labels the previous points, without filtering.
"""
if self.classifier is None:
self.classifier = SVC()
count = 1
self.order = dict.fromkeys(range(self.n_id), 0)
count = self._label_next_point(count)
self._label_previous_points(count)
def _label_previous_points(self, count):
"""
> The function takes the samples labeled in the previous step and finds
the previous samples of those samples. It then labels those samples
and repeats the process until there are no more samples to label
:param count: the number of the current iteration
"""
while True:
samples_labeled = self.__step_a()
prev_rows = samples_labeled["previous"].to_numpy()
prev_unlabeled = []
samples_labeled_index = samples_labeled.index.to_list()
for prev_row in prev_rows:
if prev_row not in samples_labeled_index and prev_row is not None:
prev_unlabeled.append(prev_row)
self.order[prev_row] = count
if len(prev_unlabeled) == 0:
break
unlabeled_prev_of_labeled = self.structure.loc[prev_unlabeled]
lu = unlabeled_prev_of_labeled["sample"].to_list()
y_pred = self.classifier.predict(lu)
for new_label, pos in zip(y_pred, prev_unlabeled):
self.structure.at[pos, "label"] = new_label
count += 1
def _label_next_point(self, count):
"""
> The function takes the samples labeled in the previous step and finds
the next samples in the structure. If the next samples are not
labeled, it labels them and updates the order of the samples
:param count: the number of the next point to be labeled
:return: The number of labeled samples.
"""
while True:
samples_labeled = self.__step_a()
next_rows = samples_labeled["next"].to_numpy()
next_unlabeled = []
samples_labeled_index = samples_labeled.index.to_list()
for next_row in next_rows:
if next_row not in samples_labeled_index:
next_unlabeled.append(next_row)
self.order[next_row] = count
if len(next_unlabeled) == 0:
break
unlabeled_next_of_labeled = self.structure.loc[next_unlabeled]
lu = unlabeled_next_of_labeled["sample"].to_list()
y_pred = self.classifier.predict(lu)
for new_label, pos in zip(y_pred, next_unlabeled):
self.structure.at[pos, "label"] = new_label
count += 1
return count
def _fit_stdpnf(self):
"""
Self Training based on Density Peaks and a parameter-free noise
filter.
"""
self.__discover_structure()
nan, lambda_param = self.__nan_search()
self.classifier_stdpnf = KNeighborsClassifier(
n_neighbors=self.k, metric=self.distance_metric
)
self.classifier_stdpnf.fit(self.low, self.y)
count = 1
while count <= max(self.order.values()):
unlabeled_rows = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] == -1
].index.to_list()
unlabeled_indexes = []
for row in unlabeled_rows:
if self.order[row] == count:
unlabeled_indexes.append(row)
if isinstance(self.filter, str) and self.filter == "ENANE":
filtered_indexes, filtered_labels = self.__enane(
unlabeled_indexes, nan, lambda_param
)
self.structure_stdnpf.at[filtered_indexes,
"label"] = filtered_labels
else:
labeled_data = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] != -1
]
complete = labeled_data["sample"]
complete_y = labeled_data["label"]
result = self._if_filter(complete, complete_y)
self._results_to_structure(complete, result)
labeled_data = self.structure_stdnpf.loc[
self.structure_stdnpf["label"] != -1
]
self.classifier_stdpnf.fit(
labeled_data["sample"].tolist(), labeled_data["label"].tolist()
)
count += 1
labeled_data = self.structure_stdnpf.loc[self.structure_stdnpf["label"] != -1]
self.classifier_stdpnf.fit(
labeled_data["sample"].tolist(), labeled_data["label"].tolist()
)
def _results_to_structure(self, complete, result):
"""
> This function takes the results of the model and compares them to the
complete data set. If the result is not in the complete data set, it is
added to the structure data set.
:param complete: the complete dataset
:param result: the result of the clustering
"""
results_to_unlabeled = []
for r in result.to_numpy():
is_in = False
for c in complete:
if np.array_equal(r, c):
is_in = True
if not is_in:
results_to_unlabeled.append(r)
for r in results_to_unlabeled:
self.structure_stdnpf.at[np.array(self.structure_stdnpf["sample"], r)][
"label"
] = -1
def _if_filter(self, complete, complete_y):
"""
If the filter is an ENN, then filter the original data, otherwise
filter the complete data
:param complete: the complete dataframe
:param complete_y: the complete y values
:return: The result is a dataframe with the filtered data.
"""
if isinstance(self.filter, ENN):
original = pd.DataFrame(self.low)
original_y = pd.DataFrame(self.y)
result, _ = self.filter.filter_original_complete(
original, original_y, complete, complete_y
)
else:
result, _ = self.filter.filter(complete, complete_y)
return result
def fit(self, samples, y):
"""Fit method."""
try:
l, u, y = split(samples, y)
except IndexError:
raise ValueError("Dimensions do not match.")
le = LabelEncoder()
le.fit(y)
y = le.transform(y)
self.__init_values(l, u, y)
if self.filtering:
self._fit_stdpnf()
else:
self._fit_without()
def predict(self, src):
"""
Predict based on a trained classifier.
:param src: The source image
:return: The classifier is being returned.
"""
if self.classifier is None:
raise AssertionError("The model needs to be fitted first.")
return self.classifier.predict(src)
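# Usage sketch (added; names are illustrative). `samples` is assumed to hold
# the labeled rows followed by the unlabeled ones, with `y` covering the
# labeled part, and split() from .utils separating them:
#   model = STDPNF(classifier=SVC, classifier_params={'kernel': 'rbf'})
#   model.fit(samples, y)
#   predictions = model.predict(new_samples)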
| 2.75 | 3 |
N-aryTreeLevelOrderTraversal429.py | Bit64L/LeetCode-Python- | 0 | 3577 | """
# Definition for a Node.
"""
class TreeNode(object):
def __init__(self, val, children):
self.val = val
self.children = children
class Solution(object):
def levelOrder(self, root):
"""
:type root: Node
:rtype: List[List[int]]
"""
if root is None:
return []
from Queue import Queue
que = Queue()
que.put(root)
ans, tmp, k = [], [], 1
while que.qsize() != 0:
node = que.get()
tmp.append(node.val)
k -= 1
for child in node.children:
que.put(child)
if k == 0:
k = que.qsize()
ans.append(list(tmp))
tmp = []
return ans
node2 = TreeNode(2, [])
node3 = TreeNode(3, [])
children = [node2, node3]
node1 = TreeNode(1, children)
solution = Solution()
print(solution.levelOrder(node1))
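# Expected output of the demo above: [[1], [2, 3]]
# (level 0 holds the root's value, level 1 holds its two children).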
| 3.71875 | 4 |
plugin.video.team.milhanos/websocket/_core.py | akuala/REPO.KUALA | 2 | 3578 | <reponame>akuala/REPO.KUALA
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 <NAME>(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
from __future__ import print_function
import six
import socket
if six.PY3:
from base64 import encodebytes as base64encode
else:
from base64 import encodestring as base64encode
import struct
import threading
# websocket modules
from ._exceptions import *
from ._abnf import *
from ._socket import *
from ._utils import *
from ._url import *
from ._logging import *
from ._http import *
from ._handshake import *
from ._ssl_compat import *
"""
websocket python client.
=========================
This version supports only hybi-13.
Please see http://tools.ietf.org/html/rfc6455 for protocol.
"""
class WebSocket(object):
"""
Low level WebSocket interface.
This class is based on
The WebSocket protocol draft-hixie-thewebsocketprotocol-76
http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76
We can connect to the websocket server and send/receive data.
The following example is an echo client.
>>> import websocket
>>> ws = websocket.WebSocket()
>>> ws.connect("ws://echo.websocket.org")
>>> ws.send("Hello, Server")
>>> ws.recv()
'Hello, Server'
>>> ws.close()
get_mask_key: a callable to produce new mask keys, see the set_mask_key
function's docstring for more details
sockopt: values for socket.setsockopt.
sockopt must be tuple and each element is argument of sock.setsockopt.
sslopt: dict object for ssl socket option.
fire_cont_frame: fire recv event for each cont frame. default is False
enable_multithread: if set to True, lock send method.
skip_utf8_validation: skip utf8 validation.
"""
def __init__(self, get_mask_key=None, sockopt=None, sslopt=None,
fire_cont_frame=False, enable_multithread=False,
skip_utf8_validation=False, **options):
"""
Initialize WebSocket object.
"""
self.sock_opt = sock_opt(sockopt, sslopt)
self.handshake_response = None
self.sock = None
self.connected = False
self.get_mask_key = get_mask_key
# These buffer over the build-up of a single frame.
self.frame_buffer = frame_buffer(self._recv, skip_utf8_validation)
self.cont_frame = continuous_frame(fire_cont_frame, skip_utf8_validation)
if enable_multithread:
self.lock = threading.Lock()
else:
self.lock = NoLock()
def __iter__(self):
"""
Allow iteration over websocket, implying sequential `recv` executions.
"""
while True:
yield self.recv()
def __next__(self):
return self.recv()
def next(self):
return self.__next__()
def fileno(self):
return self.sock.fileno()
def set_mask_key(self, func):
"""
Set the function used to create mask keys. You can customize the mask key
generator; mainly, this is for testing purposes.
func: callable object. The func takes one integer argument, the desired
length of the mask key, and must return a string (byte array) of
exactly that length.
"""
self.get_mask_key = func
def gettimeout(self):
"""
Get the websocket timeout(second).
"""
return self.sock_opt.timeout
def settimeout(self, timeout):
"""
Set the timeout to the websocket.
timeout: timeout time(second).
"""
self.sock_opt.timeout = timeout
if self.sock:
self.sock.settimeout(timeout)
timeout = property(gettimeout, settimeout)
def getsubprotocol(self):
"""
get subprotocol
"""
if self.handshake_response:
return self.handshake_response.subprotocol
else:
return None
subprotocol = property(getsubprotocol)
def getstatus(self):
"""
get handshake status
"""
if self.handshake_response:
return self.handshake_response.status
else:
return None
status = property(getstatus)
def getheaders(self):
"""
get handshake response header
"""
if self.handshake_response:
return self.handshake_response.headers
else:
return None
headers = property(getheaders)
def connect(self, url, **options):
"""
Connect to url. url is websocket url scheme.
ie. ws://host:port/resource
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> ws = WebSocket()
>>> ws.connect("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is integer.
if you set None for this value,
it means "use default_timeout value"
options: "header" -> custom http header list or dict.
"cookie" -> cookie value.
"origin" -> custom origin url.
"host" -> custom host header string.
"http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port. If not set, set to 80.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth information.
tuple of username and password.
default is None
"subprotocols" - array of available sub protocols.
default is None.
"socket" - pre-initialized stream socket.
"""
self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options),
options.pop('socket', None))
try:
self.handshake_response = handshake(self.sock, *addrs, **options)
self.connected = True
except:
if self.sock:
self.sock.close()
self.sock = None
raise
def send(self, payload, opcode=ABNF.OPCODE_TEXT):
"""
Send the data as string.
payload: Payload must be utf-8 string or unicode,
if the opcode is OPCODE_TEXT.
Otherwise, it must be string(byte array)
opcode: operation code to send. Please see OPCODE_XXX.
"""
frame = ABNF.create_frame(payload, opcode)
return self.send_frame(frame)
def send_frame(self, frame):
"""
Send the data frame.
frame: frame data created by ABNF.create_frame
>>> ws = create_connection("ws://echo.websocket.org/")
>>> frame = ABNF.create_frame("Hello", ABNF.OPCODE_TEXT)
>>> ws.send_frame(frame)
>>> cont_frame = ABNF.create_frame("My name is ", ABNF.OPCODE_CONT, 0)
>>> ws.send_frame(frame)
>>> cont_frame = ABNF.create_frame("Foo Bar", ABNF.OPCODE_CONT, 1)
>>> ws.send_frame(frame)
"""
if self.get_mask_key:
frame.get_mask_key = self.get_mask_key
data = frame.format()
length = len(data)
trace("send: " + repr(data))
with self.lock:
while data:
l = self._send(data)
data = data[l:]
return length
def send_binary(self, payload):
return self.send(payload, ABNF.OPCODE_BINARY)
def ping(self, payload=""):
"""
send ping data.
payload: data payload to send server.
"""
if isinstance(payload, six.text_type):
payload = payload.encode("utf-8")
self.send(payload, ABNF.OPCODE_PING)
def pong(self, payload):
"""
send pong data.
payload: data payload to send server.
"""
if isinstance(payload, six.text_type):
payload = payload.encode("utf-8")
self.send(payload, ABNF.OPCODE_PONG)
def recv(self):
"""
Receive string data(byte array) from the server.
return value: string(byte array) value.
"""
opcode, data = self.recv_data()
if six.PY3 and opcode == ABNF.OPCODE_TEXT:
return data.decode("utf-8")
elif opcode == ABNF.OPCODE_TEXT or opcode == ABNF.OPCODE_BINARY:
return data
else:
return ''
def recv_data(self, control_frame=False):
"""
Receive data with operation code.
control_frame: a boolean flag indicating whether to return control frame
data, defaults to False
return value: tuple of operation code and string(byte array) value.
"""
opcode, frame = self.recv_data_frame(control_frame)
return opcode, frame.data
def recv_data_frame(self, control_frame=False):
"""
Receive data with operation code.
control_frame: a boolean flag indicating whether to return control frame
data, defaults to False
return value: tuple of operation code and string(byte array) value.
"""
while True:
frame = self.recv_frame()
if not frame:
# handle error:
# 'NoneType' object has no attribute 'opcode'
raise WebSocketProtocolException("Not a valid frame %s" % frame)
elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY, ABNF.OPCODE_CONT):
self.cont_frame.validate(frame)
self.cont_frame.add(frame)
if self.cont_frame.is_fire(frame):
return self.cont_frame.extract(frame)
elif frame.opcode == ABNF.OPCODE_CLOSE:
self.send_close()
return (frame.opcode, frame)
elif frame.opcode == ABNF.OPCODE_PING:
if len(frame.data) < 126:
self.pong(frame.data)
else:
raise WebSocketProtocolException("Ping message is too long")
if control_frame:
return (frame.opcode, frame)
elif frame.opcode == ABNF.OPCODE_PONG:
if control_frame:
return (frame.opcode, frame)
def recv_frame(self):
"""
receive data as frame from server.
return value: ABNF frame object.
"""
return self.frame_buffer.recv_frame()
def send_close(self, status=STATUS_NORMAL, reason=six.b("")):
"""
send close data to the server.
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string or bytes.
"""
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
self.connected = False
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
def close(self, status=STATUS_NORMAL, reason=six.b(""), timeout=3):
"""
Close Websocket object
status: status code to send. see STATUS_XXX.
reason: the reason to close. This must be string.
timeout: timeout until receive a close frame.
If None, it will wait forever until receive a close frame.
"""
if self.connected:
if status < 0 or status >= ABNF.LENGTH_16:
raise ValueError("code is invalid range")
try:
self.connected = False
self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE)
sock_timeout = self.sock.gettimeout()
self.sock.settimeout(timeout)
try:
frame = self.recv_frame()
if isEnabledForError():
recv_status = struct.unpack("!H", frame.data)[0]
if recv_status != STATUS_NORMAL:
error("close status: " + repr(recv_status))
except:
pass
self.sock.settimeout(sock_timeout)
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
self.shutdown()
def abort(self):
"""
Low-level asynchronous abort, wakes up other threads that are waiting in recv_*
"""
if self.connected:
self.sock.shutdown(socket.SHUT_RDWR)
def shutdown(self):
"close socket, immediately."
if self.sock:
self.sock.close()
self.sock = None
self.connected = False
def _send(self, data):
return send(self.sock, data)
def _recv(self, bufsize):
try:
return recv(self.sock, bufsize)
except WebSocketConnectionClosedException:
if self.sock:
self.sock.close()
self.sock = None
self.connected = False
raise
def create_connection(url, timeout=None, class_=WebSocket, **options):
"""
connect to url and return websocket object.
Connect to url and return the WebSocket object.
Passing optional timeout parameter will set the timeout on the socket.
If no timeout is supplied,
the global default timeout setting returned by getdefauttimeout() is used.
You can customize using 'options'.
If you set "header" list object, you can set your own custom header.
>>> conn = create_connection("ws://echo.websocket.org/",
... header=["User-Agent: MyProgram",
... "x-custom: header"])
timeout: socket timeout time. This value is integer.
if you set None for this value,
it means "use default_timeout value"
class_: class to instantiate when creating the connection. It has to implement
settimeout and connect. It's __init__ should be compatible with
WebSocket.__init__, i.e. accept all of it's kwargs.
options: "header" -> custom http header list or dict.
"cookie" -> cookie value.
"origin" -> custom origin url.
"host" -> custom host header string.
"http_proxy_host" - http proxy host name.
"http_proxy_port" - http proxy port. If not set, set to 80.
"http_no_proxy" - host names, which doesn't use proxy.
"http_proxy_auth" - http proxy auth information.
tuple of username and password.
default is None
"enable_multithread" -> enable lock for multithread.
"sockopt" -> socket options
"sslopt" -> ssl option
"subprotocols" - array of available sub protocols.
default is None.
"skip_utf8_validation" - skip utf8 validation.
"socket" - pre-initialized stream socket.
"""
sockopt = options.pop("sockopt", [])
sslopt = options.pop("sslopt", {})
fire_cont_frame = options.pop("fire_cont_frame", False)
enable_multithread = options.pop("enable_multithread", False)
skip_utf8_validation = options.pop("skip_utf8_validation", False)
websock = class_(sockopt=sockopt, sslopt=sslopt,
fire_cont_frame=fire_cont_frame,
enable_multithread=enable_multithread,
skip_utf8_validation=skip_utf8_validation, **options)
websock.settimeout(timeout if timeout is not None else getdefaulttimeout())
websock.connect(url, **options)
return websock
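# Short usage sketch (added; mirrors the docstrings above and assumes the
# public echo endpoint is reachable):
#   ws = create_connection("ws://echo.websocket.org/", timeout=5)
#   ws.send("Hello, Server")
#   print(ws.recv()) # -> 'Hello, Server'
#   ws.close()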
| 2.296875 | 2 |
vaccine_card/logistic/models.py | Unanimad/lais_046_2020_etapa_2 | 0 | 3579 | from django.db import models
from vaccine_card.vaccination.models import Vaccine
class State(models.Model):
name = models.CharField(max_length=20, verbose_name='Nome')
class Meta:
verbose_name = 'Unidade Federativa'
def __str__(self):
return self.name
class City(models.Model):
name = models.CharField(max_length=50, verbose_name='Nome')
state = models.ForeignKey(State, on_delete=models.CASCADE, verbose_name=State._meta.verbose_name)
class Meta:
verbose_name = 'Município'
def __str__(self):
return self.name
class Address(models.Model):
logradouro = models.CharField(max_length=150, verbose_name='Logradouro')
numero = models.CharField(max_length=4, verbose_name='Número')
complemento = models.CharField(max_length=50, null=True, blank=True, verbose_name='Complemento')
bairro = models.CharField(max_length=150, verbose_name='Bairro')
cep = models.CharField(max_length=8, verbose_name='CEP')
# state = models.ForeignKey(State, on_delete=models.CASCADE, verbose_name=State._meta.verbose_name)
city = models.ForeignKey(City, on_delete=models.CASCADE, verbose_name=City._meta.verbose_name)
class Meta:
verbose_name = 'Endereço'
class HealthCenter(models.Model):
cnes = models.CharField(max_length=7, verbose_name='CNES')
cnpj = models.CharField(max_length=14, verbose_name='CNPJ')
name = models.CharField(max_length=255, verbose_name='Razão Social')
created_at = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name='Criado em:')
updated_at = models.DateTimeField(auto_now_add=False, auto_now=True, verbose_name='Atualizado em:')
address = models.ManyToManyField(Address, verbose_name=Address._meta.verbose_name)
class Meta:
verbose_name = 'Estabelecimento de Saúde'
verbose_name_plural = 'Estabelecimentos de Saúde'
def __str__(self):
return self.name
class Stock(models.Model):
lot = models.PositiveSmallIntegerField(verbose_name='Lote')
created_at = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name='Criado em:')
updated_at = models.DateTimeField(auto_now_add=False, auto_now=True, verbose_name='Atualizado em:')
health_center = models.ForeignKey(HealthCenter, on_delete=models.CASCADE,
verbose_name=HealthCenter._meta.verbose_name)
vaccines = models.ManyToManyField(Vaccine, through='VaccineStock', verbose_name=Vaccine._meta.verbose_name)
class Meta:
verbose_name = 'Estoque'
class VaccineStock(models.Model):
amount = models.PositiveSmallIntegerField(verbose_name='Quantidade recebida')
remaining = models.PositiveSmallIntegerField(verbose_name='Quantidade restante')
vaccine = models.ForeignKey(Vaccine, on_delete=models.DO_NOTHING, verbose_name=Vaccine._meta.verbose_name)
stock = models.ForeignKey(Stock, on_delete=models.DO_NOTHING, verbose_name=Stock._meta.verbose_name)
class Meta:
verbose_name = 'Estoque de Vacina'
def __str__(self):
return self.vaccine.name
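# Illustrative ORM usage (added sketch; assumes migrations are applied and
# `some_vaccine` is an existing vaccination.Vaccine instance):
#   state = State.objects.create(name='Rio Grande do Norte')
#   city = City.objects.create(name='Natal', state=state)
#   hc = HealthCenter.objects.create(cnes='1234567', cnpj='00000000000000',
#                                    name='UBS Centro')
#   stock = Stock.objects.create(lot=1, health_center=hc)
#   VaccineStock.objects.create(vaccine=some_vaccine, stock=stock,
#                               amount=100, remaining=100)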
| 2.078125 | 2 |
test/test_rimuhosting.py | shenoyn/libcloud | 1 | 3580 | <reponame>shenoyn/libcloud
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# libcloud.org licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2009 RedRata Ltd
from libcloud.drivers.rimuhosting import RimuHostingNodeDriver
from test import MockHttp, TestCaseMixin
import unittest
import httplib
class RimuHostingTest(unittest.TestCase, TestCaseMixin):
def setUp(self):
RimuHostingNodeDriver.connectionCls.conn_classes = (None,
RimuHostingMockHttp)
self.driver = RimuHostingNodeDriver('foo')
def test_list_nodes(self):
nodes = self.driver.list_nodes()
self.assertEqual(len(nodes),1)
node = nodes[0]
self.assertEqual(node.public_ip[0], "1.2.3.4")
self.assertEqual(node.public_ip[1], "1.2.3.5")
self.assertEqual(node.extra['order_oid'], 88833465)
self.assertEqual(node.id, "order-88833465-api-ivan-net-nz")
def test_list_sizes(self):
sizes = self.driver.list_sizes()
self.assertEqual(len(sizes),1)
size = sizes[0]
self.assertEqual(size.ram,950)
self.assertEqual(size.disk,20)
self.assertEqual(size.bandwidth,75)
self.assertEqual(size.price,32.54)
def test_list_images(self):
images = self.driver.list_images()
self.assertEqual(len(images),6)
image = images[0]
self.assertEqual(image.name,"Debian 5.0 (aka Lenny, RimuHosting"\
" recommended distro)")
self.assertEqual(image.id, "lenny")
def test_reboot_node(self):
# Raises exception on failure
node = self.driver.list_nodes()[0]
self.driver.reboot_node(node)
def test_destroy_node(self):
# Raises exception on failure
node = self.driver.list_nodes()[0]
self.driver.destroy_node(node)
def test_create_node(self):
# Raises exception on failure
size = self.driver.list_sizes()[0]
image = self.driver.list_images()[0]
self.driver.create_node(name="api.ivan.net.nz", image=image, size=size)
class RimuHostingMockHttp(MockHttp):
def _r_orders(self,method,url,body,headers):
body = """
{ "get_orders_response" :
{ "status_message" : null
, "status_code" : 200
, "error_info" : null
, "response_type" : "OK"
, "human_readable_message" : "Found 15 orders"
, "response_display_duration_type" : "REGULAR",
"about_orders" :
[{ "order_oid" : 88833465
, "domain_name" : "api.ivan.net.nz"
, "slug" : "order-88833465-api-ivan-net-nz"
, "billing_oid" : 96122465
, "is_on_customers_own_physical_server" : false
, "vps_parameters" : { "memory_mb" : 160
, "disk_space_mb" : 4096
, "disk_space_2_mb" : 0}
, "host_server_oid" : "764"
, "server_type" : "VPS"
, "data_transfer_allowance" : { "data_transfer_gb" : 30
, "data_transfer" : "30"}
, "billing_info" : { }
, "allocated_ips" : { "primary_ip" : "192.168.3.11"
, "secondary_ips" : ["172.16.17.32","172.16.31.10"]}
, "running_state" : "RUNNING"}]}}"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _r_pricing_plans(self,method,url,body,headers):
body = """
{"get_pricing_plans_response" :
{ "status_message" : null
, "status_code" : 200
, "error_info" : null
, "response_type" : "OK"
, "human_readable_message" : "Here some pricing plans we are offering on new orders. Note we offer most disk and memory sizes. So if you setup a new server feel free to vary these (e.g. different memory, disk, etc) and we will just adjust the pricing to suit. Pricing is in USD. If you are an NZ-based customer then we would need to add GST."
, "response_display_duration_type" : "REGULAR"
, "pricing_plan_infos" :
[{ "pricing_plan_code" : "MiroVPSLowContention"
, "pricing_plan_description" : "MiroVPS Semi-Dedicated Server (Dallas)"
, "monthly_recurring_fee" : 32.54
, "monthly_recurring_amt" : { "amt" : 35.0
, "currency" : "CUR_AUD"
,"amt_usd" : 32.54}
, "minimum_memory_mb" : 950
, "minimum_disk_gb" : 20
, "minimum_data_transfer_allowance_gb" : 75
, "see_also_url" : "http://rimuhosting.com/order/serverdetails.jsp?plan=MiroVPSLowContention"
, "server_type" : "VPS"
, "offered_at_data_center" :
{ "data_center_location_code" : "DCDALLAS"
, "data_center_location_name" : "Dallas"}}
]}}
"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _r_distributions(self, method, url, body, headers):
body = """
{ "get_distros_response" : { "status_message" : null
, "status_code" : 200
, "error_info" : null
, "response_type" : "OK"
, "human_readable_message" : "Here are the distros we are offering on new orders."
, "response_display_duration_type" : "REGULAR"
, "distro_infos" : [{ "distro_code" : "lenny"
, "distro_description" : "Debian 5.0 (aka Lenny, RimuHosting recommended distro)"}
, { "distro_code" : "centos5"
, "distro_description" : "Centos5"}
, { "distro_code" : "ubuntu904"
, "distro_description" : "Ubuntu 9.04 (Jaunty Jackalope, from 2009-04)"}
, { "distro_code" : "ubuntu804"
, "distro_description" : "Ubuntu 8.04 (Hardy Heron, 5 yr long term support (LTS))"}
, { "distro_code" : "ubuntu810"
, "distro_description" : "Ubuntu 8.10 (Intrepid Ibex, from 2008-10)"}
, { "distro_code" : "fedora10"
, "distro_description" : "Fedora 10"}]}}
"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _r_orders_new_vps(self, method, url, body, headers):
body = """
{ "post_new_vps_response" :
{ "status_message" : null
, "status_code" : 200
, "error_info" : null
, "response_type" : "OK"
, "human_readable_message" : null
, "response_display_duration_type" : "REGULAR"
, "setup_messages" :
["Using user-specified billing data: Wire Transfer" , "Selected user as the owner of the billing details: <NAME>"
, "No VPS paramters provided, using default values."]
, "about_order" :
{ "order_oid" : 52255865
, "domain_name" : "api.ivan.net.nz"
, "slug" : "order-52255865-api-ivan-net-nz"
, "billing_oid" : 96122465
, "is_on_customers_own_physical_server" : false
, "vps_parameters" :
{ "memory_mb" : 160
, "disk_space_mb" : 4096
, "disk_space_2_mb" : 0}
, "host_server_oid" : "764"
, "server_type" : "VPS"
, "data_transfer_allowance" :
{ "data_transfer_gb" : 30 , "data_transfer" : "30"}
, "billing_info" : { }
, "allocated_ips" :
{ "primary_ip" : "172.16.17.32", "secondary_ips" : []}
, "running_state" : "RUNNING"}
, "new_order_request" :
{ "billing_oid" : 96122465
, "user_oid" : 0
, "host_server_oid" : null
, "vps_order_oid_to_clone" : 0
, "ip_request" :
{ "num_ips" : 1, "extra_ip_reason" : ""}
, "vps_parameters" :
{ "memory_mb" : 160
, "disk_space_mb" : 4096
, "disk_space_2_mb" : 0}
, "pricing_plan_code" : "MIRO1B"
, "instantiation_options" :
{ "control_panel" : "webmin"
, "domain_name" : "api.ivan.net.nz"
, "password" : "<PASSWORD>"
, "distro" : "lenny"}}
, "running_vps_info" :
{ "pings_ok" : true
, "current_kernel" : "default"
, "current_kernel_canonical" : "2.6.30.5-xenU.i386"
, "last_backup_message" : ""
, "is_console_login_enabled" : false
, "console_public_authorized_keys" : null
, "is_backup_running" : false
, "is_backups_enabled" : true
, "next_backup_time" :
{ "ms_since_epoch": 1256446800000, "iso_format" : "2009-10-25T05:00:00Z", "users_tz_offset_ms" : 46800000}
, "vps_uptime_s" : 31
, "vps_cpu_time_s" : 6
, "running_state" : "RUNNING"
, "is_suspended" : false}}}
"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _r_orders_order_88833465_api_ivan_net_nz_vps(self, method, url, body, headers):
body = """
{ "delete_server_response" :
{ "status_message" : null
, "status_code" : 200
, "error_info" : null
, "response_type" : "OK"
, "human_readable_message" : "Server removed"
, "response_display_duration_type" : "REGULAR"
, "cancel_messages" :
["api.ivan.net.nz is being shut down."
, "A $7.98 credit has been added to your account."
, "If you need to un-cancel the server please contact our support team."]
}
}
"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _r_orders_order_88833465_api_ivan_net_nz_vps_running_state(self, method,
url, body,
headers):
body = """
{ "put_running_state_response" :
{ "status_message" : null
, "status_code" : 200
, "error_info" : null
, "response_type" : "OK"
, "human_readable_message" : "api.ivan.net.nz restarted. After the reboot api.ivan.net.nz is pinging OK."
, "response_display_duration_type" : "REGULAR"
, "is_restarted" : true
, "is_pinging" : true
, "running_vps_info" :
{ "pings_ok" : true
, "current_kernel" : "default"
, "current_kernel_canonical" : "2.6.30.5-xenU.i386"
, "last_backup_message" : ""
, "is_console_login_enabled" : false
, "console_public_authorized_keys" : null
, "is_backup_running" : false
, "is_backups_enabled" : true
, "next_backup_time" :
{ "ms_since_epoch": 1256446800000, "iso_format" : "2009-10-25T05:00:00Z", "users_tz_offset_ms" : 46800000}
, "vps_uptime_s" : 19
, "vps_cpu_time_s" : 5
, "running_state" : "RUNNING"
, "is_suspended" : false}
, "host_server_info" : { "is_host64_bit_capable" : true
, "default_kernel_i386" : "2.6.30.5-xenU.i386"
, "default_kernel_x86_64" : "2.6.30.5-xenU.x86_64"
, "cpu_model_name" : "Intel(R) Xeon(R) CPU E5506 @ 2.13GHz"
, "host_num_cores" : 1
, "host_xen_version" : "3.4.1"
, "hostload" : [1.45
, 0.56
, 0.28]
, "host_uptime_s" : 3378276
, "host_mem_mb_free" : 51825
, "host_mem_mb_total" : 73719
, "running_vpss" : 34}
, "running_state_messages" : null}}
"""
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
| 1.765625 | 2 |
tests/base_tests/polygon_tests/test_contains.py | lycantropos/gon | 10 | 3581 | <gh_stars>1-10
from typing import Tuple
from hypothesis import given
from gon.base import (Point,
Polygon)
from tests.utils import (equivalence,
implication)
from . import strategies
@given(strategies.polygons)
def test_vertices(polygon: Polygon) -> None:
assert all(vertex in polygon
for vertex in polygon.border.vertices)
assert all(vertex in polygon
for hole in polygon.holes
for vertex in hole.vertices)
@given(strategies.polygons_with_points)
def test_convex_hull(polygon_with_point: Tuple[Polygon, Point]) -> None:
polygon, point = polygon_with_point
assert implication(point in polygon, point in polygon.convex_hull)
@given(strategies.polygons_with_points)
def test_indexing(polygon_with_point: Tuple[Polygon, Point]) -> None:
polygon, point = polygon_with_point
before_indexing = point in polygon
polygon.index()
after_indexing = point in polygon
assert equivalence(before_indexing, after_indexing)
| 2.875 | 3 |
easyidp/core/tests/test_class_reconsproject.py | HowcanoeWang/EasyIDP | 0 | 3582 | import os
import numpy as np
import pytest
import easyidp
from easyidp.core.objects import ReconsProject, Points
from easyidp.io import metashape
module_path = os.path.join(easyidp.__path__[0], "io/tests")
def test_init_reconsproject():
attempt1 = ReconsProject("agisoft")
assert attempt1.software == "metashape"
attempt2 = ReconsProject("Metashape")
assert attempt2.software == "metashape"
with pytest.raises(LookupError):
attempt3 = ReconsProject("not_supported_sfm")
def test_local2world2local():
attempt1 = ReconsProject("agisoft")
attempt1.transform.matrix = np.asarray([[-0.86573098, -0.01489186, 0.08977677, 7.65034123],
[0.06972335, 0.44334391, 0.74589315, 1.85910928],
[-0.05848325, 0.74899678, -0.43972184, -0.1835615],
[0., 0., 0., 1.]], dtype=np.float)
w_pos = Points([0.5, 1, 1.5])
l_pos = Points([7.960064093299587, 1.3019528769064523, -2.6697181763370965])
w_pos_ans = Points([0.4999999999999978, 0.9999999999999993, 1.5])
world_pos = attempt1.local2world(l_pos)
np.testing.assert_array_almost_equal(w_pos_ans.values, world_pos.values, decimal=6)
local_pos = attempt1.world2local(w_pos)
np.testing.assert_array_almost_equal(l_pos.values, local_pos.values, decimal=6)
def test_metashape_project_local_points_on_raw():
test_project_folder = easyidp.test_full_path("data/metashape/goya_test.psx")
chunks = metashape.open_project(test_project_folder)
chunk = chunks[0]
# test for single point
l_pos = Points([7.960064093299587, 1.3019528769064523, -2.6697181763370965])
p_dis_out = chunk.project_local_points_on_raw(l_pos, 0, distortion_correct=False)
p_undis_out = chunk.project_local_points_on_raw(l_pos, 0, distortion_correct=True)
# pro_api_out = np.asarray([2218.883386793118, 1991.4709388015149])
my_undistort_out = Points([2220.854889556147, 1992.6933680261686])
my_distort_out = Points([2218.47960556, 1992.46356322])
np.testing.assert_array_almost_equal(p_dis_out.values, my_distort_out.values)
np.testing.assert_array_almost_equal(p_undis_out.values, my_undistort_out.values)
# test for multiple points
l_pos_points = Points([[7.960064093299587, 1.3019528769064523, -2.6697181763370965],
[7.960064093299587, 1.3019528769064523, -2.6697181763370965]])
p_dis_outs = chunk.project_local_points_on_raw(l_pos_points, 0, distortion_correct=False)
p_undis_outs = chunk.project_local_points_on_raw(l_pos_points, 0, distortion_correct=True)
my_undistort_outs = Points([[2220.854889556147, 1992.6933680261686],
[2220.854889556147, 1992.6933680261686]])
my_distort_outs = Points([[2218.47960556, 1992.46356322],
[2218.47960556, 1992.46356322]])
np.testing.assert_array_almost_equal(p_dis_outs.values, my_distort_outs.values)
np.testing.assert_array_almost_equal(p_undis_outs.values, my_undistort_outs.values)
def test_world2crs_and_on_raw_images():
test_project_folder = easyidp.test_full_path("data/metashape/wheat_tanashi.psx")
chunks = metashape.open_project(test_project_folder)
chunk = chunks[0]
local = Points([11.870130675203006, 0.858098777517136, -12.987136541275])
geocentric = Points([-3943658.7087006606, 3363404.124223561, 3704651.3067566575])
geodetic = Points([139.54033578028609, 35.73756358928734, 96.87827569602781], columns=['lon', 'lat', 'alt'])
idp_world = chunk.local2world(local)
np.testing.assert_array_almost_equal(idp_world.values, geocentric.values, decimal=1)
idp_crs = chunk.world2crs(idp_world)
np.testing.assert_array_almost_equal(idp_crs.values, geodetic.values)
camera_id = 56 # camera_label = 'DJI_0057'
camera_pix_ans = Points([2391.7104647010146, 1481.8987733175165])
idp_cam_pix = chunk.project_local_points_on_raw(local, camera_id, distortion_correct=True)
np.testing.assert_array_almost_equal(camera_pix_ans.values, idp_cam_pix.values)
| 2.046875 | 2 |
withings_api/const.py | tiloc/python_withings_api | 0 | 3583 | <gh_stars>0
"""Constant values."""
STATUS_SUCCESS = (0,)
STATUS_AUTH_FAILED = (100, 101, 102, 200, 401)
STATUS_INVALID_PARAMS = (
201,
202,
203,
204,
205,
206,
207,
208,
209,
210,
211,
212,
213,
216,
217,
218,
220,
221,
223,
225,
227,
228,
229,
230,
234,
235,
236,
238,
240,
241,
242,
243,
244,
245,
246,
247,
248,
249,
250,
251,
252,
254,
260,
261,
262,
263,
264,
265,
266,
267,
271,
272,
275,
276,
283,
284,
285,
286,
287,
288,
290,
293,
294,
295,
297,
300,
301,
302,
303,
304,
321,
323,
324,
325,
326,
327,
328,
329,
330,
331,
332,
333,
334,
335,
336,
337,
338,
339,
340,
341,
342,
343,
344,
345,
346,
347,
348,
349,
350,
351,
352,
353,
380,
381,
382,
400,
501,
502,
503,
504,
505,
506,
509,
510,
511,
523,
532,
3017,
3018,
3019,
)
STATUS_UNAUTHORIZED = (214, 277, 2553, 2554, 2555)
STATUS_ERROR_OCCURRED = (
215,
219,
222,
224,
226,
231,
233,
237,
253,
255,
256,
257,
258,
259,
268,
269,
270,
273,
274,
278,
279,
280,
281,
282,
289,
291,
292,
296,
298,
305,
306,
308,
309,
310,
311,
312,
313,
314,
315,
316,
317,
318,
319,
320,
322,
370,
371,
372,
373,
374,
375,
383,
391,
402,
516,
517,
518,
519,
520,
521,
525,
526,
527,
528,
529,
530,
531,
533,
602,
700,
1051,
1052,
1053,
1054,
2551,
2552,
2556,
2557,
2558,
2559,
3000,
3001,
3002,
3003,
3004,
3005,
3006,
3007,
3008,
3009,
3010,
3011,
3012,
3013,
3014,
3015,
3016,
3020,
3021,
3022,
3023,
3024,
5000,
5001,
5005,
5006,
6000,
6010,
6011,
9000,
10000,
)
STATUS_TIMEOUT = (522,)
STATUS_BAD_STATE = (524,)
STATUS_TOO_MANY_REQUESTS = (601,)
| 1.859375 | 2 |
examples/canvas/bezier.py | sirpercival/kivy | 2 | 3584 | #!/usr/bin/env python
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.slider import Slider
from kivy.graphics import Color, Bezier, Line
class BezierTest(FloatLayout):
def __init__(self, points=[], loop=False, *args, **kwargs):
super(BezierTest, self).__init__(*args, **kwargs)
self.d = 10
self.points = points
self.loop = loop
self.current_point = None
with self.canvas:
Color(1.0, 0.0, 0.0)
self.bezier = Bezier(
points=self.points,
segments=150,
loop=self.loop,
dash_length=100,
dash_offset=10)
Color(1.0, 0.0, 1.0)
self.line = Line(
points=self.points+self.points[:2],
dash_offset=10,
dash_length=100)
s = Slider(y=0, pos_hint={'x': .3}, size_hint=(.7, None), height=50)
s.bind(value=self._set_bezier_dash_offset)
self.add_widget(s)
s = Slider(y=50, pos_hint={'x': .3}, size_hint=(.7, None), height=50)
s.bind(value=self._set_line_dash_offset)
self.add_widget(s)
def _set_bezier_dash_offset(self, instance, value):
# effect to reduce length while increase offset
self.bezier.dash_length = 100 - value
self.bezier.dash_offset = value
def _set_line_dash_offset(self, instance, value):
# effect to reduce length while increase offset
self.line.dash_length = 100 - value
self.line.dash_offset = value
def on_touch_down(self, touch):
if self.collide_point(touch.pos[0], touch.pos[1]):
for i, p in enumerate(list(zip(self.points[::2], self.points[1::2]))):
if (
abs(touch.pos[0] - self.pos[0] - p[0]) < self.d and
abs(touch.pos[1] - self.pos[1] - p[1]) < self.d):
self.current_point = i + 1
return True
return super(BezierTest, self).on_touch_down(touch)
def on_touch_up(self, touch):
if self.collide_point(touch.pos[0], touch.pos[1]):
if self.current_point:
self.current_point = None
return True
return super(BezierTest, self).on_touch_up(touch)
def on_touch_move(self, touch):
if self.collide_point(touch.pos[0], touch.pos[1]):
c = self.current_point
if c:
self.points[(c - 1) * 2] = touch.pos[0] - self.pos[0]
self.points[(c - 1) * 2 + 1] = touch.pos[1] - self.pos[1]
self.bezier.points = self.points
self.line.points = self.points + self.points[:2]
return True
return super(BezierTest, self).on_touch_move(touch)
class Main(App):
def build(self):
from math import cos, sin, radians
x = y = 150
l = 100
# Pacman !
points = [x, y]
for i in range(45, 360, 45):
i = radians(i)
points.extend([x + cos(i) * l, y + sin(i) * l])
return BezierTest(points=points, loop=True)
if __name__ == '__main__':
Main().run()
| 3.046875 | 3 |
tests/bugs/core_6489_test.py | reevespaul/firebird-qa | 0 | 3585 | <filename>tests/bugs/core_6489_test.py<gh_stars>0
#coding:utf-8
#
# id: bugs.core_6489
# title: User without ALTER ANY ROLE privilege can use COMMENT ON ROLE
# decription:
# Test creates two users: one of them has no any rights, second is granted with 'alter any role' privilege.
# First user ('junior') must not have ability to add comment to rdb$admin role, but second ('senior') must
# be able to set comment to any string and make it null.
#
# Confirmed bug on 4.0.0.2384, 3.0.8.33425
# Checked on: 4.0.0.2387, 3.0.8.33426 -- all OK.
#
# NOTE:
# phrase '-Effective user is ...' presents only in FB 4.x and is suppressed here.
#
# tracker_id: CORE-6489
# min_versions: ['3.0.8']
# versions: 3.0.8
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0.8
# resources: None
substitutions_1 = [('ROLE_DESCR_BLOB_ID .*', ''), ('[\t ]+', ' '), ('(-)?Effective user is.*', '')]
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
create or alter user tmp$c6489_junior password '<PASSWORD>' using plugin Srp;
create or alter user tmp$c6489_senior password '<PASSWORD>' using plugin Srp;
commit;
grant alter any role to user tmp$c6489_senior;
commit;
connect '$(DSN)' user tmp$c6489_junior password '<PASSWORD>';
comment on role rdb$admin is 'Comment by tmp$c6489_junior';
commit;
connect '$(DSN)' user tmp$c6489_senior password '<PASSWORD>';
comment on role rdb$admin is 'Comment by tmp$c6489_senior';
commit;
set list on;
select r.rdb$description as role_descr_blob_id from rdb$roles r where r.rdb$role_name = upper('rdb$admin');
commit;
comment on role rdb$admin is null;
commit;
connect '$(DSN)' user 'SYSDBA' password '<PASSWORD>';
drop user tmp$c6489_junior using plugin Srp;
drop user tmp$c6489_senior using plugin Srp;
commit;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Comment by tmp$c6489_senior
"""
expected_stderr_1 = """
Statement failed, SQLSTATE = 28000
unsuccessful metadata update
-COMMENT ON RDB$ADMIN failed
-no permission for ALTER access to ROLE RDB$ADMIN
-Effective user is TMP$C6489_JUNIOR
"""
@pytest.mark.version('>=3.0.8')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.expected_stderr = expected_stderr_1
act_1.execute()
assert act_1.clean_expected_stderr == act_1.clean_stderr
assert act_1.clean_expected_stdout == act_1.clean_stdout
| 1.4375 | 1 |
utils/utils_bbox.py | MasoonZhang/FasterRConvMixer | 0 | 3586 | <reponame>MasoonZhang/FasterRConvMixer
import numpy as np
import torch
from torch.nn import functional as F
from torchvision.ops import nms
def loc2bbox(src_bbox, loc):
if src_bbox.size()[0] == 0:
return torch.zeros((0, 4), dtype=loc.dtype)
src_width = torch.unsqueeze(src_bbox[:, 2] - src_bbox[:, 0], -1)
src_height = torch.unsqueeze(src_bbox[:, 3] - src_bbox[:, 1], -1)
src_ctr_x = torch.unsqueeze(src_bbox[:, 0], -1) + 0.5 * src_width
src_ctr_y = torch.unsqueeze(src_bbox[:, 1], -1) + 0.5 * src_height
dx = loc[:, 0::4]
dy = loc[:, 1::4]
dw = loc[:, 2::4]
dh = loc[:, 3::4]
ctr_x = dx * src_width + src_ctr_x
ctr_y = dy * src_height + src_ctr_y
w = torch.exp(dw) * src_width
h = torch.exp(dh) * src_height
dst_bbox = torch.zeros_like(loc)
dst_bbox[:, 0::4] = ctr_x - 0.5 * w
dst_bbox[:, 1::4] = ctr_y - 0.5 * h
dst_bbox[:, 2::4] = ctr_x + 0.5 * w
dst_bbox[:, 3::4] = ctr_y + 0.5 * h
return dst_bbox
class DecodeBox():
def __init__(self, std, num_classes):
self.std = std
self.num_classes = num_classes + 1
def frcnn_correct_boxes(self, box_xy, box_wh, input_shape, image_shape):
        #-----------------------------------------------------------------#
        #   Put the y axis first so the predicted boxes can be multiplied
        #   by the image height and width conveniently.
        #-----------------------------------------------------------------#
box_yx = box_xy[..., ::-1]
box_hw = box_wh[..., ::-1]
input_shape = np.array(input_shape)
image_shape = np.array(image_shape)
box_mins = box_yx - (box_hw / 2.)
box_maxes = box_yx + (box_hw / 2.)
boxes = np.concatenate([box_mins[..., 0:1], box_mins[..., 1:2], box_maxes[..., 0:1], box_maxes[..., 1:2]], axis=-1)
boxes *= np.concatenate([image_shape, image_shape], axis=-1)
return boxes
def forward(self, roi_cls_locs, roi_scores, rois, image_shape, input_shape, nms_iou = 0.3, confidence = 0.5):
results = []
bs = len(roi_cls_locs)
#--------------------------------#
# batch_size, num_rois, 4
#--------------------------------#
rois = rois.view((bs, -1, 4))
        #----------------------------------------------------------------------------------------------------------------#
        #   Process one image at a time. predict.py feeds in a single image, so for i in range(len(mbox_loc)) runs only once.
        #----------------------------------------------------------------------------------------------------------------#
for i in range(bs):
            #----------------------------------------------------------#
            #   Reshape the regression parameters.
            #----------------------------------------------------------#
roi_cls_loc = roi_cls_locs[i] * self.std
            #----------------------------------------------------------#
            #   The first dimension is the number of proposal boxes,
            #   the second is the class, and the third holds the
            #   per-class adjustment parameters.
            #----------------------------------------------------------#
roi_cls_loc = roi_cls_loc.view([-1, self.num_classes, 4])
            #-------------------------------------------------------------#
            #   Use the classifier network predictions to adjust the
            #   proposals into predicted boxes.
            #   num_rois, 4 -> num_rois, 1, 4 -> num_rois, num_classes, 4
            #-------------------------------------------------------------#
roi = rois[i].view((-1, 1, 4)).expand_as(roi_cls_loc)
cls_bbox = loc2bbox(roi.contiguous().view((-1, 4)), roi_cls_loc.contiguous().view((-1, 4)))
cls_bbox = cls_bbox.view([-1, (self.num_classes), 4])
            #-------------------------------------------------------------#
            #   Normalize the predicted boxes into the 0-1 range.
            #-------------------------------------------------------------#
cls_bbox[..., [0, 2]] = (cls_bbox[..., [0, 2]]) / input_shape[1]
cls_bbox[..., [1, 3]] = (cls_bbox[..., [1, 3]]) / input_shape[0]
roi_score = roi_scores[i]
prob = F.softmax(roi_score, dim=-1)
results.append([])
for c in range(1, self.num_classes):
                #--------------------------------#
                #   Take the confidences of all
                #   boxes of this class and check
                #   them against the threshold.
                #--------------------------------#
c_confs = prob[:, c]
c_confs_m = c_confs > confidence
if len(c_confs[c_confs_m]) > 0:
                    #-----------------------------------------#
                    #   Keep boxes scoring above confidence.
                    #-----------------------------------------#
boxes_to_process = cls_bbox[c_confs_m, c]
confs_to_process = c_confs[c_confs_m]
keep = nms(
boxes_to_process,
confs_to_process,
nms_iou
)
                    #-----------------------------------------#
                    #   Keep the boxes retained by NMS.
                    #-----------------------------------------#
good_boxes = boxes_to_process[keep]
confs = confs_to_process[keep][:, None]
labels = (c - 1) * torch.ones((len(keep), 1)).cuda() if confs.is_cuda else (c - 1) * torch.ones((len(keep), 1))
                    #-----------------------------------------#
                    #   Stack label, confidence and box
                    #   coordinates together.
                    #-----------------------------------------#
c_pred = torch.cat((good_boxes, confs, labels), dim=1).cpu().numpy()
                    # append to the results list
results[-1].extend(c_pred)
if len(results[-1]) > 0:
results[-1] = np.array(results[-1])
box_xy, box_wh = (results[-1][:, 0:2] + results[-1][:, 2:4])/2, results[-1][:, 2:4] - results[-1][:, 0:2]
results[-1][:, :4] = self.frcnn_correct_boxes(box_xy, box_wh, input_shape, image_shape)
return results
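# --- Hedged sanity check (editor addition, illustrative only) ---------------
# Running `python utils_bbox.py` verifies that loc2bbox is the identity
# mapping when every regression offset is zero (exp(0) leaves width and
# height unchanged). The anchor values below are made up for illustration.
if __name__ == "__main__":
    _anchor = torch.tensor([[10., 10., 50., 90.]])  # (x1, y1, x2, y2)
    _zero_offsets = torch.zeros((1, 4))             # (dx, dy, dw, dh)
    assert torch.allclose(loc2bbox(_anchor, _zero_offsets), _anchor)
    print("loc2bbox identity check passed")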
| 2.046875 | 2 |
lib/python/test/__init__.py | woozhijun/cat | 17,318 | 3587 | #!/usr/bin/env python
# encoding: utf-8
import sys
reload(sys)
sys.setdefaultencoding("utf-8") | 1.414063 | 1 |
tests/test_pyclipper.py | odidev/pyclipper | 0 | 3588 | <filename>tests/test_pyclipper.py<gh_stars>0
#!/usr/bin/python
"""
Tests for Pyclipper wrapper library.
"""
from __future__ import print_function
from unittest2 import TestCase, main
import sys
if sys.version_info < (3,):
integer_types = (int, long)
else:
integer_types = (int,)
import pyclipper
# Example polygons from http://www.angusj.com/delphi/clipper.php
PATH_SUBJ_1 = [[180, 200], [260, 200], [260, 150], [180, 150]] # square, orientation is False
PATH_SUBJ_2 = [[215, 160], [230, 190], [200, 190]] # triangle
PATH_CLIP_1 = [[190, 210], [240, 210], [240, 130], [190, 130]] # square
PATH_SIGMA = [[300, 400], [100, 400], [200, 300], [100, 200], [300, 200]] # greek letter sigma
PATTERN = [[4, -6], [6, -6], [-4, 6], [-6, 6]]
INVALID_PATH = [[1, 1], ] # less than 2 vertices
class TestPyclipperModule(TestCase):
def test_has_classes(self):
self.assertTrue(hasattr(pyclipper, 'Pyclipper'))
self.assertTrue(hasattr(pyclipper, 'PyclipperOffset'))
def test_has_namespace_methods(self):
for method in ('Orientation', 'Area', 'PointInPolygon', 'SimplifyPolygon', 'SimplifyPolygons',
'CleanPolygon', 'CleanPolygons', 'MinkowskiSum', 'MinkowskiSum2', 'MinkowskiDiff',
'PolyTreeToPaths', 'ClosedPathsFromPolyTree', 'OpenPathsFromPolyTree',
'ReversePath', 'ReversePaths'):
self.assertTrue(hasattr(pyclipper, method))
class TestNamespaceMethods(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
def test_orientation(self):
self.assertFalse(pyclipper.Orientation(PATH_SUBJ_1))
self.assertTrue(pyclipper.Orientation(PATH_SUBJ_1[::-1]))
def test_area(self):
# area less than 0 because orientation is False
area_neg = pyclipper.Area(PATH_SUBJ_1)
area_pos = pyclipper.Area(PATH_SUBJ_1[::-1])
self.assertLess(area_neg, 0)
self.assertGreater(area_pos, 0)
self.assertEqual(abs(area_neg), area_pos)
def test_point_in_polygon(self):
# on polygon
self.assertEqual(pyclipper.PointInPolygon((180, 200), PATH_SUBJ_1), -1)
# in polygon
self.assertEqual(pyclipper.PointInPolygon((200, 180), PATH_SUBJ_1), 1)
# outside of polygon
self.assertEqual(pyclipper.PointInPolygon((500, 500), PATH_SUBJ_1), 0)
def test_minkowski_sum(self):
solution = pyclipper.MinkowskiSum(PATTERN, PATH_SIGMA, False)
self.assertGreater(len(solution), 0)
def test_minkowski_sum2(self):
solution = pyclipper.MinkowskiSum2(PATTERN, [PATH_SIGMA], False)
self.assertGreater(len(solution), 0)
def test_minkowski_diff(self):
solution = pyclipper.MinkowskiDiff(PATH_SUBJ_1, PATH_SUBJ_2)
self.assertGreater(len(solution), 0)
def test_reverse_path(self):
solution = pyclipper.ReversePath(PATH_SUBJ_1)
manualy_reversed = PATH_SUBJ_1[::-1]
self.check_reversed_path(solution, manualy_reversed)
def test_reverse_paths(self):
solution = pyclipper.ReversePaths([PATH_SUBJ_1])
manualy_reversed = [PATH_SUBJ_1[::-1]]
self.check_reversed_path(solution[0], manualy_reversed[0])
def check_reversed_path(self, path_1, path_2):
        if len(path_1) != len(path_2):
return False
for i in range(len(path_1)):
self.assertEqual(path_1[i][0], path_2[i][0])
self.assertEqual(path_1[i][1], path_2[i][1])
def test_simplify_polygon(self):
solution = pyclipper.SimplifyPolygon(PATH_SUBJ_1)
self.assertEqual(len(solution), 1)
def test_simplify_polygons(self):
solution = pyclipper.SimplifyPolygons([PATH_SUBJ_1])
solution_single = pyclipper.SimplifyPolygon(PATH_SUBJ_1)
self.assertEqual(len(solution), 1)
self.assertEqual(len(solution), len(solution_single))
_do_solutions_match(solution, solution_single)
def test_clean_polygon(self):
solution = pyclipper.CleanPolygon(PATH_CLIP_1)
self.assertEqual(len(solution), len(PATH_CLIP_1))
def test_clean_polygons(self):
solution = pyclipper.CleanPolygons([PATH_CLIP_1])
self.assertEqual(len(solution), 1)
self.assertEqual(len(solution[0]), len(PATH_CLIP_1))
class TestFilterPyPolyNode(TestCase):
def setUp(self):
tree = pyclipper.PyPolyNode()
tree.Contour.append(PATH_CLIP_1)
tree.IsOpen = True
child = pyclipper.PyPolyNode()
child.IsOpen = False
child.Parent = tree
child.Contour = PATH_SUBJ_1
tree.Childs.append(child)
child = pyclipper.PyPolyNode()
child.IsOpen = True
child.Parent = tree
child.Contour = PATH_SUBJ_2
tree.Childs.append(child)
child2 = pyclipper.PyPolyNode()
child2.IsOpen = False
child2.Parent = child
child2.Contour = PATTERN
child.Childs.append(child2)
# empty contour should not
# be included in filtered results
child2 = pyclipper.PyPolyNode()
child2.IsOpen = False
child2.Parent = child
child2.Contour = []
child.Childs.append(child2)
self.tree = tree
def test_polytree_to_paths(self):
paths = pyclipper.PolyTreeToPaths(self.tree)
self.check_paths(paths, 4)
def test_closed_paths_from_polytree(self):
paths = pyclipper.ClosedPathsFromPolyTree(self.tree)
self.check_paths(paths, 2)
def test_open_paths_from_polytree(self):
paths = pyclipper.OpenPathsFromPolyTree(self.tree)
self.check_paths(paths, 2)
def check_paths(self, paths, expected_nr):
self.assertEqual(len(paths), expected_nr)
self.assertTrue(all((len(path) > 0 for path in paths)))
class TestPyclipperAddPaths(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
self.pc = pyclipper.Pyclipper()
def test_add_path(self):
# should not raise an exception
self.pc.AddPath(PATH_CLIP_1, poly_type=pyclipper.PT_CLIP)
def test_add_paths(self):
# should not raise an exception
self.pc.AddPaths([PATH_SUBJ_1, PATH_SUBJ_2], poly_type=pyclipper.PT_SUBJECT)
def test_add_path_invalid_path(self):
self.assertRaises(pyclipper.ClipperException, self.pc.AddPath, INVALID_PATH, pyclipper.PT_CLIP, True)
def test_add_paths_invalid_path(self):
self.assertRaises(pyclipper.ClipperException, self.pc.AddPaths, [INVALID_PATH, INVALID_PATH],
pyclipper.PT_CLIP, True)
try:
self.pc.AddPaths([INVALID_PATH, PATH_CLIP_1], pyclipper.PT_CLIP)
self.pc.AddPaths([PATH_CLIP_1, INVALID_PATH], pyclipper.PT_CLIP)
except pyclipper.ClipperException:
self.fail("add_paths raised ClipperException when not all paths were invalid")
class TestClassProperties(TestCase):
def check_property_assignment(self, pc, prop_name, values):
for val in values:
setattr(pc, prop_name, val)
self.assertEqual(getattr(pc, prop_name), val)
def test_pyclipper_properties(self):
pc = pyclipper.Pyclipper()
for prop_name in ('ReverseSolution', 'PreserveCollinear', 'StrictlySimple'):
self.check_property_assignment(pc, prop_name, [True, False])
def test_pyclipperoffset_properties(self):
for factor in range(6):
pyclipper.SCALING_FACTOR = 10 ** factor
pc = pyclipper.PyclipperOffset()
for prop_name in ('MiterLimit', 'ArcTolerance'):
self.check_property_assignment(pc, prop_name, [2.912, 132.12, 12, -123])
class TestPyclipperExecute(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
self.pc = pyclipper.Pyclipper()
self.add_default_paths(self.pc)
self.default_args = [pyclipper.CT_INTERSECTION, pyclipper.PFT_EVENODD, pyclipper.PFT_EVENODD]
@staticmethod
def add_default_paths(pc):
pc.AddPath(PATH_CLIP_1, pyclipper.PT_CLIP)
pc.AddPaths([PATH_SUBJ_1, PATH_SUBJ_2], pyclipper.PT_SUBJECT)
@staticmethod
def add_paths(pc, clip_path, subj_paths, addend=None, multiplier=None):
pc.AddPath(_modify_vertices(clip_path, addend=addend, multiplier=multiplier), pyclipper.PT_CLIP)
for subj_path in subj_paths:
pc.AddPath(_modify_vertices(subj_path, addend=addend, multiplier=multiplier), pyclipper.PT_SUBJECT)
def test_get_bounds(self):
bounds = self.pc.GetBounds()
self.assertIsInstance(bounds, pyclipper.PyIntRect)
self.assertEqual(bounds.left, 180)
self.assertEqual(bounds.right, 260)
self.assertEqual(bounds.top, 130)
self.assertEqual(bounds.bottom, 210)
def test_execute(self):
solution = self.pc.Execute(*self.default_args)
self.assertEqual(len(solution), 2)
def test_execute2(self):
solution = self.pc.Execute2(*self.default_args)
self.assertIsInstance(solution, pyclipper.PyPolyNode)
self.check_pypolynode(solution)
def test_execute_empty(self):
pc = pyclipper.Pyclipper()
with self.assertRaises(pyclipper.ClipperException):
pc.Execute(pyclipper.CT_UNION,
pyclipper.PFT_NONZERO,
pyclipper.PFT_NONZERO)
def test_clear(self):
self.pc.Clear()
with self.assertRaises(pyclipper.ClipperException):
self.pc.Execute(*self.default_args)
def test_exact_results(self):
"""
Test whether coordinates passed into the library are returned exactly, if they are not affected by the
operation.
"""
pc = pyclipper.Pyclipper()
# Some large triangle.
path = [[[0, 1], [0, 0], [15 ** 15, 0]]]
pc.AddPaths(path, pyclipper.PT_SUBJECT, True)
result = pc.Execute(pyclipper.PT_CLIP, pyclipper.PFT_EVENODD, pyclipper.PFT_EVENODD)
assert result == path
def check_pypolynode(self, node):
self.assertTrue(len(node.Contour) == 0 or len(node.Contour) > 2)
# check vertex coordinate, should not be an iterable (in that case
# that means that node.Contour is a list of paths, should be path
if node.Contour:
self.assertFalse(hasattr(node.Contour[0][0], '__iter__'))
for child in node.Childs:
self.check_pypolynode(child)
class TestPyclipperOffset(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 1
@staticmethod
def add_path(pc, path):
pc.AddPath(path, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
def test_execute(self):
pc = pyclipper.PyclipperOffset()
self.add_path(pc, PATH_CLIP_1)
solution = pc.Execute(2.0)
self.assertIsInstance(solution, list)
self.assertEqual(len(solution), 1)
def test_execute2(self):
pc = pyclipper.PyclipperOffset()
self.add_path(pc, PATH_CLIP_1)
solution = pc.Execute2(2.0)
self.assertIsInstance(solution, pyclipper.PyPolyNode)
self.assertEqual(len(pyclipper.OpenPathsFromPolyTree(solution)), 0)
self.assertEqual(len(pyclipper.ClosedPathsFromPolyTree(solution)), 1)
def test_clear(self):
pc = pyclipper.PyclipperOffset()
self.add_path(pc, PATH_CLIP_1)
pc.Clear()
solution = pc.Execute(2.0)
self.assertIsInstance(solution, list)
self.assertEqual(len(solution), 0)
class TestScalingFactorWarning(TestCase):
def setUp(self):
pyclipper.SCALING_FACTOR = 2.
self.pc = pyclipper.Pyclipper()
def test_orientation(self):
with self.assertWarns(DeprecationWarning):
pyclipper.Orientation(PATH_SUBJ_1)
def test_area(self):
with self.assertWarns(DeprecationWarning):
pyclipper.Area(PATH_SUBJ_1)
def test_point_in_polygon(self):
with self.assertWarns(DeprecationWarning):
self.assertEqual(pyclipper.PointInPolygon((180, 200), PATH_SUBJ_1), -1)
def test_minkowski_sum(self):
with self.assertWarns(DeprecationWarning):
pyclipper.MinkowskiSum(PATTERN, PATH_SIGMA, False)
def test_minkowski_sum2(self):
with self.assertWarns(DeprecationWarning):
pyclipper.MinkowskiSum2(PATTERN, [PATH_SIGMA], False)
def test_minkowski_diff(self):
with self.assertWarns(DeprecationWarning):
pyclipper.MinkowskiDiff(PATH_SUBJ_1, PATH_SUBJ_2)
def test_add_path(self):
with self.assertWarns(DeprecationWarning):
self.pc.AddPath(PATH_CLIP_1, poly_type=pyclipper.PT_CLIP)
def test_add_paths(self):
with self.assertWarns(DeprecationWarning):
self.pc.AddPaths([PATH_SUBJ_1, PATH_SUBJ_2], poly_type=pyclipper.PT_SUBJECT)
class TestScalingFunctions(TestCase):
scale = 2 ** 31
path = [(0, 0), (1, 1)]
paths = [path] * 3
def test_value_scale_to(self):
value = 0.5
res = pyclipper.scale_to_clipper(value, self.scale)
assert isinstance(res, integer_types)
assert res == int(value * self.scale)
def test_value_scale_from(self):
value = 1000000000000
res = pyclipper.scale_from_clipper(value, self.scale)
assert isinstance(res, float)
# Convert to float to get "normal" division in Python < 3.
assert res == float(value) / self.scale
def test_path_scale_to(self):
res = pyclipper.scale_to_clipper(self.path)
assert len(res) == len(self.path)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, integer_types) for i in res for j in i)
def test_path_scale_from(self):
res = pyclipper.scale_from_clipper(self.path)
assert len(res) == len(self.path)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, float) for i in res for j in i)
def test_paths_scale_to(self):
res = pyclipper.scale_to_clipper(self.paths)
assert len(res) == len(self.paths)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, list) for i in res for j in i)
assert all(isinstance(k, integer_types) for i in res for j in i for k in j)
def test_paths_scale_from(self):
res = pyclipper.scale_from_clipper(self.paths)
assert len(res) == len(self.paths)
assert all(isinstance(i, list) for i in res)
assert all(isinstance(j, list) for i in res for j in i)
assert all(isinstance(k, float) for i in res for j in i for k in j)
class TestNonStandardNumbers(TestCase):
def test_sympyzero(self):
try:
from sympy import Point2D
from sympy.core.numbers import Zero
except ImportError:
self.skipTest("Skipping, sympy not available")
        path = [Point2D(v) for v in [(0,0), (0,1)]]
assert type(path[0].x) == Zero
path = pyclipper.scale_to_clipper(path)
assert path == [[0, 0], [0, 2147483648]]
def _do_solutions_match(paths_1, paths_2, factor=None):
if len(paths_1) != len(paths_2):
return False
paths_1 = [_modify_vertices(p, multiplier=factor, converter=round if factor else None) for p in paths_1]
paths_2 = [_modify_vertices(p, multiplier=factor, converter=round if factor else None) for p in paths_2]
return all(((p_1 in paths_2) for p_1 in paths_1))
def _modify_vertices(path, addend=0.0, multiplier=1.0, converter=None):
path = path[:]
def convert_coordinate(c):
if multiplier is not None:
c *= multiplier
if addend is not None:
c += addend
if converter:
c = converter(c)
return c
return [[convert_coordinate(c) for c in v] for v in path]
def run_tests():
main()
if __name__ == '__main__':
run_tests()
| 2.390625 | 2 |
imageproc_OE_IF_quant/2_annotate_extracted_cells.py | hshayya/2022_Shayya_UPR_Guidance | 0 | 3589 | <filename>imageproc_OE_IF_quant/2_annotate_extracted_cells.py
import xml.etree.ElementTree as ET
import csv
import os
import re
from ij import IJ
from loci.plugins.in import ImporterOptions
from loci.plugins import BF
from ij.plugin import ImagesToStack
from ij import io
#Records metadata (x,y location) for cells that were extracted with 1_find_extract_cells.py
#metadata will be used in subsequent analysis to cluster cells from similar locations on the section -> semi-quantiative, local, analysis
def parse_cellcounter_to_dict(fpath):
'''Parse Cell-Counter Xml file to Dictionary
Inputs:
fpath (str) path to xml file on disk
Values:
(dict). Keys 'x_cal', 'y_cal' = (float) calibrations in each axis.
Keys '1'-'8' = (lists) of tuples containing cell positions in the form (x,y)
'''
tree = ET.parse(fpath)
cells_dict = {}
cells_dict['x_cal'] = float(tree.find('./Image_Properties/X_Calibration').text)
cells_dict['y_cal'] = float(tree.find('./Image_Properties/Y_Calibration').text)
rt = tree.find('Marker_Data') #re-root the tree
for type_ in rt.iter('Marker_Type'):
cells = []
for marker_ in type_.iter('Marker'):
cells.append((int(marker_[0].text), int(marker_[1].text)))
#
cells_dict[type_.find('Type').text] = cells
return cells_dict
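#Editor's illustration (hedged, not part of the original script): for a Cell
#Counter XML whose Image_Properties report 0.62 um/px in x and y and whose
#Marker_Type '7' contains a single marker at pixel (100, 200), the parser
#returns roughly {'x_cal': 0.62, 'y_cal': 0.62, '7': [(100, 200)], ...}.
#Types '7' and '8' are the tdTomato and GFP markers used below.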
#Load Xml Files
xml_locs = ['/path/to/xml/files'] #same as used in find_extract_cells
xml_files = [os.path.join(base_, f) for base_ in xml_locs for f in os.listdir(base_) if f[-3:] == 'xml' and f[0] != '.']
#Work through each xml file
f_out_path = '/path/to/annotation/out.tsv'
with open(f_out_path,'w') as fout:
fout.write('\t'.join(['cell','x_um','y_um']))
for e,xml_ in enumerate(xml_files):
print 'Working on file: ' + os.path.split(xml_)[1] + '...' + str(e+1) + '/' + str(len(xml_files))
#Find the orig .nd2 file, copied from find_extract_cells.py, see that code for more details.
orig_f_name = re.search('(?<=CellCounter_).*(?=\\-Downsampled)', os.path.split(xml_)[1]).group() + '.nd2'
search_dir = '/'.join(os.path.split(xml_)[0].split('/')[:-1])
files_found = [os.path.join(root, f) for (root, dirs, files) in os.walk(search_dir) for f in files if f == orig_f_name]
if len(files_found) == 1:
fullres_image = files_found[0]
else:
print "Could not find fullres image."
raise ValueError('Found 0 or >1 matching file')
#Generate the original inputs that were passed to extract_cells
input_item = (re.search('(?<=_).*',orig_f_name[:-4]).group(), {'fullres':fullres_image, 'counter':parse_cellcounter_to_dict(xml_)})
input_dict = input_item
types_of_interest={'7':'tdtom','8':'gfp'}
#Copied from the "Extract Cells", recovering positional info and writing to disk instead of extracting cell -> small image.
anim, vals = input_dict
#Loop through Cells and Annotate.
for cell_type, cell_label in types_of_interest.iteritems():
print 'Working on cell_type ' + cell_label
for i in range(len(vals['counter'][cell_type])):
print 'Iteration ' + str(i+1) + '/' + str(len(vals['counter'][cell_type]))
                #Convert downsampled px coordinates -> calibrated positions (um)
x_full_px = vals['counter'][cell_type][i][0] * vals['counter']['x_cal'] #in um
y_full_px = vals['counter'][cell_type][i][1] * vals['counter']['y_cal'] #in um
#Write Information
out_title = '_'.join([anim, cell_label, str(i)])
fout.write('\n' + '\t'.join([out_title, str(x_full_px), str(y_full_px)]))
#Final tsv of form cell_label,x,y. | 1.9375 | 2 |
MAEnv/env_SingleCatchPigs/test_SingleCatchPigs.py | Abluceli/Multi-agent-Reinforcement-Learning-Algorithms | 5 | 3590 | <reponame>Abluceli/Multi-agent-Reinforcement-Learning-Algorithms
from env_SingleCatchPigs import EnvSingleCatchPigs
import random
env = EnvSingleCatchPigs(7)
max_iter = 10000
env.set_agent_at([2, 2], 0)
env.set_pig_at([4, 4], 0)
for i in range(max_iter):
print("iter= ", i)
env.render()
action = random.randint(0, 4)
print('action is', action)
reward, done = env.step(action)
print('reward', reward, 'done', done)
if reward > 0:
print('catch the pig', reward, done)
| 3.03125 | 3 |
eust/tables/data.py | rasmuse/eust | 1 | 3591 | <filename>eust/tables/data.py
# -*- coding: utf-8 -*-
import re
import gzip
import pandas as pd
import numpy as np
from eust.core import _download_file, conf
_DIMENSION_NAME_RE = re.compile(r"^[a-z_0-9]+$")
_YEAR_RE = re.compile(r"^(1|2)[0-9]{3}$")
def _is_valid_dimension_name(s: str) -> bool:
return bool(_DIMENSION_NAME_RE.match(s))
def _split_values_flags(series: pd.Series) -> pd.DataFrame:
split = series.str.split(" ")
df = pd.DataFrame(
{
"value": split.apply(lambda l: l[0] if l else None),
"flag": split.apply(lambda l: l[1] if l and len(l) > 1 else None),
}
)
return df
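# Editor's sketch (illustrative only, not part of eust): Eurostat TSV cells
# pack an observation and an optional flag into one string, e.g. "12.3 p"
# (provisional) or ":" (missing). _split_values_flags separates the two:
#
#     _split_values_flags(pd.Series(["12.3 p", ":"]))
#     #    value  flag
#     # 0   12.3     p
#     # 1      :  None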
def _set_multiindex_dtype(index, level, type_):
index_df = index.to_frame()
index_df[level] = index_df[level].astype(type_)
new_index = index_df.set_index(index.names).index
return new_index
def _read_tsv(path_or_buffer) -> pd.DataFrame:
d = pd.read_csv(path_or_buffer, sep="\t", header=0, dtype=str)
top_left_cell = d.columns[0]
row_dimension_names, header_dimension_name = top_left_cell.split("\\")
row_dimension_names = row_dimension_names.split(",")
index_data = d[top_left_cell]
del d[top_left_cell]
assert len(set(index_data)) == len(index_data) # no duplicates
assert len(row_dimension_names) >= 1
d.columns.name = header_dimension_name
index_data = index_data.apply(lambda s: s.split(","))
d.index = pd.MultiIndex.from_arrays(
list(zip(*index_data)), names=row_dimension_names,
)
# cannot handle multidimensional column labels
d = d.stack()
assert set(d.apply(type)) == {str}
assert isinstance(d, pd.Series), d.columns
assert all(map(_is_valid_dimension_name, d.index.names))
d.index.set_levels(
[level.str.strip() for level in d.index.levels], inplace=True
)
d = _split_values_flags(d)
d.loc[d["value"] == ":", "value"] = np.nan
d["value"] = d["value"].astype(float)
if "time" in d.index.names:
time_strings = d.index.unique("time")
matches_year = (_YEAR_RE.match(s) for s in time_strings)
if all(matches_year):
d.index = _set_multiindex_dtype(d.index, "time", int)
d = d.sort_index()
return d
_TSV_GZ_FILENAME = "data.tsv.gz"
_HDF_FILENAME = "data.h5"
_HDF_TABLE_PATH = "eurostat_table"
def _read_tsv_gz(path_or_buffer) -> pd.DataFrame:
with gzip.open(path_or_buffer, "rb") as f:
return _read_tsv(f)
def _download_tsv_gz(url, dst_dir):
path = dst_dir / _TSV_GZ_FILENAME
_download_file(url, path)
def _read(the_dir):
hdf_path = the_dir / _HDF_FILENAME
tsv_gz_path = the_dir / _TSV_GZ_FILENAME
try:
data = pd.read_hdf(hdf_path, _HDF_TABLE_PATH)
except FileNotFoundError:
data = _read_tsv_gz(tsv_gz_path)
data.to_hdf(
hdf_path,
_HDF_TABLE_PATH,
complevel=conf["hdf_complevel"],
complib=conf["hdf_complib"],
)
# Replace empty flags by None (issue #3)
#
# Doing it at this point so that the null flag is saved in the HDF
# file as a string, for performance reasons.
# This is a pandas PerformanceWarning:
# "your performance may suffer as PyTables will pickle object types
# that it cannot map directly to c-types
# [inferred_type->mixed,key->block0_values] [items->['flag']]"
data["flag"] = data["flag"].replace({"": None})
return data
| 2.65625 | 3 |
utils.py | fatemehtd/Echo-SyncNet | 6 | 3592 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from config import CONFIG
import json
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top
import io
import math
import os
import time
from absl import flags
from absl import logging
from easydict import EasyDict
import matplotlib
matplotlib.use('Agg')
FLAGS = flags.FLAGS
def visualize_batch(data, global_step, batch_size, num_steps):
"""Visualizes a batch."""
frames = data['frames']
frames_list = tf.unstack(frames, num=num_steps, axis=1)
frames_summaries = tf.concat(frames_list, axis=2)
batch_list = tf.split(frames_summaries, batch_size, axis=0)
batch_summaries = tf.concat(batch_list, axis=1)
tf.summary.image('train_batch', batch_summaries, step=global_step)
def visualize_nearest_neighbours(model, data, global_step, batch_size,
num_steps, num_frames_per_step, split):
"""Visualize nearest neighbours in embedding space."""
# Set learning_phase to False to use models in inference mode.
tf.keras.backend.set_learning_phase(0)
cnn = model['cnn']
emb = model['emb']
if 'tcn' in CONFIG.TRAINING_ALGO:
cnn_feats = get_cnn_feats(
cnn, data, training=False, num_steps=2 * num_steps)
emb_feats = emb(cnn_feats, 2 * num_steps)
emb_feats = tf.stack(
tf.split(emb_feats, 2 * num_steps, axis=0)[::2], axis=1)
else:
cnn_feats = get_cnn_feats(cnn, data, training=False)
emb_feats = emb(cnn_feats, num_steps)
emb_feats = tf.stack(tf.split(emb_feats, num_steps, axis=0), axis=1)
query_feats = emb_feats[0]
if CONFIG.OPTICALFLOW:
frames = data['video_frames']
else:
frames = data['frames']
image_list = tf.unstack(frames, num=batch_size, axis=0)
if 'tcn' in CONFIG.TRAINING_ALGO:
im_list = [image_list[0]
[num_frames_per_step - 1::num_frames_per_step][::2]]
else:
im_list = [image_list[0][num_frames_per_step - 1::num_frames_per_step]]
sim_matrix = np.zeros(
(batch_size-1, num_steps, num_steps), dtype=np.float32)
for i in range(1, batch_size):
candidate_feats = emb_feats[i]
if 'tcn' in CONFIG.TRAINING_ALGO:
img_list = tf.unstack(image_list[i], num=2 * num_steps * num_frames_per_step,
axis=0)[num_frames_per_step - 1::num_frames_per_step][::2]
else:
img_list = tf.unstack(image_list[i], num=num_steps * num_frames_per_step,
axis=0)[num_frames_per_step - 1::num_frames_per_step]
nn_img_list = []
for j in range(num_steps):
curr_query_feats = tf.tile(query_feats[j:j+1], [num_steps, 1])
mean_squared_distance = tf.reduce_mean(
tf.math.squared_difference(curr_query_feats, candidate_feats), axis=1)
sim_matrix[i-1, j] = softmax(-1.0 * mean_squared_distance)
nn_img_list.append(img_list[tf.argmin(mean_squared_distance)])
nn_img = tf.stack(nn_img_list, axis=0)
im_list.append(nn_img)
def vstack(im):
return tf.concat(tf.unstack(im, num=num_steps), axis=1)
summary_im = tf.expand_dims(tf.concat([vstack(im) for im in im_list],
axis=0), axis=0)
tf.summary.image('%s/nn' % split, summary_im, step=global_step)
# Convert sim_matrix to float32 as summary_image doesn't take float64
sim_matrix = sim_matrix.astype(np.float32)
tf.summary.image('%s/similarity_matrix' % split,
np.expand_dims(sim_matrix, axis=3), step=global_step)
def softmax(w, t=1.0):
e = np.exp(np.array(w) / t)
dist = e / np.sum(e)
return dist
def random_choice_noreplace(m, n, axis=-1):
# Generate m random permuations of range (0, n)
# NumPy version: np.random.rand(m,n).argsort(axis=axis)
return tf.cast(tf.argsort(tf.random.uniform((m, n)), axis=axis), tf.int64)
def gen_cycles(num_cycles, batch_size, cycle_len):
"""Generate cycles for alignment."""
random_cycles = random_choice_noreplace(
num_cycles, batch_size)[:, :cycle_len]
return random_cycles
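# Editor's note (hedged illustration, not part of the original code): each row
# of the result is a random permutation of video indices truncated to
# cycle_len, e.g. gen_cycles(num_cycles=2, batch_size=4, cycle_len=3) might
# return [[2, 0, 3], [1, 3, 0]] -- two cycles, each visiting three distinct
# videos from the batch.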
def get_warmup_lr(lr, global_step, lr_params):
"""Returns learning rate during warm up phase."""
if lr_params.NUM_WARMUP_STEPS > 0:
global_steps_int = tf.cast(global_step, tf.int32)
warmup_steps_int = tf.constant(
lr_params.NUM_WARMUP_STEPS, dtype=tf.int32)
global_steps_float = tf.cast(global_steps_int, tf.float32)
warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
warmup_percent_done = global_steps_float / warmup_steps_float
warmup_lr = lr_params.INITIAL_LR * warmup_percent_done
is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32)
lr = (1.0 - is_warmup) * lr + is_warmup * warmup_lr
return lr
# Minimally adapted from Tensorflow object_detection code.
def manual_stepping(global_step, boundaries, rates):
boundaries = [0] + boundaries
num_boundaries = len(boundaries)
rate_index = tf.reduce_max(
tf.where(
tf.greater_equal(global_step, boundaries),
list(range(num_boundaries)), [0] * num_boundaries))
return tf.reduce_sum(rates * tf.one_hot(rate_index, depth=num_boundaries))
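# Editor's sketch (illustrative, not part of the original code): with
# boundaries=[100, 200] and rates=[1e-3, 1e-4, 1e-5], steps 0-99 yield 1e-3,
# steps 100-199 yield 1e-4, and steps >= 200 yield 1e-5, e.g.
#   manual_stepping(150, [100, 200], [1e-3, 1e-4, 1e-5]) -> 1e-4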
def get_lr_fn(optimizer_config):
"""Returns function that provides current learning rate based on config.
NOTE: This returns a function as in Eager we need to call assign to update
the learning rate.
Args:
optimizer_config: EasyDict, contains params required to initialize the
learning rate and the learning rate decay function.
Returns:
lr_fn: function, this can be called to return the current learning rate
based on the provided config.
Raises:
ValueError: in case invalid params have been passed in the config.
"""
lr_params = optimizer_config.LR
# pylint: disable=g-long-lambda
if lr_params.DECAY_TYPE == 'exp_decay':
def lr_fn(lr, global_step): return tf.train.exponential_decay(
lr,
global_step,
lr_params.EXP_DECAY_STEPS,
lr_params.EXP_DECAY_RATE,
staircase=True)()
elif lr_params.DECAY_TYPE == 'manual':
lr_step_boundaries = [int(x)
for x in lr_params.MANUAL_LR_STEP_BOUNDARIES]
f = lr_params.MANUAL_LR_DECAY_RATE
learning_rate_sequence = [(lr_params.INITIAL_LR) * f**p
for p in range(len(lr_step_boundaries) + 1)]
def lr_fn(lr, global_step): return manual_stepping(
global_step, lr_step_boundaries, learning_rate_sequence)
elif lr_params.DECAY_TYPE == 'fixed':
def lr_fn(lr, global_step): return lr_params.INITIAL_LR
elif lr_params.DECAY_TYPE == 'poly':
def lr_fn(lr, global_step): return tf.train.polynomial_decay(
lr,
global_step,
CONFIG.TRAIN.MAX_ITERS,
end_learning_rate=0.0,
power=1.0,
cycle=False)
else:
        raise ValueError('Learning rate decay type %s not supported. Only the '
                         'following decay types are supported: fixed, '
                         'exp_decay, manual, and poly.' % lr_params.DECAY_TYPE)
return (lambda lr, global_step: get_warmup_lr(lr_fn(lr, global_step),
global_step, lr_params))
def get_optimizer(optimizer_config, learning_rate):
"""Returns optimizer based on config and learning rate."""
if optimizer_config.TYPE == 'AdamOptimizer':
opt = tf.keras.optimizers.Adam(learning_rate=learning_rate)
elif optimizer_config.TYPE == 'MomentumOptimizer':
opt = tf.keras.optimizers.SGD(
learning_rate=learning_rate, momentum=0.9)
else:
        raise ValueError('Optimizer %s not supported. Only the following '
                         'optimizers are supported: AdamOptimizer, '
                         'MomentumOptimizer.' % optimizer_config.TYPE)
return opt
def get_lr_opt_global_step():
"""Intializes learning rate, optimizer and global step."""
optimizer = get_optimizer(CONFIG.OPTIMIZER, CONFIG.OPTIMIZER.LR.INITIAL_LR)
global_step = optimizer.iterations
learning_rate = optimizer.learning_rate
return learning_rate, optimizer, global_step
def create_ckpt(logdir, restore=False, **ckpt_objects):
# Since model is a dict we can insert multiple modular networks in this dict.
checkpoint = tf.train.Checkpoint(**ckpt_objects)
ckpt_manager = tf.train.CheckpointManager(
checkpoint,
directory=logdir,
max_to_keep=10,
keep_checkpoint_every_n_hours=1)
status = checkpoint.restore(
ckpt_manager.latest_checkpoint) if restore else -1
return ckpt_manager, status, checkpoint
def restore_ckpt(logdir, **ckpt_objects):
"""Create and restore checkpoint (if one exists on the path)."""
# Instantiate checkpoint and restore from any pre-existing checkpoint.
# Since model is a dict we can insert multiple modular networks in this dict.
checkpoint = tf.train.Checkpoint(**ckpt_objects)
ckpt_manager = tf.train.CheckpointManager(
checkpoint,
directory=logdir,
max_to_keep=10,
keep_checkpoint_every_n_hours=1)
status = checkpoint.restore(ckpt_manager.latest_checkpoint)
return ckpt_manager, status, checkpoint
def to_dict(config):
if isinstance(config, list):
return [to_dict(c) for c in config]
elif isinstance(config, EasyDict):
return dict([(k, to_dict(v)) for k, v in config.items()])
else:
return config
def setup_train_dir(logdir, overwrite=False, force_train=True):
"""Setups directory for training."""
tf.io.gfile.makedirs(logdir)
config_path = os.path.join(logdir, 'config.json')
if not os.path.exists(config_path) or overwrite:
logging.info(
'Using the existing passed in config as no config.json file exists in '
'%s', logdir)
with tf.io.gfile.GFile(config_path, 'w') as config_file:
config = dict([(k, to_dict(v)) for k, v in CONFIG.items()])
json.dump(config, config_file, sort_keys=True, indent=4)
else:
logging.info(
'Using config from config.json that exists in %s.', logdir)
with tf.io.gfile.GFile(config_path, 'r') as config_file:
config_dict = json.load(config_file)
CONFIG.update(config_dict)
train_logs_dir = os.path.join(logdir, 'train.logs')
if os.path.exists(train_logs_dir) and not force_train:
raise ValueError('You might be overwriting a directory that already '
'has train_logs. Please provide a new logdir name in '
'config or pass --force_train while launching script.')
tf.io.gfile.makedirs(train_logs_dir)
def setup_eval_dir(logdir, config_timeout_seconds=1):
"""Setups directory for evaluation."""
tf.io.gfile.makedirs(logdir)
tf.io.gfile.makedirs(os.path.join(logdir, 'eval_logs'))
config_path = os.path.join(logdir, 'config.json')
while not tf.io.gfile.exists(config_path):
logging.info('Waiting for config to exist. Going to sleep '
' %s for secs.', config_timeout_seconds)
time.sleep(config_timeout_seconds)
while True:
with tf.io.gfile.GFile(config_path, 'r') as config_file:
config_dict = json.load(config_file)
if config_dict is None:
time.sleep(config_timeout_seconds)
else:
break
CONFIG.update(config_dict)
def get_data(iterator):
"""Return a data dict which contains all the requested sequences."""
data = iterator.get_next()
return data, data['chosen_steps'], data['seq_lens']
@tf.function
def get_cnn_feats(cnn, data, training, num_steps=None):
"""Passes data through base CNN."""
if num_steps is None:
if training:
num_steps = CONFIG.TRAIN.NUM_FRAMES * CONFIG.DATA.NUM_STEPS
else:
num_steps = CONFIG.EVAL.NUM_FRAMES * CONFIG.DATA.NUM_STEPS
cnn.num_steps = num_steps
cnn_feats = cnn(data['frames'])
return cnn_feats
def get_context_steps(step):
num_steps = CONFIG.DATA.NUM_STEPS
stride = CONFIG.DATA.FRAME_STRIDE
# We don't want to see the future.
steps = np.arange(step - (num_steps - 1) * stride, step + stride, stride)
return steps
def get_indices(curr_idx, num_steps, seq_len):
steps = range(curr_idx, curr_idx + num_steps)
    single_steps = np.concatenate([get_context_steps(step) for step in steps])
single_steps = np.maximum(0, single_steps)
single_steps = np.minimum(seq_len, single_steps)
return single_steps
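# Editor's sketch (hedged; assumes CONFIG.DATA.NUM_STEPS = 3 and
# CONFIG.DATA.FRAME_STRIDE = 1, which need not be the shipped defaults):
# get_context_steps(10) -> [8, 9, 10], so
# get_indices(curr_idx=10, num_steps=2, seq_len=20)
#   -> [8, 9, 10, 9, 10, 11]  (context windows for frames 10 and 11,
#                              clipped to the range [0, seq_len])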
def get_embeddings_dataset(model, iterator, frames_per_batch,
keep_data=False, optical_flow=False, keep_labels=True,
max_embs=None, callbacks=[]):
"""Get embeddings from a one epoch iterator."""
keep_labels = keep_labels and CONFIG.DATA.FRAME_LABELS
num_frames_per_step = CONFIG.DATA.NUM_STEPS
cnn = model['cnn']
emb = model['emb']
embs_list = []
labels_list = []
steps_list = []
seq_lens_list = []
names_list = []
seq_labels_list = []
if keep_data:
frames_list = []
if optical_flow:
frame_original_list = []
n = 0
def cond(n):
if max_embs is None:
return True
else:
return n < max_embs
# Make Recurrent Layers stateful, set batch size.
# We do this as we are embedding the whole sequence and that can take
# more than one batch to be passed and we don't want to automatically
# reset hidden states after each batch.
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':
for gru_layer in emb.gru_layers:
gru_layer.stateful = True
gru_layer.input_spec[0].shape = [1, ]
while cond(n):
try:
print(n)
embs = []
labels = []
steps = []
seq_lens = []
names = []
seq_labels = []
if keep_data:
frames = []
if optical_flow:
frame_original = []
# Reset GRU states for each video.
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':
for gru_layer in emb.gru_layers:
gru_layer.reset_states()
data, chosen_steps, seq_len = get_data(iterator)
seq_len = seq_len.numpy()[0]
num_batches = int(math.ceil(float(seq_len)/frames_per_batch))
for i in range(num_batches):
if (i + 1) * frames_per_batch > seq_len:
num_steps = seq_len - i * frames_per_batch
else:
num_steps = frames_per_batch
curr_idx = i * frames_per_batch
curr_data = {}
for k, v in data.items():
# Need to do this as some modalities might not exist.
if len(v.shape) > 1 and v.shape[1] != 0:
idxes = get_indices(curr_idx, num_steps, seq_len)
curr_data[k] = tf.gather(v, idxes, axis=1)
else:
curr_data[k] = v
cnn_feats = get_cnn_feats(cnn, curr_data,
num_steps=num_frames_per_step * num_steps,
training=False)
emb_feats = emb(cnn_feats, num_steps)
logging.debug('On sequence number %d, frames embedded %d', n,
curr_idx + num_steps)
# np.save(tf.io.gfile.GFile('/air/team/saman/test_weights_old.npy', 'w'), cnn.weights[0].numpy())
# np.save(tf.io.gfile.GFile('/air/team/saman/test_batch_old.npy', 'w'), curr_data["frames"])
# np.save(tf.io.gfile.GFile('/air/team/saman/test_cnn_old.npy', 'w'), cnn_feats.numpy())
# np.save(tf.io.gfile.GFile('/air/team/saman/test_emb_old.npy', 'w'), emb_feats.numpy())
embs.append(emb_feats.numpy())
for f in callbacks:
f(np.concatenate(embs), data, chosen_steps, seq_len)
steps.append(chosen_steps.numpy()[0])
seq_lens.append(seq_len * [seq_len])
all_labels = data['frame_labels'].numpy()[0]
name = data['name'].numpy()[0]
names.append(seq_len * [name])
seq_label = data['seq_labels'].numpy()[0]
seq_labels.append(seq_len * [seq_label])
labels.append(all_labels)
embs = np.concatenate(embs, axis=0)
labels = np.concatenate(labels, axis=0)
steps = np.concatenate(steps, axis=0)
seq_lens = np.concatenate(seq_lens, axis=0)
names = np.concatenate(names, axis=0)
seq_labels = np.concatenate(seq_labels, axis=0)
if keep_data:
frames.append(data['frames'].numpy()[0])
frames = np.concatenate(frames, axis=0)
if optical_flow:
frame_original.append(data['video_frames'].numpy()[0])
frame_original = np.concatenate(frame_original, axis=0)
if keep_labels:
labels = labels[~np.isnan(embs).any(axis=1)]
assert len(embs) == len(labels)
seq_labels = seq_labels[~np.isnan(embs).any(axis=1)]
names = names[~np.isnan(embs).any(axis=1)]
seq_lens = seq_lens[~np.isnan(embs).any(axis=1)]
steps = steps[~np.isnan(embs).any(axis=1)]
if keep_data:
frames = frames[~np.isnan(embs).any(axis=1)]
if optical_flow:
frame_original = frame_original[~np.isnan(embs).any(axis=1)]
embs = embs[~np.isnan(embs).any(axis=1)]
assert len(embs) == len(seq_lens)
assert len(embs) == len(steps)
assert len(names) == len(steps)
embs_list.append(embs)
if keep_labels:
labels_list.append(labels)
seq_labels_list.append(seq_labels)
steps_list.append(steps)
seq_lens_list.append(seq_lens)
names_list.append(names)
if keep_data:
frames_list.append(frames)
if optical_flow:
frame_original_list.append(frame_original)
n += 1
except tf.errors.OutOfRangeError:
logging.info('Finished embedding the dataset.')
break
dataset = {'embs': embs_list,
'seq_lens': seq_lens_list,
'steps': steps_list,
'names': names_list,
'seq_labels': seq_labels_list}
if keep_data:
dataset['frames'] = frames_list
if optical_flow:
dataset['frames_original'] = frame_original_list
if keep_labels:
dataset['labels'] = labels_list
# Reset statefulness to recurrent layers for other evaluation tasks.
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru':
for gru_layer in emb.gru_layers:
gru_layer.stateful = False
return dataset
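# Added note (sketch): the dict returned above has one list entry per embedded
# video, e.g.
#
#   dataset = get_embeddings_dataset(model, iterator, frames_per_batch=40)
#   dataset['embs'][0]       # (num_frames, embedding_dim) array for the first video
#   dataset['steps'][0]      # frame indices that were embedded
#   dataset['seq_lens'][0]   # sequence length repeated once per frame
#   dataset['names'][0]      # video name repeated once per frame
#
# 'frames', 'frames_original' and 'labels' are present only when keep_data,
# optical_flow and keep_labels are set, respectively.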
def gen_plot(x, y):
"""Create a pyplot, save to buffer and return TB compatible image."""
plt.figure()
plt.plot(x, y)
plt.title('Val Accuracy')
plt.ylim(0, 1)
plt.tight_layout()
buf = io.BytesIO()
plt.savefig(buf, format='png')
buf.seek(0)
# Convert PNG buffer to TF image
image = tf.image.decode_png(buf.getvalue(), channels=4)
# Add the batch dimension
image = tf.expand_dims(image, 0)
return image
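# Added sketch: gen_plot returns a [1, H, W, 4] uint8 tensor, so it can be logged
# directly with a TensorBoard summary writer (`writer` and `global_step` are
# assumed to exist in the surrounding training loop):
#
#   with writer.as_default():
#       tf.summary.image('val_accuracy', gen_plot(steps, accuracies), step=global_step)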
class Stopwatch(object):
"""Simple timer for measuring elapsed time."""
def __init__(self):
self.reset()
def elapsed(self):
return time.time() - self.time
def done(self, target_interval):
return self.elapsed() >= target_interval
def reset(self):
self.time = time.time()
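# --- Added usage sketch (not part of the original file) ---------------------
# Stopwatch is a simple polling timer: check done() against a target interval
# and reset() once the timed work has been triggered.
def _example_stopwatch_usage(interval_seconds=60.0):
    """Hedged sketch of the intended Stopwatch polling pattern."""
    timer = Stopwatch()
    if timer.done(interval_seconds):
        # e.g. run a periodic evaluation here, then start timing again.
        timer.reset()
    return timer.elapsed()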
def set_learning_phase(f):
"""Sets the correct learning phase before calling function f."""
def wrapper(*args, **kwargs):
"""Calls the function f after setting proper learning phase."""
if 'training' not in kwargs:
raise ValueError('Functions decorated with set_learning_phase must be '
'called with a training keyword argument.')
training = kwargs['training']
if training:
# Set learning_phase to True to use models in training mode.
tf.keras.backend.set_learning_phase(1)
else:
# Set learning_phase to False to use models in inference mode.
tf.keras.backend.set_learning_phase(0)
return f(*args, **kwargs)
return wrapper
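# --- Added usage sketch (not part of the original file) ---------------------
# Functions wrapped with set_learning_phase must be called with an explicit
# `training` keyword argument; the decorator then toggles the Keras backend
# learning phase before making the call.
def _example_learning_phase_usage():
    """Hedged sketch of calling a decorated function; `embed` is illustrative only."""
    @set_learning_phase
    def embed(frames, training):
        return frames  # stand-in for a real model call
    return embed(None, training=False)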
def load_config(config_path):
config = None
if os.path.exists(config_path):
with open(config_path) as f:
config = json.load(f)
assert config is not None, "config file is not provided or is corrupted"
return config
def prepare_gpu(ind=-1):
ind = int(ind)
GPUS = tf.config.experimental.list_physical_devices('GPU')
if GPUS:
if ind > -1:
tf.config.experimental.set_visible_devices(GPUS[ind], 'GPU')
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in GPUS:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
logging.info([len(GPUS), "Physical GPUs,", len(logical_gpus),
"Logical GPUs"])
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
logging.info(e)
os.environ["CUDA_VISIBLE_DEVICES"] = str(ind)
| 2.3125 | 2 |
UnityPy/classes/Sprite.py | dblack2056/UnityPy | 0 | 3593 | from enum import IntEnum
from .Mesh import BoneWeights4, SubMesh, VertexData
from .NamedObject import NamedObject
from .PPtr import PPtr, save_ptr
from ..export import SpriteHelper
from ..enums import SpriteMeshType
from ..streams import EndianBinaryWriter
class Sprite(NamedObject):
@property
def image(self):
return SpriteHelper.get_image_from_sprite(self)
def __init__(self, reader):
super().__init__(reader=reader)
version = self.version
self.m_Rect = reader.read_rectangle_f()
self.m_Offset = reader.read_vector2()
if version >= (4, 5): # 4.5 and up
self.m_Border = reader.read_vector4()
self.m_PixelsToUnits = reader.read_float()
if version >= (5, 4, 2) or (
version >= (5, 4, 1, 3) and self.build_type.IsPatch
): # 5.4.1p3 and up
self.m_Pivot = reader.read_vector2()
self.m_Extrude = reader.read_u_int()
if version >= (5, 3): # 5.3 and up
self.m_IsPolygon = reader.read_boolean()
reader.align_stream()
if version >= (2017,): # 2017 and up
first = reader.read_bytes(16) # GUID
second = reader.read_long()
self.m_RenderDataKey = (first, second)
self.m_AtlasTags = reader.read_string_array()
self.m_SpriteAtlas = PPtr(reader) # SpriteAtlas
self.m_RD = SpriteRenderData(reader)
if version >= (2017,): # 2017 and up
m_PhysicsShapeSize = reader.read_int()
self.m_PhysicsShape = [
reader.read_vector2_array() for _ in range(m_PhysicsShapeSize)
]
if version >= (2018,): # 2018 and up
m_BonesSize = reader.read_int()
self.m_Bones = [
reader.read_vector2_array() for _ in range(m_BonesSize)
]
def save(self, writer: EndianBinaryWriter = None):
if writer is None:
writer = EndianBinaryWriter(endian=self.reader.endian)
version = self.version
super().save(writer)
writer.write_rectangle_f(self.m_Rect)
writer.write_vector2(self.m_Offset)
if version >= (4, 5): # 4.5 and up
writer.write_vector4(self.m_Border)
writer.write_float(self.m_PixelsToUnits)
if version >= (5, 4, 2) or (
version >= (5, 4, 1, 3) and self.build_type.IsPatch
): # 5.4.1p3 and up
writer.write_vector2(self.m_Pivot)
writer.write_u_int(self.m_Extrude)
if version >= (5, 3): # 5.3 and up
writer.write_boolean(self.m_IsPolygon)
writer.align_stream()
if version >= (2017,): # 2017 and up
writer.write_bytes(self.m_RenderDataKey[0]) # GUID
writer.write_long(self.m_RenderDataKey[1])
writer.write_string_array(self.m_AtlasTags)
self.m_SpriteAtlas.save(writer) # SpriteAtlas
self.m_RD.save(writer, version)
if version >= (2017,): # 2017 and up
writer.write_int(len(self.m_PhysicsShape))
for phys in self.m_PhysicsShape:
writer.write_vector2_array(phys)
if version >= (2018,): # 2018 and up
writer.write_int(len(self.m_Bones))
for bone in self.m_Bones:
writer.write_vector2_array(bone)
self.set_raw_data(writer.bytes)
class SecondarySpriteTexture:
def __init__(self, reader):
self.texture = PPtr(reader) # Texture2D
self.name = reader.read_string_to_null()
def save(self, writer):
self.texture.save(writer)
writer.write_string_to_null(self.name)
class SpritePackingRotation(IntEnum):
kSPRNone = (0,)
kSPRFlipHorizontal = (1,)
kSPRFlipVertical = (2,)
kSPRRotate180 = (3,)
kSPRRotate90 = 4
class SpritePackingMode(IntEnum):
kSPMTight = (0,)
kSPMRectangle = 1
class SpriteSettings:
def __init__(self, reader):
self.value = reader.read_u_int()
@property
def value(self):
return self.m_settingsRaw
@value.setter
def value(self, _value):
self.m_settingsRaw = _value
self.packed = self.m_settingsRaw & 1 # 1
self.packingMode = SpritePackingMode((self.m_settingsRaw >> 1) & 1) # 1
self.packingRotation = SpritePackingRotation((self.m_settingsRaw >> 2) & 0xF) # 4
self.meshType = SpriteMeshType((self.m_settingsRaw >> 6) & 1) # 1
# rest of the bits are reserved
def save(self, writer):
writer.write_u_int(self.m_settingsRaw)
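# Added note: the bit layout decoded by SpriteSettings above is
#   bit 0     -> packed flag
#   bit 1     -> packing mode (SpritePackingMode)
#   bits 2-5  -> packing rotation (SpritePackingRotation)
#   bit 6     -> mesh type (SpriteMeshType)
# For example, a raw value of 3 (0b011) decodes to packed=1,
# packingMode=kSPMRectangle, packingRotation=kSPRNone and meshType=SpriteMeshType(0).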
class SpriteVertex:
def __init__(self, reader):
version = reader.version
self.pos = reader.read_vector3()
if version[:2] <= (4, 3): # 4.3 and down
self.uv = reader.read_vector2()
def save(self, writer, version):
writer.write_vector3(self.pos)
if version[:2] <= (4, 3): # 4.3 and down
writer.write_vector2(self.uv)
class SpriteRenderData:
def __init__(self, reader):
version = reader.version
self.texture = PPtr(reader) # Texture2D
if version >= (5, 2): # 5.2 and up
self.alphaTexture = PPtr(reader) # Texture2D
if version >= (2019,): # 2019 and up
secondaryTexturesSize = reader.read_int()
self.secondaryTextures = [
SecondarySpriteTexture(reader) for _ in range(secondaryTexturesSize)
]
if version >= (5, 6): # 5.6 and up
SubMeshesSize = reader.read_int()
self.m_SubMeshes = [SubMesh(reader) for _ in range(SubMeshesSize)]
IndexBufferSize = reader.read_int()
self.m_IndexBuffer = reader.read_bytes(IndexBufferSize)
reader.align_stream()
self.m_VertexData = VertexData(reader)
else:
verticesSize = reader.read_int()
self.vertices = [SpriteVertex(reader) for _ in range(verticesSize)]
self.indices = reader.read_u_short_array()
reader.align_stream()
if version >= (2018,): # 2018 and up
self.m_Bindpose = reader.read_matrix_array()
if version < (2018, 2): # 2018.2 down
self.m_SourceSkinSize = reader.read_int()
self.m_SourceSkin = [BoneWeights4(reader)]
self.textureRect = reader.read_rectangle_f()
self.textureRectOffset = reader.read_vector2()
if version >= (5, 6): # 5.6 and up
self.atlasRectOffset = reader.read_vector2()
self.settingsRaw = SpriteSettings(reader)
if version >= (4, 5): # 4.5 and up
self.uvTransform = reader.read_vector4()
if version >= (2017,): # 2017 and up
self.downscaleMultiplier = reader.read_float()
def save(self, writer, version):
self.texture.save(writer) # Texture2D
if version >= (5, 2): # 5.2 and up
self.alphaTexture.save(writer) # Texture2D
if version >= (2019,): # 2019 and up
writer.write_int(len(self.secondaryTextures))
for tex in self.secondaryTextures:
tex.save(writer)
if version >= (5, 6): # 5.6 and up
writer.write_int(len(self.m_SubMeshes))
for mesh in self.m_SubMeshes:
mesh.save(writer, version)
writer.write_int(len(self.m_IndexBuffer))
writer.write_bytes(self.m_IndexBuffer)
writer.align_stream()
self.m_VertexData.save(writer, version)
else:
writer.write_int(len(self.vertices))
for vertex in self.vertices:
vertex.save(writer, version)
writer.write_u_short_array(self.indices)
writer.align_stream()
if version >= (2018,): # 2018 and up
writer.write_matrix_array(self.m_Bindpose)
if version < (2018, 2): # 2018.2 down
writer.write_int(self.m_SourceSkinSize)
self.m_SourceSkin[0].save(writer)
writer.write_rectangle_f(self.textureRect)
writer.write_vector2(self.textureRectOffset)
if version >= (5, 6): # 5.6 and up
writer.write_vector2(self.atlasRectOffset)
self.settingsRaw.save(writer)
if version >= (4, 5): # 4.5 and up
writer.write_vector4(self.uvTransform)
if version >= (2017,): # 2017 and up
writer.write_float(self.downscaleMultiplier)
| 2.265625 | 2 |
eazy/filters.py | albertfxwang/eazy-py | 0 | 3594 | <filename>eazy/filters.py
import numpy as np
import os
from astropy.table import Table
from . import utils
__all__ = ["FilterDefinition", "FilterFile", "ParamFilter"]
VEGA_FILE = os.path.join(utils.path_to_eazy_data(),
'alpha_lyr_stis_008.fits')
VEGA = Table.read(VEGA_FILE)
for c in VEGA.colnames:
VEGA[c] = VEGA[c].astype(float)
class FilterDefinition:
def __init__(self, name=None, wave=None, throughput=None, bp=None):
"""
Bandpass object
Parameters
----------
name : str
Label name
wave : array
Wavelength array, in `astropy.units.Angstrom`.
throughput : array
Throughput, arbitrary normalization
bp : optional, `pysynphot.obsbandpass` object
`pysynphot` filter bandpass
"""
self.name = name
self.wave = wave
self.throughput = throughput
self.Aflux = 1.
# pysynphot Bandpass
if bp is not None:
self.wave = np.cast[np.double](bp.wave)
self.throughput = np.cast[np.double](bp.throughput)
self.name = bp.name
self.norm = 1.
if self.throughput is not None:
self.norm = np.trapz(self.throughput/self.wave, self.wave)
def __repr__(self):
return self.name.__repr__()
def __str__(self):
return self.name.__str__()
def get_extinction(self, EBV=0, Rv=3.1):
"""
Extinction factor
"""
import astropy.units as u
f99 = utils.GalacticExtinction(EBV=EBV, Rv=Rv)
self.Alambda = f99(self.wave)
self.Aflux = 10**(-0.4*self.Alambda)
def extinction_correction(self, EBV, Rv=3.1, mag=True, source_lam=None, source_flux=None):
"""
Get the MW extinction correction within the filter.
Optionally supply a source spectrum.
"""
import astropy.units as u
try:
import grizli.utils_c
interp = grizli.utils_c.interp.interp_conserve_c
except ImportError:
interp = utils.interp_conserve
if self.wave is None:
print('Filter not defined.')
return False
if source_flux is None:
source_flux = self.throughput*0.+1
else:
source_flux = interp(self.wave, source_lam, source_flux, left=0, right=0)
if (self.wave.min() < 910) | (self.wave.max() > 6.e4):
Alambda = 0.
else:
f99 = utils.GalacticExtinction(EBV=EBV, Rv=Rv)
Alambda = f99(self.wave)
delta = np.trapz(self.throughput*source_flux*10**(-0.4*Alambda), self.wave) / np.trapz(self.throughput*source_flux, self.wave)
if mag:
return 2.5*np.log10(delta)
else:
return 1./delta
@property
def ABVega(self):
"""
Compute AB-Vega conversion
"""
from astropy.constants import c
import astropy.units as u
try:
import grizli.utils_c
interp = grizli.utils_c.interp.interp_conserve_c
except ImportError:
interp = utils.interp_conserve
# Union of throughput and Vega spectrum arrays
full_x = np.hstack([self.wave, VEGA['WAVELENGTH']])
full_x = full_x[np.argsort(full_x)]
# Vega spectrum, units of f-lambda flux density, cgs
# Interpolate to wavelength grid, no extrapolation
vega_full = interp(full_x, VEGA['WAVELENGTH'], VEGA['FLUX'],
left=0, right=0)
thru_full = interp(full_x, self.wave, self.throughput,
left=0, right=0)
# AB = 0, same units
absp = 3631*1e-23*c.to(u.m/u.s).value*1.e10/full_x**2
# Integrate over the bandpass, flam dlam
num = np.trapz(vega_full*thru_full, full_x)
den = np.trapz(absp*thru_full, full_x)
return -2.5*np.log10(num/den)
@property
def pivot(self):
"""
Pivot wavelength
http://pysynphot.readthedocs.io/en/latest/properties.html
"""
integrator = np.trapz
num = integrator(self.wave, self.wave*self.throughput)
den = integrator(self.wave, self.throughput/self.wave)
pivot = np.sqrt(num/den)
return pivot
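# Added note: this computes the usual pivot wavelength,
#   lambda_pivot = sqrt( integral(T * lam dlam) / integral(T / lam dlam) ).
# np.trapz is called as trapz(y, x), so the integrals above are taken with
# respect to lam*T and T/lam; with the throughput falling to zero at both ends
# this is equivalent to the standard form via integration by parts. For a
# top-hat filter between l1 and l2 the result reduces to
# sqrt((l2**2 - l1**2) / (2 * log(l2 / l1))), a handy sanity check.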
@property
def equivwidth(self):
"""
Filter equivalent width
http://pysynphot.readthedocs.io/en/latest/properties.html
"""
return np.trapz(self.throughput, self.wave)
@property
def rectwidth(self):
"""
Filter rectangular width
http://pysynphot.readthedocs.io/en/latest/properties.html
"""
rect = self.equivwidth / self.throughput.max()
return rect
@property
def ctw95(self):
"""
95% cumulative throughput width
http://www.stsci.edu/hst/acs/analysis/bandwidths/#keywords
"""
dl = np.diff(self.wave)
filt = np.cumsum((self.wave*self.throughput)[1:]*dl)
ctw95 = np.interp([0.025, 0.975], filt/filt.max(), self.wave[1:])
return np.diff(ctw95)[0]
def for_filter_file(self, row_str='{i:6} {wave:.5e} {thru:.5e}'):
"""
Return a string that can be put in the EAZY filter file
"""
header = '{0} {1} lambda_c= {2:.4e} AB-Vega= {3:.3f} w95={4:.1f}'
N = len(self.wave)
lines = [header.format(N, self.name.split('lambda_c')[0],
self.pivot, self.ABVega, self.ctw95)]
lines += [row_str.format(i=i+1, wave=w, thru=t)
for i, (w, t) in enumerate(zip(self.wave, self.throughput))]
return '\n'.join(lines)
class FilterFile:
def __init__(self, file='FILTER.RES.latest', path='./'):
"""
Read a EAZY filter file.
.. plot::
:include-source:
import matplotlib.pyplot as plt
from eazy.filters import FilterFile
res = FilterFile(path=None)
print(len(res.filters))
bp = res[205]
print(bp)
fig, ax = plt.subplots(1,1,figsize=(6,4))
ax.plot(bp.wave, bp.throughput, label=bp.name.split()[0])
ax.set_xlabel('wavelength, Angstroms')
ax.set_ylabel('throughput')
ax.legend()
ax.grid()
fig.tight_layout(pad=0.5)
"""
if path is None:
file_path = os.path.join(os.getenv('EAZYCODE'), 'filters', file)
else:
file_path = os.path.join(path, file)
with open(file_path, 'r') as fp:
lines = fp.readlines()
self.filename = file_path
filters = []
wave = []
trans = []
header = ''
for line in lines:
if 'lambda_c' in line:
if len(wave) > 0:
# Make filter from lines already read in
new_filter = FilterDefinition(name=header,
wave=np.cast[float](wave),
throughput=np.cast[float](trans))
filters.append(new_filter)
# Initialize filter
header = ' '.join(line.split()[1:])
wave = []
trans = []
else:
lspl = np.cast[float](line.split())
wave.append(lspl[1])
trans.append(lspl[2])
# last one
new_filter = FilterDefinition(name=header,
wave=np.cast[float](wave),
throughput=np.cast[float](trans))
filters.append(new_filter)
self.filters = filters
@property
def NFILT(self):
"""
Number of filters in the list
"""
return len(self.filters)
def __getitem__(self, i1):
"""
Return unit-indexed filter, e.g., 161 = 2mass-j
"""
return self.filters[i1-1]
def names(self, verbose=True):
"""
Print the filter names.
"""
if verbose:
for i in range(len(self.filters)):
print('{0:5d} {1}'.format(i+1, self.filters[i].name))
else:
string_list = ['{0:5d} {1}\n'.format(i+1, self.filters[i].name) for i in range(len(self.filters))]
return string_list
def write(self, file='xxx.res', verbose=True):
"""
Dump the filter information to a filter file.
"""
fp = open(file,'w')
for filter in self.filters:
fp.write('{0:6d} {1}\n'.format(len(filter.wave), filter.name))
for i in range(len(filter.wave)):
fp.write('{0:6d} {1:.5e} {2:.5e}\n'.format(i+1, filter.wave[i], filter.throughput[i]))
fp.close()
string_list = self.names(verbose=False)
fp = open(file+'.info', 'w')
fp.writelines(string_list)
fp.close()
if verbose:
print('Wrote <{0}[.info]>'.format(file))
def search(self, search_string, case=False, verbose=True):
"""
Search filter names for ``search_string``. If ``case`` is True, then
match case.
"""
import re
if not case:
search_string = search_string.upper()
matched = []
for i in range(len(self.filters)):
filt_name = self.filters[i].name
if not case:
filt_name = filt_name.upper()
if re.search(search_string, filt_name) is not None:
if verbose:
print('{0:5d} {1}'.format(i+1, self.filters[i].name))
matched.append(i)
return np.array(matched)
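# Added usage sketch (values are illustrative):
#
#   res = FilterFile(path=None)              # reads $EAZYCODE/filters/FILTER.RES.latest
#   ids = res.search('2mass', verbose=True)  # zero-based indices of matching filters
#   bp = res[int(ids[0]) + 1]                # __getitem__ is 1-indexed
#   print(bp.pivot, bp.ABVega)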
class ParamFilter(FilterDefinition):
def __init__(self, line='# Filter #20, RES#78: COSMOS/SUBARU_filter_B.txt - lambda_c=4458.276253'):
self.lambda_c = float(line.split('lambda_c=')[1])
self.name = line.split()[4]
self.fnumber = int(line.split('RES#')[1].split(':')[0])
self.cnumber = int(line.split('Filter #')[1].split(',')[0])
| 2.421875 | 2 |
LeetCode/106.py | KevinTMtz/CompetitiveProgramming | 1 | 3595 | <reponame>KevinTMtz/CompetitiveProgramming
#
# LeetCode
#
# Problem - 106
# URL - https://leetcode.com/problems/construct-binary-tree-from-inorder-and-postorder-traversal/
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def buildTree(self, inorder: List[int], postorder: List[int]) -> TreeNode:
if not inorder:
return None
r = postorder.pop()
root = TreeNode(r)
index = inorder.index(r)
root.right = self.buildTree(inorder[index+1:], postorder)
root.left = self.buildTree(inorder[:index], postorder)
return root
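# Example (added): inorder = [9,3,15,20,7], postorder = [9,15,7,20,3].
# postorder.pop() yields 3, the root; inorder splits into left [9] and right
# [15,20,7]. The right subtree must be built before the left one because
# pop() keeps consuming postorder from its right end.
# Time and space are O(n^2) in the worst case due to list slicing and index().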
| 3.8125 | 4 |
evalml/automl/automl_search.py | skvorekn/evalml | 0 | 3596 | import copy
import time
from collections import defaultdict
import cloudpickle
import numpy as np
import pandas as pd
import woodwork as ww
from sklearn.model_selection import BaseCrossValidator
from .pipeline_search_plots import PipelineSearchPlots
from evalml.automl.automl_algorithm import IterativeAlgorithm
from evalml.automl.callbacks import log_error_callback
from evalml.automl.engine import SequentialEngine
from evalml.automl.utils import (
check_all_pipeline_names_unique,
get_default_primary_search_objective,
make_data_splitter
)
from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError
from evalml.model_family import ModelFamily
from evalml.objectives import (
get_core_objectives,
get_non_core_objectives,
get_objective
)
from evalml.pipelines import (
MeanBaselineRegressionPipeline,
ModeBaselineBinaryPipeline,
ModeBaselineMulticlassPipeline,
TimeSeriesBaselineBinaryPipeline,
TimeSeriesBaselineMulticlassPipeline,
TimeSeriesBaselineRegressionPipeline
)
from evalml.pipelines.components.utils import get_estimators
from evalml.pipelines.utils import make_pipeline
from evalml.preprocessing import split_data
from evalml.problem_types import ProblemTypes, handle_problem_types
from evalml.tuners import SKOptTuner
from evalml.utils import convert_to_seconds, infer_feature_types
from evalml.utils.logger import (
get_logger,
log_subtitle,
log_title,
time_elapsed,
update_pipeline
)
logger = get_logger(__file__)
class AutoMLSearch:
"""Automated Pipeline search."""
_MAX_NAME_LEN = 40
# Necessary for "Plotting" documentation, since Sphinx does not work well with instance attributes.
plot = PipelineSearchPlots
def __init__(self,
X_train=None,
y_train=None,
problem_type=None,
objective='auto',
max_iterations=None,
max_time=None,
patience=None,
tolerance=None,
data_splitter=None,
allowed_pipelines=None,
allowed_model_families=None,
start_iteration_callback=None,
add_result_callback=None,
error_callback=None,
additional_objectives=None,
random_seed=0,
n_jobs=-1,
tuner_class=None,
optimize_thresholds=True,
ensembling=False,
max_batches=None,
problem_configuration=None,
train_best_pipeline=True,
pipeline_parameters=None,
_ensembling_split_size=0.2,
_pipelines_per_batch=5):
"""Automated pipeline search
Arguments:
X_train (pd.DataFrame, ww.DataTable): The input training data of shape [n_samples, n_features]. Required.
y_train (pd.Series, ww.DataColumn): The target training data of length [n_samples]. Required for supervised learning tasks.
problem_type (str or ProblemTypes): type of supervised learning problem. See evalml.problem_types.ProblemType.all_problem_types for a full list.
objective (str, ObjectiveBase): The objective to optimize for. Used to propose and rank pipelines, but not for optimizing each pipeline during fit-time.
When set to 'auto', chooses:
- LogLossBinary for binary classification problems,
- LogLossMulticlass for multiclass classification problems, and
- R2 for regression problems.
max_iterations (int): Maximum number of iterations to search. If neither max_iterations
nor max_time is set, the search will default to a limit of max_batches=1.
max_time (int, str): Maximum time to search for pipelines.
This will not start a new pipeline search after the duration
has elapsed. If it is an integer, then the time will be in seconds.
For strings, time can be specified as seconds, minutes, or hours.
patience (int): Number of iterations without improvement to stop search early. Must be positive.
If None, early stopping is disabled. Defaults to None.
tolerance (float): Minimum percentage difference to qualify as score improvement for early stopping.
Only applicable if patience is not None. Defaults to None.
allowed_pipelines (list(class)): A list of PipelineBase subclasses indicating the pipelines allowed in the search.
The default of None indicates all pipelines for this problem type are allowed. Setting this field will cause
allowed_model_families to be ignored.
allowed_model_families (list(str, ModelFamily)): The model families to search. The default of None searches over all
model families. Run evalml.pipelines.components.utils.allowed_model_families("binary") to see options. Change `binary`
to `multiclass` or `regression` depending on the problem type. Note that if allowed_pipelines is provided,
this parameter will be ignored.
data_splitter (sklearn.model_selection.BaseCrossValidator): Data splitting method to use. Defaults to StratifiedKFold.
tuner_class: The tuner class to use. Defaults to SKOptTuner.
optimize_thresholds (bool): Whether or not to optimize the binary pipeline threshold. Defaults to True.
start_iteration_callback (callable): Function called before each pipeline training iteration.
Callback function takes three positional parameters: The pipeline class, the pipeline parameters, and the AutoMLSearch object.
add_result_callback (callable): Function called after each pipeline training iteration.
Callback function takes three positional parameters: A dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the AutoMLSearch object.
error_callback (callable): Function called when `search()` errors and raises an Exception.
Callback function takes three positional parameters: the Exception raised, the traceback, and the AutoMLSearch object.
Must also accept kwargs, so AutoMLSearch is able to pass along other appropriate parameters by default.
Defaults to None, which will call `log_error_callback`.
additional_objectives (list): Custom set of objectives to score on.
Will override default objectives for problem type if not empty.
random_seed (int): Seed for the random number generator. Defaults to 0.
n_jobs (int or None): Non-negative integer describing level of parallelism used for pipelines.
None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
ensembling (boolean): If True, runs ensembling in a separate batch after every allowed pipeline class has been iterated over.
If the number of unique pipelines to search over per batch is one, ensembling will not run. Defaults to False.
max_batches (int): The maximum number of batches of pipelines to search. Parameters max_time, and
max_iterations have precedence over stopping the search.
problem_configuration (dict, None): Additional parameters needed to configure the search. For example,
in time series problems, values should be passed in for the gap and max_delay variables.
train_best_pipeline (boolean): Whether or not to train the best pipeline before returning it. Defaults to True.
pipeline_parameters (dict): A dict of the parameters used to initialize a pipeline with.
_ensembling_split_size (float): The amount of the training data we'll set aside for training ensemble metalearners. Only used when ensembling is True.
Must be between 0 and 1, exclusive. Defaults to 0.2
_pipelines_per_batch (int): The number of pipelines to train for every batch after the first one.
The first batch will train a baseline pipeline + one of each pipeline family allowed in the search.
"""
if X_train is None:
raise ValueError('Must specify training data as a 2d array using the X_train argument')
if y_train is None:
raise ValueError('Must specify training data target values as a 1d vector using the y_train argument')
try:
self.problem_type = handle_problem_types(problem_type)
except ValueError:
raise ValueError('choose one of (binary, multiclass, regression) as problem_type')
self.tuner_class = tuner_class or SKOptTuner
self.start_iteration_callback = start_iteration_callback
self.add_result_callback = add_result_callback
self.error_callback = error_callback or log_error_callback
self.data_splitter = data_splitter
self.optimize_thresholds = optimize_thresholds
self.ensembling = ensembling
if objective == 'auto':
objective = get_default_primary_search_objective(self.problem_type.value)
objective = get_objective(objective, return_instance=False)
self.objective = self._validate_objective(objective)
if self.data_splitter is not None and not issubclass(self.data_splitter.__class__, BaseCrossValidator):
raise ValueError("Not a valid data splitter")
if not objective.is_defined_for_problem_type(self.problem_type):
raise ValueError("Given objective {} is not compatible with a {} problem.".format(self.objective.name, self.problem_type.value))
if additional_objectives is None:
additional_objectives = get_core_objectives(self.problem_type)
# if our main objective is part of default set of objectives for problem_type, remove it
existing_main_objective = next((obj for obj in additional_objectives if obj.name == self.objective.name), None)
if existing_main_objective is not None:
additional_objectives.remove(existing_main_objective)
else:
additional_objectives = [get_objective(o) for o in additional_objectives]
additional_objectives = [self._validate_objective(obj) for obj in additional_objectives]
self.additional_objectives = additional_objectives
self.objective_name_to_class = {o.name: o for o in [self.objective] + self.additional_objectives}
if not isinstance(max_time, (int, float, str, type(None))):
raise TypeError(f"Parameter max_time must be a float, int, string or None. Received {type(max_time)} with value {str(max_time)}..")
if isinstance(max_time, (int, float)) and max_time < 0:
raise ValueError(f"Parameter max_time must be None or non-negative. Received {max_time}.")
if max_batches is not None and max_batches < 0:
raise ValueError(f"Parameter max_batches must be None or non-negative. Received {max_batches}.")
if max_iterations is not None and max_iterations < 0:
raise ValueError(f"Parameter max_iterations must be None or non-negative. Received {max_iterations}.")
self.max_time = convert_to_seconds(max_time) if isinstance(max_time, str) else max_time
self.max_iterations = max_iterations
self.max_batches = max_batches
self._pipelines_per_batch = _pipelines_per_batch
if not self.max_iterations and not self.max_time and not self.max_batches:
self.max_batches = 1
logger.info("Using default limit of max_batches=1.\n")
if patience and (not isinstance(patience, int) or patience < 0):
raise ValueError("patience value must be a positive integer. Received {} instead".format(patience))
if tolerance and (tolerance > 1.0 or tolerance < 0.0):
raise ValueError("tolerance value must be a float between 0.0 and 1.0 inclusive. Received {} instead".format(tolerance))
self.patience = patience
self.tolerance = tolerance or 0.0
self._results = {
'pipeline_results': {},
'search_order': [],
'errors': []
}
self.random_seed = random_seed
self.n_jobs = n_jobs
self.plot = None
try:
self.plot = PipelineSearchPlots(self)
except ImportError:
logger.warning("Unable to import plotly; skipping pipeline search plotting\n")
self.allowed_pipelines = allowed_pipelines
self.allowed_model_families = allowed_model_families
self._automl_algorithm = None
self._start = 0.0
self._baseline_cv_scores = {}
self.show_batch_output = False
self._validate_problem_type()
self.problem_configuration = self._validate_problem_configuration(problem_configuration)
self._train_best_pipeline = train_best_pipeline
self._best_pipeline = None
self._searched = False
self.X_train = infer_feature_types(X_train)
self.y_train = infer_feature_types(y_train)
self.ensembling_indices = None
default_data_splitter = make_data_splitter(self.X_train, self.y_train, self.problem_type, self.problem_configuration,
n_splits=3, shuffle=True, random_seed=self.random_seed)
self.data_splitter = self.data_splitter or default_data_splitter
self.pipeline_parameters = pipeline_parameters if pipeline_parameters is not None else {}
self.search_iteration_plot = None
self._interrupted = False
if self.allowed_pipelines is None:
logger.info("Generating pipelines to search over...")
allowed_estimators = get_estimators(self.problem_type, self.allowed_model_families)
logger.debug(f"allowed_estimators set to {[estimator.name for estimator in allowed_estimators]}")
self.allowed_pipelines = [make_pipeline(self.X_train, self.y_train, estimator, self.problem_type, custom_hyperparameters=self.pipeline_parameters) for estimator in allowed_estimators]
if self.allowed_pipelines == []:
raise ValueError("No allowed pipelines to search")
check_all_pipeline_names_unique(self.allowed_pipelines)
run_ensembling = self.ensembling
if run_ensembling and len(self.allowed_pipelines) == 1:
logger.warning("Ensembling is set to True, but the number of unique pipelines is one, so ensembling will not run.")
run_ensembling = False
if run_ensembling and self.max_iterations is not None:
# Baseline + first batch + each pipeline iteration + 1
first_ensembling_iteration = (1 + len(self.allowed_pipelines) + len(self.allowed_pipelines) * self._pipelines_per_batch + 1)
if self.max_iterations < first_ensembling_iteration:
run_ensembling = False
logger.warning(f"Ensembling is set to True, but max_iterations is too small, so ensembling will not run. Set max_iterations >= {first_ensembling_iteration} to run ensembling.")
else:
logger.info(f"Ensembling will run at the {first_ensembling_iteration} iteration and every {len(self.allowed_pipelines) * self._pipelines_per_batch} iterations after that.")
if self.max_batches and self.max_iterations is None:
self.show_batch_output = True
if run_ensembling:
ensemble_nth_batch = len(self.allowed_pipelines) + 1
num_ensemble_batches = (self.max_batches - 1) // ensemble_nth_batch
if num_ensemble_batches == 0:
run_ensembling = False
logger.warning(f"Ensembling is set to True, but max_batches is too small, so ensembling will not run. Set max_batches >= {ensemble_nth_batch + 1} to run ensembling.")
else:
logger.info(f"Ensembling will run every {ensemble_nth_batch} batches.")
self.max_iterations = (1 + len(self.allowed_pipelines) +
self._pipelines_per_batch * (self.max_batches - 1 - num_ensemble_batches) +
num_ensemble_batches)
else:
self.max_iterations = 1 + len(self.allowed_pipelines) + (self._pipelines_per_batch * (self.max_batches - 1))
if run_ensembling:
if not (0 < _ensembling_split_size < 1):
raise ValueError(f"Ensembling split size must be between 0 and 1 exclusive, received {_ensembling_split_size}")
X_shape = ww.DataTable(np.arange(self.X_train.shape[0]))
_, ensembling_indices, _, _ = split_data(X_shape, self.y_train, problem_type=self.problem_type, test_size=_ensembling_split_size, random_seed=self.random_seed)
self.ensembling_indices = ensembling_indices.to_dataframe()[0].tolist()
self._engine = SequentialEngine(self.X_train,
self.y_train,
self.ensembling_indices,
self,
should_continue_callback=self._should_continue,
pre_evaluation_callback=self._pre_evaluation_callback,
post_evaluation_callback=self._post_evaluation_callback)
self.allowed_model_families = list(set([p.model_family for p in (self.allowed_pipelines)]))
logger.debug(f"allowed_pipelines set to {[pipeline.name for pipeline in self.allowed_pipelines]}")
logger.debug(f"allowed_model_families set to {self.allowed_model_families}")
if len(self.problem_configuration):
pipeline_params = {**{'pipeline': self.problem_configuration}, **self.pipeline_parameters}
else:
pipeline_params = self.pipeline_parameters
self._automl_algorithm = IterativeAlgorithm(
max_iterations=self.max_iterations,
allowed_pipelines=self.allowed_pipelines,
tuner_class=self.tuner_class,
random_seed=self.random_seed,
n_jobs=self.n_jobs,
number_features=self.X_train.shape[1],
pipelines_per_batch=self._pipelines_per_batch,
ensembling=run_ensembling,
pipeline_params=pipeline_params
)
def _pre_evaluation_callback(self, pipeline):
if self.start_iteration_callback:
self.start_iteration_callback(pipeline.__class__, pipeline.parameters, self)
desc = f"{pipeline.name}"
if len(desc) > AutoMLSearch._MAX_NAME_LEN:
desc = desc[:AutoMLSearch._MAX_NAME_LEN - 3] + "..."
desc = desc.ljust(AutoMLSearch._MAX_NAME_LEN)
batch_number = 1
if self._automl_algorithm is not None and self._automl_algorithm.batch_number > 0:
batch_number = self._automl_algorithm.batch_number
update_pipeline(logger,
desc,
len(self._results['pipeline_results']) + 1,
self.max_iterations,
self._start,
batch_number,
self.show_batch_output)
def _validate_objective(self, objective):
non_core_objectives = get_non_core_objectives()
if isinstance(objective, type):
if objective in non_core_objectives:
raise ValueError(f"{objective.name.lower()} is not allowed in AutoML! "
"Use evalml.objectives.utils.get_core_objective_names() "
"to get all objective names allowed in automl.")
return objective()
return objective
def __str__(self):
def _print_list(obj_list):
lines = sorted(['\t{}'.format(o.name) for o in obj_list])
return '\n'.join(lines)
def _get_funct_name(function):
if callable(function):
return function.__name__
else:
return None
search_desc = (
f"{handle_problem_types(self.problem_type).name} Search\n\n"
f"Parameters: \n{'='*20}\n"
f"Objective: {get_objective(self.objective).name}\n"
f"Max Time: {self.max_time}\n"
f"Max Iterations: {self.max_iterations}\n"
f"Max Batches: {self.max_batches}\n"
f"Allowed Pipelines: \n{_print_list(self.allowed_pipelines or [])}\n"
f"Patience: {self.patience}\n"
f"Tolerance: {self.tolerance}\n"
f"Data Splitting: {self.data_splitter}\n"
f"Tuner: {self.tuner_class.__name__}\n"
f"Start Iteration Callback: {_get_funct_name(self.start_iteration_callback)}\n"
f"Add Result Callback: {_get_funct_name(self.add_result_callback)}\n"
f"Additional Objectives: {_print_list(self.additional_objectives or [])}\n"
f"Random Seed: {self.random_seed}\n"
f"n_jobs: {self.n_jobs}\n"
f"Optimize Thresholds: {self.optimize_thresholds}\n"
)
rankings_desc = ""
if not self.rankings.empty:
rankings_str = self.rankings.drop(['parameters'], axis='columns').to_string()
rankings_desc = f"\nSearch Results: \n{'='*20}\n{rankings_str}"
return search_desc + rankings_desc
def _validate_problem_configuration(self, problem_configuration=None):
if self.problem_type in [ProblemTypes.TIME_SERIES_REGRESSION]:
required_parameters = {'gap', 'max_delay'}
if not problem_configuration or not all(p in problem_configuration for p in required_parameters):
raise ValueError("user_parameters must be a dict containing values for at least the gap and max_delay "
f"parameters. Received {problem_configuration}.")
return problem_configuration or {}
def _handle_keyboard_interrupt(self):
"""Presents a prompt to the user asking if they want to stop the search.
Returns:
bool: If True, search should terminate early
"""
leading_char = "\n"
start_of_loop = time.time()
while True:
choice = input(leading_char + "Do you really want to exit search (y/n)? ").strip().lower()
if choice == "y":
logger.info("Exiting AutoMLSearch.")
return True
elif choice == "n":
# So that the time in this loop does not count towards the time budget (if set)
time_in_loop = time.time() - start_of_loop
self._start += time_in_loop
return False
else:
leading_char = ""
def search(self, show_iteration_plot=True):
"""Find the best pipeline for the data set.
Arguments:
show_iteration_plot (boolean, True): Shows an iteration vs. score plot in Jupyter notebook.
Disabled by default in non-Jupyter environments.
"""
if self._searched:
logger.info("AutoMLSearch.search() has already been run and will not run again on the same instance. Re-initialize AutoMLSearch to search again.")
return
# don't show iteration plot outside of a jupyter notebook
if show_iteration_plot:
try:
get_ipython
except NameError:
show_iteration_plot = False
log_title(logger, "Beginning pipeline search")
logger.info("Optimizing for %s. " % self.objective.name)
logger.info("{} score is better.\n".format('Greater' if self.objective.greater_is_better else 'Lower'))
logger.info(f"Using {self._engine.__class__.__name__} to train and score pipelines.")
if self.max_batches is not None:
logger.info(f"Searching up to {self.max_batches} batches for a total of {self.max_iterations} pipelines. ")
elif self.max_iterations is not None:
logger.info("Searching up to %s pipelines. " % self.max_iterations)
if self.max_time is not None:
logger.info("Will stop searching for new pipelines after %d seconds.\n" % self.max_time)
logger.info("Allowed model families: %s\n" % ", ".join([model.value for model in self.allowed_model_families]))
self.search_iteration_plot = None
if self.plot:
self.search_iteration_plot = self.plot.search_iteration_plot(interactive_plot=show_iteration_plot)
self._start = time.time()
try:
self._add_baseline_pipelines()
except KeyboardInterrupt:
if self._handle_keyboard_interrupt():
self._interrupted = True
current_batch_pipelines = []
current_batch_pipeline_scores = []
new_pipeline_ids = []
loop_interrupted = False
while self._should_continue():
try:
if not loop_interrupted:
current_batch_pipelines = self._automl_algorithm.next_batch()
except StopIteration:
logger.info('AutoML Algorithm out of recommendations, ending')
break
try:
new_pipeline_ids = self._engine.evaluate_batch(current_batch_pipelines)
loop_interrupted = False
except KeyboardInterrupt:
loop_interrupted = True
if self._handle_keyboard_interrupt():
break
full_rankings = self.full_rankings
current_batch_idx = full_rankings['id'].isin(new_pipeline_ids)
current_batch_pipeline_scores = full_rankings[current_batch_idx]['score']
if len(current_batch_pipeline_scores) and current_batch_pipeline_scores.isna().all():
raise AutoMLSearchException(f"All pipelines in the current AutoML batch produced a score of np.nan on the primary objective {self.objective}.")
self.search_duration = time.time() - self._start
elapsed_time = time_elapsed(self._start)
desc = f"\nSearch finished after {elapsed_time}"
desc = desc.ljust(self._MAX_NAME_LEN)
logger.info(desc)
self._find_best_pipeline()
if self._best_pipeline is not None:
best_pipeline = self.rankings.iloc[0]
best_pipeline_name = best_pipeline["pipeline_name"]
logger.info(f"Best pipeline: {best_pipeline_name}")
logger.info(f"Best pipeline {self.objective.name}: {best_pipeline['score']:3f}")
self._searched = True
def _find_best_pipeline(self):
"""Finds the best pipeline in the rankings
If self._best_pipeline already exists, check to make sure it is different from the current best pipeline before training and thresholding"""
if len(self.rankings) == 0:
return
best_pipeline = self.rankings.iloc[0]
if not (self._best_pipeline and self._best_pipeline == self.get_pipeline(best_pipeline['id'])):
best_pipeline = self.get_pipeline(best_pipeline['id'])
if self._train_best_pipeline:
if best_pipeline.model_family == ModelFamily.ENSEMBLE:
X_train, y_train = self.X_train.iloc[self.ensembling_indices], self.y_train.iloc[self.ensembling_indices]
else:
X_train = self.X_train
y_train = self.y_train
if hasattr(self.data_splitter, "transform_sample"):
train_indices = self.data_splitter.transform_sample(X_train, y_train)
X_train = X_train.iloc[train_indices]
y_train = y_train.iloc[train_indices]
best_pipeline = self._engine.train_pipeline(best_pipeline, X_train, y_train,
self.optimize_thresholds, self.objective)
self._best_pipeline = best_pipeline
def _num_pipelines(self):
"""Return the number of pipeline evaluations which have been made
Returns:
int: the number of pipeline evaluations made in the search
"""
return len(self._results['pipeline_results'])
def _should_continue(self):
"""Given the original stopping criterion and current state, should the search continue?
Returns:
bool: True if yes, False if no.
"""
if self._interrupted:
return False
# for add_to_rankings
if self._searched:
return True
# Run at least one pipeline for every search
num_pipelines = self._num_pipelines()
if num_pipelines == 0:
return True
# check max_time and max_iterations
elapsed = time.time() - self._start
if self.max_time and elapsed >= self.max_time:
return False
elif self.max_iterations and num_pipelines >= self.max_iterations:
return False
# check for early stopping
if self.patience is None or self.tolerance is None:
return True
first_id = self._results['search_order'][0]
best_score = self._results['pipeline_results'][first_id]['score']
num_without_improvement = 0
for id in self._results['search_order'][1:]:
curr_score = self._results['pipeline_results'][id]['score']
significant_change = abs((curr_score - best_score) / best_score) > self.tolerance
score_improved = curr_score > best_score if self.objective.greater_is_better else curr_score < best_score
if score_improved and significant_change:
best_score = curr_score
num_without_improvement = 0
else:
num_without_improvement += 1
if num_without_improvement >= self.patience:
logger.info("\n\n{} iterations without improvement. Stopping search early...".format(self.patience))
return False
return True
def _validate_problem_type(self):
for obj in self.additional_objectives:
if not obj.is_defined_for_problem_type(self.problem_type):
raise ValueError("Additional objective {} is not compatible with a {} problem.".format(obj.name, self.problem_type.value))
for pipeline in self.allowed_pipelines or []:
if pipeline.problem_type != self.problem_type:
raise ValueError("Given pipeline {} is not compatible with problem_type {}.".format(pipeline.name, self.problem_type.value))
def _add_baseline_pipelines(self):
"""Fits a baseline pipeline to the data.
This is the first pipeline fit during search.
"""
if self.problem_type == ProblemTypes.BINARY:
baseline = ModeBaselineBinaryPipeline(parameters={})
elif self.problem_type == ProblemTypes.MULTICLASS:
baseline = ModeBaselineMulticlassPipeline(parameters={})
elif self.problem_type == ProblemTypes.REGRESSION:
baseline = MeanBaselineRegressionPipeline(parameters={})
else:
pipeline_class = {ProblemTypes.TIME_SERIES_REGRESSION: TimeSeriesBaselineRegressionPipeline,
ProblemTypes.TIME_SERIES_MULTICLASS: TimeSeriesBaselineMulticlassPipeline,
ProblemTypes.TIME_SERIES_BINARY: TimeSeriesBaselineBinaryPipeline}[self.problem_type]
gap = self.problem_configuration['gap']
max_delay = self.problem_configuration['max_delay']
baseline = pipeline_class(parameters={"pipeline": {"gap": gap, "max_delay": max_delay},
"Time Series Baseline Estimator": {"gap": gap, "max_delay": max_delay}})
self._engine.evaluate_batch([baseline])
@staticmethod
def _get_mean_cv_scores_for_all_objectives(cv_data, objective_name_to_class):
scores = defaultdict(int)
n_folds = len(cv_data)
for fold_data in cv_data:
for field, value in fold_data['all_objective_scores'].items():
# The 'all_objective_scores' field contains scores for all objectives
# but also fields like "# Training" and "# Testing", so we want to exclude them since
# they are not scores
if field in objective_name_to_class:
scores[field] += value
return {objective: float(score) / n_folds for objective, score in scores.items()}
def _post_evaluation_callback(self, pipeline, evaluation_results):
training_time = evaluation_results['training_time']
cv_data = evaluation_results['cv_data']
cv_scores = evaluation_results['cv_scores']
is_baseline = pipeline.model_family == ModelFamily.BASELINE
cv_score = cv_scores.mean()
percent_better_than_baseline = {}
mean_cv_all_objectives = self._get_mean_cv_scores_for_all_objectives(cv_data, self.objective_name_to_class)
if is_baseline:
self._baseline_cv_scores = mean_cv_all_objectives
for obj_name in mean_cv_all_objectives:
objective_class = self.objective_name_to_class[obj_name]
# In the event add_to_rankings is called before search _baseline_cv_scores will be empty so we will return
# nan for the base score.
percent_better = objective_class.calculate_percent_difference(mean_cv_all_objectives[obj_name],
self._baseline_cv_scores.get(obj_name, np.nan))
percent_better_than_baseline[obj_name] = percent_better
high_variance_cv = self._check_for_high_variance(pipeline, cv_scores)
pipeline_id = len(self._results['pipeline_results'])
self._results['pipeline_results'][pipeline_id] = {
"id": pipeline_id,
"pipeline_name": pipeline.name,
"pipeline_class": type(pipeline),
"pipeline_summary": pipeline.summary,
"parameters": pipeline.parameters,
"score": cv_score,
"high_variance_cv": high_variance_cv,
"training_time": training_time,
"cv_data": cv_data,
"percent_better_than_baseline_all_objectives": percent_better_than_baseline,
"percent_better_than_baseline": percent_better_than_baseline[self.objective.name],
"validation_score": cv_scores[0]
}
if pipeline.model_family == ModelFamily.ENSEMBLE:
input_pipeline_ids = [self._automl_algorithm._best_pipeline_info[model_family]["id"] for model_family in self._automl_algorithm._best_pipeline_info]
self._results['pipeline_results'][pipeline_id]["input_pipeline_ids"] = input_pipeline_ids
self._results['search_order'].append(pipeline_id)
if not is_baseline:
score_to_minimize = -cv_score if self.objective.greater_is_better else cv_score
try:
self._automl_algorithm.add_result(score_to_minimize, pipeline, self._results['pipeline_results'][pipeline_id])
except PipelineNotFoundError:
pass
if self.search_iteration_plot:
self.search_iteration_plot.update()
if self.add_result_callback:
self.add_result_callback(self._results['pipeline_results'][pipeline_id], pipeline, self)
return pipeline_id
def _check_for_high_variance(self, pipeline, cv_scores, threshold=0.2):
"""Checks cross-validation scores and logs a warning if variance is higher than specified threshhold."""
pipeline_name = pipeline.name
high_variance_cv = bool(abs(cv_scores.std() / cv_scores.mean()) > threshold)
if high_variance_cv:
logger.warning(f"High coefficient of variation (cv >= {threshold}) within cross validation scores. {pipeline_name} may not perform as estimated on unseen data.")
return high_variance_cv
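# Added note: the check above uses the coefficient of variation |std / mean| of the
# cross-validation scores. For example, scores of [0.5, 0.6, 0.9] have mean ~0.667
# and sample std ~0.208, so cv ~0.31 > 0.2 and the warning fires.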
def get_pipeline(self, pipeline_id):
"""Given the ID of a pipeline training result, returns an untrained instance of the specified pipeline
initialized with the parameters used to train that pipeline during automl search.
Arguments:
pipeline_id (int): pipeline to retrieve
Returns:
PipelineBase: untrained pipeline instance associated with the provided ID
"""
pipeline_results = self.results['pipeline_results'].get(pipeline_id)
if pipeline_results is None:
raise PipelineNotFoundError("Pipeline not found in automl results")
pipeline_class = pipeline_results.get('pipeline_class')
parameters = pipeline_results.get('parameters')
if pipeline_class is None or parameters is None:
raise PipelineNotFoundError("Pipeline class or parameters not found in automl results")
return pipeline_class(parameters, random_seed=self.random_seed)
def describe_pipeline(self, pipeline_id, return_dict=False):
"""Describe a pipeline
Arguments:
pipeline_id (int): pipeline to describe
return_dict (bool): If True, return dictionary of information
about pipeline. Defaults to False.
Returns:
Description of specified pipeline. Includes information such as
type of pipeline components, problem, training time, cross validation, etc.
"""
if pipeline_id not in self._results['pipeline_results']:
raise PipelineNotFoundError("Pipeline not found")
pipeline = self.get_pipeline(pipeline_id)
pipeline_results = self._results['pipeline_results'][pipeline_id]
pipeline.describe()
if pipeline.model_family == ModelFamily.ENSEMBLE:
logger.info("Input for ensembler are pipelines with IDs: " + str(pipeline_results['input_pipeline_ids']))
log_subtitle(logger, "Training")
logger.info("Training for {} problems.".format(pipeline.problem_type))
if self.optimize_thresholds and self.objective.is_defined_for_problem_type(ProblemTypes.BINARY) and self.objective.can_optimize_threshold:
logger.info("Objective to optimize binary classification pipeline thresholds for: {}".format(self.objective))
logger.info("Total training time (including CV): %.1f seconds" % pipeline_results["training_time"])
log_subtitle(logger, "Cross Validation", underline="-")
all_objective_scores = [fold["all_objective_scores"] for fold in pipeline_results["cv_data"]]
all_objective_scores = pd.DataFrame(all_objective_scores)
for c in all_objective_scores:
if c in ["# Training", "# Validation"]:
all_objective_scores[c] = all_objective_scores[c].astype("object")
continue
mean = all_objective_scores[c].mean(axis=0)
std = all_objective_scores[c].std(axis=0)
all_objective_scores.loc["mean", c] = mean
all_objective_scores.loc["std", c] = std
all_objective_scores.loc["coef of var", c] = std / mean if abs(mean) > 0 else np.inf
all_objective_scores = all_objective_scores.fillna("-")
with pd.option_context('display.float_format', '{:.3f}'.format, 'expand_frame_repr', False):
logger.info(all_objective_scores)
if return_dict:
return pipeline_results
def add_to_rankings(self, pipeline):
"""Fits and evaluates a given pipeline then adds the results to the automl rankings with the requirement that automl search has been run.
Arguments:
pipeline (PipelineBase): pipeline to train and evaluate.
"""
pipeline_rows = self.full_rankings[self.full_rankings['pipeline_name'] == pipeline.name]
for parameter in pipeline_rows['parameters']:
if pipeline.parameters == parameter:
return
self._engine.evaluate_batch([pipeline])
self._find_best_pipeline()
@property
def results(self):
"""Class that allows access to a copy of the results from `automl_search`.
Returns: dict containing `pipeline_results`: a dict with results from each pipeline,
and `search_order`: a list describing the order the pipelines were searched.
"""
return copy.deepcopy(self._results)
@property
def rankings(self):
"""Returns a pandas.DataFrame with scoring results from the highest-scoring set of parameters used with each pipeline."""
return self.full_rankings.drop_duplicates(subset="pipeline_name", keep="first")
@property
def full_rankings(self):
"""Returns a pandas.DataFrame with scoring results from all pipelines searched"""
ascending = True
if self.objective.greater_is_better:
ascending = False
full_rankings_cols = ["id", "pipeline_name", "score", "validation_score",
"percent_better_than_baseline", "high_variance_cv", "parameters"]
if not self._results['pipeline_results']:
return pd.DataFrame(columns=full_rankings_cols)
rankings_df = pd.DataFrame(self._results['pipeline_results'].values())
rankings_df = rankings_df[full_rankings_cols]
rankings_df.sort_values("score", ascending=ascending, inplace=True)
rankings_df.reset_index(drop=True, inplace=True)
return rankings_df
@property
def best_pipeline(self):
"""Returns a trained instance of the best pipeline and parameters found during automl search. If `train_best_pipeline` is set to False, returns an untrained pipeline instance.
Returns:
PipelineBase: A trained instance of the best pipeline and parameters found during automl search. If `train_best_pipeline` is set to False, returns an untrained pipeline instance.
"""
if not self._best_pipeline:
raise PipelineNotFoundError("automl search must be run before selecting `best_pipeline`.")
return self._best_pipeline
def save(self, file_path, pickle_protocol=cloudpickle.DEFAULT_PROTOCOL):
"""Saves AutoML object at file path
Arguments:
file_path (str): location to save file
pickle_protocol (int): the pickle data stream format.
Returns:
None
"""
with open(file_path, 'wb') as f:
cloudpickle.dump(self, f, protocol=pickle_protocol)
@staticmethod
def load(file_path):
"""Loads AutoML object at file path
Arguments:
file_path (str): location to find file to load
Returns:
AutoSearchBase object
"""
with open(file_path, 'rb') as f:
return cloudpickle.load(f)
def train_pipelines(self, pipelines):
"""Train a list of pipelines on the training data.
This can be helpful for training pipelines once the search is complete.
Arguments:
pipelines (list(PipelineBase)): List of pipelines to train.
Returns:
Dict[str, PipelineBase]: Dictionary keyed by pipeline name that maps to the fitted pipeline.
Note that any pipelines that error out during training will not be included in the dictionary,
but the exception and stacktrace will be displayed in the log.
"""
return self._engine.train_batch(pipelines)
def score_pipelines(self, pipelines, X_holdout, y_holdout, objectives):
"""Score a list of pipelines on the given holdout data.
Arguments:
pipelines (list(PipelineBase)): List of pipelines to train.
X_holdout (ww.DataTable, pd.DataFrame): Holdout features.
y_holdout (ww.DataTable, pd.DataFrame): Holdout targets for scoring.
objectives (list(str), list(ObjectiveBase)): Objectives used for scoring.
Returns:
Dict[str, Dict[str, float]]: Dictionary keyed by pipeline name that maps to a dictionary of scores.
Note that any pipelines that error out during scoring will not be included in the dictionary,
but the exception and stacktrace will be displayed in the log.
"""
return self._engine.score_batch(pipelines, X_holdout, y_holdout, objectives)
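# Minimal usage sketch (added; assumes X_train/y_train and a holdout split already exist):
#
#   automl = AutoMLSearch(X_train=X_train, y_train=y_train,
#                         problem_type='binary', objective='auto', max_batches=1)
#   automl.search()
#   automl.rankings                              # leaderboard of evaluated pipelines
#   best = automl.best_pipeline                  # trained when train_best_pipeline=True
#   automl.score_pipelines([best], X_holdout, y_holdout, objectives=['F1'])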
| 1.890625 | 2 |
graphql_social_auth/mutations.py | deepsourcelabs/django-graphql-social-auth | 1 | 3597 | import graphene
from graphql_jwt.decorators import setup_jwt_cookie
from . import mixins, types
from .decorators import social_auth
class SocialAuthMutation(mixins.SocialAuthMixin, graphene.Mutation):
social = graphene.Field(types.SocialType)
class Meta:
abstract = True
class Arguments:
provider = graphene.String(required=True)
code = graphene.String(required=True)
@classmethod
@setup_jwt_cookie
@social_auth
def mutate(cls, root, info, social, **kwargs):
return cls.resolve(root, info, social, **kwargs)
class SocialAuth(mixins.ResolveMixin, SocialAuthMutation):
"""Social Auth Mutation"""
class SocialAuthJWT(mixins.JSONWebTokenMixin, SocialAuthMutation):
"""Social Auth for JSON Web Token (JWT)"""
| 2.40625 | 2 |
yellowbrick/regressor/base.py | Juan0001/yellowbrick-docs-zh | 20 | 3598 | <reponame>Juan0001/yellowbrick-docs-zh
# yellowbrick.regressor.base
# Base classes for regressor Visualizers.
#
# Author: <NAME> <<EMAIL>>
# Author: <NAME> <<EMAIL>>
# Created: Fri Jun 03 10:30:36 2016 -0700
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: base.py [7d3f5e6] <EMAIL> $
"""
Base classes for regressor Visualizers.
"""
##########################################################################
## Imports
##########################################################################
from ..utils import isregressor
from ..base import ScoreVisualizer
from ..exceptions import YellowbrickTypeError
## Packages for export
__all__ = [
"RegressionScoreVisualizer",
]
##########################################################################
## Regression Visualization Base Object
##########################################################################
class RegressionScoreVisualizer(ScoreVisualizer):
"""
Base class for all ScoreVisualizers that evaluate a regression estimator.
The primary functionality of this class is to perform a check to ensure
the passed in estimator is a regressor, otherwise it raises a
``YellowbrickTypeError``.
"""
def __init__(self, model, ax=None, **kwargs):
if not isregressor(model):
raise YellowbrickTypeError(
"This estimator is not a regressor; try a classifier or "
"clustering score visualizer instead!"
)
super(RegressionScoreVisualizer, self).__init__(model, ax=ax, **kwargs)
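##########################################################################
## Usage Sketch
##########################################################################

def _example_type_check():
    """
    Illustrative only (not part of yellowbrick's public API): demonstrates the
    estimator type check performed by RegressionScoreVisualizer. Ridge and
    LogisticRegression are just example scikit-learn estimators.
    """
    from sklearn.linear_model import Ridge, LogisticRegression

    RegressionScoreVisualizer(Ridge())  # accepted: Ridge is a regressor

    try:
        RegressionScoreVisualizer(LogisticRegression())
    except YellowbrickTypeError:
        pass  # raised: LogisticRegression is a classifier, not a regressor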
| 1.726563 | 2 |
contrib/stack/stripmapStack/crossmul.py | falkamelung/isce2 | 0 | 3599 | #!/usr/bin/env python3
import os
import argparse
import logging
import isce
import isceobj
from components.stdproc.stdproc import crossmul
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU
def createParser():
'''
Command Line Parser.
'''
    parser = argparse.ArgumentParser(description='Generate a multilooked interferogram and amplitude image by cross-multiplying two coregistered SLC images')
parser.add_argument('-m', '--master', type=str, dest='master', required=True,
help='Master image')
parser.add_argument('-s', '--slave', type=str, dest='slave', required=True,
help='Slave image')
parser.add_argument('-o', '--outdir', type=str, dest='prefix', default='crossmul',
help='Prefix of output int and amp files')
parser.add_argument('-a', '--alks', type=int, dest='azlooks', default=1,
help='Azimuth looks')
parser.add_argument('-r', '--rlks', type=int, dest='rglooks', default=1,
help='Range looks')
return parser
def cmdLineParse(iargs = None):
parser = createParser()
return parser.parse_args(args=iargs)
def run(imageSlc1, imageSlc2, resampName, azLooks, rgLooks):
    '''
    Cross-multiply two coregistered SLC images and multilook the result to
    produce an interferogram (.int) and an amplitude (.amp) image pair.
    '''
objSlc1 = isceobj.createSlcImage()
#right now imageSlc1 and 2 are just text files, need to open them as image
IU.copyAttributes(imageSlc1, objSlc1)
objSlc1.setAccessMode('read')
objSlc1.createImage()
objSlc2 = isceobj.createSlcImage()
IU.copyAttributes(imageSlc2, objSlc2)
objSlc2.setAccessMode('read')
objSlc2.createImage()
slcWidth = imageSlc1.getWidth()
intWidth = int(slcWidth / rgLooks)
lines = min(imageSlc1.getLength(), imageSlc2.getLength())
resampAmp = resampName + '.amp'
resampInt = resampName + '.int'
objInt = isceobj.createIntImage()
objInt.setFilename(resampInt)
objInt.setWidth(intWidth)
imageInt = isceobj.createIntImage()
IU.copyAttributes(objInt, imageInt)
objInt.setAccessMode('write')
objInt.createImage()
objAmp = isceobj.createAmpImage()
objAmp.setFilename(resampAmp)
objAmp.setWidth(intWidth)
imageAmp = isceobj.createAmpImage()
IU.copyAttributes(objAmp, imageAmp)
objAmp.setAccessMode('write')
objAmp.createImage()
objCrossmul = crossmul.createcrossmul()
objCrossmul.width = slcWidth
objCrossmul.length = lines
objCrossmul.LooksDown = azLooks
objCrossmul.LooksAcross = rgLooks
objCrossmul.crossmul(objSlc1, objSlc2, objInt, objAmp)
for obj in [objInt, objAmp, objSlc1, objSlc2]:
obj.finalizeImage()
return imageInt, imageAmp
def main(iargs=None):
    '''
    Main driver.
    '''
inps = cmdLineParse(iargs)
img1 = isceobj.createImage()
img1.load(inps.master + '.xml')
img2 = isceobj.createImage()
img2.load(inps.slave + '.xml')
os.makedirs(os.path.dirname(inps.prefix), exist_ok=True)
run(img1, img2, inps.prefix, inps.azlooks, inps.rglooks)
if __name__ == '__main__':
main()
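# Programmatic usage sketch (illustrative only): the SLC file names, output
# prefix and look factors are assumptions, not values taken from the stack
# processor; it simply drives main() with the same flags the CLI would use.
def _example_crossmul():
    main(['-m', 'master.slc', '-s', 'slave.slc',
          '-o', 'interferograms/pair', '-a', '10', '-r', '10'])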
| 2.328125 | 2 |