import logging
import io
import os
import os.path
import platform
import sys
from collections import deque, OrderedDict
from datetime import datetime, timezone, timedelta
from itertools import islice
from operator import attrgetter
from ..logger import create_logger
logger = create_logger()
from .time import to_localtime
from . import msgpack
from .. import __version__ as borg_version
from .. import chunker
def prune_within(archives, hours, kept_because):
target = datetime.now(timezone.utc) - timedelta(seconds=hours * 3600)
kept_counter = 0
result = []
for a in archives:
if a.ts > target:
kept_counter += 1
kept_because[a.id] = ("within", kept_counter)
result.append(a)
return result
PRUNING_PATTERNS = OrderedDict([
("secondly", '%Y-%m-%d %H:%M:%S'),
("minutely", '%Y-%m-%d %H:%M'),
("hourly", '%Y-%m-%d %H'),
("daily", '%Y-%m-%d'),
("weekly", '%G-%V'),
("monthly", '%Y-%m'),
("yearly", '%Y'),
])
def prune_split(archives, rule, n, kept_because=None):
last = None
keep = []
pattern = PRUNING_PATTERNS[rule]
if kept_because is None:
kept_because = {}
if n == 0:
return keep
for a in sorted(archives, key=attrgetter('ts'), reverse=True):
period = to_localtime(a.ts).strftime(pattern)
if period != last:
last = period
if a.id not in kept_because:
keep.append(a)
kept_because[a.id] = (rule, len(keep))
if len(keep) == n:
break
return keep
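# Illustrative sketch (added, not part of the original module): prune_split keeps
# the newest archive of each period for the given rule. The namedtuple below is a
# stand-in for real archive objects, which only need `ts` and `id` attributes here.
def _demo_prune_split():
    from collections import namedtuple
    Archive = namedtuple('Archive', 'id ts')
    archives = [
        Archive(id=b'a', ts=datetime(2021, 1, 1, 10, tzinfo=timezone.utc)),
        Archive(id=b'b', ts=datetime(2021, 1, 1, 23, tzinfo=timezone.utc)),
        Archive(id=b'c', ts=datetime(2021, 1, 2, 12, tzinfo=timezone.utc)),
    ]
    kept_because = {}
    # keep up to two "daily" archives: the newest one of each calendar day
    keep = prune_split(archives, 'daily', 2, kept_because)
    return keep, kept_because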
def sysinfo():
show_sysinfo = os.environ.get('BORG_SHOW_SYSINFO', 'yes').lower()
if show_sysinfo == 'no':
return ''
python_implementation = platform.python_implementation()
python_version = platform.python_version()
    # platform.uname() does a shell call internally to get processor info,
    # which caused issue #3732, so use os.uname() instead.
try:
uname = os.uname()
except AttributeError:
uname = None
if sys.platform.startswith('linux'):
try:
linux_distribution = platform.linux_distribution() # noqa
except:
# platform.linux_distribution() is deprecated since py 3.5 and removed in 3.7.
linux_distribution = ('Unknown Linux', '', '')
else:
linux_distribution = None
try:
msgpack_version = '.'.join(str(v) for v in msgpack.version)
except:
msgpack_version = 'unknown'
info = []
if uname is not None:
info.append('Platform: %s' % (' '.join(uname), ))
if linux_distribution is not None:
info.append('Linux: %s %s %s' % linux_distribution)
info.append('Borg: %s Python: %s %s msgpack: %s' % (
borg_version, python_implementation, python_version, msgpack_version))
info.append('PID: %d CWD: %s' % (os.getpid(), os.getcwd()))
info.append('sys.argv: %r' % sys.argv)
info.append('SSH_ORIGINAL_COMMAND: %r' % os.environ.get('SSH_ORIGINAL_COMMAND'))
info.append('')
return '\n'.join(info)
def log_multi(*msgs, level=logging.INFO, logger=logger):
"""
    log multiple lines of text, each line via a separate logging call, for cosmetic reasons.

    Each positional argument may be a single or multiple lines (separated by newlines) of text.
"""
lines = []
for msg in msgs:
lines.extend(msg.splitlines())
for line in lines:
logger.log(level, line)
class ChunkIteratorFileWrapper:
"""File-like wrapper for chunk iterators"""
def __init__(self, chunk_iterator, read_callback=None):
"""
*chunk_iterator* should be an iterator yielding bytes. These will be buffered
internally as necessary to satisfy .read() calls.
*read_callback* will be called with one argument, some byte string that has
just been read and will be subsequently returned to a caller of .read().
It can be used to update a progress display.
"""
self.chunk_iterator = chunk_iterator
self.chunk_offset = 0
self.chunk = b''
self.exhausted = False
self.read_callback = read_callback
def _refill(self):
remaining = len(self.chunk) - self.chunk_offset
if not remaining:
try:
chunk = next(self.chunk_iterator)
self.chunk = memoryview(chunk)
except StopIteration:
self.exhausted = True
return 0 # EOF
self.chunk_offset = 0
remaining = len(self.chunk)
return remaining
def _read(self, nbytes):
if not nbytes:
return b''
remaining = self._refill()
will_read = min(remaining, nbytes)
self.chunk_offset += will_read
return self.chunk[self.chunk_offset - will_read:self.chunk_offset]
def read(self, nbytes):
parts = []
while nbytes and not self.exhausted:
read_data = self._read(nbytes)
nbytes -= len(read_data)
parts.append(read_data)
if self.read_callback:
self.read_callback(read_data)
return b''.join(parts)
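# Minimal usage sketch (added for illustration, not part of the original code):
# any iterator yielding bytes can be wrapped and then consumed via .read().
def _demo_chunk_iterator_file_wrapper():
    fd = ChunkIteratorFileWrapper(iter([b'foo', b'bar', b'baz']))
    assert fd.read(4) == b'foob'
    assert fd.read(100) == b'arbaz'
    assert fd.read(1) == b''  # iterator exhausted, behaves like EOF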
def open_item(archive, item):
"""Return file-like object for archived item (with chunks)."""
chunk_iterator = archive.pipeline.fetch_many([c.id for c in item.chunks])
return ChunkIteratorFileWrapper(chunk_iterator)
def chunkit(it, size):
"""
Chunk an iterator <it> into pieces of <size>.
    >>> list(chunkit('ABCDEFG', 3))
[['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]
"""
iterable = iter(it)
return iter(lambda: list(islice(iterable, size)), [])
def consume(iterator, n=None):
"""Advance the iterator n-steps ahead. If n is none, consume entirely."""
# Use functions that consume iterators at C speed.
if n is None:
# feed the entire iterator into a zero-length deque
deque(iterator, maxlen=0)
else:
# advance to the empty slice starting at position n
next(islice(iterator, n, n), None)
class ErrorIgnoringTextIOWrapper(io.TextIOWrapper):
def read(self, n):
if not self.closed:
try:
return super().read(n)
except BrokenPipeError:
try:
super().close()
except OSError:
pass
return ''
def write(self, s):
if not self.closed:
try:
return super().write(s)
except BrokenPipeError:
try:
super().close()
except OSError:
pass
return len(s)
|
''' Program to connect with database and update the employee record of entered empno. '''
from utils import clear_screen
from sqlTor import SqlTor
from q18_dbSearch import get_employee
from q17_dbRecord import input_employee_details
def update_employee(cursor):
''' Update an employee '''
emp = get_employee(cursor)
if not emp:
print('Employee does not exist.')
return
print('Enter new details of employee.')
name, department, salary = input_employee_details()
employee_updation = f"UPDATE employees \
SET name='{name}',\
department='{department}',\
salary={salary} \
WHERE emp_id={emp[0]};"
try:
cursor.execute(employee_updation)
except Exception as err:
print(err)
else:
print('Update Successful!')
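# Illustrative alternative (added, not part of the original exercise): the same
# update using DB-API parameter binding instead of f-string interpolation, which
# avoids SQL injection. The %s placeholder style assumes a MySQL-style connector
# behind SqlTor.
def update_employee_parameterized(cursor):
    ''' Update an employee using a parameterized query (sketch). '''
    emp = get_employee(cursor)
    if not emp:
        print('Employee does not exist.')
        return
    print('Enter new details of employee.')
    name, department, salary = input_employee_details()
    query = ("UPDATE employees "
             "SET name=%s, department=%s, salary=%s "
             "WHERE emp_id=%s")
    try:
        cursor.execute(query, (name, department, salary, emp[0]))
    except Exception as err:
        print(err)
    else:
        print('Update Successful!')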
if __name__ == "__main__":
with SqlTor() as my_con:
cursor = my_con.cursor()
while True:
clear_screen()
print('UPDATE EMPLOYEE')
update_employee(cursor)
my_con.commit()
|
#!/usr/bin/env python
"""
A script that imports and verifies metadata and then dumps it in a basic
dictionary format.
"""
import sys
from saml2.mdstore import MetaDataExtern
from saml2.mdstore import MetaDataFile
MDIMPORT = {
"swamid": {
"url": "https://kalmar2.org/simplesaml/module.php/aggregator/?id=kalmarcentral2&set=saml2",
"cert": "kalmar2.pem",
"type": "external"
},
"incommon": {
"file": "InCommon-metadata.xml",
"type": "local"
},
"test": {
"file": "mdtest.xml",
"type": "local"
}
}
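# Example invocation (added for illustration; the script filename is a placeholder
# for whatever this file is saved as):
#
#   python dump_metadata.py test
#
# where the argument must be one of the MDIMPORT keys above.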
def main():
item = MDIMPORT[sys.argv[1]]
metad = None
if item["type"] == "local":
metad = MetaDataFile(sys.argv[1], item["file"])
elif item["type"] == "external":
metad = MetaDataExtern(sys.argv[1], item["url"],
"/opt/local/bin/xmlsec1", item["cert"])
if metad:
metad.load()
print(metad.dumps())
if __name__ == '__main__':
main()
|
import random
import torch as t
from py.data.XYDataset import XYDataset
from py.util.Config import dtype, device
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import numpy as np
en_stops = set(stopwords.words('english'))
# TODO: move into more reusable class
class Embedding():
def __init__(self, embedding):
self.embedding = embedding
self.cache = {}
def get(self, token):
if token in self.cache:
return self.cache[token]
else:
vec = self.embedding[token]
self.cache[token] = vec
return vec
class TestDescriptionConsistencyHumanLabeledDataset(XYDataset):
def __init__(self, description_embedding, test_embedding, embedding_size, max_body_length, max_description_length):
self.description_embedding = description_embedding
self.test_embedding = test_embedding
self.embedding_size = embedding_size
self.max_body_length = max_body_length
self.max_description_length = max_description_length
def prepare_data(self, consistent_json_dataset, inconsistent_json_dataset):
print("Preparing dataset")
self.body_tensors = []
self.description_tensors = []
self.is_consistent_tensors = [] # consistent (1.0) inconsistent (0.0)
self.ids = [] # unique id for each item, useful for debugging
de = Embedding(self.description_embedding)
te = Embedding(self.test_embedding)
# add human labelled data
# positive examples
for token_seq in consistent_json_dataset:
test_description = token_seq["metadata"]["description"]
description_vec = []
for token in word_tokenize(test_description)[:self.max_description_length]:
if token not in en_stops:
description_vec.append(de.get(token))
while len(description_vec) < self.max_description_length:
description_vec.append([0] * self.embedding_size)
body_vec = []
for token in token_seq["data"][:self.max_body_length]:
if token not in en_stops:
body_vec.append(te.get(token))
while len(body_vec) < self.max_body_length:
body_vec.append([0] * self.embedding_size)
self.body_tensors.append(body_vec)
self.description_tensors.append(description_vec)
self.is_consistent_tensors.append([1.0])
self.ids.append(token_seq["id"])
# negative examples
        next_neg_example_id = -1  # negative ids for negative examples
for token_seq in inconsistent_json_dataset:
test_description = token_seq["metadata"]["description"]
description_vec = []
for token in word_tokenize(test_description)[:self.max_description_length]:
if token not in en_stops:
description_vec.append(de.get(token))
while len(description_vec) < self.max_description_length:
description_vec.append([0] * self.embedding_size)
body_vec = []
for token in token_seq["data"][:self.max_body_length]:
if token not in en_stops:
body_vec.append(te.get(token))
while len(body_vec) < self.max_body_length:
body_vec.append([0] * self.embedding_size)
self.body_tensors.append(body_vec)
self.description_tensors.append(description_vec)
self.is_consistent_tensors.append([0.0])
self.ids.append(next_neg_example_id)
next_neg_example_id -= 1
self.body_tensors = t.as_tensor(
self.body_tensors, dtype=dtype, device="cpu")
self.description_tensors = t.as_tensor(
self.description_tensors, dtype=dtype, device="cpu")
self.is_consistent_tensors = t.as_tensor(
self.is_consistent_tensors, dtype=dtype, device="cpu")
self.ids = t.as_tensor(
self.ids, device="cpu")
print(
f"Done with data preparation: {len(self.body_tensors)} datapoints")
def save_to_disk(self, filename):
t.save({"body_tensors": self.body_tensors,
"description_tensors": self.description_tensors,
"is_consistent_tensors": self.is_consistent_tensors,
"ids": self.ids},
filename)
def load_from_disk(self, filename):
tensors = t.load(filename)
self.body_tensors = tensors["body_tensors"]
self.description_tensors = tensors["description_tensors"]
self.is_consistent_tensors = tensors["is_consistent_tensors"]
self.ids = tensors["ids"]
def move_to_target_device(self):
print("Moving dataset to target device (e.g. GPU)")
self.body_tensors = t.as_tensor(
self.body_tensors, dtype=dtype, device=device)
self.description_tensors = t.as_tensor(
self.description_tensors, dtype=dtype, device=device)
self.is_consistent_tensors = t.as_tensor(
self.is_consistent_tensors, dtype=dtype, device=device)
self.ids = t.as_tensor(
self.ids, dtype=dtype, device=device)
def __len__(self):
return len(self.body_tensors)
def __getitem__(self, index):
return [self.body_tensors[index], self.description_tensors[index]], self.is_consistent_tensors[index], self.ids[index]
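# Usage sketch (added for illustration; the embeddings and JSON datasets are
# placeholders provided by the surrounding pipeline):
#
#   dataset = TestDescriptionConsistencyHumanLabeledDataset(
#       description_embedding, test_embedding,
#       embedding_size=100, max_body_length=200, max_description_length=30)
#   dataset.prepare_data(consistent_json, inconsistent_json)
#   dataset.save_to_disk("human_labeled.pt")
#   dataset.move_to_target_device()
#   (body, description), label, item_id = dataset[0]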
|
from typing import Tuple
import numpy as np
from numpy import ndarray
from skfem.element import DiscreteField
from skfem.quadrature import get_quadrature
from .basis import Basis
class FacetBasis(Basis):
"""Basis functions evaluated at quadrature points on the element boundaries.
Initialized and used similarly as :class:`~skfem.assembly.InteriorBasis`.
"""
def __init__(self,
mesh,
elem,
mapping=None,
intorder: int = None,
side: int = None,
facets: ndarray = None,
quadrature: Tuple[ndarray, ndarray] = None):
"""Combine :class:`~skfem.mesh.Mesh` and :class:`~skfem.element.Element`
into a set of precomputed global basis functions at element facets.
Parameters
----------
mesh
An object of type :class:`~skfem.mesh.Mesh`.
elem
An object of type :class:`~skfem.element.Element`.
mapping
An object of type :class:`skfem.mapping.Mapping`. If `None`, uses
`mesh.mapping`.
intorder
Optional integration order, i.e. the degree of polynomials that are
integrated exactly by the used quadrature. Not used if `quadrature`
is specified.
side
If 0 or 1, basis functions are evaluated on the interior facets.
The numbers 0 and 1 refer to the different sides of the facets.
Side 0 corresponds to the indices `mesh.f2t[0]`. If `None`, basis
is evaluated only on the exterior facets.
facets
Optional subset of facet indices.
quadrature
Optional tuple of quadrature points and weights.
"""
super(FacetBasis, self).__init__(mesh, elem, mapping)
if quadrature is not None:
self.X, self.W = quadrature
else:
self.X, self.W = get_quadrature(
self.brefdom,
intorder if intorder is not None else 2 * self.elem.maxdeg
)
# facets where the basis is evaluated
if facets is None:
if side is None:
self.find = np.nonzero(self.mesh.f2t[1] == -1)[0]
self.tind = self.mesh.f2t[0, self.find]
elif hasattr(self.mapping, 'helper_to_orig') and side in [0, 1]:
self.mapping.side = side
self.find = self.mapping.helper_to_orig[side]
self.tind = self.mesh.f2t[0, self.find]
elif side in [0, 1]:
self.find = np.nonzero(self.mesh.f2t[1] != -1)[0]
self.tind = self.mesh.f2t[side, self.find]
else:
raise Exception("Parameter 'side' must be either 0 or 1. "
"A facet shares only two elements.")
else:
self.find = facets
self.tind = self.mesh.f2t[0, self.find]
# boundary refdom to global facet
x = self.mapping.G(self.X, find=self.find)
# global facet to refdom facet
Y = self.mapping.invF(x, tind=self.tind)
# construct normal vectors from side=0 always
Y0 = self.mapping.invF(x, tind=self.mesh.f2t[0, self.find])
self.normals = DiscreteField(
value=self.mapping.normals(Y0,
self.mesh.f2t[0, self.find],
self.find,
self.mesh.t2f)
)
self.nelems = len(self.find)
self.basis = [self.elem.gbasis(self.mapping, Y, j, self.tind)
for j in range(self.Nbfun)]
self.dx = (np.abs(self.mapping.detDG(self.X, find=self.find))
* np.tile(self.W, (self.nelems, 1)))
self.element_dofs = self.element_dofs[:, self.tind]
def default_parameters(self):
"""Return default parameters for `~skfem.assembly.asm`."""
return {'x': self.global_coordinates(),
'h': self.mesh_parameters(),
'n': self.normals}
def global_coordinates(self) -> ndarray:
return DiscreteField(self.mapping.G(self.X, find=self.find))
def mesh_parameters(self) -> ndarray:
return DiscreteField((np.abs(self.mapping.detDG(self.X, self.find))
** (1. / (self.mesh.dim() - 1.)))
if self.mesh.dim() != 1 else np.array([0.]))
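def _demo_facet_basis():  # pragma: no cover
    """Illustrative sketch (added, not part of skfem): assemble a FacetBasis on
    the default triangular mesh; exact top-level import names may vary between
    skfem versions."""
    from skfem import MeshTri, ElementTriP1
    m = MeshTri()
    fb = FacetBasis(m, ElementTriP1())   # exterior facets by default
    return fb.default_parameters()       # {'x': ..., 'h': ..., 'n': ...}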
|
import validators as _v
from rest_framework import serializers
from .models import Check
from autotasks.models import AutomatedTask
from scripts.serializers import ScriptSerializer, ScriptCheckSerializer
class AssignedTaskField(serializers.ModelSerializer):
class Meta:
model = AutomatedTask
fields = "__all__"
class CheckSerializer(serializers.ModelSerializer):
readable_desc = serializers.ReadOnlyField()
script = ScriptSerializer(read_only=True)
assigned_task = serializers.SerializerMethodField()
last_run = serializers.ReadOnlyField(source="last_run_as_timezone")
history_info = serializers.ReadOnlyField()
## Change to return only array of tasks after 9/25/2020
def get_assigned_task(self, obj):
if obj.assignedtask.exists():
tasks = obj.assignedtask.all()
if len(tasks) == 1:
return AssignedTaskField(tasks[0]).data
else:
return AssignedTaskField(tasks, many=True).data
class Meta:
model = Check
fields = "__all__"
# https://www.django-rest-framework.org/api-guide/serializers/#object-level-validation
def validate(self, val):
try:
check_type = val["check_type"]
except KeyError:
return val
# disk checks
# make sure no duplicate diskchecks exist for an agent/policy
if check_type == "diskspace" and not self.instance: # only on create
checks = (
Check.objects.filter(**self.context)
.filter(check_type="diskspace")
.exclude(managed_by_policy=True)
)
for check in checks:
if val["disk"] in check.disk:
raise serializers.ValidationError(
f"A disk check for Drive {val['disk']} already exists!"
)
# ping checks
if check_type == "ping":
if (
not _v.ipv4(val["ip"])
and not _v.ipv6(val["ip"])
and not _v.domain(val["ip"])
):
raise serializers.ValidationError(
"Please enter a valid IP address or domain name"
)
return val
class AssignedTaskCheckRunnerField(serializers.ModelSerializer):
class Meta:
model = AutomatedTask
fields = ["id", "enabled"]
class CheckRunnerGetSerializer(serializers.ModelSerializer):
# for the windows agent
# only send data needed for agent to run a check
assigned_task = serializers.SerializerMethodField()
script = ScriptSerializer(read_only=True)
def get_assigned_task(self, obj):
if obj.assignedtask.exists():
# this will not break agents on version 0.10.2 or lower
# newer agents once released will properly handle multiple tasks assigned to a check
task = obj.assignedtask.first()
return AssignedTaskCheckRunnerField(task).data
class Meta:
model = Check
exclude = [
"policy",
"managed_by_policy",
"overriden_by_policy",
"parent_check",
"name",
"more_info",
"last_run",
"email_alert",
"text_alert",
"fails_b4_alert",
"fail_count",
"email_sent",
"text_sent",
"outage_history",
"extra_details",
"stdout",
"stderr",
"retcode",
"execution_time",
"svc_display_name",
"svc_policy_mode",
"created_by",
"created_time",
"modified_by",
"modified_time",
"history",
]
class CheckRunnerGetSerializerV2(serializers.ModelSerializer):
# for the windows __python__ agent
# only send data needed for agent to run a check
assigned_tasks = serializers.SerializerMethodField()
script = ScriptSerializer(read_only=True)
def get_assigned_tasks(self, obj):
if obj.assignedtask.exists():
tasks = obj.assignedtask.all()
return AssignedTaskCheckRunnerField(tasks, many=True).data
class Meta:
model = Check
exclude = [
"policy",
"managed_by_policy",
"overriden_by_policy",
"parent_check",
"name",
"more_info",
"last_run",
"email_alert",
"text_alert",
"fails_b4_alert",
"fail_count",
"email_sent",
"text_sent",
"outage_history",
"extra_details",
"stdout",
"stderr",
"retcode",
"execution_time",
"svc_display_name",
"svc_policy_mode",
"created_by",
"created_time",
"modified_by",
"modified_time",
"history",
]
class CheckRunnerGetSerializerV3(serializers.ModelSerializer):
# for the windows __golang__ agent
# only send data needed for agent to run a check
# the difference here is in the script serializer
# script checks no longer rely on salt and are executed directly by the go agent
assigned_tasks = serializers.SerializerMethodField()
script = ScriptCheckSerializer(read_only=True)
def get_assigned_tasks(self, obj):
if obj.assignedtask.exists():
tasks = obj.assignedtask.all()
return AssignedTaskCheckRunnerField(tasks, many=True).data
class Meta:
model = Check
exclude = [
"policy",
"managed_by_policy",
"overriden_by_policy",
"parent_check",
"name",
"more_info",
"last_run",
"email_alert",
"text_alert",
"fails_b4_alert",
"fail_count",
"email_sent",
"text_sent",
"outage_history",
"extra_details",
"stdout",
"stderr",
"retcode",
"execution_time",
"svc_display_name",
"svc_policy_mode",
"created_by",
"created_time",
"modified_by",
"modified_time",
"history",
]
class CheckResultsSerializer(serializers.ModelSerializer):
# used when patching results from the windows agent
# no validation needed
class Meta:
model = Check
fields = "__all__"
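# Usage sketch (added for illustration): the object-level validation in
# CheckSerializer runs when a view validates incoming data, e.g.
#
#   serializer = CheckSerializer(data=payload, context={"agent": agent})
#   serializer.is_valid(raise_exception=True)
#
# where `payload` and `agent` are placeholders supplied by the calling view; the
# context kwargs are the filter used for the duplicate disk-check lookup above.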
|
#! /usr/bin/env python
# This script is to read from a Active Directory Server
# to get sAMAccountName and objectGUID to transform them
# to SQL statements for a specific user table
import ldap
import getpass
import uuid
LDAPURL = "ldaps://ldapserver"
DOMAIN = "example.com"
# ask for login credentials
USER = raw_input("Username:")
PASS = getpass.getpass("Password for " + USER + ":")
USERNAME = USER + "@" + DOMAIN
l = ldap.initialize(LDAPURL)
try:
l.protocol_version = ldap.VERSION3
l.set_option(ldap.OPT_REFERRALS, 0)
bind = l.simple_bind_s(USERNAME, PASS)
base = "cn=Users"
for d in DOMAIN.split("."):
base += ",dc={}".format(d)
criteria = "(cn=*)"
attributes = ['sAMAccountName', 'ObjectGUID']
result = l.search_s(base, ldap.SCOPE_SUBTREE, criteria, attributes)
results = [entry for dn, entry in result if isinstance(entry, dict)]
finally:
l.unbind()
for item in results:
uobjectGUID = uuid.UUID(bytes=item['objectGUID'][0])
objectGUID = str(uobjectGUID).upper()
objectGUIDplain = objectGUID.translate(None, '-')
sAMAccountName = item.get('sAMAccountName')
print "update users set id_extern = '" + objectGUIDplain + "' where login = " + str(sAMAccountName).strip('[]') + ";"
|
from talon.api import lib
from talon.voice import Context, ContextGroup, talon
from talon.engine import engine
from talon import app
def set_enabled(enable):
if enable:
talon.enable()
app.icon_color(0, 0.7, 0, 1)
else:
talon.disable()
app.icon_color(1, 0, 0, 1)
lib.menu_check(b'!Enable Speech Recognition', enable)
def on_menu(item):
if item == '!Enable Speech Recognition':
set_enabled(not talon.enabled)
app.register('menu', on_menu)
set_enabled(talon.enabled)
sleep_group = ContextGroup('sleepy')
sleepy = Context('sleepy', group=sleep_group)
sleepy.keymap({
'talon sleep': lambda m: set_enabled(False),
'talon wake': lambda m: set_enabled(True),
'dragon mode': [lambda m: set_enabled(False), lambda m: engine.mimic('wake up'.split())],
'talon mode': [lambda m: set_enabled(True), lambda m: engine.mimic('go to sleep'.split())],
})
sleep_group.load()
|
#!/usr/bin/env python
u"""
convert_calendar_decimal.py
Written by Tyler Sutterley (07/2020)
Converts from calendar date into decimal years
Converts year, month (day, hour, minute, second)
into decimal years taking into account leap years
CALLING SEQUENCE:
t_date = convert_calendar_decimal(year, month)
t_date = convert_calendar_decimal(year, month, DAY=day,
HOUR=hour, MINUTE=minute, SECOND=second)
INPUTS:
year: can be a single value or an array of dates
month: can be a single value or an array of dates
OPTION:
DAY: can be a single value or an array of dates
HOUR: can be a single value or an array of dates
MINUTE: can be a single value or an array of dates
SECOND: can be a single value or an array of dates
DofY: day of the year (January 1 = 1)
can be a single value or an array of dates
OUTPUTS:
t_date: date in decimal format (years)
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python (https://numpy.org)
NOTES:
Dershowitz, N. and E.M. Reingold. 2008. Calendrical Calculations.
Cambridge: Cambridge University Press.
UPDATE HISTORY:
Updated 07/2020: added function docstrings
Updated 05/2015: updated comments and minor update to nonzero statement
Updated 05/2014: added option for day of year
Updated 04/2014: new code from convert_J2000.py
Updated 04/2014: updated comments and improved rules
for leap years to include mod 100 and mod 400
Written 04/2014
"""
import numpy as np
def convert_calendar_decimal(year, month, DAY=None, HOUR=None, MINUTE=None,
SECOND=None, DofY=None):
"""
Converts from calendar date into decimal years taking into
account leap years
Arguments
---------
year: calendar year
month: calendar month
Keyword arguments
-----------------
DAY: day of the month
HOUR: hour of the day
MINUTE: minute of the hour
SECOND: second of the minute
DofY: day of the year (January 1 = 1)
Returns
-------
t_date: date in decimal format
"""
#-- number of dates
n_dates = len(np.atleast_1d(year))
#-- create arrays for calendar date variables
cal_date = {}
cal_date['year'] = np.zeros((n_dates))
cal_date['month'] = np.zeros((n_dates))
cal_date['day'] = np.zeros((n_dates))
cal_date['hour'] = np.zeros((n_dates))
cal_date['minute'] = np.zeros((n_dates))
cal_date['second'] = np.zeros((n_dates))
#-- day of the year
cal_date['DofY'] = np.zeros((n_dates))
#-- remove singleton dimensions and use year and month
cal_date['year'][:] = np.squeeze(year)
cal_date['month'][:] = np.squeeze(month)
#-- create output date variable
t_date = np.zeros((n_dates))
#-- days per month in a leap and a standard year
#-- only difference is February (29 vs. 28)
    dpm_leap=np.array([31,29,31,30,31,30,31,31,30,31,30,31], dtype=np.float64)
    dpm_stnd=np.array([31,28,31,30,31,30,31,31,30,31,30,31], dtype=np.float64)
#-- Rules in the Gregorian calendar for a year to be a leap year:
#-- divisible by 4, but not by 100 unless divisible by 400
#-- True length of the year is about 365.2422 days
#-- Adding a leap day every four years ==> average 365.25
#-- Subtracting a leap year every 100 years ==> average 365.24
#-- Adding a leap year back every 400 years ==> average 365.2425
#-- Subtracting a leap year every 4000 years ==> average 365.24225
m4 = (cal_date['year'] % 4)
m100 = (cal_date['year'] % 100)
m400 = (cal_date['year'] % 400)
m4000 = (cal_date['year'] % 4000)
#-- find indices for standard years and leap years using criteria
leap, = np.nonzero((m4 == 0) & (m100 != 0) | (m400 == 0) & (m4000 != 0))
stnd, = np.nonzero((m4 != 0) | (m100 == 0) & (m400 != 0) | (m4000 == 0))
#-- calculate the day of the year
if DofY is not None:
#-- if entered directly as an input
#-- remove 1 so day 1 (Jan 1st) = 0.0 in decimal format
cal_date['DofY'][:] = np.squeeze(DofY)-1
else:
#-- use calendar month and day of the month to calculate day of the year
#-- month minus 1: January = 0, February = 1, etc (indice of month)
#-- in decimal form: January = 0.0
        month_m1 = np.array(cal_date['month'],dtype=np.int64) - 1
#-- day of month
if DAY is not None:
#-- remove 1 so 1st day of month = 0.0 in decimal format
cal_date['day'][:] = np.squeeze(DAY)-1.0
else:
#-- if not entering days as an input
#-- will use the mid-month value
cal_date['day'][leap] = dpm_leap[month_m1[leap]]/2.0
cal_date['day'][stnd] = dpm_stnd[month_m1[stnd]]/2.0
#-- create matrix with the lower half = 1
#-- this matrix will be used in a matrix multiplication
#-- to calculate the total number of days for prior months
#-- the -1 will make the diagonal == 0
#-- i.e. first row == all zeros and the
#-- last row == ones for all but the last element
mon_mat=np.tri(12,12,-1)
#-- using a dot product to calculate total number of days
#-- for the months before the input date
#-- basically is sum(i*dpm)
#-- where i is 1 for all months < the month of interest
#-- and i is 0 for all months >= the month of interest
#-- month of interest is zero as the exact days will be
#-- used to calculate the date
#-- calculate the day of the year for leap and standard
#-- use total days of all months before date
#-- and add number of days before date in month
cal_date['DofY'][stnd] = cal_date['day'][stnd] + \
np.dot(mon_mat[month_m1[stnd],:],dpm_stnd)
cal_date['DofY'][leap] = cal_date['day'][leap] + \
np.dot(mon_mat[month_m1[leap],:],dpm_leap)
#-- hour of day (else is zero)
if HOUR is not None:
cal_date['hour'][:] = np.squeeze(HOUR)
#-- minute of hour (else is zero)
if MINUTE is not None:
cal_date['minute'][:] = np.squeeze(MINUTE)
#-- second in minute (else is zero)
if SECOND is not None:
cal_date['second'][:] = np.squeeze(SECOND)
#-- calculate decimal date
#-- convert hours, minutes and seconds into days
#-- convert calculated fractional days into decimal fractions of the year
#-- Leap years
t_date[leap] = cal_date['year'][leap] + \
(cal_date['DofY'][leap] + cal_date['hour'][leap]/24. + \
cal_date['minute'][leap]/1440. + \
cal_date['second'][leap]/86400.)/np.sum(dpm_leap)
#-- Standard years
t_date[stnd] = cal_date['year'][stnd] + \
(cal_date['DofY'][stnd] + cal_date['hour'][stnd]/24. + \
cal_date['minute'][stnd]/1440. + \
cal_date['second'][stnd]/86400.)/np.sum(dpm_stnd)
return t_date
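#-- Illustrative example (added, not part of the original script):
#-- July 4, 2020 at 12:00 is day 186 of a leap year, so
#--   convert_calendar_decimal(2020, 7, DAY=4, HOUR=12)
#-- returns approximately 2020 + (185 + 0.5)/366 = 2020.5068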
|
# Bep Marketplace ELE
# Copyright (c) 2016-2021 Kolibri Solutions
# License: See LICENSE file or https://github.com/KolibriSolutions/BepMarketplace/blob/master/LICENSE
#
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('presentations', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='presentationset',
options={'ordering': ['DateTime']},
),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 23 14:48:36 2020
@author: arest
"""
import argparse,sys
import numpy as np
from jwst_SNR import jwst_SNRclass
if __name__ == '__main__':
parser = argparse.ArgumentParser(usage="create exposure time table for a given set of filters, mags, and target S/N")
parser.add_argument('SNR', type=float, help=('specify target SNR'))
parser.add_argument('-i','--instrument', default='nircam', choices=['nircam','miri','niriss'], help=('specify instrument (default=%(default)s)'))
parser.add_argument('--mode', default=None, choices=['imaging','sw_imaging','lw_imaging'], help=('specify mode. If None, then the default mode for a given instrument is chosen (default=%(default)s)'))
parser.add_argument('-f','--filters', default=['F200W'],nargs='+', help=('specify filters'))
parser.add_argument('-m','--magrange', nargs=3, type=float, default=[24,28,1], help=('specify the magnitude range magmin magmax dm (default=%(default)s)'))
parser.add_argument('-s','--save', nargs='*', type=str, default=None, help=('save the table. If no filename specified, then SNR_<exptime>sec.txt is used (default=%(default)s)'))
parser.add_argument('--bkg_target', type=str, default='CDF-S', help=('specify the background target (default=%(default)s)'))
parser.add_argument('--bkg_targetposition', nargs='+', help=('specify the background position in RA and Dec, overwriting --bkg_target. optional add name of position (default=%(default)s)'))
parser.add_argument('--bkg_percentile', type=float, default=50.0, help=('specify the background percentile at the given position (default=%(default)s)'))
    parser.add_argument('--bkg_lam4percentile', type=float, default=4.5, help=('specify the wavelength in micron at which the background percentile is determined (default=%(default)s)'))
parser.add_argument('--bkg_lam', type=float, default=4.5, help=('specify the wavelength passed to pandeia jwst_backgrounds.background routine (default=%(default)s)'))
parser.add_argument('--bkg_thresh', type=float, default=1.1, help=('specify the threshold passed to pandeia jwst_backgrounds.background routine (default=%(default)s)'))
parser.add_argument('-v','--verbose', default=0, action='count')
parser.add_argument('--SNR_tolerance_in_percent', type=float, default=10.0, help=('specify the tolerance in target S/N (default=%(default)s)'))
parser.add_argument('--saveSNR', default=False, action='store_true', help=('Save the S/N as well in the table'))
args = parser.parse_args()
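    # Example invocation (added for illustration; the script filename is a
    # placeholder for whatever this file is saved as):
    #   python jwst_texp_table.py 10.0 -i nircam -f F150W F200W -m 24 28 1 --save
    # computes exposure times that reach S/N=10 for the given filters and magnitudes.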
    if args.mode is not None:
        mode = args.mode
    elif args.instrument == 'nircam':
        mode = 'sw_imaging'
    else:
        mode = 'imaging'
# initialize with instrument and mode
jwst_SNR=jwst_SNRclass(instrument=args.instrument,mode=mode)
jwst_SNR.verbose=args.verbose
# set the background
# Note: targetpos overwrites target.
jwst_SNR.set_background4jwst(args.bkg_percentile,
lam=args.bkg_lam,thresh=args.bkg_thresh,
lam4percentile=args.bkg_lam4percentile,
target=args.bkg_target,targetpos=args.bkg_targetposition)
filters = args.filters
magrange = np.arange(args.magrange[0],args.magrange[1],args.magrange[2])
# exposure time panda table is in jwst_SNR.texp.t
jwst_SNR.Imaging_texp_table(filters,magrange,args.SNR,
SNR_tolerance_in_percent=args.SNR_tolerance_in_percent,
saveSNRflag=args.saveSNR)
# save the file if wanted
    if args.save is not None:
# if verbose, also write it to screen
if jwst_SNR.verbose: jwst_SNR.texp.write(formatters=jwst_SNR.formatters4texptable)
# get the filename
if args.save ==[]:
filename = 'texp_SNR%.0f.txt' % (args.SNR)
else:
filename = args.save[0]
# save the table
print('Saving table into %s' % filename)
jwst_SNR.texp.write(filename,formatters=jwst_SNR.formatters4texptable)
else:
# if not saved, write it to console
jwst_SNR.texp.write(formatters=jwst_SNR.formatters4texptable) |
# http://www.codewars.com/kata/55192f4ecd82ff826900089e/
def divide(weight):
return weight > 2 and weight % 2 == 0
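# Illustrative checks (added, not part of the kata solution): the watermelon can
# be split into two even parts only when its weight is even and greater than 2.
assert divide(4)
assert not divide(2)   # 2 can only be split as 1 + 1, both odd
assert not divide(7)   # odd weight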
|
import numpy as np
from kb_learning.envs import ObjectEnv
from kb_learning.tools import rot_matrix, compute_robust_mean_swarm_position
from gym import spaces
class ObjectAbsoluteEnv(ObjectEnv):
_observe_objects = True
def __init__(self,
num_kilobots=None,
object_shape='quad',
object_width=.15,
object_height=.15,
object_init=None,
light_type='circular',
light_radius=.2,
done_after_steps=350):
super(ObjectAbsoluteEnv, self).__init__(num_kilobots=num_kilobots,
object_shape=object_shape,
object_width=object_width,
object_height=object_height,
object_init=object_init,
light_type=light_type,
light_radius=light_radius)
self._desired_pose = None
self._done_after_steps = done_after_steps
@property
def state_space(self):
_state_space_low = self.kilobots_space.low
_state_space_high = self.kilobots_space.high
if self.light_state_space:
_state_space_low = np.concatenate((_state_space_low, self.light_state_space.low))
_state_space_high = np.concatenate((_state_space_high, self.light_state_space.high))
if self.object_state_space:
_state_space_low = np.concatenate((_state_space_low, self.object_state_space.low))
_state_space_high = np.concatenate((_state_space_high, self.object_state_space.high))
return spaces.Box(low=_state_space_low, high=_state_space_high, dtype=np.float32)
@property
def observation_space(self):
_observation_spaces_low = self.kilobots_space.low
_observation_spaces_high = self.kilobots_space.high
if self.light_observation_space:
_observation_spaces_low = np.concatenate((_observation_spaces_low, self.light_observation_space.low))
_observation_spaces_high = np.concatenate((_observation_spaces_high, self.light_observation_space.high))
if self.object_observation_space:
# the objects are observed as x, y, sin(theta), cos(theta)
objects_low = np.array([self.world_x_range[0], self.world_y_range[0], -1., -1.] * len(self._objects))
objects_high = np.array([self.world_x_range[1], self.world_y_range[1], 1., 1.] * len(self._objects))
_observation_spaces_low = np.concatenate((_observation_spaces_low, objects_low))
_observation_spaces_high = np.concatenate((_observation_spaces_high, objects_high))
# # for the desired pose
# _observation_spaces_low = np.concatenate((_observation_spaces_low, self._object_observation_space.low))
# _observation_spaces_high = np.concatenate((_observation_spaces_high, self._object_observation_space.high))
return spaces.Box(low=_observation_spaces_low, high=_observation_spaces_high,
dtype=np.float32)
def get_desired_pose(self):
return self._desired_pose
def get_state(self):
return np.concatenate(tuple(k.get_position() for k in self._kilobots)
+ tuple(o.get_pose() for o in self._objects)
+ (self._light.get_state(),))
def get_info(self, state, action):
return {'desired_pose': self._desired_pose}
def get_observation(self):
if self._light_type in ['circular', 'dual']:
_light_position = (self._light.get_state(),)
else:
_light_position = tuple()
_object_orientation = self._objects[0].get_orientation()
_object_sin_cos = ((np.sin(_object_orientation), np.cos(_object_orientation)),)
return np.concatenate(tuple(k.get_position() for k in self._kilobots)
# + (self._objects[0].get_pose(),)
+ (self._objects[0].get_position(),)
+ _object_sin_cos
+ _light_position
# + (self._object_desired,)
)
def get_reward(self, old_state, action, new_state):
# obj pose in frame of desired pose
old_obj_pose = old_state[-5:-2] - self._desired_pose
new_obj_pose = new_state[-5:-2] - self._desired_pose
# swarm_pos = old_state[:-5].reshape(-1, 2)
#
# reward_swarm = -np.sum(np.linalg.norm(swarm_pos - old_obj_pose[:2], axis=1)) / swarm_pos.shape[0]
#
# reward_obj = -np.linalg.norm(old_obj_pose[:2]) / 2 - np.abs(np.sin(old_obj_pose[2] / 2)) / 2
#
# reward = reward_swarm + np.exp(reward_swarm) * reward_obj
# THIS WAS WORKING
reward = .0
# compute polar coordinates of object positions
r_old = np.linalg.norm(old_obj_pose[:2])
r_new = np.linalg.norm(new_obj_pose[:2])
reward += 10 * np.exp(-(new_obj_pose[:2] ** 2).sum() / 2) * (r_old - r_new)
# compute differences between absolute orientations
reward += 1 * np.exp(-(new_obj_pose[:2] ** 2).sum() / .05) * (np.abs(old_obj_pose[2]) - np.abs(new_obj_pose[2]))
return reward
def has_finished(self, state, action):
# has finished if object reached goal pose with certain ε
obj_pose = state[-5:-2]
dist_obj_pose = self._desired_pose - obj_pose
dist_obj_pose[2] = np.abs(np.sin(dist_obj_pose[2] / 2))
        sq_error_norm = dist_obj_pose.dot(dist_obj_pose)  # squared error norm
        # print('sq_error_norm: {}'.format(sq_error_norm))
        if sq_error_norm < .005:
return True
if self._sim_steps >= self._done_after_steps * self._steps_per_action:
# print('maximum number of sim steps.')
return True
return False
def _get_init_object_pose(self):
# sample initial position as polar coordinates
        # reference length: use the larger of the world's width and height
min_extend = max(self.world_size)
# sample the radius between [min_ext/6, 2min_ext/6]
radius = np.random.rand() * min_extend / 6 + min_extend / 6
# sample the angle uniformly from [-π, +π]
angle = np.random.rand() * np.pi * 2 - np.pi
_object_init_position = np.array([np.cos(angle), np.sin(angle)]) * radius
# sample the initial orientation uniformly from [-π, +π]
_object_init_orientation = np.random.rand() * np.pi * 2 - np.pi
self._object_init = np.concatenate((_object_init_position, [_object_init_orientation]))
return self._object_init
def _get_desired_object_pose(self):
# # sample the desired position uniformly between [-w/2+ow, w/2-ow] and [-h/2+oh, h/2-oh] (w = width, h = height)
# _object_desired_position = np.random.rand(2) * self.world_size + np.array(self.world_bounds[0])
# _object_size = np.array([self._object_width, self._object_height])
# _object_desired_position = np.maximum(_object_desired_position, self.world_bounds[0] + _object_size)
# _object_desired_position = np.minimum(_object_desired_position, self.world_bounds[1] - _object_size)
# # sample the desired orientation uniformly from [-π, +π]
# _object_desired_orientation = np.random.rand() * 2 * np.pi - np.pi
# self._object_desired = np.concatenate((_object_desired_position, [_object_desired_orientation]))
return np.zeros(3)
def _configure_environment(self):
self._desired_pose = self._get_desired_object_pose()
super(ObjectAbsoluteEnv, self)._configure_environment()
def _draw_on_table(self, screen):
# draw the desired pose as grey square
vertices = np.array([[1, 1], [1, -1], [-1, -1], [-1, 1]], dtype=np.float64)
vertices *= np.array([[self._object_width, self._object_height]]) / 2.
# rotate vertices
vertices = rot_matrix(self._desired_pose[2]).dot(vertices.T).T
# translate vertices
vertices += self._desired_pose[None, :2]
screen.draw_polygon(vertices=vertices, color=(200, 200, 200), filled=True, width=.005)
screen.draw_polygon(vertices=vertices[0:3], color=(220, 200, 200), width=.005)
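# Usage sketch (added for illustration, not part of the original module): the
# environment follows the usual gym interface inherited from ObjectEnv, so a
# rollout looks roughly like
#
#   env = ObjectAbsoluteEnv(num_kilobots=10, object_shape='quad')
#   observation = env.reset()
#   observation, reward, done, info = env.step(env.action_space.sample())
#
# where reset/step/action_space come from the base classes.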
|
"""
__author__: Abhishek Thakur
"""
import datetime
import pytorch_lightning as pl
import segmentation_models_pytorch as smp
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.nn.modules import BCEWithLogitsLoss
from tqdm import tqdm
from wtfml.engine.image.embedder.utils import (
get_mean_prediction_from_mask,
get_recall_from_mask,
hflip_batch,
l2_norm,
)
class SegmentationPLEngine(pl.LightningModule):
def __init__(
self,
face_encoder,
location_decoder,
is_train_encoder=False,
lr=0.001,
image_loss_method="MSE",
class_loss_fn=nn.BCEWithLogitsLoss(),
lamb=1,
F_score_metrix=smp.utils.metrics.Fscore(threshold=0.5),
normalize = False,
):
super(SegmentationPLEngine, self).__init__()
self.face_encoder = face_encoder
if not is_train_encoder:
for name, module in self.face_encoder._modules.items():
                module.requires_grad = False  # freeze all encoder layers
self.location_decoder = location_decoder
self.scaler = None
if image_loss_method == "MSE":
self.image_loss_fn = nn.MSELoss()
elif image_loss_method == "BCE":
self.image_loss_fn = nn.BCEWithLogitsLoss()
elif image_loss_method == "L1":
self.image_loss_fn = nn.L1Loss()
else:
raise ValueError("image_loss_method should be MSE or L1 or BCE")
self.class_loss_fn = class_loss_fn
self.F_score_metrix = F_score_metrix
self.lamb = lamb
self.lr = lr
self.normalize = normalize
def forward(self, x):
self.face_encoder.eval()
with torch.no_grad():
fliped = hflip_batch(x)
emb_batch = self.face_encoder(x) + self.face_encoder(fliped)
if self.normalize:
representations = l2_norm(emb_batch)
if not self.normalize:
representations = emb_batch /2
x = self.location_decoder(representations)
return x
def training_step(self, batch, batch_idx):
# REQUIRED
image, mask, target = batch
# target = main_target + sub_target * self.sub_adjustment
pred_batch_train = self.forward(image)
train_loss = self.image_loss_fn(pred_batch_train, mask)
F_score_image = self.F_score_metrix(pred_batch_train, mask)
# pred_class, _ = get_mean_prediction_from_mask(pred_batch_train)
# class_loss = self.class_loss_fn(pred_class, target)
self.log(
"train_batch_F_score_image",
F_score_image,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"train_batch_loss",
train_loss,
prog_bar=True,
on_epoch=True,
on_step=True,
logger=True,
)
return {"loss": train_loss}
def validation_step(self, batch, batch_idx):
image, mask, target = batch
# target = main_target + sub_target * self.sub_adjustment
pred_batch_valid = self.forward(image)
recall_list_just_list = get_recall_from_mask(pred_batch_valid, target, threshold=0.5, radius_intention=1, metrix="max")
recall_list_wide_list = get_recall_from_mask(pred_batch_valid, target, threshold=0.5, radius_intention=2, metrix="max")
if self.current_epoch < 35:
recall_list_just = 0
loss = np.inf
recall_list_wide = 0
acc_just = 0
acc_wide = 0
else:
recall_list_just = sum(recall_list_just_list) / len(recall_list_just_list)
loss = self.image_loss_fn(pred_batch_valid, mask)
recall_list_wide = sum(recall_list_wide_list) / len(recall_list_wide_list)
acc_just = len(np.where(np.array(recall_list_just_list) > 0)[0]) / len(recall_list_just_list)
acc_wide = len(np.where(np.array(recall_list_wide_list) > 0)[0]) / len(recall_list_wide_list)
self.log(
"recall_list_just",
recall_list_just,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"valid_loss",
loss,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"recall_list_wide",
recall_list_wide,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"acc_wide",
acc_wide,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"acc_just",
acc_just,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
return {
"val_loss": loss,
# "acc": acc,
}
def configure_optimizers(self):
# REQUIRED
opt = optim.Adam(
self.location_decoder.parameters(),
lr=self.lr,
)
sch = optim.lr_scheduler.CosineAnnealingLR(opt, T_max= 20)
return [opt], [sch]
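# Usage sketch (added for illustration, not part of the original module): the
# engine wraps a frozen face encoder and a trainable decoder and is trained like
# any other LightningModule. `face_encoder`, `location_decoder` and the
# dataloaders below are placeholders supplied elsewhere in the project.
#
#   engine = SegmentationPLEngine(face_encoder, location_decoder, lr=1e-3)
#   trainer = pl.Trainer(max_epochs=50, gpus=1)
#   trainer.fit(engine, train_dataloader, valid_dataloader)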
class SegmentationPLEngineWithEye(pl.LightningModule):
def __init__(
self,
face_encoder,
location_decoder,
eye_encoder,
is_train_encoder=False,
lr=0.001,
image_loss_method="MSE",
class_loss_fn=nn.BCEWithLogitsLoss(),
lamb=1,
F_score_metrix=smp.utils.metrics.Fscore(threshold=0.5),
normalize = False,
):
super(SegmentationPLEngineWithEye, self).__init__()
self.face_encoder = face_encoder
if not is_train_encoder:
for name, module in self.face_encoder._modules.items():
                module.requires_grad = False  # freeze all encoder layers
self.location_decoder = location_decoder
self.scaler = None
if image_loss_method == "MSE":
self.image_loss_fn = nn.MSELoss()
elif image_loss_method == "BCE":
self.image_loss_fn = nn.BCEWithLogitsLoss()
elif image_loss_method == "L1":
self.image_loss_fn = nn.L1Loss()
else:
raise ValueError("image_loss_method should be MSE or L1 or BCE")
self.eye_encoder = eye_encoder
self.class_loss_fn = class_loss_fn
self.F_score_metrix = F_score_metrix
self.lamb = lamb
self.lr = lr
self.normalize = normalize
def forward(self, face_image : torch.Tensor, right_eye_image : torch.Tensor, left_eye_image : torch.Tensor):
self.face_encoder.eval()
with torch.no_grad():
fliped = hflip_batch(face_image,)
emb_batch = self.face_encoder(face_image,) + self.face_encoder(fliped)
if self.normalize:
representations = l2_norm(emb_batch)
if not self.normalize:
representations = emb_batch /2
right_vector = self.eye_encoder(right_eye_image)
left_vector = self.eye_encoder(left_eye_image)
eye_vector = (right_vector + left_vector)/2
representations = torch.cat([representations, eye_vector], dim = 1)
x = self.location_decoder(representations)
return x
def training_step(self, batch, batch_idx):
# REQUIRED
image,right_eye_image, left_eye_image, mask, target = batch
pred_batch_train = self.forward(image, right_eye_image, left_eye_image)
# target = main_target + sub_target * self.sub_adjustment
train_loss = self.image_loss_fn(pred_batch_train, mask)
F_score_image = self.F_score_metrix(pred_batch_train, mask)
self.log(
"train_batch_F_score_image",
F_score_image,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"train_batch_loss",
train_loss,
prog_bar=True,
on_epoch=True,
on_step=True,
logger=True,
)
return {"loss": train_loss}
def validation_step(self, batch, batch_idx):
image,right_eye_image, left_eye_image, mask, target = batch
pred_batch_valid = self.forward(image, right_eye_image, left_eye_image)
recall_list_just_list = get_recall_from_mask(pred_batch_valid, target, threshold=0.5, radius_intention=1, metrix="max")
recall_list_wide_list = get_recall_from_mask(pred_batch_valid, target, threshold=0.5, radius_intention=2, metrix="max")
if self.current_epoch < 35:
recall_list_just = 0
loss = np.inf
recall_list_wide = 0
acc_just = 0
acc_wide = 0
else:
recall_list_just = sum(recall_list_just_list) / len(recall_list_just_list)
loss = self.image_loss_fn(pred_batch_valid, mask)
recall_list_wide = sum(recall_list_wide_list) / len(recall_list_wide_list)
acc_just = len(np.where(np.array(recall_list_just_list) > 0)[0]) / len(recall_list_just_list)
acc_wide = len(np.where(np.array(recall_list_wide_list) > 0)[0]) / len(recall_list_wide_list)
self.log(
"recall_list_just",
recall_list_just,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"valid_loss",
loss,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"recall_list_wide",
recall_list_wide,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"acc_wide",
acc_wide,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"acc_just",
acc_just,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
return {
"val_loss": loss,
# "acc": acc,
}
def configure_optimizers(self):
# REQUIRED
# opt_generator = optim.Adam(
# self.location_decoder.parameters(),
# lr=self.lr,
# )
# opt_eye_image = optim.Adam(
# self.eye_encoder.parameters(),
# lr=self.lr,
# )
opt = optim.Adam(
list(self.location_decoder.parameters()) + list(self.eye_encoder.parameters()),
lr=self.lr,
)
# sch_generator = optim.lr_scheduler.CosineAnnealingLR(opt_generator, T_max= 20)
# sch_eye_image = optim.lr_scheduler.CosineAnnealingLR(opt_eye_image, T_max= 20)
sch = optim.lr_scheduler.CosineAnnealingLR(opt, T_max= 20)
return [opt], [sch]
class SegmentationPLEngineWithEyeAndLandmark(pl.LightningModule):
def __init__(
self,
face_encoder,
location_decoder,
eye_encoder,
is_train_encoder=False,
lr=0.001,
image_loss_method="MSE",
class_loss_fn=nn.BCEWithLogitsLoss(),
lamb=1,
F_score_metrix=smp.utils.metrics.Fscore(threshold=0.5),
normalize = True,
):
super(SegmentationPLEngineWithEyeAndLandmark, self).__init__()
self.face_encoder = face_encoder
if not is_train_encoder:
for name, module in self.face_encoder._modules.items():
                module.requires_grad = False  # freeze all encoder layers
self.location_decoder = location_decoder
self.scaler = None
if image_loss_method == "MSE":
self.image_loss_fn = nn.MSELoss()
elif image_loss_method == "BCE":
self.image_loss_fn = nn.BCEWithLogitsLoss()
elif image_loss_method == "L1":
self.image_loss_fn = nn.L1Loss()
else:
raise ValueError("image_loss_method should be MSE or L1 or BCE")
self.eye_encoder = eye_encoder
self.class_loss_fn = class_loss_fn
self.F_score_metrix = F_score_metrix
self.lamb = lamb
self.lr = lr
self.normalize = normalize
def forward(self, face_image : torch.Tensor, right_eye_image : torch.Tensor, left_eye_image : torch.Tensor, landmark_point:torch.Tensor):
self.face_encoder.eval()
with torch.no_grad():
fliped = hflip_batch(face_image,)
emb_batch = self.face_encoder(face_image,) + self.face_encoder(fliped)
if self.normalize:
representations = l2_norm(emb_batch)
if not self.normalize:
representations = emb_batch /2
right_vector = self.eye_encoder(right_eye_image)
left_vector = self.eye_encoder(left_eye_image)
eye_vector = (right_vector + left_vector)/2
representations = torch.cat([representations, eye_vector, landmark_point], dim = 1)
x = self.location_decoder(representations)
return x
def training_step(self, batch, batch_idx):
# REQUIRED
image,right_eye_image, left_eye_image, landmark_point,mask, target = batch
pred_batch_train = self.forward(image, right_eye_image, left_eye_image, landmark_point)
# target = main_target + sub_target * self.sub_adjustment
train_loss = self.image_loss_fn(pred_batch_train, mask)
F_score_image = self.F_score_metrix(pred_batch_train, mask)
self.log(
"train_batch_F_score_image",
F_score_image,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"train_batch_loss",
train_loss,
prog_bar=True,
on_epoch=True,
on_step=True,
logger=True,
)
return {"loss": train_loss}
def validation_step(self, batch, batch_idx):
image,right_eye_image, left_eye_image,landmark_point, mask, target = batch
pred_batch_valid = self.forward(image, right_eye_image, left_eye_image, landmark_point)
recall_list_just_list = get_recall_from_mask(pred_batch_valid, target, threshold=0.5, radius_intention=1, metrix="max")
recall_list_wide_list = get_recall_from_mask(pred_batch_valid, target, threshold=0.5, radius_intention=2, metrix="max")
if self.current_epoch < 35:
recall_list_just = 0
loss = np.inf
recall_list_wide = 0
acc_just = 0
acc_wide = 0
else:
recall_list_just = sum(recall_list_just_list) / len(recall_list_just_list)
loss = self.image_loss_fn(pred_batch_valid, mask)
recall_list_wide = sum(recall_list_wide_list) / len(recall_list_wide_list)
acc_just = len(np.where(np.array(recall_list_just_list) > 0)[0]) / len(recall_list_just_list)
acc_wide = len(np.where(np.array(recall_list_wide_list) > 0)[0]) / len(recall_list_wide_list)
self.log(
"recall_list_just",
recall_list_just,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"valid_loss",
loss,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"recall_list_wide",
recall_list_wide,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"acc_wide",
acc_wide,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
self.log(
"acc_just",
acc_just,
prog_bar=True,
logger=True,
on_epoch=True,
on_step=False,
)
return {
"val_loss": loss,
# "acc": acc,
}
def configure_optimizers(self):
# REQUIRED
# opt_generator = optim.Adam(
# self.location_decoder.parameters(),
# lr=self.lr,
# )
# opt_eye_image = optim.Adam(
# self.eye_encoder.parameters(),
# lr=self.lr,
# )
opt = optim.Adam(
list(self.location_decoder.parameters()) + list(self.eye_encoder.parameters()),
lr=self.lr,
)
# sch_generator = optim.lr_scheduler.CosineAnnealingLR(opt_generator, T_max= 20)
# sch_eye_image = optim.lr_scheduler.CosineAnnealingLR(opt_eye_image, T_max= 20)
sch = optim.lr_scheduler.CosineAnnealingLR(opt, T_max= 20)
return [opt], [sch] |
print("First Number:")
first_number = int(input())
print("Second Number:")
second_number = int(input())
total = first_number + second_number  # 'total' avoids shadowing the built-in sum()
print("Sum: " + str(total)) |
class BookStorage:
    def __init__(self, data=None):
        # avoid sharing one mutable default list across instances
        self.data = data if data is not None else []
def add(self, book):
self.data.append(book)
return True
def searchById(self, id):
return self.data[id]
def giveAll(self):
return self.data |
import attr
import copy
import six
def interpretBoolean(s):
"""
Interpret string as a boolean value.
"0" and "False" are interpreted as False. All other strings result in
True. Technically, only "0" and "1" values should be seen in UCI files.
However, because of some string conversions in Python, we may encounter
"False".
"""
if s in ["0", "False"]:
return False
else:
return True
class ConfigObject(object):
nextId = 0
typename = None
options = []
    # Used to resolve conflicts with old-style section naming. maskable=True
# means a section can be replaced when we are gathering the list of
# ConfigObject subclasses. We set maskable=False on network:interface so
# that it receives precedence over qos:interface and old code continues to
# work.
maskable = True
# Priorities to ensure proper interleaving of commands.
# Revert commands should use negated value.
PRIO_CREATE_IFACE = 20
PRIO_CREATE_VLAN = 25
PRIO_CONFIG_IFACE = 30
PRIO_IPTABLES_TOP = 35
PRIO_IPTABLES_ZONE = 36
PRIO_IPTABLES_RULE = 37
PRIO_CREATE_QDISC = 40
PRIO_CONFIG_QDISC = 45
PRIO_START_DAEMON = 60
def __init__(self, name=None):
self.id = ConfigObject.nextId
ConfigObject.nextId += 1
self.epoch = 0
self.source = None
self.name = name
self.comment = None
self.parents = set()
self.dependents = set()
# name is the externally visible name, e.g. "lan" in
# "config interface lan". name is None for anonymous sections,
# e.g. "config wifi-iface".
#
# internalName is used for identifying config objects and must
# always be defined. For anonymous sections, we generate a unique
# name from id.
if name is None:
self.internalName = "s{:08x}".format(self.id)
else:
self.internalName = name
# Will be a running list of commands executed by request of this config
# object.
self.executed = list()
for option in self.options:
setattr(self, option.name, option.default)
def __hash__(self):
return hash(self.getTypeAndName())
def __lt__(self, other):
"""
Compare ConfigObject instances.
Currently, it arbitrarily sorts based on the string form of the config
section type and name (e.g. "config interface wlan0").
"""
return str(self) < str(other)
def __str__(self):
if self.name is None:
return "config {}".format(self.typename)
else:
return "config {} {}".format(self.typename, self.name)
def setup(self):
"""
Finish object initialization.
        This is called after the config object is initialized with all of its
        option values filled in. Override to do some preparation work before
we start generating commands.
"""
pass
def copy(self):
"""
Make a copy of the config object.
The copy will receive the same name and option values.
"""
other = self.__class__()
other.source = self.source
other.name = self.name
other.comment = self.comment
other.parents = self.parents.copy()
other.dependents = self.dependents.copy()
for option in self.options:
# We use copy here because it works with both the str- and
# list-typed values. Any lists are lists of strings, so
# shallow-copy here is fine.
copied = copy.copy(getattr(self, option.name))
setattr(other, option.name, copied)
# We should call setup on a new config object after all of the option
# values are filled in.
other.setup()
return other
def dump(self):
"""
Return full configuration section as a string.
"""
result = "# internal id: s{:08x}\n".format(self.id)
if self.name is None:
result += "config {}".format(self.typename)
else:
result += "config {} {}".format(self.typename, self.name)
if self.comment is not None:
result += " #" + self.comment
result += "\n"
for opdef in self.options:
value = getattr(self, opdef.name)
if value is None:
continue
elif opdef.type == bool:
result += "\toption {} '{}'\n".format(opdef.name, 1 * value)
elif opdef.type == list:
for v in value:
result += "\tlist {} '{}'\n".format(opdef.name, v)
else:
result += "\toption {} '{}'\n".format(opdef.name, value)
return result
def getName(self):
"""
Return section name.
Subclasses that do not have names (anonymous sections) should override
this to return some other unique identifier such as an interface name.
"""
return self.internalName
def getTypeAndName(self):
"""
Return tuple (section module, section type, section name).
"""
return (self.getModule(), self.typename, self.getName())
def lookup(self, allConfigs, sectionModule, sectionType, sectionName, addDependent=True):
"""
Look up a section by type and name.
If addDependent is True (default), the current object will be added as
a dependent of the found section.
Will raise an exception if the section is not found.
"""
config = allConfigs[(sectionModule, sectionType, sectionName)]
if addDependent:
self.parents.add(config)
config.dependents.add(self)
return config
def removeFromParents(self):
"""
Remove this section from being tracked by its parents.
Call this before discarding a configuration section so that later on,
if the parent is updated, it doesn't try to update non-existent
children.
"""
for parent in self.parents:
if self in parent.dependents:
parent.dependents.remove(self)
self.parents.clear()
def findByType(self, allConfigs, module, typename, where={}):
"""
Look up sections by type (generator).
where: filter the returned results by checking option values.
"""
for key in allConfigs.keys():
if key[0] == module and key[1] == typename:
# Skip this section if any of the option values do not match.
if any(getattr(allConfigs[key], op, None) != where[op] for op in where):
continue
yield allConfigs[key]
def optionsMatch(self, other):
"""
Test equality of config sections by comparing option values.
"""
if not isinstance(other, self.__class__):
return False
for opdef in self.options:
if getattr(self, opdef.name) != getattr(other, opdef.name):
return False
return True
#
# The following methods (apply, revert, updateApply, and updateRevert)
# are the most important for subclasses to override.
#
# These methods are expected to return a list of commands to make system
# changes when the section is loaded, deleted, or modified.
#
    # Here is how the methods are used:
#
# 1. When a section is loaded for the first time (new), the apply method
# is called to perform actions such as create an interface or firewall
# rule.
#
# 2. When a section is unloaded or detected as removed from a configuration
# file, the revert method is called to undo everything that was done by
# apply (e.g. delete an interface or rule).
#
# 3. When a section is reloaded (and has changed) or one or more of its
    # dependencies have changed, the two update methods are used.
# updateRevert selectively reverts actions that were done by apply; it can
# (and by default does) undo everything just as revert would. Then
# updateApply is called to apply any changes needed to bring the system to
# the new required state. A motivating example is in order.
#
# Suppose we have one or more APs running on a single WiFi device, and we
# loaded a new configuration that changes the channel. The default
# behavior would be to call revert, then call apply. Revert would not only
# bring down the hostapd instances but also destroy the AP-mode interfaces.
# The latter step, however, is not necessary to achieve a channel change,
# and if written carefully, updateRevert and updateApply can achieve the
# configuration change with less disruption.
#
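    #
    # Illustrative sketch of the lifecycle above (hypothetical subclass; the
    # option names, priorities, and Command payloads are placeholders, not
    # part of this module):
    #
    #   class ConfigInterface(ConfigObject):
    #       typename = "interface"
    #       options = [
    #           ConfigOption(name="ifname", required=True),
    #           ConfigOption(name="ipaddr"),
    #       ]
    #
    #       def apply(self, allConfigs):
    #           # create the interface and assign its address
    #           return [(10, Command(["ip", "link", "add", self.ifname]))]
    #
    #       def updateApply(self, new, allConfigs):
    #           # if only "ipaddr" changed, re-address the existing interface
    #           # instead of recreating it as revert followed by apply would
    #           return [(10, Command(["ip", "addr", "replace", new.ipaddr]))]
    #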
def apply(self, allConfigs):
"""
Return a list of commands to apply this configuration.
Most subclasses will need to implement this function.
Returns a list of (priority, Command) tuples.
"""
return []
def revert(self, allConfigs):
"""
Return a list of commands to revert this configuration.
Most subclasses will need to implement this function.
Returns a list of (priority, Command) tuples.
"""
return []
def updateApply(self, new, allConfigs):
"""
Return a list of commands to update to new configuration.
Implementing this is optional for subclasses. The default behavior is
to call apply.
Returns a list of (priority, Command) tuples.
"""
return new.apply(allConfigs)
def updateRevert(self, new, allConfigs):
"""
Return a list of commands to (partially) revert the configuration.
The implementation can be selective about what it reverts (e.g. do not
delete an interface if we are only updating its IP address). The
default behavior is to call revert.
Returns a list of (priority, Command) tuples.
"""
return self.revert(allConfigs)
@classmethod
def build(cls, manager, source, name, options, comment):
"""
Build a config object instance from the UCI section.
Arguments:
source -- file containing this configuration section
name -- name of the configuration section
If None, a unique name will be generated.
options -- dictionary of options loaded from the section
comment -- comment string or None
"""
obj = cls(name)
obj.manager = manager
obj.source = source
obj.comment = comment
for opdef in cls.options:
found = False
if opdef.type == list:
if opdef.name in options:
value = options[opdef.name]
if not isinstance(value, list):
# Sometimes we expect a list but get a single value instead.
# Example:
# ...
# option network 'lan'
# ...
# instead of
# ...
# list network 'lan'
# ...
value = [value]
found = True
elif opdef.type == bool:
if opdef.name in options:
value = interpretBoolean(options[opdef.name])
found = True
else:
if opdef.name in options:
value = opdef.type(options[opdef.name])
found = True
if not found:
if opdef.required:
raise Exception("Missing required option {} in {}:{}:{}".format(
opdef.name, source, cls.typename, name))
else:
value = opdef.default
setattr(obj, opdef.name, value)
# If this section specifies name as an option rather than in the header
# line, update the name that will be used for comparison.
if getattr(obj, "name", None) is not None and name is None:
obj.internalName = obj.name
obj.setup()
return obj
@classmethod
def getModule(cls):
"""
Get the module name (e.g. "dhcp", "wireless") for a ConfigObject class.
"""
parts = cls.__module__.split(".")
return parts[-1]
@staticmethod
def _assignPriority(config, assigned):
"""
Recursively assign priorities to config objects based on dependencies.
This is meant to be called by prioritizeConfigs.
"""
if config in assigned:
return assigned[config]
priority = 0
for parent in config.parents:
pprio = ConfigObject._assignPriority(parent, assigned)
if pprio >= priority:
priority = pprio + 1
assigned[config] = priority
return priority
@staticmethod
def prioritizeConfigs(configs, reverse=False):
"""
Assign priorities to config objects based on the dependency graph.
Priority zero is assigned to all configs with no dependencies.
priority(config1) > priority(config2) means config1 should be applied
later than config2, and config1 should be reverted earlier than
config2. For configs with the same priority value, it is presumed
that order does not matter.
If reverse is True, the priorities are made negative so that traversing
in increasing order gives the proper order for reverting.
Returns a list of tuples (priority, config). This format is suitable
for heapq.
"""
priorities = dict()
for config in configs:
ConfigObject._assignPriority(config, priorities)
mult = -1 if reverse else 1
return [(mult * prio, config) \
for (config, prio) in six.iteritems(priorities)]
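    # Illustrative example of the priority assignment above: if B looked up A
    # via lookup() (so A is a parent of B) and C has no dependencies, then
    # prioritizeConfigs([A, B, C]) assigns A=0, C=0, B=1, so heapq order
    # applies A and C before B; with reverse=True the values are negated and
    # B is reverted first.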
@attr.s(slots=True)
class ConfigOption(object):
name = attr.ib(convert=str)
type = attr.ib(default=str, validator=attr.validators.instance_of(type))
required = attr.ib(convert=bool, default=False)
default = attr.ib(default=None)
|
class PurplexError(Exception):
pass
class TokenMatchesEmptyStringError(PurplexError):
'''Raised when TokenDef regex matches the empty string.'''
def __init__(self, regexp):
message = 'token {!r} matched the empty string'.format(regexp)
super(TokenMatchesEmptyStringError, self).__init__(message)
class NoMatchingTokenFoundError(PurplexError):
'''Raised when a Lexer cannot match a TokenDef to the input data.'''
def __init__(self, line_num, line_pos, data):
message = ('No token definition matched @ line {} position {}: {!r}'
.format(line_num, line_pos, data + '...'))
super(NoMatchingTokenFoundError, self).__init__(message)
class TableConflictError(PurplexError):
'''Raised when a Parser would overwrite an action in the action table.'''
def __init__(self, prev_action, new_action):
message = 'Tried to replace {} with {}'.format(prev_action, new_action)
super(TableConflictError, self).__init__(message)
class StartSymbolNotReducedError(PurplexError):
'''Raised when a Parser uses all input without accepting.'''
def __init__(self, start_symbol):
message = 'Consumed all input without reducing {}'.format(start_symbol)
super(StartSymbolNotReducedError, self).__init__(message)
|
# Generated by Django 2.0.3 on 2018-06-22 19:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("main", "0010_remove_line_replicate")]
operations = [
migrations.AlterField(
model_name="protocol",
name="categorization",
field=models.CharField(
choices=[
("NA", "None"),
("OD", "Optical Density"),
("HPLC", "HPLC"),
("LCMS", "LCMS"),
("RAMOS", "RAMOS"),
("TPOMICS", "Transcriptomics / Proteomics"),
],
default="NA",
help_text="SBML category for this Protocol.",
max_length=8,
verbose_name="SBML Category",
),
)
]
|
from datetime import datetime, timedelta
from django.db.models import Count
from django.db.models.functions import TruncDate, TruncYear
from django.shortcuts import render
from account.models import User
from dispatcher.manage import ping
from dispatcher.models import Server
from problem.models import Problem
from submission.models import Submission
def museum_view(request):
def convert_timedelta(td):
return {
'year': td.days // 365,
'day': td.days % 365,
'hour': td.seconds // 3600,
'minute': (td.seconds % 3600) // 60,
'second': td.seconds % 60
}
ctx = {}
ctx['total_problem_count'] = Problem.objects.count()
ctx['total_submission_count'] = Submission.objects.count()
ctx['total_user_count'] = User.objects.filter(is_active=True).count()
# NOTE: this will break if there is no submission at all
first_submission = Submission.objects.last()
ctx['first_submission_time'] = first_submission.create_time
ctx['first_submission_duration'] = convert_timedelta(datetime.now() - ctx['first_submission_time'])
ctx['first_submission_author'] = first_submission.author
from uptime import uptime
ctx['uptime'] = convert_timedelta(timedelta(seconds=uptime()))
ctx['server_time'] = datetime.now()
ctx['eoj3_create_duration'] = convert_timedelta(datetime.now() - datetime(2017, 3, 11, 18, 32))
ctx['submission_count_1'] = Submission.objects.filter(create_time__gt=datetime.now() - timedelta(days=1)).count()
ctx['submission_count_7'] = Submission.objects.filter(create_time__gt=datetime.now() - timedelta(days=7)).count()
ctx['submission_count_30'] = Submission.objects.filter(create_time__gt=datetime.now() - timedelta(days=30)).count()
ctx['submission_stat'] = Submission.objects.filter(create_time__gt=datetime.today() - timedelta(days=30)). \
annotate(date=TruncDate('create_time')).values('date'). \
annotate(count=Count('id')).values('date', 'count').order_by()
ctx['user_stat'] = User.objects.filter(is_active=True).annotate(date=TruncYear('date_joined')).values('date'). \
annotate(count=Count('id')).values('date', 'count').order_by("date")
for idx, user in enumerate(ctx['user_stat']):
if idx == 0:
continue
user['count'] += ctx['user_stat'][idx - 1]['count']
ctx['problem_stat'] = Problem.objects.annotate(date=TruncYear('create_time')).values('date'). \
annotate(count=Count('id')).values('date', 'count').order_by("date")
for idx, user in enumerate(ctx['problem_stat']):
if idx == 0:
continue
user['count'] += ctx['problem_stat'][idx - 1]['count']
ctx['servers'] = servers = Server.objects.filter(enabled=True)
for server in servers:
server.status = ping(server)
return render(request, 'museum.jinja2', context=ctx)
|
import struct
import snap7
from snap7 import util
class DTbool:
    def __init__(self, readBuffer, boolIndex):
        self.readBuffer = readBuffer
        self.boolIndex = boolIndex
    def readValue(self, offset):
        result = snap7.util.get_bool(self.readBuffer, offset, self.boolIndex)
        return result
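# Minimal usage sketch (assumptions: the PLC address 192.168.0.1, rack 0,
# slot 1, and DB number 1 below are placeholders, not part of this module).
if __name__ == '__main__':
    client = snap7.client.Client()
    client.connect('192.168.0.1', 0, 1)
    # Read one byte at offset 0 of DB1 and interpret bit 2 of that byte.
    data = client.db_read(1, 0, 1)
    flag = DTbool(data, boolIndex=2)
    print(flag.readValue(0))  # offset 0 within the locally read buffer
    client.disconnect()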
|
import serial
import time
import fnmatch
import sys
def _auto_detect_serial_unix(preferred_list=['*']):
import glob
glist = glob.glob('/dev/ttyUSB*') + glob.glob('/dev/ttyACM*')
ret = []
for d in glist:
for preferred in preferred_list:
if fnmatch.fnmatch(d, preferred):
ret.append(d)
if len(ret) > 0:
return ret
for d in glist:
ret.append(d)
return ret
class NoneSerialManager(object):
def write(self, val):
pass
def read(self):
return ""
def readline(self):
return ""
class SerialManager(object):
def __init__(self):
available_ports = _auto_detect_serial_unix()
try:
self._serial = serial.Serial(available_ports[0], 9600, timeout=1)
time.sleep(2)
        except (IndexError, serial.SerialException):
            print("Error trying to connect to Arduino")
self._serial = NoneSerialManager()
def connect(self, device):
self._serial = serial.Serial(device, 9600)
time.sleep(2)
def write(self, value):
self._serial.write(bytes(value, 'latin-1'))
def readline(self):
return self._serial.readline().decode()
class SerialManagerPy2(SerialManager):
def __init__(self):
SerialManager.__init__(self)
def write(self, value):
self._serial.write(value)
if sys.version_info.major == 2:
serial_manager = SerialManagerPy2()
elif sys.version_info.major == 3:
serial_manager = SerialManager()
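# Minimal usage sketch (assumption: the attached Arduino sketch replies with a
# line of text; the "ping" payload below is an arbitrary example).
if __name__ == '__main__':
    serial_manager.write("ping\n")
    print(serial_manager.readline())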
|
#####################################################################
# #
# /batch_compiler.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program runmanager, in the labscript #
# suite (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
import sys
from zprocess import setup_connection_with_parent
to_parent, from_parent, kill_lock = setup_connection_with_parent(lock = True)
import labscript
import labscript_utils.excepthook
from labscript_utils.modulewatcher import ModuleWatcher
class BatchProcessor(object):
def __init__(self, to_parent, from_parent, kill_lock):
self.to_parent = to_parent
self.from_parent = from_parent
self.kill_lock = kill_lock
self.mainloop()
def mainloop(self):
while True:
signal, data = self.from_parent.get()
if signal == 'compile':
with kill_lock:
# TODO: remove actual compilation of labscript from here and
# move to when file is ready to go at blacs. This code should do
#
# labscript.labscript_init(run_file, labscript_file=labscript_file)
# with h5py.File(run_file) as h5_file:
# labscript.save_labscripts(h5_file)
# labscript.labscript_cleanup()
# instead of the following code
#
success = labscript.compile(*data)
self.to_parent.put(['done',success])
elif signal == 'quit':
sys.exit(0)
else:
raise ValueError(signal)
if __name__ == '__main__':
module_watcher = ModuleWatcher() # Make sure modified modules are reloaded
batch_processor = BatchProcessor(to_parent,from_parent,kill_lock)
|
from JumpscaleCore.servers.tests.base_test import BaseTest
from Jumpscale import j
from smtplib import SMTP
from imbox import Imbox
from imapclient import IMAPClient
class TestSMTPIMAP(BaseTest):
def test001_check_smtp_save_message_in_bcdb(self):
"""
SMTP server should connect to 'main' bcdb instance and store data into it.
steps:
- Start SMTP server in tmux
- Connect to the server, should succeed.
- Send a message to the server, should succeed.
        - Check the database, should have the message.
:return:
"""
cmd = "kosmos 'j.servers.smtp.start()'"
self.info("Execute {} in tmux main session".format(cmd))
pan = j.servers.tmux.execute(cmd=cmd)
self.info("Assert that the server is running")
self.assertTrue(pan.cmd_running)
self.info("Connect to the server 0.0.0.0:7002")
with SMTP("0.0.0.0", 7002) as smtp:
body = "Hello!"
from_mail = "[email protected]"
to_mail = "[email protected]"
msg = ("From: %s\r\nTo: %s\r\n\r\n" % (from_mail, to_mail)) + body
smtp.sendmail(from_mail, to_mail, msg)
self.info("Get the data from the database")
db = j.data.bcdb.get("mails")
retrieved_model = db.model_get(url="jumpscale.email.message")
data = retrieved_model.find()[-1]
self.info("Assert that the message has been saved in the database")
self.assertEqual(data.from_email, "[email protected]")
self.assertEqual(data.to_email, "[email protected]")
self.assertEqual(data.body, body)
self.info("Destroy the database")
db.destroy()
self.info("Stop the running server")
pan.kill()
def test002_imapclient_can_create_folder_in_imap(self):
"""
Client can create folders in his mail.
Steps:
- Start imap server, should succeed.
- List default folder, inbox should be there.
- Create new folder, should succeed.
"""
cmd = "kosmos 'j.servers.imap.start()'"
self.info("Execute {} in tmux main session".format(cmd))
pan = j.servers.tmux.execute(cmd=cmd)
self.info("Assert that the server is running")
self.assertTrue(pan.cmd_running)
self.info("List default folder, inbox should be there")
box = Imbox("0.0.0.0", "[email protected]", "randomPW", ssl=False, port=7143)
self.assertIn("INBOX", box.folders()[-1])
self.info("Connect the client to the IMAP server")
client = IMAPClient("0.0.0.0", port=7143, ssl=False)
client.login("[email protected]", "randomPW")
box_name = self.rand_string()
self.info("Create {} box".format(box_name))
client.create_folder(box_name)
self.info("Assert that the new box has been created")
self.assertIn(box_name, box.folders()[-1])
self.info("Stop the running server")
pan.kill()
def test003_imapClient_get_messages_from_database(self):
"""
        Client can get messages from the database.
Steps:
        - Start smtp server, should succeed.
- Send message to smtp server.
- Start imap server, should succeed.
- List default folder, inbox should be there.
- Client should get the message from the database.
"""
cmd = "kosmos 'j.servers.smtp.start()'"
self.info("Execute {} in tmux main session".format(cmd))
pan = j.servers.tmux.execute(cmd=cmd)
self.info("Assert that the server is running")
self.assertTrue(pan.cmd_running)
self.info("Connect to the server 0.0.0.0:7002")
with SMTP("0.0.0.0", 7002) as smtp:
body = "Hello!"
from_mail = "[email protected]"
to_mail = "[email protected]"
msg = ("From: %s\r\nTo: %s\r\n\r\n" % (from_mail, to_mail)) + body
smtp.sendmail(from_mail, to_mail, msg)
cmd = "kosmos 'j.servers.imap.start()'"
self.info("Execute {} in tmux main session".format(cmd))
pan_imap = j.servers.tmux.execute(cmd=cmd)
self.info("Assert that the server is running")
self.assertTrue(pan.cmd_running)
self.info("Connect to the imap server")
box = Imbox("0.0.0.0", "[email protected]", "randomPW", ssl=False, port=7143)
uid, last_message = box.messages()[-1]
self.info("Assert that client get the message from the database")
self.assertEqual(last_message.sent_from[0]["email"], "[email protected]")
self.assertEqual(last_message.sent_to[0]["email"], "[email protected]")
self.assertEqual(last_message.subject, body)
self.info("Stop the running server")
pan.kill()
pan_imap.kill()
|
'''Common implementation for routing clear triggers'''
# python
from functools import partial
# genie libs
from genie.libs.sdk.libs.utils.mapping import Mapping
from genie.libs.sdk.triggers.clear.clear import TriggerClear, verify_clear_callable
from genie.libs.sdk.libs.utils.triggeractions import CompareUptime
# Ignore keys when doing the diff with Ops objects for save_snapshot and
# verify_clear, it will be used for LearnPollDiff.ops_diff callable
exclude = ['maker','updated']
class TriggerClearIpRouteVrfAll(TriggerClear):
# Argument with dynamic value for verify callable
# As verify callable can be re-used in multiple triggers
# with different variable names. This dictionary is used to map
# dynamic argument name to actual script argument name
# <expected argument_name for callable>: <script argument name>
verify_func_args={'r_obj': [['info', 'vrf', '(?P<vrf>.*)',
'address_family', 'ipv4',
'routes', '(?P<route>.*)',
'next_hop', 'next_hop_list','(?P<index>.*)',
'updated', '(.*)']],
'relation': '<',
'threshold_time': 'compare_time',
'ops': 'ops'}
mapping = Mapping(requirements={'ops.routing.routing.Routing': {
'requirements': [ \
['info', 'vrf', '(?P<vrf>.*)',
'address_family', 'ipv4',
'routes', '(?P<route>.*)',
'active', True]],
'kwargs': {'attributes': \
['info[vrf][(.*)][address_family][ipv4][routes][(.*)]']},
'exclude': exclude}},
verify_ops={'ops.routing.routing.Routing':{
'requirements':[[partial(verify_clear_callable,
verify_func=CompareUptime.compare_uptime,
verify_func_args=verify_func_args)]],
'kwargs':{'attributes': [
'info[vrf][(.*)][address_family][ipv4][routes][(.*)]']},
'exclude': exclude}},
num_values={'vrf': 'all', 'route': 'all', 'af': 'all'})
class TriggerClearIpv6RouteVrfAll(TriggerClear):
# Argument with dynamic value for verify callable
# As verify callable can be re-used in multiple triggers
# with different variable names. This dictionary is used to map
# dynamic argument name to actual script argument name
# <expected argument_name for callable>: <script argument name>
verify_func_args={'r_obj': [['info', 'vrf', '(?P<vrf>.*)',
'address_family', 'ipv6',
'routes', '(?P<route>.*)',
'next_hop', 'next_hop_list', '(?P<index>.*)',
'updated', '(.*)']],
'relation': '<',
'threshold_time': 'compare_time',
'ops': 'ops'}
mapping = Mapping(requirements={'ops.routing.routing.Routing': {
'requirements': [ \
['info', 'vrf', '(?P<vrf>.*)',
'address_family', 'ipv6',
'routes', '(?P<route>.*)',
'active', True]],
'kwargs': {'attributes': \
['info[vrf][(.*)][address_family][ipv6][routes][(.*)]']},
'exclude': exclude}},
verify_ops={'ops.routing.routing.Routing':{
'requirements':[[partial(verify_clear_callable,
verify_func=CompareUptime.compare_uptime,
verify_func_args=verify_func_args)]],
'kwargs':{'attributes': [
'info[vrf][(.*)][address_family][ipv6][routes][(.*)]']},
'exclude': exclude}},
num_values={'vrf': 'all', 'route': 'all', 'af': 'all'})
class TriggerClearIpRouteVrfDefault(TriggerClear):
# Argument with dynamic value for verify callable
# As verify callable can be re-used in multiple triggers
# with different variable names. This dictionary is used to map
# dynamic argument name to actual script argument name
# <expected argument_name for callable>: <script argument name>
verify_func_args={'r_obj': [['info', 'vrf', '(?P<vrf>^default$)',
'address_family', 'ipv4',
'routes', '(?P<route>.*)',
'next_hop', 'next_hop_list','(?P<index>.*)',
'updated', '(.*)']],
'relation': '<',
'threshold_time': 'compare_time',
'ops': 'ops'}
mapping = Mapping(requirements={'ops.routing.routing.Routing': {
'requirements': [ \
['info', 'vrf', '(?P<vrf>^default$)',
'address_family', 'ipv4',
'routes', '(?P<route>.*)',
'active', True]],
'kwargs': {'attributes': \
['info[vrf][(.*)][address_family][ipv4][routes][(.*)]']},
'exclude': exclude}},
verify_ops={'ops.routing.routing.Routing':{
'requirements':[[partial(verify_clear_callable,
verify_func=CompareUptime.compare_uptime,
verify_func_args=verify_func_args)]],
'kwargs':{'attributes': [
'info[vrf][(.*)][address_family][ipv4][routes][(.*)]']},
'exclude': exclude}},
num_values={'vrf': 1, 'route': 'all', 'af': 'all'})
class TriggerClearIpv6RouteVrfDefault(TriggerClear):
# Argument with dynamic value for verify callable
# As verify callable can be re-used in multiple triggers
# with different variable names. This dictionary is used to map
# dynamic argument name to actual script argument name
# <expected argument_name for callable>: <script argument name>
verify_func_args={'r_obj': [['info', 'vrf', '(?P<vrf>^default$)',
'address_family', 'ipv6',
'routes', '(?P<route>.*)',
'next_hop', 'next_hop_list','(?P<index>.*)',
'updated', '(.*)']],
'relation': '<',
'threshold_time': 'compare_time',
'ops': 'ops'}
mapping = Mapping(requirements={'ops.routing.routing.Routing': {
'requirements': [ \
['info', 'vrf', '(?P<vrf>^default$)',
'address_family', 'ipv6',
'routes', '(?P<route>.*)',
'active', True]],
'kwargs': {'attributes': \
['info[vrf][(.*)][address_family][ipv6][routes][(.*)]']},
'exclude': exclude}},
verify_ops={'ops.routing.routing.Routing':{
'requirements':[[partial(verify_clear_callable,
verify_func=CompareUptime.compare_uptime,
verify_func_args=verify_func_args)]],
'kwargs':{'attributes': [
'info[vrf][(.*)][address_family][ipv6][routes][(.*)]']},
'exclude': exclude}},
num_values={'vrf': 1, 'route': 'all', 'af': 'all'})
|
import unittest, pytest, os
from declarativeunittest import raises
ontravis = 'TRAVIS' in os.environ
from construct import *
from construct.lib import *
class TestThis(unittest.TestCase):
def test_this(self):
assert repr(this) == "this"
this_example = Struct(
# straight-forward usage: instead of passing (lambda ctx: ctx["length"]) use this.length
"length" / Int8ub,
"value" / Bytes(this.length),
# an example of nesting: '_' refers to the parent's scope
"nested" / Struct(
"b1" / Int8ub,
"b2" / Int8ub,
"b3" / Computed(this.b1 * this.b2 + this._.length),
),
# and conditions work as expected
"condition" / IfThenElse(
this.nested.b1 > 50,
"c1" / Int32ub,
"c2" / Int8ub,
),
)
assert this_example.parse(b"\x05helloABXXXX") == Container(length=5)(value=b'hello')(nested=Container(b1=65)(b2=66)(b3=4295))(condition=1482184792)
assert this_example.build(dict(length=5, value=b'hello', nested=dict(b1=65, b2=66), condition=1482184792)) == b"\x05helloABXXXX"
def test_this_getitem(self):
gi = Struct(
"length of text" / Int8ub,
"text" / Bytes(this["length of text"]),
)
assert gi.parse(b"\x06World!") == Container({"length of text": 6, "text":b"World!"})
assert gi.build({"length of text": 6, "text":b"World!"}) == b"\x06World!"
def test_path(self):
path = Path("path")
x = ~((path.foo * 2 + 3 << 2) % 11)
        assert str(x) == 'not ((((path.foo * 2) + 3) << 2) % 11)'
        assert repr(x) == 'not ((((path.foo * 2) + 3) << 2) % 11)'
assert not x(dict(foo=7))
def test_obj(self):
assert repr(obj_) == "obj_"
assert repr(obj_ + 1 == 12) == "((obj_ + 1) == 12)"
assert (obj_)(1,{}) == 1
assert (obj_ + 10)(1,{}) == 11
assert (obj_ == 12)(12,{})
assert (obj_ != 12)(13,{})
def test_functions(self):
assert repr(len_(this.x)) == "len_(this.x)"
assert repr(sum_(this.x)) == "sum_(this.x)"
assert repr(len_) == "len_"
assert repr(sum_) == "sum_"
example = Struct(
"items" / Byte[2],
Check(len_(this.items) == 2),
Check(sum_(this.items) == 10),
Check(min_(this.items) == 3),
Check(max_(this.items) == 7),
"nega" / Int8sb,
Check(this.nega == -1),
Check(abs_(this.nega) == 1),
)
assert example.parse(b"\x03\x07\xff") == dict(items=[3,7], nega=-1)
assert example.build(dict(items=[3,7], nega=-1)) == b"\x03\x07\xff"
example = Struct(
"items" / RepeatUntil(obj_ == 255, Byte),
)
assert example.parse(b"\x03\x07\xff") == dict(items=[3,7,255])
assert example.build(dict(items=[3,7,255])) == b"\x03\x07\xff"
|
# -*- coding: utf-8 -*-
import cv2
import numpy as np
from PIL import Image
import re
from thumbor.filters import BaseFilter, filter_method
from thumbor_extras.filters import rainbow
class Filter(BaseFilter):
@filter_method(
BaseFilter.PositiveNonZeroNumber,
BaseFilter.DecimalNumber,
BaseFilter.Boolean,
BaseFilter.Boolean,
BaseFilter.Boolean,
BaseFilter.PositiveNumber,
BaseFilter.PositiveNumber,
BaseFilter.PositiveNumber
)
def draw_focal_points(self, line_width=3, label_height_percentage=.1, show_labels=True, show_heatmap=True, show_rainbow=True, r=0, g=255, b=0):
img = np.array(self.engine.image)
class_label_regex = re.compile('DNN Object Detection \(class: ([a-z ]+)\)')
for index, focal_point in enumerate(self.context.request.focal_points):
width = int(focal_point.width)
height = int(focal_point.height)
left = int(focal_point.x - (width / 2))
top = int(focal_point.y - (height / 2))
right = left + width
bottom = top + height
weight = focal_point.weight
# 🌈-map
if show_rainbow:
color = rainbow[index % len(rainbow)]
r, g, b = color
# A 🔥 heatmap 🔥 (kinda) from transparent (0% confidence) to opaque (100% confidence)
if show_heatmap:
overlay = img.copy()
cv2.rectangle(overlay, (left, top), (right, bottom), (r, g, b), line_width)
cv2.addWeighted(overlay, weight, img, 1 - weight, 0, img)
else:
cv2.rectangle(img, (left, top), (right, bottom), (r, g, b), line_width)
# Draw class labels
if show_labels:
match = class_label_regex.match(focal_point.origin)
if match:
class_label = match.groups(1)[0]
elif focal_point.origin == 'DNN Face Detection':
class_label = 'face'
match = True
if match:
# one-tenth the height of the box
label_height = int(height * label_height_percentage)
# the font is *about* 30 pixels tall
scale = label_height / 30.
cv2.putText(
img,
' {} ({:0.3f})'.format(class_label, weight),
(left, top + label_height),
cv2.FONT_HERSHEY_SIMPLEX,
scale,
(r, g, b),
line_width
)
self.engine.image = Image.fromarray(img)
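# Illustrative usage (assumption: this module is importable as
# thumbor_extras.filters.draw_focal_points; adjust the dotted path to your
# install). The filter is enabled in thumbor.conf and invoked per request from
# the URL; the filter name in the URL matches the decorated method above:
#
#   FILTERS = [..., 'thumbor_extras.filters.draw_focal_points']
#
#   http://localhost:8888/unsafe/filters:draw_focal_points()/path/to/image.jpg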
|
import os
import argparse
from trainer import SemanticSeg
import pandas as pd
import random
from PIL import Image
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from rcnn.config import INIT_TRAINER, SETUP_TRAINER, CURRENT_FOLD, PATH_LIST, FOLD_NUM, ROI_NAME, TEST_PATH
from rcnn.config import VERSION, DISEASE, MODE
import time
VAL_SAMPLE = ['10446967','10682303','08676580','16674245','12786488','01472680','0009413103','30346866','17509127','16215626',\
'15189944','11921906','0008549664','0001900608','0009363417','17508083']
def get_cross_validation_by_specified(path_list, val_sample=None):
sample_list = list(set([os.path.basename(case).split('.')[0] for case in path_list]))
print('number of sample:',len(sample_list))
train_id = []
validation_id = []
for sample in sample_list:
if sample in val_sample:
validation_id.append(sample)
else:
train_id.append(sample)
train_path = []
validation_path = []
for case in path_list:
if os.path.basename(case).split('.')[0] in train_id:
train_path.append(case)
else:
validation_path.append(case)
random.shuffle(train_path)
random.shuffle(validation_path)
print("Train set length ", len(train_path),
"Val set length", len(validation_path))
return train_path, validation_path
def get_cross_validation_by_sample(path_list, fold_num, current_fold):
sample_list = list(set([os.path.basename(case).split('_')[0] for case in path_list]))
# print(len(sample_list))
sample_list.sort()
_len_ = len(sample_list) // fold_num
train_id = []
validation_id = []
end_index = current_fold * _len_
start_index = end_index - _len_
if current_fold == fold_num:
validation_id.extend(sample_list[start_index:])
train_id.extend(sample_list[:start_index])
else:
validation_id.extend(sample_list[start_index:end_index])
train_id.extend(sample_list[:start_index])
train_id.extend(sample_list[end_index:])
train_path = []
validation_path = []
for case in path_list:
if os.path.basename(case).split('_')[0] in train_id:
train_path.append(case)
else:
validation_path.append(case)
random.shuffle(train_path)
random.shuffle(validation_path)
print("Train set length ", len(train_path),
"Val set length", len(validation_path))
return train_path, validation_path
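# Worked example of the fold split above (illustrative only): with 10 distinct
# sample ids and fold_num=5, _len_ == 2, so current_fold=1 validates on
# sample_list[0:2], current_fold=2 on sample_list[2:4], and the last fold
# (current_fold=5) validates on sample_list[8:] while training on the rest.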
def get_cross_validation(path_list, fold_num, current_fold):
_len_ = len(path_list) // fold_num
train_id = []
validation_id = []
end_index = current_fold * _len_
start_index = end_index - _len_
if current_fold == fold_num:
validation_id.extend(path_list[start_index:])
train_id.extend(path_list[:start_index])
else:
validation_id.extend(path_list[start_index:end_index])
train_id.extend(path_list[:start_index])
train_id.extend(path_list[end_index:])
random.shuffle(train_id)
random.shuffle(validation_id)
print(len(train_id), len(validation_id))
return train_id, validation_id
def get_parameter_number(net):
total_num = sum(p.numel() for p in net.parameters())
trainable_num = sum(p.numel() for p in net.parameters() if p.requires_grad)
return {'Total': total_num, 'Trainable': trainable_num}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-m',
'--mode',
default='train_cross_val',
choices=["train", 'train_cross_val', "inf","test"],
help='choose the mode',
type=str)
parser.add_argument('-s', '--save', default='no', choices=['no', 'n', 'yes', 'y'],
help='save the forward middle features or not', type=str)
args = parser.parse_args()
# Set data path & segnetwork
if args.mode != 'train_cross_val':
segnetwork = SemanticSeg(**INIT_TRAINER)
print(get_parameter_number(segnetwork.net))
path_list = PATH_LIST
# Training
###############################################
if args.mode == 'train_cross_val':
for current_fold in range(1, FOLD_NUM + 1):
print("=== Training Fold ", current_fold, " ===")
segnetwork = SemanticSeg(**INIT_TRAINER)
print(get_parameter_number(segnetwork.net))
train_path, val_path = get_cross_validation_by_sample(path_list, FOLD_NUM, current_fold)
SETUP_TRAINER['train_path'] = train_path
SETUP_TRAINER['val_path'] = val_path
SETUP_TRAINER['cur_fold'] = current_fold
start_time = time.time()
segnetwork.trainer(**SETUP_TRAINER)
print('run time:%.4f' % (time.time() - start_time))
if args.mode == 'train':
train_path, val_path = get_cross_validation_by_sample(path_list, FOLD_NUM, CURRENT_FOLD)
        # train_path, val_path = get_cross_validation_by_specified(path_list, VAL_SAMPLE)
SETUP_TRAINER['train_path'] = train_path
SETUP_TRAINER['val_path'] = val_path
SETUP_TRAINER['cur_fold'] = CURRENT_FOLD
start_time = time.time()
segnetwork.trainer(**SETUP_TRAINER)
print('run time:%.4f' % (time.time() - start_time))
###############################################
# Inference
###############################################
if args.mode == 'test':
start_time = time.time()
test_path = TEST_PATH
print("test set len:",len(test_path))
save_path = './analysis/result/{}/{}/{}'.format(DISEASE,VERSION,MODE)
if not os.path.exists(save_path):
os.makedirs(save_path)
save_flag = False if args.save == 'no' or args.save == 'n' else True
cls_result = segnetwork.test(test_path,save_path,mode=MODE,save_flag=save_flag)
if MODE != 'seg':
csv_path = os.path.join(save_path,ROI_NAME + '.csv')
info = {}
info['id'] = test_path
info['label'] = cls_result['true']
info['pred'] = cls_result['pred']
info['prob'] = cls_result['prob']
print(classification_report(cls_result['true'], cls_result['pred'], target_names=['without','with'],output_dict=False))
print(confusion_matrix(cls_result['true'], cls_result['pred']))
csv_file = pd.DataFrame(info)
csv_file.to_csv(csv_path, index=False)
print('run time:%.4f' % (time.time() - start_time))
|
from django.conf import settings
from django.urls import path, re_path, reverse_lazy
from django.conf.urls.static import static
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic.base import RedirectView
from graphene_django.views import GraphQLView
from rest_framework.routers import DefaultRouter
from rest_framework.authtoken import views
from rest_framework_swagger.views import get_swagger_view
from cornershop.apps.users.views import UserCreateViewSet, UserViewSet
from cornershop.apps.weather.views import WeatherViewSet
router = DefaultRouter(trailing_slash=True)
router.register(r'users', UserViewSet)
router.register(r'users', UserCreateViewSet)
router.register(r'weather', WeatherViewSet)
endpoints_patterns = [
path('api/v1/', include(router.urls)),
]
schema_view = get_swagger_view(
patterns=endpoints_patterns,
title='Cornershop Weather API',
)
urlpatterns = [
*endpoints_patterns,
path('admin/', admin.site.urls),
path('api-token-auth/', views.obtain_auth_token),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
# GraphQL
url(r'^graphql$', GraphQLView.as_view(graphiql=False)),
url(r'^graphiql$', GraphQLView.as_view(graphiql=True)),
    # API documentation (Swagger schema)
path('docs/', schema_view, name='swagger'),
re_path(r'^$', RedirectView.as_view(url=reverse_lazy('swagger'), permanent=False)),
] + static(
settings.STATIC_URL,
document_root=settings.STATIC_ROOT,
) + static(
settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT
)
|
"""
AWS Example
"""
import fire
import pkg
if __name__ == '__main__':
fire.Fire(pkg.aws.Handler)
|
"""
@author: Min Du ([email protected])
Copyright (c) 2021 Palo Alto Networks
"""
import time
import logging
from utils import misc
from utils import const
from worker import combo_selection
if __name__ == '__main__':
misc.init_logger(const.get_data_preparation_logs_filename())
logger = logging.getLogger(misc.get_logger_name(__name__))
logger.info('Combo selector start.')
start_time = time.time()
try:
selector = combo_selection.ComboSelector()
selector.select_combos()
except Exception:
info = 'Exception in combo selector.'
logger.exception(info)
time_elapsed = time.time() - start_time
end_msg = f'Combo selector end. Time elapsed {time_elapsed:.2f} seconds'
logger.info(end_msg)
|
# -*- coding: utf-8 -*-
"""
> 007 @ Facebook
~~~~~~~~~~~~~~~~
Given the mapping a = 1, b = 2, ... z = 26, and an encoded message, count the
number of ways it can be decoded.
For example, the message '111' would give 3, since it could be
decoded as 'aaa', 'ka', and 'ak'.
__
You can assume that the messages are decodable. For example, '001' is not allowed.
"""
def memo(em: str, k: int, m: list):
"""
Helper method for dynamic programming method -> memoize
:param em: encoded message
:param k:
:param m: memo lookup variable
:return: int
"""
if k == 0:
return 1
s = len(em) - k
if em[s] == '0':
return 0
if m[k] is not None:
return m[k]
result = memo(em, k - 1, m)
if k >= 2 and int(em[s:s + 2]) <= 26:
result += memo(em, k - 2, m)
m[k] = result
return result
def num_of_ways(em: str):
"""
    The general idea is to split the problem into smaller subproblems:
    1. for k == 0 -> return 1
    2. for k > 0 -> count decodings of the last k characters: decode their
       first character alone (k - 1 remain) and, if the first two digits form
       a number <= 26, also decode those two together (k - 2 remain)
:param em: encoded message
:return: int
"""
length = len(em)
return memo(em, length, [None] * (length + 1))
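# Worked example (illustrative): num_of_ways('111') fills the memo over suffix
# lengths k: m[1] = 1 ('1'), m[2] = m[1] + 1 = 2 ('1','1' or '11'),
# m[3] = m[2] + m[1] = 3, matching the 'aaa', 'ak', 'ka' decodings above.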
if __name__ == '__main__':
assert num_of_ways('111') == 3
assert num_of_ways('123') == 3
assert num_of_ways('011') == 0
|
# -*- coding: utf-8 -*-
"""Wrapper to run ML from the command line.
:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdp, pkdc, pkdlog
from sirepo import simulation_db
from sirepo.template import template_common
import py.path
import sirepo.template.ml as template
def run(cfg_dir):
template_common.exec_parameters()
data = simulation_db.read_json(template_common.INPUT_BASE_NAME)
template.save_sequential_report_data(py.path.local(cfg_dir), data)
def run_background(cfg_dir):
template_common.exec_parameters()
|
# Generated by Django 2.0.2 on 2018-02-19 12:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('website', '0006_video_contest_vote'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='voted_videos',
field=models.ManyToManyField(related_name='voters', to='website.VideoContestRegistration'),
),
]
|
import pandas as pd
import numpy as np
from collections import namedtuple
from typing import Tuple, List, Iterator, Any
from .. import spec as S
from ..dsl import Node, AtomNode, ParamNode, ApplyNode
from ..visitor import GenericVisitor
from .interpreter import Interpreter
from .post_order import PostOrderInterpreter
from .context import Context
from .error import InterpreterError, GeneralError
from ..spec.interval import *
# re-use the concrete interpreter's abstract domain util functions
from .morpheus_interpreter import *
# fixme: currently abstract interpretation rules are hard coded
# need to dynamically load it from the spec in the next version
class KSMorpheusAbstractInterpreter(PostOrderInterpreter):
'''
An extended abstract interpreter that works on non-full skeleton level (i.e., considers some arguments).
'''
def __init__(self, spec: S.TyrellSpec, *args, **kwargs):
super(KSMorpheusAbstractInterpreter, self).__init__(*args, **kwargs)
self._spec = spec
# note: a stateful variable that keeps track of interpretation combination
# which is connected to LineSkeletonEnumerator
# typical combination can be (1, 4, 1, 2, None, None)
# where None indicates anything and integers indicate production id
# LineSkeletonEnumerator will capture and utilize it to speed up enumeration
self._current_combination = None
def make_abs(self):
return {
"row": Interval(IMIN,IMAX),
"col": Interval(IMIN,IMAX),
"head": Interval(IMIN,IMAX),
"content": Interval(IMIN,IMAX),
}
def abs_intersected(self, abs0, abs1):
# within this framework, abs0 and abs1 have the same key set
for p in abs0.keys():
if not interval_is_intersected(abs0[p], abs1[p]):
return False
return True
# hijack eval function: transform the inputs to abstract values before feeding
def eval(self, prog: Node, abstract_inputs: List[Any]) -> Any:
class NodeVisitor(GenericVisitor):
_interp: PostOrderInterpreter
_context: Context
def __init__(self, interp):
self._interp = interp
self._context = Context()
def visit_with_context(self, node: Node):
self._context.observe(node)
res = self.visit(node)
self._context.finish(node)
return res
# note: for atom node, to support parameter level conflict-driven learning,
# use ??? to get the value, not the original eval_??? methods in Trinity
# and eventually return the node itself
# note: in this version, every atom node is required to have tag and "cpos" field
def visit_atom_node(self, atom_node: AtomNode):
tmp_prod_id = atom_node.production.id
# note: use self._interp to refer to the self in eval
self._interp._current_combination = tuple([
tmp_prod_id if i==atom_node.tag["cpos"] else self._interp._current_combination[i]
for i in range(len(self._interp._current_combination))
])
return atom_node
def visit_param_node(self, param_node: ParamNode):
param_index = param_node.index
if param_index >= len(abstract_inputs):
msg = 'Input parameter access({}) out of bound({})'.format(
param_index, len(abstract_inputs))
raise GeneralError(msg)
return abstract_inputs[param_index]
def visit_apply_node(self, apply_node: ApplyNode):
in_values = [self.visit_with_context(
x) for x in apply_node.args]
self._context.pop()
method_name = self._eval_method_name(apply_node.name)
method = getattr(self._interp, method_name,
self._method_not_found)
return method(apply_node, in_values)
def _method_not_found(self, apply_node: ApplyNode, arg_values: List[Any]):
msg = 'Cannot find required eval method: "{}"'.format(
self._eval_method_name(apply_node.name))
raise NotImplementedError(msg)
@staticmethod
def _eval_method_name(name):
return 'eval_' + name
node_visitor = NodeVisitor(self)
try:
# try if this node is a root node ("skeleton" field only exists in root node)
if prog.tag is not None:
if "skeleton" in prog.tag:
# yes it's root
# then initialize set the _current_combination
self._current_combination = tuple([None for _ in range(prog.tag["nslot"])])
return node_visitor.visit_with_context(prog)
except InterpreterError as e:
e.context = node_visitor._context
raise e from None
def _get_context(self, comb, nodes):
# this extracts the interpreter context (not the visitor context) from the interpreter combination
return tuple([
None if i in [p.tag["cpos"] for p in nodes] else comb[i]
for i in range(len(comb))
])
# ================================== #
# ======== enum productions ======== #
# ================================== #
# fixme: merge with NodeVisitor later
def _eval_method_name(self, name):
return 'eval_' + name
# main entrance of evaluating an atom node
def _eval_atom_node(self, node):
node_type = node.type.name
method_name = self._eval_method_name(node_type)
method = getattr(self, method_name)
return method(node.data)
# note: use this method in EnumAssertion
def _eval_enum_prod(self, prod):
prod_type = prod.lhs.name
method_name = self._eval_method_name(prod_type)
method = getattr(self, method_name)
return method(prod.rhs[0])
# can only be called by _eval_atom_node
def eval_ColInt(self, v):
return int(v)
# can only be called by _eval_atom_node
# def eval_SmallInt(self, v):
# return int(v)
def eval_ConstVal(self, v):
if v.endswith("@Float"):
return float(v[:-6])
elif v.endswith("@Int"):
return int(v[:-4])
elif v.endswith("@Str"):
return v[:-4]
else:
raise InterpreterError("Exception evaluating ConstVal.")
# can only be called by _eval_atom_node
def eval_ColList(self, v):
# question: is this true?
return [int(p) for p in v]
# can only be called by _eval_atom_node
def eval_AggrFunc(self, v):
return v
# can only be called by _eval_atom_node
def eval_NumFunc(self, v):
return v
# can only be called by _eval_atom_node
def eval_BoolFunc(self, v):
return v
# ====================================== #
# ======== function productions ======== #
# ====================================== #
# this validates that the collist is not stupid in a fast way that won't check column overflow
    # note: this should be called within assertEnum, and before you call the explain function
def fast_validate_collist(self, arg_collist):
# arg_collist is the original collist (before explanation)
if len(arg_collist) != len(list(set(arg_collist))):
# don't include duplicates
# e.g., -1, -1, will cause ValueError in .remove(x) in explain_collist
return False
# note-important: don't mix positive and negative ints
if max(arg_collist) >= 0 and min(arg_collist) < 0:
return False
for p in arg_collist:
if p == 0:
if -99 in arg_collist:
return False
else:
continue
elif p == -99:
if 0 in arg_collist:
return False
else:
continue
elif p > 0:
if -p in arg_collist:
return False
else:
continue
elif p < 0:
if -p in arg_collist:
return False
else:
continue
return True
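    # Illustrative outcomes of the checks above: [1, 3] and [-2, -4] pass,
    # while [1, 1] (duplicate), [1, -3] (mixed signs), and [0, -99] (0 combined
    # with -99) are all rejected.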
def eval_select(self, node, args):
arg_tb, node_collist = args
arg_collist = self._eval_atom_node(node_collist)
# extract the context from combination
_current_context = self._get_context(self._current_combination, [node_collist])
self.assertEnum(node, _current_context, self._current_combination,
            # this makes sure the original collist is not stupid
lambda comb: ( lambda x: self.fast_validate_collist(x) )(
# original collist
self._eval_enum_prod( self._spec.get_production( comb[node_collist.tag["cpos"]] ) ),
),
tag="abs:alpha:select:0",
)
out = self.make_abs()
out["row"] = interval_binary_op("==", out["row"], arg_tb["row"])
out["head"] = interval_binary_op("<=", out["head"], arg_tb["head"])
out["content"] = interval_binary_op("<=", out["content"], arg_tb["content"])
# precise tracking
# out["col"] = interval_binary_op("<", out["col"], arg_tb["col"])
if arg_collist[0]<0:
out["col"] = interval_binary_op(
"==",
out["col"],
interval_binary_op("-", arg_tb["col"], Interval(len(arg_collist),len(arg_collist)))
)
else:
out["col"] = interval_binary_op("==", out["col"], Interval(len(arg_collist),len(arg_collist)))
return out
def eval_unite(self, node, args):
arg_tb, node_col0, node_col1 = args
arg_col0 = self._eval_atom_node(node_col0)
arg_col1 = self._eval_atom_node(node_col1)
# extract the context from combination
_current_context = self._get_context(self._current_combination, [node_col0, node_col1])
self.assertEnum(node, _current_context, self._current_combination,
# note: nested lambda to store temp variable
lambda comb: (lambda x0,x1: x0 != x1)(
self._eval_enum_prod( self._spec.get_production( comb[node_col0.tag["cpos"]] ) ),
self._eval_enum_prod( self._spec.get_production( comb[node_col1.tag["cpos"]] ) )
),
tag="abs:alpha:unite:0",
)
out = self.make_abs()
out["row"] = interval_binary_op("==", out["row"], arg_tb["row"])
out["col"] = interval_binary_op(
"==",
out["col"],
interval_binary_op("-", arg_tb["col"], Interval(1,1)),
)
out["head"] = interval_binary_op(
"<=",
out["head"],
interval_binary_op("+", arg_tb["head"], Interval(1,1)),
)
out["content"] = interval_binary_op(
">=",
out["content"],
interval_binary_op("+", arg_tb["content"], Interval(1,1)),
)
return out
def eval_separate(self, node, args):
arg_tb, node_col = args
arg_col = self._eval_atom_node(node_col)
out = self.make_abs()
out["row"] = interval_binary_op("==", out["row"], arg_tb["row"])
out["col"] = interval_binary_op(
"==",
out["col"],
interval_binary_op("+", arg_tb["col"], Interval(1,1)),
)
out["head"] = interval_binary_op(
"<=",
out["head"],
interval_binary_op("+", arg_tb["head"], Interval(2,2)),
)
out["content"] = interval_binary_op(
">=",
out["content"],
interval_binary_op("+", arg_tb["content"], Interval(2,2)),
)
return out
def eval_gather(self, node, args):
arg_tb, node_collist = args
arg_collist = self._eval_atom_node(node_collist)
# extract the context from combination
_current_context = self._get_context(self._current_combination, [node_collist])
self.assertEnum(node, _current_context, self._current_combination,
            # x0: this makes sure the original collist is not stupid
# self.explain_collist(x0): normal gather check
# note-important: to ensure x0 comes before self.explain_collist(x0) checks, merge them into one assertEnum
lambda comb: ( lambda x0: self.fast_validate_collist(x0) )(
# original collist
self._eval_enum_prod( self._spec.get_production( comb[node_collist.tag["cpos"]] ) ),
),
tag="abs:alpha:gather:0",
)
out = self.make_abs()
out["row"] = interval_binary_op(">=", out["row"], arg_tb["row"])
out["head"] = interval_binary_op(
"<=",
out["head"],
interval_binary_op("+", arg_tb["head"], Interval(2,2)),
)
out["content"] = interval_binary_op(
"<=",
out["content"],
interval_binary_op("+", arg_tb["content"], Interval(2,2)),
)
# precise tracking
# out["col"] = interval_binary_op("<=", out["col"], arg_tb["col"])
if arg_collist[0]<0:
out["col"] = interval_binary_op(
"==",
out["col"],
interval_binary_op(
"+",
Interval(len(arg_collist),len(arg_collist)),
Interval(2,2)
)
)
else:
out["col"] = interval_binary_op(
"==",
out["col"],
interval_binary_op(
"+",
interval_binary_op("-", arg_tb["col"], Interval(len(arg_collist),len(arg_collist))),
Interval(2,2)
)
)
return out
def eval_spread(self, node, args):
arg_tb, node_col0, node_col1 = args
arg_col0 = self._eval_atom_node(node_col0)
arg_col1 = self._eval_atom_node(node_col1)
# extract the context from combination
_current_context = self._get_context(self._current_combination, [node_col0, node_col1])
self.assertEnum(node, _current_context, self._current_combination,
# note: nested lambda to store temp variable
lambda comb: (lambda x0, x1: x0 != x1)(
self._eval_enum_prod( self._spec.get_production( comb[node_col0.tag["cpos"]] ) ),
self._eval_enum_prod( self._spec.get_production( comb[node_col1.tag["cpos"]] ) )
),
tag="abs:alpha:spread:0",
)
out = self.make_abs()
out["row"] = interval_binary_op("<=", out["row"], arg_tb["row"])
out["col"] = interval_binary_op(">=", out["col"], arg_tb["col"])
out["head"] = interval_binary_op("<=", out["head"], arg_tb["content"])
out["content"] = interval_binary_op("<=", out["content"], arg_tb["content"])
return out
def eval_mutate(self, node, args):
arg_tb, node_op, node_col0, node_col1 = args
arg_op = self._eval_atom_node(node_op)
arg_col0 = self._eval_atom_node(node_col0)
arg_col1 = self._eval_atom_node(node_col1)
# extract the context from combination
_current_context = self._get_context(self._current_combination, [node_op, node_col0, node_col1])
self.assertEnum(node, _current_context, self._current_combination,
# note: nested lambda to store temp variable
lambda comb: (lambda x0, x1: x0 != x1)(
self._eval_enum_prod( self._spec.get_production( comb[node_col0.tag["cpos"]] ) ),
self._eval_enum_prod( self._spec.get_production( comb[node_col1.tag["cpos"]] ) )
),
tag="abs:alpha:mutate:0",
)
out = self.make_abs()
out["row"] = interval_binary_op("==", out["row"], arg_tb["row"])
out["col"] = interval_binary_op(
"==",
out["col"],
interval_binary_op("+", arg_tb["col"], Interval(1,1)),
)
out["head"] = interval_binary_op(
"==",
out["head"],
interval_binary_op("+", arg_tb["head"], Interval(1,1)),
)
out["content"] = interval_binary_op(">", out["content"], arg_tb["content"])
out["content"] = interval_binary_op(
"<=",
out["content"],
interval_binary_op("+", arg_tb["content"], arg_tb["row"]),
)
return out
def eval_filter(self, node, args):
arg_tb, node_op, node_col, node_int = args
arg_op = self._eval_atom_node(node_op)
arg_col = self._eval_atom_node(node_col)
arg_int = self._eval_atom_node(node_int)
out = self.make_abs()
out["row"] = interval_binary_op("<", out["row"], arg_tb["row"])
out["col"] = interval_binary_op("==", out["col"], arg_tb["col"])
out["head"] = interval_binary_op("==", out["head"], arg_tb["head"])
out["content"] = interval_binary_op("<=", out["content"], arg_tb["content"])
return out
def eval_group(self, node, args):
arg_tb, node_collist, node_op, node_col = args
arg_collist = self._eval_atom_node(node_collist)
arg_op = self._eval_atom_node(node_op)
arg_col = self._eval_atom_node(node_col)
# extract the context from combination
_current_context = self._get_context(self._current_combination, [node_collist, node_op, node_col])
self.assertEnum(node, _current_context, self._current_combination,
            # this makes sure the original collist is not stupid
# note-important: to ensure x0 comes before self.explain_collist(x0)/y checks, merge them into one assertEnum
lambda comb: ( lambda x0,y: self.fast_validate_collist(x0))(
# x0: original collist
self._eval_enum_prod( self._spec.get_production( comb[node_collist.tag["cpos"]] ) ),
# y
self._eval_enum_prod( self._spec.get_production( comb[node_col.tag["cpos"]] ) ),
),
tag="abs:alpha:group:0",
)
out = self.make_abs()
out["row"] = interval_binary_op("<=", out["row"], arg_tb["row"])
out["head"] = interval_binary_op(">", out["head"], Interval(0,0))
out["head"] = interval_binary_op(
"<=",
out["head"],
interval_binary_op("+", arg_tb["head"], Interval(1,1)),
)
# note: originally it's: out.content <= in.content + in.group + 1
# since we don't track group, it becomes: out.content <= in.content + in.row + 1
out["content"] = interval_binary_op(
"<=",
out["content"],
interval_binary_op(
"+",
arg_tb["content"],
interval_binary_op("+", arg_tb["row"], Interval(1,1)),
),
)
# precise tracking
# out["col"] = interval_binary_op(
# "<=",
# out["col"],
# interval_binary_op("+", arg_tb["col"], Interval(1,1)),
# )
if arg_collist[0]<0:
out["col"] = interval_binary_op(
"==",
out["col"],
interval_binary_op(
"+",
interval_binary_op("-", arg_tb["col"], Interval(len(arg_collist),len(arg_collist))),
Interval(1,1)
)
)
else:
out["col"] = interval_binary_op(
"==",
out["col"],
interval_binary_op("+", Interval(len(arg_collist),len(arg_collist)), Interval(1,1))
)
return out
|
"""
Extra exception types that may be thrown in CoralNet methods.
These can be useful if:
- When catching exceptions that you've thrown, you don't want
to catch any exceptions that you didn't anticipate. For example, perhaps
you were going to throw and catch a ValueError in some case, but you're
worried that you'll accidentally catch a ValueError from some other error
case that you didn't think of.
- You want the types of exceptions to better describe the nature of the
exception, for more self-documenting code. (It's advised to not go
overboard and create tons of exception types, though.)
"""
class FilenameError(Exception):
"""
When a filename isn't of the expected format; for example, perhaps 2
or more underscore-separated tokens are expected, and there are no
underscores in the filename.
"""
pass
class FileContentError(Exception):
"""
When file contents are not as expected; for example, a line in a text
file doesn't have the expected number of words, or one of the words
in the file is supposed to match something in the database but doesn't.
Contrast this with IOError, which is for problems with finding a file,
not being able to read its contents, etc.
"""
pass
class DirectoryAccessError(Exception):
"""
Raised when a directory is expected to exist, be readable, and/or be
writable, and that turns out to not be the case.
For example, a directory is specified in a settings file and
we now want to create a file in that directory, but that directory
doesn't exist.
"""
pass
class TestfileDirectoryError(Exception):
"""
When there's something wrong with a directory meant to hold
temporary test-generated files:
(1) The directory already has files in it before a test.
(2) After the test, the directory has a file that was created
before the test began. (Given (1), this is a serious corner
case, but still, we do not want to take chances with file
deletions.)
"""
pass
class ValueObjectNotFoundError(Exception):
"""
When a location value object is looked up by name in the database,
and a value object can't be found.
Basically a generic-ized error class for Value1.DoesNotExist,
Value2.DoesNotExist, etc.
"""
    pass
|
import LanguageModel
import argparse
import torch
import sys
import time
import resource
torch.no_grad()
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', default='models/test.json')
args = parser.parse_args()
model = LanguageModel.LanguageModel()
model.load_json(args.checkpoint)
model.eval()
encoded = model.encode_string("Hello!")
#encoded = encoded.unsqueeze(0)
print(encoded)
with torch.no_grad():
out = model.forward(encoded)[:, -1]
print(out.pow(2).sum())
#exit(0)
probs = out.double().exp().squeeze()
probs.div_(probs.sum())
#print(probs)
#for i,p in enumerate(probs):
# if p.item() > 0.01:
# print(model.idx_to_token[i], "%.2f" % p.item())
start = time.time()
inp = torch.LongTensor(1,1)
states = {}
with torch.no_grad():
for i in range(0,200):
probs = out.double().div(1).exp().squeeze()
probs.div_(probs.sum())
next_char_idx = torch.multinomial(probs, 1).item()
sys.stdout.write(model.idx_to_token[next_char_idx].decode(errors='ignore'))
sys.stdout.flush()
inp[0,0] = next_char_idx
#out = model.forward(inp)[:, -1]
out, outstates = model.forward_with_states(inp, states)
states[0] = outstates[0]
# if i % 100 == 0:
# print("memory @ %d: %.2f" % (i, resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024))
end = time.time()
print("time: %.2fs" % (end - start))
print("memory: %.2f" % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024))
|
from __future__ import unicode_literals
from django.shortcuts import render, HttpResponse
def index(request):
response = "Hello, I am your first request!"
return render(request, "Amniotic_App/index.html")
|
# -*- coding: utf-8 -*-
from essentia.streaming import *
import essentia.standard as es
import essentia
import librosa
import librosa.display
import numpy as np
def melspectrogram(audio, sampleRate=44100, frameSize=2048, hopSize=1024,
window='blackmanharris62', zeroPadding=0, center=True,
numberBands=[128, 96, 48, 32, 24, 16, 8],
lowFrequencyBound=0, highFrequencyBound=None,
weighting='linear', warpingFormula='slaneyMel', normalize='unit_tri'):
if highFrequencyBound is None:
highFrequencyBound = sampleRate/2
windowing = es.Windowing(type=window, normalized=False, zeroPadding=zeroPadding)
spectrum = es.Spectrum()
melbands = {}
for nBands in numberBands:
melbands[nBands] = es.MelBands(numberBands=nBands,
sampleRate=sampleRate,
lowFrequencyBound=lowFrequencyBound,
highFrequencyBound=highFrequencyBound,
inputSize=(frameSize+zeroPadding)//2+1,
weighting=weighting,
normalize=normalize,
warpingFormula=warpingFormula,
type='power')
norm10k = es.UnaryOperator(type='identity', shift=1, scale=10000)
log10 = es.UnaryOperator(type='log10')
amp2db = es.UnaryOperator(type='lin2db', scale=2)
results = essentia.Pool()
for frame in es.FrameGenerator(audio, frameSize=frameSize, hopSize=hopSize,
startFromZero=not center):
spectrumFrame = spectrum(windowing(frame))
for nBands in numberBands:
melFrame = melbands[nBands](spectrumFrame)
results.add('mel_' + str(nBands)+'_db', amp2db(melFrame))
results.add('mel_' + str(nBands)+'_log1+10kx', log10(norm10k(melFrame)))
return results
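# Hedged usage sketch (added for illustration; the filename is a placeholder and the
# band selection is arbitrary). Only functions and modules already used in this file are assumed.
def _example_melspectrogram_usage(filename='example.wav'):
    audio = es.MonoLoader(filename=filename, sampleRate=44100)()
    pool = melspectrogram(audio, numberBands=[96, 48])
    # Each descriptor is a (num_frames x num_bands) array of mel-band values.
    return pool['mel_96_db'], pool['mel_48_log1+10kx']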
def cut_audio(filename, sampleRate=44100, segment_duration=None):
audio = es.MonoLoader(filename=filename, sampleRate=sampleRate)()
if segment_duration:
segment_duration = round(segment_duration*sampleRate)
segment_start = (len(audio) - segment_duration) // 2
segment_end = segment_start + segment_duration
else:
segment_start = 0
segment_end = len(audio)
if segment_start < 0 or segment_end > len(audio):
raise ValueError('Segment duration is larger than the input audio duration')
return audio[segment_start:segment_end]
def analyze_mel(filename, segment_duration=None, maxFrequency=11025, replaygain=True):
lowlevelFrameSize=2048
lowlevelHopSize=1024
# Compute replay gain and duration on the entire file, then load the
# segment that is centered in time with replaygain applied
audio = es.MonoLoader(filename=filename)()
if replaygain:
replaygain = es.ReplayGain()(audio)
else:
replaygain = -6 # Default replaygain value in EasyLoader
if segment_duration:
segment_start = (len(audio) / 44100 - segment_duration) / 2
segment_end = segment_start + segment_duration
else:
segment_start = 0
segment_end = len(audio)/44100
if segment_start < 0 or segment_end > len(audio)/44100:
raise ValueError('Segment duration is larger than the input audio duration')
loader_mel = EasyLoader(filename=filename, replayGain=replaygain,
startTime=segment_start, endTime=segment_end)
# Processing for Mel bands
framecutter_mel = FrameCutter(frameSize=lowlevelFrameSize,
hopSize=lowlevelHopSize)
window_mel = Windowing(type='blackmanharris62', zeroPadding=lowlevelFrameSize)
spectrum_mel = Spectrum()
melbands128 = MelBands(numberBands=128,
lowFrequencyBound=0,
highFrequencyBound=maxFrequency,
inputSize=lowlevelFrameSize+1)
melbands96 = MelBands(numberBands=96,
lowFrequencyBound=0,
highFrequencyBound=maxFrequency,
inputSize=lowlevelFrameSize+1)
melbands48 = MelBands(numberBands=48,
lowFrequencyBound=0,
highFrequencyBound=maxFrequency,
inputSize=lowlevelFrameSize+1)
melbands32 = MelBands(numberBands=32,
lowFrequencyBound=0,
highFrequencyBound=maxFrequency,
inputSize=lowlevelFrameSize+1)
melbands24 = MelBands(numberBands=24,
lowFrequencyBound=0,
highFrequencyBound=maxFrequency,
inputSize=lowlevelFrameSize+1)
melbands16 = MelBands(numberBands=16,
lowFrequencyBound=0,
highFrequencyBound=maxFrequency,
inputSize=lowlevelFrameSize+1)
melbands8 = MelBands(numberBands=8,
lowFrequencyBound=0,
highFrequencyBound=maxFrequency,
inputSize=lowlevelFrameSize+1)
# Normalize Mel bands: log10(1+x*10000)
norm128 = UnaryOperator(type='identity', shift=1, scale=10000)
log10128 = UnaryOperator(type='log10')
norm96 = UnaryOperator(type='identity', shift=1, scale=10000)
log1096 = UnaryOperator(type='log10')
norm48 = UnaryOperator(type='identity', shift=1, scale=10000)
log1048 = UnaryOperator(type='log10')
norm32 = UnaryOperator(type='identity', shift=1, scale=10000)
log1032 = UnaryOperator(type='log10')
norm24 = UnaryOperator(type='identity', shift=1, scale=10000)
log1024 = UnaryOperator(type='log10')
norm16 = UnaryOperator(type='identity', shift=1, scale=10000)
log1016 = UnaryOperator(type='log10')
norm8 = UnaryOperator(type='identity', shift=1, scale=10000)
log108 = UnaryOperator(type='log10')
p = essentia.Pool()
loader_mel.audio >> framecutter_mel.signal
framecutter_mel.frame >> window_mel.frame >> spectrum_mel.frame
spectrum_mel.spectrum >> melbands128.spectrum
spectrum_mel.spectrum >> melbands96.spectrum
spectrum_mel.spectrum >> melbands48.spectrum
spectrum_mel.spectrum >> melbands32.spectrum
spectrum_mel.spectrum >> melbands24.spectrum
spectrum_mel.spectrum >> melbands16.spectrum
spectrum_mel.spectrum >> melbands8.spectrum
melbands128.bands >> norm128.array >> log10128.array >> (p, 'mel128')
melbands96.bands >> norm96.array >> log1096.array >> (p, 'mel96')
melbands48.bands >> norm48.array >> log1048.array >> (p, 'mel48')
melbands32.bands >> norm32.array >> log1032.array >> (p, 'mel32')
melbands24.bands >> norm24.array >> log1024.array >> (p, 'mel24')
melbands16.bands >> norm16.array >> log1016.array >> (p, 'mel16')
melbands8.bands >> norm8.array >> log108.array >> (p, 'mel8')
essentia.run(loader_mel)
return p
def analyze(filename, segment_duration=20):
lowlevelFrameSize=2048
lowlevelHopSize=1024
tonalFrameSize=4096
tonalHopSize=1024
# Compute replay gain and duration on the entire file, then load the
# segment that is centered in time with replaygain applied
audio = es.MonoLoader(filename=filename)()
replaygain = es.ReplayGain()(audio)
segment_start = (len(audio) / 44100 - segment_duration) / 2
segment_end = segment_start + segment_duration
if segment_start < 0 or segment_end > len(audio)/44100:
raise ValueError('Segment duration is larger than the input audio duration')
# TODO
# There's a bug in streaming mode Python wrapper: running both Mel and HPCP
# in the same network with the same loader will result in a memory error.
# This does not happen in C++. As a workaround, compute Mel and HPCP in
# two separate networks with two separate loaders.
loader_mel = EasyLoader(filename=filename, replayGain=replaygain,
startTime=segment_start, endTime=segment_end)
loader_hpcp = EasyLoader(filename=filename, replayGain=replaygain,
startTime=segment_start, endTime=segment_end)
# Processing for Mel bands
framecutter_mel = FrameCutter(frameSize=lowlevelFrameSize,
hopSize=lowlevelHopSize)
window_mel = Windowing(type='blackmanharris62')
spectrum_mel = Spectrum()
melbands = MelBands(numberBands=96,
lowFrequencyBound=0,
highFrequencyBound=11025)
# Processing for HPCPs
framecutter_hpcp = FrameCutter(frameSize=tonalFrameSize,
hopSize=tonalHopSize)
window_hpcp = Windowing(type='blackmanharris62')
spectrum_hpcp = Spectrum()
speaks = SpectralPeaks(maxPeaks=60,
magnitudeThreshold=0.00001,
minFrequency=20.0,
maxFrequency=3500.0,
orderBy='magnitude')
# Normalize Mel bands: log10(1+x*10000)
norm = UnaryOperator(type='identity', shift=1, scale=10000)
log10 = UnaryOperator(type='log10')
hpcp = HPCP(size=12,
bandPreset=False,
minFrequency=20.0,
maxFrequency=3500.0,
weightType='cosine',
windowSize=1.)
p = essentia.Pool()
loader_mel.audio >> framecutter_mel.signal
framecutter_mel.frame >> window_mel.frame >> spectrum_mel.frame
spectrum_mel.spectrum >> melbands.spectrum
melbands.bands >> norm.array >> log10.array >> (p, 'melbands')
essentia.run(loader_mel)
loader_hpcp.audio >> framecutter_hpcp.signal
framecutter_hpcp.frame >> window_hpcp.frame >> spectrum_hpcp.frame
spectrum_hpcp.spectrum >> speaks.spectrum
speaks.frequencies >> hpcp.frequencies
speaks.magnitudes >> hpcp.magnitudes
hpcp.hpcp >> (p, 'hpcp')
essentia.run(loader_hpcp)
return p
def analyze_misc(filename, segment_duration=20):
# Compute replay gain and duration on the entire file, then load the
# segment that is centered in time with replaygain applied
audio = es.MonoLoader(filename=filename)()
replaygain = es.ReplayGain()(audio)
segment_start = (len(audio) / 44100 - segment_duration) / 2
segment_end = segment_start + segment_duration
if segment_start < 0 or segment_end > len(audio)/44100:
raise ValueError('Segment duration is larger than the input audio duration')
loader = es.EasyLoader(filename=filename, replayGain=replaygain,
startTime=segment_start, endTime=segment_end)
windowing = es.Windowing(type='blackmanharris62')
spectrum = es.Spectrum()
powerspectrum = es.PowerSpectrum()
centroid = es.Centroid()
zcr = es.ZeroCrossingRate()
rms = es.RMS()
hfc = es.HFC()
pool = essentia.Pool()
audio = loader()
for frame in es.FrameGenerator(audio, frameSize=2048, hopSize=1024):
frame_spectrum = spectrum(windowing(frame))
pool.add('rms', rms(frame))
pool.add('rms_spectrum', rms(frame_spectrum))
pool.add('hfc', hfc(frame_spectrum))
pool.add('spectral_centroid', centroid(frame_spectrum))
pool.add('zcr', zcr(frame))
audio_st, sr, _, _, _, _ = es.AudioLoader(filename=filename)()
# Ugly hack because we don't have a StereoResample
left, right = es.StereoDemuxer()(audio_st)
resampler = es.Resample(inputSampleRate=sr, outputSampleRate=44100)
left = resampler(left)
right = resampler(right)
audio_st = es.StereoMuxer()(left, right)
audio_st = es.StereoTrimmer(startTime=segment_start, endTime=segment_end)(audio_st)
ebu_momentary, _, _, _ = es.LoudnessEBUR128(hopSize=1024/44100, startAtZero=True)(audio_st)
pool.set('ebu_momentary', ebu_momentary)
return pool
def analyze_hp(filename, segment_duration=20):
lowlevelFrameSize=2048
lowlevelHopSize=1024
tonalFrameSize=4096
tonalHopSize=1024
# Compute replay gain and duration on the entire file, then load the
# segment that is centered in time with replaygain applied
audio = es.MonoLoader(filename=filename)()
replaygain = es.ReplayGain()(audio)
segment_start = (len(audio) / 44100 - segment_duration) / 2
segment_end = segment_start + segment_duration
if segment_start < 0 or segment_end > len(audio)/44100:
raise ValueError('Segment duration is larger than the input audio duration')
loader = es.EasyLoader(filename=filename, replayGain=replaygain,
startTime=segment_start, endTime=segment_end)
window = es.Windowing(type='blackmanharris62')
fft = es.FFT()
stft = []
audio = loader()
for frame in es.FrameGenerator(audio, frameSize=lowlevelFrameSize, hopSize=lowlevelHopSize):
stft.append(fft(window(frame)))
# Librosa requires bins x frames format
stft = np.array(stft).T
D_harmonic, D_percussive = librosa.decompose.hpss(stft, margin=8)
D_percussive_magnitude, _ = librosa.magphase(D_percussive)
D_harmonic_magnitude, _ = librosa.magphase(D_harmonic)
# Convert back to Essentia format (frames x bins)
spectrum_harmonic = D_harmonic_magnitude.T
    spectrum_percussive = D_percussive_magnitude.T
# Processing for Mel bands
melbands = es.MelBands(numberBands=96,
lowFrequencyBound=0,
highFrequencyBound=11025)
# Normalize Mel bands: log10(1+x*10000)
norm = es.UnaryOperator(type='identity', shift=1, scale=10000)
log10 = es.UnaryOperator(type='log10')
p = essentia.Pool()
for spectrum_frame in spectrum_harmonic:
p.add('melbands_harmonic', log10(norm(melbands(spectrum_frame))))
    for spectrum_frame in spectrum_percussive:
p.add('melbands_percussive', log10(norm(melbands(spectrum_frame))))
return p
|
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils import weight_norm
class RNN(nn.Module):
"""
Base RNN class
"""
def __init__(self, input_size, hidden_size, nlayers, embed_dim,
rnn_type, pad_idx, use_cuda, dropout, bidirect):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.nlayers = nlayers
self.embed_dim = embed_dim
self.ndirect = 2 if bidirect else 1
self.embedding = nn.Embedding(input_size, embed_dim, padding_idx=pad_idx)
if rnn_type in ['GRU', 'LSTM']:
self.rnn = getattr(nn, rnn_type)(embed_dim,
hidden_size // self.ndirect,
num_layers=nlayers,
batch_first=True, dropout=dropout,
bidirectional=bidirect)
if use_cuda:
self.rnn.cuda() # turn on cuda before applying weight_norm
else:
raise ValueError("Please choose rnn type from: GRU or LSTM")
self.rnn_type = rnn_type
def forward(self, input):
"""
Override default forward function in torch.nn.Module
"""
pass
def init_hidden(self, batch_size):
# Get Tensor type from first parameter of model (e.g. cuda.FloatTensor)
# to see if we should initialize cuda tensor or not
weight = next(self.parameters()).data
h_0 = Variable(weight.new(self.nlayers * self.ndirect,
batch_size,
self.hidden_size // self.ndirect).zero_(),
requires_grad=False)
if self.rnn_type == 'LSTM':
return (h_0,
Variable(weight.new(self.nlayers * self.ndirect,
batch_size,
self.hidden_size // self.ndirect).zero_(),
requires_grad=False))
else:
return h_0
def init_weights(self):
"""
Initialize weights, including internal weights of RNN. From:
gist.github.com/thomwolf/eea8989cab5ac49919df95f6f1309d80
Apply weight normalization to internal weights of RNN.
"""
ih = (param.data for name, param in self.named_parameters() if 'weight_ih' in name)
hh = (param.data for name, param in self.named_parameters() if 'weight_hh' in name)
b = (param.data for name, param in self.named_parameters() if 'bias' in name)
for t in ih:
nn.init.xavier_uniform(t)
for t in hh:
nn.init.orthogonal(t)
for t in b:
nn.init.constant(t, 0)
self.embedding.weight.data.uniform_(-0.05, 0.05)
# Apply Weight Normalization
l = [name for name, _ in list(self.rnn.named_parameters()) if 'weight' in name]
for name in l:
weight_norm(self.rnn, name)
def is_cuda(self):
"""
Return boolean value of whether model is cuda enabled.
"""
param_type = str(type(next(self.parameters()).data))
return 'cuda' in param_type
class DecoderRNN(RNN):
"""
Basic Decoder without attentional mechanism
"""
def __init__(self, input_size, hidden_size, nlayers, embed_dim,
rnn_type, pad_idx, use_cuda, dropout, bidirect=False):
super().__init__(input_size, hidden_size, nlayers, embed_dim,
rnn_type, pad_idx, use_cuda, dropout, False) # unidirectional
self.linear = nn.Linear(hidden_size, input_size)
        self.softmax = nn.LogSoftmax(dim=1)  # explicit dim avoids the implicit-dimension deprecation warning
self.init_weights()
def init_weights(self):
super().init_weights()
self.linear.weight.data.uniform_(-0.05, 0.05)
def forward(self, input, hidden):
batch_size = input.size()[0]
embedded = self.embedding(input).unsqueeze(1)
output, hidden = self.rnn(embedded, hidden)
output = self.linear(output[:, 0, :])
output = self.softmax(output)
return output, hidden
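# Hedged usage sketch (illustrative only; the vocabulary size and dimensions are made up).
def _example_decoder_step():
    import torch
    vocab_size, hidden_size, batch_size = 100, 64, 4
    decoder = DecoderRNN(vocab_size, hidden_size, nlayers=1, embed_dim=32,
                         rnn_type='GRU', pad_idx=0, use_cuda=False, dropout=0.0)
    hidden = decoder.init_hidden(batch_size)
    tokens = torch.zeros(batch_size, dtype=torch.long)  # e.g. a batch of <sos>/padding indices
    log_probs, hidden = decoder(tokens, hidden)
    return log_probs.size()  # (batch_size, vocab_size)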
|
import nltk
from nltk.translate import AlignedSent
from nltk.translate.ibm2 import (
IBMModel2,
Model2Counts
)
from tqdm import tqdm
class IBMModel2WithProgressbar(IBMModel2):
def __init__(
self,
sentence_aligned_corpus,
iterations,
probability_tables=None
):
"""
IBM Model 2 with progress bar for training
"""
super(IBMModel2WithProgressbar, self).__init__(
sentence_aligned_corpus,
iterations, probability_tables
)
def train(self, parallel_corpus):
counts = Model2Counts()
for aligned_sentence in tqdm(parallel_corpus, unit=' samples'):
src_sentence = [None] + aligned_sentence.mots
trg_sentence = ['UNUSED'] + aligned_sentence.words # 1-indexed
l = len(aligned_sentence.mots)
m = len(aligned_sentence.words)
# E step (a): Compute normalization factors to weigh counts
total_count = self.prob_all_alignments(src_sentence, trg_sentence)
# E step (b): Collect counts
for j in range(1, m + 1):
t = trg_sentence[j]
for i in range(0, l + 1):
s = src_sentence[i]
count = self.prob_alignment_point(i, j, src_sentence, trg_sentence)
normalized_count = count / total_count[t]
counts.update_lexical_translation(normalized_count, s, t)
counts.update_alignment(normalized_count, i, j, l, m)
# M step: Update probabilities with maximum likelihood estimates
self.maximize_lexical_translation_probabilities(counts)
self.maximize_alignment_probabilities(counts)
def train_ibmmodel2(src_text, trg_text, iterations=5):
"""
train IBM model 2
:param src_text: (list) src text
:param trg_text: (list) trg text
:param iterations: (int) number of iterations to run training algorithm
:return: trained IBM model
"""
if len(src_text) != len(trg_text):
raise AssertionError("different numbers of samples in src and trg")
aligned_text = []
for src_sample, trg_sample in zip(src_text, trg_text):
al_sent = AlignedSent(src_sample, trg_sample)
aligned_text.append(al_sent)
ibm_model = IBMModel2WithProgressbar(aligned_text, iterations)
return ibm_model
def translate(ibm_model, src_tokens):
translation_tokens = []
for tok in src_tokens:
probs = ibm_model.translation_table[tok]
if len(probs) == 0:
continue
sorted_words = sorted(
[(k, v) for k, v in probs.items()],
key=lambda x: x[1],
reverse=True
)
        # Take the most probable candidate, skipping the NULL (None) source word.
        top_token = next((word for word, _ in sorted_words if word is not None), None)
        if top_token is not None:
            translation_tokens.append(top_token)
return translation_tokens
def tokenize_en(sent, lowercase=False):
toks = nltk.word_tokenize(sent)
return [tok.lower() for tok in toks] if lowercase else toks
def tokenize_od(sent):
return sent.split()
def detokenize_od(toks):
return ' '.join(toks)
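# Hedged usage sketch (toy parallel corpus, illustrative only): train a small model
# with the helpers above and translate one tokenized source sentence.
def _example_ibm2_usage():
    src_text = [['das', 'haus'], ['das', 'buch'], ['ein', 'buch']]
    trg_text = [['the', 'house'], ['the', 'book'], ['a', 'book']]
    ibm_model = train_ibmmodel2(src_text, trg_text, iterations=5)
    return translate(ibm_model, ['ein', 'haus'])  # expected to resemble ['a', 'house']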
|
"""Fixtures for Forecast.Solar integration tests."""
import datetime
from typing import Generator
from unittest.mock import MagicMock, patch
import pytest
from homeassistant.components.forecast_solar.const import (
CONF_AZIMUTH,
CONF_DAMPING,
CONF_DECLINATION,
CONF_MODULES_POWER,
DOMAIN,
)
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
@pytest.fixture(autouse=True)
async def mock_persistent_notification(hass: HomeAssistant) -> None:
"""Set up component for persistent notifications."""
await async_setup_component(hass, "persistent_notification", {})
@pytest.fixture
def mock_config_entry() -> MockConfigEntry:
"""Return the default mocked config entry."""
return MockConfigEntry(
title="Green House",
unique_id="unique",
domain=DOMAIN,
data={
CONF_LATITUDE: 52.42,
CONF_LONGITUDE: 4.42,
},
options={
CONF_API_KEY: "abcdef12345",
CONF_DECLINATION: 30,
CONF_AZIMUTH: 190,
CONF_MODULES_POWER: 5100,
CONF_DAMPING: 0.5,
},
)
@pytest.fixture
def mock_forecast_solar() -> Generator[MagicMock, None, None]:
"""Return a mocked Forecast.Solar client."""
with patch(
"homeassistant.components.forecast_solar.ForecastSolar", autospec=True
) as forecast_solar_mock:
forecast_solar = forecast_solar_mock.return_value
estimate = MagicMock()
estimate.timezone = "Europe/Amsterdam"
estimate.energy_production_today = 100
estimate.energy_production_tomorrow = 200
estimate.power_production_now = 300
estimate.power_highest_peak_time_today = datetime.datetime(
2021, 6, 27, 13, 0, tzinfo=datetime.timezone.utc
)
estimate.power_highest_peak_time_tomorrow = datetime.datetime(
2021, 6, 27, 14, 0, tzinfo=datetime.timezone.utc
)
estimate.power_production_next_hour = 400
estimate.power_production_next_6hours = 500
estimate.power_production_next_12hours = 600
estimate.power_production_next_24hours = 700
estimate.energy_current_hour = 800
estimate.energy_next_hour = 900
forecast_solar.estimate.return_value = estimate
yield forecast_solar
@pytest.fixture
async def init_integration(
hass: HomeAssistant,
mock_config_entry: MockConfigEntry,
mock_forecast_solar: MagicMock,
) -> MockConfigEntry:
"""Set up the Forecast.Solar integration for testing."""
mock_config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_config_entry.entry_id)
await hass.async_block_till_done()
return mock_config_entry
|
import unittest
from jsonasobj2 import JsonObj, loads
from linkml import METAMODEL_CONTEXT_URI, META_BASE_URI
from linkml_runtime.utils.context_utils import merge_contexts
json_1 = '{ "ex": "http://example.org/test/", "ex2": "http://example.org/test2/" }'
json_2 = '{ "foo": 17, "@context": { "ex": "http://example.org/test3/", "ex2": {"@id": "http://example.org/test4/" }}}'
context_output = """{
"@context": [
"file://local.jsonld",
"https://w3id.org/linkml/meta.context.jsonld",
{
"ex": "http://example.org/test/",
"ex2": "http://example.org/test2/"
},
{
"ex": "http://example.org/test3/",
"ex2": {
"@id": "http://example.org/test4/"
}
}
]
}"""
class ContextUtilsTestCase(unittest.TestCase):
def test_merge_contexts(self):
self.assertIsNone(merge_contexts())
self.assertEqual('file://local.jsonld', merge_contexts("local.jsonld")['@context'])
self.assertEqual('file://local.jsonld', merge_contexts(["local.jsonld"])['@context'])
self.assertEqual(METAMODEL_CONTEXT_URI, merge_contexts(METAMODEL_CONTEXT_URI)['@context'])
self.assertEqual(METAMODEL_CONTEXT_URI, merge_contexts([METAMODEL_CONTEXT_URI])['@context'])
self.assertEqual(JsonObj(ex='http://example.org/test/', ex2='http://example.org/test2/'),
merge_contexts(json_1)['@context'])
self.assertEqual(JsonObj(ex='http://example.org/test/', ex2='http://example.org/test2/'),
merge_contexts([json_1])['@context'])
self.assertEqual(JsonObj(ex='http://example.org/test3/', ex2=JsonObj(**{'@id': 'http://example.org/test4/'})),
merge_contexts(json_2)['@context'])
self.assertEqual(JsonObj(ex='http://example.org/test3/', ex2=JsonObj(**{'@id': 'http://example.org/test4/'})),
merge_contexts([json_2])['@context'])
        self.assertEqual(['file://local.jsonld',
'https://w3id.org/linkml/meta.context.jsonld',
JsonObj(ex='http://example.org/test/', ex2='http://example.org/test2/'),
JsonObj(ex='http://example.org/test3/', ex2=JsonObj(**{'@id': 'http://example.org/test4/'}))],
merge_contexts(["local.jsonld", METAMODEL_CONTEXT_URI, json_1, json_2])['@context'])
self.assertEqual(loads(context_output),
merge_contexts(["local.jsonld", METAMODEL_CONTEXT_URI, json_1, json_2]))
# Dups are not removed
self.assertEqual(
JsonObj(**{'@context': [JsonObj(ex='http://example.org/test/', ex2='http://example.org/test2/'),
JsonObj(ex='http://example.org/test/', ex2='http://example.org/test2/')]}),
merge_contexts([json_1, json_1]))
self.assertEqual('file://local.jsonld', merge_contexts("local.jsonld")['@context'])
def test_merge_contexts_base(self):
self.assertEqual(
JsonObj(**{'@context':
JsonObj(**{'@base': 'file://relloc'})}),
merge_contexts(base='file://relloc'))
self.assertEqual(loads(f'{{"@context": {{"@base": "{META_BASE_URI}"}}}}'), merge_contexts(base=META_BASE_URI))
self.assertEqual(loads("""
{"@context": [
"https://w3id.org/linkml/meta.context.jsonld",
{
"ex": "http://example.org/test/",
"ex2": "http://example.org/test2/"
},
{
"ex": "http://example.org/test3/",
"ex2": {
"@id": "http://example.org/test4/"
}
},
{
"@base": "https://w3id.org/linkml/"
}
]
}"""), merge_contexts([METAMODEL_CONTEXT_URI, json_1, json_2], base=META_BASE_URI))
if __name__ == '__main__':
unittest.main()
|
# Copyright (c) 2015 Intel Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import six
from oslo_log import log
from oslo_service import periodic_task
from oslo_service import threadgroup
from magnum.common import clients
from magnum.common import context
from magnum.common import exception
from magnum.i18n import _
from magnum.i18n import _LI
from magnum.i18n import _LW
from magnum import objects
from magnum.objects.fields import BayStatus as bay_status
LOG = log.getLogger(__name__)
def set_context(func):
@functools.wraps(func)
def handler(self, ctx):
ctx = context.make_admin_context(all_tenants=True)
context.set_ctx(ctx)
func(self, ctx)
context.set_ctx(None)
return handler
class MagnumPeriodicTasks(periodic_task.PeriodicTasks):
'''Magnum periodic Task class
Any periodic task job need to be added into this class
'''
@periodic_task.periodic_task(run_immediately=True)
@set_context
def sync_bay_status(self, ctx):
try:
LOG.debug('Starting to sync up bay status')
osc = clients.OpenStackClients(ctx)
status = [bay_status.CREATE_IN_PROGRESS,
bay_status.UPDATE_IN_PROGRESS,
bay_status.DELETE_IN_PROGRESS]
filters = {'status': status}
bays = objects.Bay.list(ctx, filters=filters)
if not bays:
return
sid_to_bay_mapping = {bay.stack_id: bay for bay in bays}
bay_stack_ids = sid_to_bay_mapping.keys()
stacks = osc.heat().stacks.list(global_tenant=True,
filters={'id': bay_stack_ids})
sid_to_stack_mapping = {s.id: s for s in stacks}
for sid in (six.viewkeys(sid_to_bay_mapping) &
six.viewkeys(sid_to_stack_mapping)):
stack = sid_to_stack_mapping[sid]
bay = sid_to_bay_mapping[sid]
if bay.status != stack.stack_status:
old_status = bay.status
bay.status = stack.stack_status
bay.status_reason = stack.stack_status_reason
bay.save()
LOG.info(_LI("Sync up bay with id %(id)s from "
"%(old_status)s to %(status)s."),
{'id': bay.id, 'old_status': old_status,
'status': bay.status})
for sid in (six.viewkeys(sid_to_bay_mapping) -
six.viewkeys(sid_to_stack_mapping)):
bay = sid_to_bay_mapping[sid]
if bay.status == bay_status.DELETE_IN_PROGRESS:
try:
bay.destroy()
except exception.BayNotFound:
LOG.info(_LI('The bay %s has been deleted by others.')
% bay.uuid)
LOG.info(_LI("Bay with id %(id)s has been deleted due "
"to stack with id %(sid)s not found in "
"Heat."),
{'id': bay.id, 'sid': sid})
elif bay.status == bay_status.CREATE_IN_PROGRESS:
bay.status = bay_status.CREATE_FAILED
bay.status_reason = _("Stack with id %s not found in "
"Heat.") % sid
bay.save()
LOG.info(_LI("Bay with id %(id)s has been set to "
"%(status)s due to stack with id %(sid)s "
"not found in Heat."),
{'id': bay.id, 'status': bay.status,
'sid': sid})
elif bay.status == bay_status.UPDATE_IN_PROGRESS:
bay.status = bay_status.UPDATE_FAILED
bay.status_reason = _("Stack with id %s not found in "
"Heat.") % sid
bay.save()
LOG.info(_LI("Bay with id %(id)s has been set to "
"%(status)s due to stack with id %(sid)s "
"not found in Heat."),
{'id': bay.id, 'status': bay.status,
'sid': sid})
except Exception as e:
            LOG.warning(_LW("Ignore error [%s] when syncing up bay status."), e,
exc_info=True)
def setup(conf):
tg = threadgroup.ThreadGroup()
pt = MagnumPeriodicTasks(conf)
tg.add_dynamic_timer(
pt.run_periodic_tasks,
periodic_interval_max=conf.periodic_interval_max,
context=None)
return tg
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sparse transformer layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import expert_utils
from tensor2tensor.utils import mlperf_log
import tensorflow.compat.v1 as tf
from state_of_sparsity.sparse_transformer.layers import sparse_attention
from state_of_sparsity.sparse_transformer.layers import sparse_layers
def transformer_encoder(encoder_input,
encoder_self_attention_bias,
hparams,
name="encoder",
nonpadding=None,
save_weights_to=None,
make_image_summary=True):
"""A stack of transformer layers.
Args:
encoder_input: a Tensor
encoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
nonpadding: optional Tensor with shape [batch_size, encoder_length]
indicating what positions are not padding. This must either be
passed in, which we do for "packed" datasets, or inferred from
encoder_self_attention_bias. The knowledge about padding is used
for pad_remover(efficiency) and to mask out padding in convolutional
layers.
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
Returns:
    y: a Tensor
"""
x = encoder_input
attention_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "attention_dropout_broadcast_dims", "")))
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS,
value=hparams.num_encoder_layers or hparams.num_hidden_layers)
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT,
value=hparams.attention_dropout)
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_ATTENTION_DENSE,
value={
"use_bias": "false",
"num_heads": hparams.num_heads,
"hidden_size": hparams.hidden_size
})
with tf.variable_scope(name):
if nonpadding is not None:
padding = 1.0 - nonpadding
else:
padding = common_attention.attention_bias_to_padding(
encoder_self_attention_bias)
nonpadding = 1.0 - padding
pad_remover = None
if hparams.use_pad_remover and not common_layers.is_xla_compiled():
pad_remover = expert_utils.PadRemover(padding)
for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
initial_sparsity = None
if hparams.get("load_masks_from"):
initial_sparsity = hparams.get("initial_sparsity")
with tf.variable_scope("layer_%d" % layer):
with tf.variable_scope("self_attention"):
y = sparse_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
encoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
max_relative_position=hparams.max_relative_position,
heads_share_relative_embedding=(
hparams.heads_share_relative_embedding),
add_relative_to_values=hparams.add_relative_to_values,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
max_length=hparams.get("max_length"),
vars_3d=hparams.get("attention_variables_3d"),
sparsity_technique=hparams.get("sparsity_technique"),
threshold=hparams.get("log_alpha_threshold"),
training=hparams.get("mode") == tf.estimator.ModeKeys.TRAIN,
clip_alpha=hparams.get("clip_log_alpha"),
initial_sparsity=initial_sparsity,
split_heads=hparams.get("split_heads"))
x = common_layers.layer_postprocess(x, y, hparams)
with tf.variable_scope("ffn"):
y = transformer_ffn_layer(
common_layers.layer_preprocess(x, hparams),
hparams,
pad_remover)
x = common_layers.layer_postprocess(x, y, hparams)
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_NORM,
value={"hidden_size": hparams.hidden_size})
return common_layers.layer_preprocess(x, hparams)
def transformer_ffn_layer(x, hparams, pad_remover=None):
"""Feed-forward layer in the transformer.
Args:
x: a Tensor of shape [batch_size, length, hparams.hidden_size]
hparams: hyperparameters for model
pad_remover: an expert_utils.PadRemover object tracking the padding
positions. If provided, when using convolutional settings, the padding
is removed before applying the convolution, and restored afterward. This
can give a significant speedup.
Returns:
a Tensor of shape [batch_size, length, hparams.hidden_size]
Raises:
ValueError: If losses arg is None, but layer generates extra losses.
"""
ffn_layer = hparams.ffn_layer
if ffn_layer != "dense_relu_dense":
raise ValueError("sparse transformer only supports dense_relu_dense ffn.")
relu_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "relu_dropout_broadcast_dims", "")))
# In simple convolution mode, use `pad_remover` to speed up processing.
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_FFN_FILTER_DENSE,
value={
"filter_size": hparams.filter_size,
"use_bias": "True",
"activation": mlperf_log.RELU
})
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_FFN_OUTPUT_DENSE,
value={
"hidden_size": hparams.hidden_size,
"use_bias": "True",
})
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_RELU_DROPOUT, value=hparams.relu_dropout)
if pad_remover:
original_shape = common_layers.shape_list(x)
# Collapse `x` across examples, and remove padding positions.
x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0))
x = tf.expand_dims(pad_remover.remove(x), axis=0)
initial_sparsity = None
if hparams.get("load_masks_from"):
initial_sparsity = hparams.get("initial_sparsity")
conv_output = sparse_layers.dense_relu_dense(
x,
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout,
dropout_broadcast_dims=relu_dropout_broadcast_dims,
sparsity_technique=hparams.get("sparsity_technique"),
threshold=hparams.get("log_alpha_threshold"),
training=hparams.get("mode") == tf.estimator.ModeKeys.TRAIN,
clip_alpha=hparams.get("clip_log_alpha"),
initial_sparsity=initial_sparsity)
if pad_remover:
# Restore `conv_output` to the original shape of `x`, including padding.
conv_output = tf.reshape(
pad_remover.restore(tf.squeeze(conv_output, axis=0)), original_shape)
return conv_output
|
import os
import time
import torch
import logging
import argparse
from utils.train import train
from utils.hparams import HParam
from utils.writer import MyWriter
from utils.graph_reader import read_graph
from dataset.dataloader import create_dataloader
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, required=True,
help="yaml file for configuration")
parser.add_argument('-p', '--checkpoint_path', type=str, default=None, required=False,
help="path of checkpoint pt file")
parser.add_argument('-m', '--model', type=str, required=True,
help="name of the model. used for logging/saving checkpoints")
args = parser.parse_args()
hp = HParam(args.config)
with open(args.config, 'r') as f:
hp_str = ''.join(f.readlines())
pt_path = os.path.join('.', hp.log.chkpt_dir)
out_dir = os.path.join(pt_path, args.model)
os.makedirs(out_dir, exist_ok=True)
log_dir = os.path.join('.', hp.log.log_dir)
log_dir = os.path.join(log_dir, args.model)
os.makedirs(log_dir, exist_ok=True)
if args.checkpoint_path is not None:
chkpt_path = args.checkpoint_path
else:
chkpt_path = None
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler(os.path.join(log_dir,
'%s-%d.log' % (args.model, time.time()))),
logging.StreamHandler()
]
)
logger = logging.getLogger()
if hp.data.train == '' or hp.data.val == '':
logger.error("hp.data.train, hp.data.val cannot be empty")
raise Exception("Please specify directories of train data.")
if hp.model.graph0 == '' or hp.model.graph1 == '' or hp.model.graph2 == '':
logger.error("hp.model.graph0, graph1, graph2 cannot be empty")
raise Exception("Please specify random DAG architecture.")
graphs = [
read_graph(hp.model.graph0),
read_graph(hp.model.graph1),
read_graph(hp.model.graph2),
]
writer = MyWriter(log_dir)
trainset = create_dataloader(hp, args, True)
valset = create_dataloader(hp, args, False)
train(out_dir, chkpt_path, trainset, valset, writer, logger, hp, hp_str, graphs)
|
import os
import re
import logging
from collections import OrderedDict
import xml.etree.ElementTree as ET
import penman
from penman.models.noop import NoOpModel
logger = logging.getLogger(__name__)
# Cache for Penman graphs so they only need to be loaded once
pgraph_cache = {}
def load_amrs_cached(amr_fpath):
global pgraph_cache
pgraphs = pgraph_cache.get(amr_fpath, None)
if pgraphs is None:
pgraphs = penman.load(amr_fpath, model=NoOpModel())
pgraph_cache[amr_fpath] = pgraphs
return pgraphs
# Class for extracting Multi-sentence AMR data
class MSAMR(object):
def __init__(self, xml_fpath):
self.sents = []
self.sent_info = {}
self.ident_chain_dict = {} # d[relationid] = list of dicts (type is key from collapsed tree tag)
self.singleton_dict = {} # d[relationid] = list of dicts (type is key from collapsed tree tag)
self.bridging_dict = {} # d[relationid] = list of dicts (type and sub-type are keys from collapsed tree tags)
self._parse_xml(xml_fpath) # fills in the above attribs
def load_amrs(self, amr_fpath):
pgraphs = load_amrs_cached(amr_fpath)
sent_ids = self.get_sentence_ids()
gdict = {g.metadata['id']:g for g in pgraphs}
odict = OrderedDict()
for sid in sent_ids:
odict[sid] = gdict[sid]
return odict
def get_sentence_ids(self):
sents = sorted(self.sents, key=lambda s:int(s['order']))
sents = [s['id'] for s in sents]
return sents
def dump_corefs(self, identities=True, singletons=True, bridging=True):
string = ''
string += 'Source : %s\n' % str(self.sent_info)
string += '\n'.join([' snum=%2d %s' % (i, str(s)) for i, s in enumerate(self.sents)]) + '\n'
if identities:
for key, vals in self.ident_chain_dict.items():
string += 'identity: %s\n' % key
for val in vals:
string += ' ' + str(val) + '\n'
if singletons:
for key, vals in self.singleton_dict.items():
string += 'singleton: %s\n' % key
for val in vals:
string += ' ' + str(val) + '\n'
if bridging:
for key, vals in self.bridging_dict.items():
string += 'bridging: %s\n' % key
for val in vals:
string += ' ' + str(val) + '\n'
return string
def _parse_xml(self, fn):
# root
tree = ET.parse(fn)
root = tree.getroot()
assert root.tag == 'document'
assert len(root) == 2
sentences = root[0]
relations = root[1]
assert sentences.tag == 'sentences'
assert relations.tag == 'relations'
# Level 2 under sentences
self.sent_info = sentences.attrib
for sent in sentences:
assert sent.tag == 'amr'
self.sents.append(sent.attrib)
# Level 2 under relations
assert len(relations) == 3
identity = relations[0]
singletons = relations[1]
bridging = relations[2]
assert identity.tag == 'identity'
assert singletons.tag == 'singletons'
assert bridging.tag == 'bridging'
# Level 3 under identity
        # These are mentions and implicit roles
for identchain in identity:
assert identchain.tag == 'identchain'
key = identchain.attrib['relationid']
assert key not in self.ident_chain_dict
self.ident_chain_dict[key] = []
for x in identchain:
entry = {'type':x.tag, **x.attrib}
self.ident_chain_dict[key].append( entry )
# Level 3 under singletons
        # Singletons are for identity chains that only participate in the bridging relations, not co-reference itself
for identchain in singletons:
assert identchain.tag == 'identchain'
key = identchain.attrib['relationid']
assert key not in self.singleton_dict
self.singleton_dict[key] = []
for x in identchain:
entry = {'type':x.tag, **x.attrib}
self.singleton_dict[key].append( entry )
# Level 3 under bridging
        # Bridging relations are for co-references that are part of a set (i.e. 'they')
        for x in bridging:
            assert x.tag in ('setmember', 'partwhole')
key = x.attrib['relationid']
assert key not in self.bridging_dict
self.bridging_dict[key] = []
for y in x:
entry = {'type':x.tag, 'subtype':y.tag, **y.attrib}
self.bridging_dict[key].append(entry)
# Functions for building the file paths needed for loading multi-sentence data
# in LDC2020T02 (AMR3). These are all for the "split" data.
class MSAMRFiles(object):
name_re = re.compile(r'msamr_(\w+)_(\d+).xml$')
def __init__(self, amr3_dir, is_train=False):
self.amr3_dir = amr3_dir
self.is_train = is_train
# Get all the files in the directory
tt_dir = 'train' if self.is_train else 'test'
ms_dir = os.path.join(self.amr3_dir, 'data', 'multisentence', 'ms-amr-split', tt_dir)
self.ms_fpaths = sorted([os.path.join(ms_dir, fn) for fn in os.listdir(ms_dir) if fn.startswith('msamr_')])
# Get the file paths for all the multi-sentence xml files
def get_ms_fpath(self, index):
return self.ms_fpaths[index]
# Get the short name for the xml file
def get_name_number(self, index):
ms_fpath = self.get_ms_fpath(index)
match = self.name_re.search(ms_fpath)
return match[1], match[2]
# Get the test name
def get_test_name(self, index):
return '%s_%s' % self.get_name_number(index)
# Get the standard (non-aligned) amr graph based on index of the xml file
def get_amr_fpath(self, index):
name, _ = self.get_name_number(index)
tt_dir = 'training' if self.is_train else 'test'
fn = 'amr-release-3.0-amrs-%s-%s.txt' % (tt_dir, name)
fpath = os.path.join(self.amr3_dir, 'data', 'amrs', 'split', tt_dir, fn)
return fpath
# Get the AMR graph with alignments based on the index of the xml file
def get_amr_aligned_fpath(self, index):
name, _ = self.get_name_number(index)
tt_dir = 'training' if self.is_train else 'test'
fn = 'amr-release-3.0-alignments-%s-%s.txt' % (tt_dir, name)
fpath = os.path.join(self.amr3_dir, 'data', 'alignments', 'split', tt_dir, fn)
return fpath
    # Get the number of files in the directory to process
def __len__(self):
return len(self.ms_fpaths)
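# Hedged usage sketch (illustrative only; the AMR 3.0 corpus directory is a placeholder path).
def _example_msamr_usage(amr3_dir='/path/to/amr_annotation_3.0'):
    ms_files = MSAMRFiles(amr3_dir, is_train=False)
    doc = MSAMR(ms_files.get_ms_fpath(0))
    graphs = doc.load_amrs(ms_files.get_amr_fpath(0))  # OrderedDict: sentence id -> penman graph
    print(doc.dump_corefs(bridging=False))
    return graphs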
|
import requests
import turtle
### Implementation details
# Global mutable state. Forgive me.
state = {
'connected_to_bot': False,
'window': None,
'turtle': None,
'distance_traveled': 0,
}
# These measurements are in "steps", which are basically pixels.
WCB_WIDTH = 500
WCB_HEIGHT = 360
SUGGESTED_REINKING_DISTANCE_IN_CM = 48
def _make_cnc_request(endpoint):
"""CNC Server is the way that madison_wcb talks to the WaterColorBot.
See https://github.com/techninja/cncserver/ for more information.
"""
if state['connected_to_bot']:
return requests.get('http://localhost:4242/' + endpoint)
### Public API
def initialize():
"""IMPORTANT: Call this function at the beginning of your program."""
try:
requests.get('http://localhost:4242/poll')
state['connected_to_bot'] = True
except requests.exceptions.ConnectionError:
state['connected_to_bot'] = False
# set up turtle
state['window'] = turtle.Screen()
state['window'].setup(width=WCB_WIDTH, height=WCB_HEIGHT)
state['turtle'] = turtle.Turtle()
state['turtle'].width(5)
point_in_direction(0)
# set up watercolorbot brush
brush_up()
wash_brush()
park()
def cleanup():
"""IMPORTANT: Call this function at the end of your program."""
brush_up()
wash_brush()
park()
def park():
"""Park the watercolorbot's brush in the top-left corner."""
_make_cnc_request("park")
def wash_brush():
"""Wash the brush in water."""
_make_cnc_request("pen.wash")
state['distance_traveled'] = 0
def get_color(index):
"""Dips the brush in paint.
Arguments:
index - an integer between 0 and 7, inclusive. Tells the bot which color you want.
"""
if index in range(0, 8):
# Send the turtle to the top-left corner of the window to imitate the position of the WCB's brush.
state['turtle'].goto(-WCB_WIDTH / 2, -WCB_HEIGHT / 2)
_make_cnc_request("tool.color./" + str(index))
# This is the order of the colors in the palette in our classroom's bot; yours may vary!
colors = ["black", "red", "orange", "yellow", "green", "blue", "purple", "brown"]
state['turtle'].color(colors[index])
state['distance_traveled'] = 0
else:
print("Color indexes must be between 0 and 7, but you gave me: " + index)
def brush_down():
"""Puts the brush in its "down" position, so that it touches the paper."""
_make_cnc_request("pen.down")
state['turtle'].pendown()
# Wiggle the turtle one step so that it marks a dot on the turtle canvas.
state['turtle'].forward(1)
state['turtle'].backward(1)
def brush_up():
"""Puts the brush in its "up" position, so that it doesn't touch the paper."""
_make_cnc_request("pen.up")
state['turtle'].penup()
def move_to(x, y):
"""Moves the brush to a particular position.
Arguments:
x - a number between -250 and 250.
y - a number between -180 and 180.
"""
_make_cnc_request("coord/{0}/{1}".format(x, y))
state['turtle'].goto(x, y)
def point_in_direction(angle):
"""Points the brush's "turtle" in the direction of the angle specified.
Arguments:
angle - a number between 0 and 360.
"""
# convert angle from regular coordinates to scratch coordinates
_make_cnc_request("move.absturn./" + str(90 - angle))
state['turtle'].setheading(angle)
def move_forward(num_steps):
"""Moves the brush forward a few steps in the direction that its "turtle" is facing.
Arguments:
num_steps - a number like 20. A bigger number makes the brush move farther.
"""
assert int(num_steps) == num_steps, "move_forward() only accepts integers, but you gave it " + str(num_steps)
_make_cnc_request("move.forward./" + str(num_steps))
state['turtle'].forward(num_steps)
state['distance_traveled'] += num_steps
def turn_left(relative_angle):
"""Turns the brush's "turtle" to the left.
Arguments:
relative_angle - a number like 10.
A bigger number makes the turtle turn farther to the left.
"""
assert int(relative_angle) == relative_angle, "turn_left() only accepts integers, but you gave it " + str(relative_angle)
_make_cnc_request("move.left./" + str(relative_angle))
state['turtle'].left(relative_angle)
def turn_right(relative_angle):
"""Turns the brush's "turtle" to the right.
Arguments:
relative_angle - a number like 10.
A bigger number makes the turtle turn farther to the right.
"""
assert int(relative_angle) == relative_angle, "turn_right() only accepts integers, but you gave it " + str(relative_angle)
_make_cnc_request("move.right./" + str(relative_angle))
state['turtle'].right(relative_angle)
def get_position():
"""Returns the brush's current position.
Return value:
A list like [-102, 50] representing the brush's current [x, y] position.
"""
return state['turtle'].position()
def get_x():
"""Returns the brush's current x-coordinate.
Return value:
    A number between -250 and 250, representing the brush's current horizontal position.
"""
return state['turtle'].xcor()
def get_y():
"""Returns the brush's current y-coordinate.
Return value:
A number between -180 and 180, representing the brush's current vertical position.
"""
return state['turtle'].ycor()
def set_reinking_distance(distance_in_cm):
"""Sets the number of centimeters the bot will draw before re-inking the brush with the last used paint.
Arguments:
distance_in_cm - an integer representing a number of centimeters.
"""
_make_cnc_request("penreink/" + str(distance_in_cm))
def get_distance_traveled():
"""Returns a number like 123, representing the distance that the brush has traveled
since the last time that it was dipped in paint or water.
NOTE: Only tracks movement triggered by calls to the `move_forward()` function.
Movement caused by `move_to()` is _not_ recorded.
This number represents the number of "steps" that the brush has traveled, not the number
of inches or centimeters; you'll have to do some experimentation on your own to figure
out e.g. how many centimeters 100 steps is equal to.
"""
return state['distance_traveled']
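# Hedged usage sketch (illustrative only): draw a small square with the public API above.
def _example_draw_square(side_length=50):
    initialize()
    get_color(4)  # green, per the classroom palette listed in get_color()
    brush_down()
    for _ in range(4):
        move_forward(side_length)
        turn_left(90)
    brush_up()
    cleanup()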
|
import adv.adv_test
import ieyasu
from slot.a import *
def module():
return Ieyasu
class Ieyasu(ieyasu.Ieyasu):
comment = ''
def prerun(this):
super().prerun()
from adv.adv_test import sim_duration
if this.condition('always poisoned'):
this.afflics.poison.resist=0
this.afflics.poison.on('always_poisoned', 1, 0, duration=sim_duration, iv=sim_duration)
def d_slots(this):
this.slots.a = HoH()+The_Plaguebringer()
if __name__ == '__main__':
conf = {}
adv.adv_test.test(module(), conf, verbose=-2)
|
#functions to handle request to microsoft store
class msft():
#color terminal output
Red = '\033[91m'
Green = '\033[92m'
Yellow = '\033[93m'
Endc = '\033[0m'
#button html classes
X_class_text = '_-_-node_modules--xbox-web-partner-core-build-pages-BundleBuilder-Components-BundleBuilderHeader-__BundleBuilderHeader-module___checkoutButton w-100 bg-light-green btn btn-primary'
S_class_text = '_-_-node_modules--xbox-web-partner-core-build-pages-BundleBuilder-Components-BundleBuilderHeader-__BundleBuilderHeader-module___checkoutButton w-100 bg-light-green text-gray-900 btn btn-primary'
time_format = '%I:%M %p'
def __init__(self, requests, BeautifulSoup, datetime, client):
self.requests = requests
self.BeautifulSoup = BeautifulSoup
self.datetime = datetime
self.client = client
def xbox_series_X(self):
result = self.requests.get("https://www.xbox.com/en-us/configure/8wj714n3rbtl")
msft_xbox_website = result.content
soup = self.BeautifulSoup(msft_xbox_website, 'html.parser')
#can we do a specific one time search for that btn. 1 is there 0 its not.
button = soup.find('button', { 'class' : self.X_class_text })
if button is not None:
            # run it through a double check just in case
if self.msft_xbox_double_check(button.text, button.get('aria-label')): #Out of stock
now = self.datetime.now()
current_time = now.strftime(self.time_format)
print('') #padding
print('Microsoft Store : Xbox Series X 1TB {:>26} {:>10}'.format(f'{self.Red} {button.text} {self.Endc}', current_time))
return 'MSFT Store : Series X Out of stock {0}'.format(current_time)
else: #Button is there but don't know
now = self.datetime.now()
current_time = now.strftime(self.time_format)
                print('Microsoft Store : Xbox Series X 1TB {:>26} {:>10}'.format(f'{self.Yellow} Unsure {self.Endc}', current_time))
print('')
return 'MSFT Store : Series X Unsure '
        else: # Possibly in stock
now = self.datetime.now()
current_time = now.strftime(self.time_format)
print('')
print('Microsoft Store : Xbox Series X 1TB {:>26} {:>10}'.format(f'{self.Green} In Stock {self.Endc}', current_time))
return 'MSFT Store : Series X In Stock'
def xbox_series_S(self):
result = self.requests.get("https://www.xbox.com/en-us/configure/942J774TP9JN?ranMID=24542&ranEAID=AKGBlS8SPlM&ranSiteID=AKGBlS8SPlM-rraowjl6v6LYgVrhvaWJcQ&epi=AKGBlS8SPlM-rraowjl6v6LYgVrhvaWJcQ&irgwc=1&OCID=AID2000142_aff_7593_1243925&tduid=%28ir__lgev9o9dlkkfq0rz2kainzir222xu1tshxpyuevp00%29%287593%29%281243925%29%28AKGBlS8SPlM-rraowjl6v6LYgVrhvaWJcQ%29%28%29&irclickid=_lgev9o9dlkkfq0rz2kainzir222xu1tshxpyuevp00")
msft_xbox_website = result.content
soup = self.BeautifulSoup(msft_xbox_website, 'html.parser')
#can we do a specific one time search for that btn. 1 is there 0 its not.
button = soup.find('button', { 'class' : self.S_class_text })
if button is not None:
#run it through a double check just in case
if self.msft_xbox_double_check(button.text, button.get('aria-label')): #Out of stock
now = self.datetime.now()
current_time = now.strftime(self.time_format)
print('Microsoft Store : Xbox Series S 512GB {:>24} {:>10}'.format(f'{self.Red} {button.text} {self.Endc}', current_time))
print('') #padding
return 'MSFT Store : Series S Out of stock {0}'.format(current_time)
else: #Button is there but don't know
now = self.datetime.now()
current_time = now.strftime(self.time_format)
                print('Microsoft Store : Xbox Series S 512GB {:>24} {:>10}'.format(f'{self.Yellow} Unsure {self.Endc}', current_time))
                return 'MSFT Store : Series S Unsure '
        else: # Possibly in stock
now = self.datetime.now()
current_time = now.strftime(self.time_format)
print('Microsoft Store : Xbox Series S 512GB {:>24} {:>10}'.format(f'{self.Green} In Stock {self.Endc}', current_time))
print('')
return 'MSFT Store : Series S In Stock'
def msft_xbox_double_check(self, txt, label):
return txt == 'Out of stock' and label == 'Checkout bundle'
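# Hedged usage sketch (illustrative only): the dependencies are injected exactly as the
# constructor above expects; the notification client is left as None here.
def _example_msft_usage():
    import datetime
    import requests
    from bs4 import BeautifulSoup
    store = msft(requests, BeautifulSoup, datetime.datetime, client=None)
    return store.xbox_series_X(), store.xbox_series_S()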
|
from enum import Enum, auto
from parse.node import NodeType, NodeFunctionExpression
from interpreter.typing.basic_type import BasicType
from interpreter.function import Function
from interpreter.basic_value import BasicValue
class VariableType(Enum):
Auto = 'auto'
Int = 'int'
String = 'str'
Dict = 'dict'
Any = 'any'
Function = 'func'
Type = 'type'
Array = auto()
Object = auto() # Class, data structure, etc.
# class Variable(BasicValue):
# def __init__(self, name, vtype, value):
# BasicValue.__init__(self, value)
# self.name = name
# self.value = 0
# if (type(value) is NodeFunctionExpression):
# self.value = Function(name, vtype, value)
# else:
# self.value = value
# self.type = vtype
# def clone(self):
# return Variable(self.name, self.vtype, self.value) |
#!/usr/bin/env python3
import multiprocessing
import numpy as np
import os
import re
import subprocess
import sys
class colors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def run_program(optimizer, problem_file):
try:
result = subprocess.run(
['python3', str(os.path.join(os.path.dirname(__file__), '../program/program.py')), '--optimizer', optimizer, '--problem', problem_file, '--script'],
stdout=subprocess.PIPE)
except:
print("failed to run optimizer '{}' with problem '{}'".format(
optimizer, problem_file), file=sys.stderr)
sys.exit(-1)
return float(result.stdout.decode('utf-8').strip())
problem_file_path = '../assignment/Test Data/'
makespan_baseline = {
'1.txt': 55.0,
'2.txt': 930.0,
'3.txt': 1165.0,
'4.txt': 1005.0,
'5.txt': 1235.0,
'6.txt': 943.0
}
problem_files = sorted(
    filter(lambda filename: re.match(r'\d+\.txt', filename),
os.listdir(problem_file_path)))
pool = multiprocessing.Pool(1)  # or multiprocessing.Pool(multiprocessing.cpu_count())
run_count = 5
optimizers = ['aco', 'ba', 'pso']
makespan_values = np.zeros((len(optimizers), len(problem_files), run_count))
evaluations = [
(optimizer_index, problem_index, run_index, pool.apply_async(run_program, (optimizer, os.path.join(problem_file_path, problem_file))))
for problem_index, problem_file in enumerate(problem_files)
for optimizer_index, optimizer in enumerate(optimizers)
for run_index in range(run_count)]
for evaluation_index, evaluation in enumerate(evaluations):
optimizer_index, problem_index, run_index, result = evaluation
makespan = result.get()
makespan_values[optimizer_index, problem_index, run_index] = makespan
print('{:.2f}%'.format(100 * (evaluation_index + 1) / len(evaluations)))
pool.close()
pool.join()
def format_makespan(name, value, baseline):
color = '' if not baseline else colors.OKGREEN if value <= (baseline * 1.1) else colors.FAIL
return '{}{:>6.1f} {:>5.1f}% ({}){}'.format(
color, value, 100 * value / baseline, name, colors.ENDC)
for optimizer_index in range(len(optimizers)):
print(optimizers[optimizer_index])
for problem_index, problem_file in enumerate(problem_files):
baseline = makespan_baseline[problem_file]
min_makespan = np.min(makespan_values[optimizer_index, problem_index])
mean_makespan = np.mean(makespan_values[optimizer_index, problem_index])
print('{}: {} {} {:>6.1f} (baseline)'.format(
problem_file,
format_makespan('min', min_makespan, baseline),
format_makespan('mean', mean_makespan, baseline),
baseline))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2020-07-06 18:23
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('emgapi', '0026_auto_20200612_1102'),
]
operations = [
migrations.CreateModel(
name='ChecksumAlgorithm',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_column='NAME', max_length=255, unique=True)),
],
options={
'db_table': 'CHECKSUM_ALGORITHM',
},
),
migrations.AlterModelOptions(
name='analysismetadatavariablenames',
options={'verbose_name': 'analysis meta variable name'},
),
migrations.AlterModelOptions(
name='antismashgc',
options={'verbose_name_plural': 'antiSMASH clusters'},
),
migrations.AlterModelOptions(
name='assembly',
options={'ordering': ('accession',), 'verbose_name_plural': 'assemblies'},
),
migrations.AlterModelOptions(
name='assemblyrun',
options={'verbose_name_plural': 'assembly runs'},
),
migrations.AlterModelOptions(
name='assemblysample',
options={'verbose_name_plural': 'assembly samples'},
),
migrations.AlterModelOptions(
name='blacklistedstudy',
options={'managed': False, 'verbose_name_plural': 'blacklisted studies'},
),
migrations.AlterModelOptions(
name='cogcat',
options={'verbose_name_plural': 'COG categories'},
),
migrations.AlterModelOptions(
name='variablenames',
options={'verbose_name': 'variable name'},
),
migrations.AddField(
model_name='analysisjobdownload',
name='file_checksum',
field=models.CharField(blank=True, db_column='CHECKSUM', max_length=255),
),
migrations.AddField(
model_name='genomedownload',
name='file_checksum',
field=models.CharField(blank=True, db_column='CHECKSUM', max_length=255),
),
migrations.AddField(
model_name='releasedownload',
name='file_checksum',
field=models.CharField(blank=True, db_column='CHECKSUM', max_length=255),
),
migrations.AddField(
model_name='studydownload',
name='file_checksum',
field=models.CharField(blank=True, db_column='CHECKSUM', max_length=255),
),
migrations.AlterField(
model_name='genome',
name='ena_genome_accession',
field=models.CharField(blank=True, db_column='ENA_GENOME_ACCESSION', max_length=20, null=True, unique=True),
),
migrations.AlterField(
model_name='genome',
name='ena_sample_accession',
field=models.CharField(blank=True, db_column='ENA_SAMPLE_ACCESSION', max_length=20, null=True),
),
migrations.AlterField(
model_name='genome',
name='geo_origin',
field=models.ForeignKey(blank=True, db_column='GEOGRAPHIC_ORIGIN', null=True, on_delete=django.db.models.deletion.CASCADE, to='emgapi.GeographicLocation'),
),
migrations.AlterField(
model_name='genome',
name='img_genome_accession',
field=models.CharField(blank=True, db_column='IMG_GENOME_ACCESSION', max_length=20, null=True, unique=True),
),
migrations.AlterField(
model_name='genome',
name='ncbi_genome_accession',
field=models.CharField(blank=True, db_column='NCBI_GENOME_ACCESSION', max_length=20, null=True, unique=True),
),
migrations.AlterField(
model_name='genome',
name='ncbi_sample_accession',
field=models.CharField(blank=True, db_column='NCBI_SAMPLE_ACCESSION', max_length=20, null=True),
),
migrations.AlterField(
model_name='genome',
name='ncbi_study_accession',
field=models.CharField(blank=True, db_column='NCBI_STUDY_ACCESSION', max_length=20, null=True),
),
migrations.AlterField(
model_name='genome',
name='num_genomes_non_redundant',
field=models.IntegerField(blank=True, db_column='PANGENOME_NON_RED_GENOMES', null=True),
),
migrations.AlterField(
model_name='genome',
name='num_genomes_total',
field=models.IntegerField(blank=True, db_column='PANGENOME_TOTAL_GENOMES', null=True),
),
migrations.AlterField(
model_name='genome',
name='pangenome_accessory_size',
field=models.IntegerField(blank=True, db_column='PANGENOME_ACCESSORY_PROP', null=True),
),
migrations.AlterField(
model_name='genome',
name='pangenome_core_size',
field=models.IntegerField(blank=True, db_column='PANGENOME_CORE_PROP', null=True),
),
migrations.AlterField(
model_name='genome',
name='pangenome_eggnog_coverage',
field=models.FloatField(blank=True, db_column='PANGENOME_EGGNOG_COV', null=True),
),
migrations.AlterField(
model_name='genome',
name='pangenome_ipr_coverage',
field=models.FloatField(blank=True, db_column='PANGENOME_IPR_COV', null=True),
),
migrations.AlterField(
model_name='genome',
name='pangenome_size',
field=models.IntegerField(blank=True, db_column='PANGENOME_SIZE', null=True),
),
migrations.AlterField(
model_name='genome',
name='patric_genome_accession',
field=models.CharField(blank=True, db_column='PATRIC_GENOME_ACCESSION', max_length=20, null=True, unique=True),
),
migrations.AddField(
model_name='analysisjobdownload',
name='checksum_algorithm',
field=models.ForeignKey(blank=True, db_column='CHECKSUM_ALGORITHM', null=True, on_delete=django.db.models.deletion.CASCADE, to='emgapi.ChecksumAlgorithm'),
),
migrations.AddField(
model_name='genomedownload',
name='checksum_algorithm',
field=models.ForeignKey(blank=True, db_column='CHECKSUM_ALGORITHM', null=True, on_delete=django.db.models.deletion.CASCADE, to='emgapi.ChecksumAlgorithm'),
),
migrations.AddField(
model_name='releasedownload',
name='checksum_algorithm',
field=models.ForeignKey(blank=True, db_column='CHECKSUM_ALGORITHM', null=True, on_delete=django.db.models.deletion.CASCADE, to='emgapi.ChecksumAlgorithm'),
),
migrations.AddField(
model_name='studydownload',
name='checksum_algorithm',
field=models.ForeignKey(blank=True, db_column='CHECKSUM_ALGORITHM', null=True, on_delete=django.db.models.deletion.CASCADE, to='emgapi.ChecksumAlgorithm'),
),
]
|
# Copyright (c) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from oslo_log import log as logging
from oslo_service import loopingcall
from cinder import coordination
from cinder import exception
from cinder.i18n import _
from cinder.volume.drivers.dell_emc.vmax import utils
LOG = logging.getLogger(__name__)
WRITE_DISABLED = "Write Disabled"
UNLINK_INTERVAL = 15
UNLINK_RETRIES = 30
class VMAXProvision(object):
"""Provisioning Class for Dell EMC VMAX volume drivers.
It supports VMAX arrays.
"""
def __init__(self, rest):
self.utils = utils.VMAXUtils()
self.rest = rest
def create_storage_group(
self, array, storagegroup_name, srp, slo, workload,
extra_specs, do_disable_compression=False):
"""Create a new storage group.
:param array: the array serial number
:param storagegroup_name: the group name (String)
:param srp: the SRP (String)
:param slo: the SLO (String)
:param workload: the workload (String)
:param extra_specs: additional info
:param do_disable_compression: disable compression flag
:returns: storagegroup - storage group object
"""
start_time = time.time()
@coordination.synchronized("emc-sg-{storage_group}")
def do_create_storage_group(storage_group):
# Check if storage group has been recently created
storagegroup = self.rest.get_storage_group(
array, storagegroup_name)
if storagegroup is None:
storagegroup = self.rest.create_storage_group(
array, storage_group, srp, slo, workload, extra_specs,
do_disable_compression)
LOG.debug("Create storage group took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
LOG.info("Storage group %(sg)s created successfully.",
{'sg': storagegroup_name})
else:
LOG.info("Storage group %(sg)s already exists.",
{'sg': storagegroup_name})
return storagegroup
return do_create_storage_group(storagegroup_name)
def create_volume_from_sg(self, array, volume_name, storagegroup_name,
volume_size, extra_specs):
"""Create a new volume in the given storage group.
:param array: the array serial number
:param volume_name: the volume name (String)
:param storagegroup_name: the storage group name
:param volume_size: volume size (String)
:param extra_specs: the extra specifications
:returns: dict -- volume_dict - the volume dict
"""
@coordination.synchronized("emc-sg-{storage_group}")
def do_create_volume_from_sg(storage_group):
start_time = time.time()
volume_dict = self.rest.create_volume_from_sg(
array, volume_name, storage_group,
volume_size, extra_specs)
LOG.debug("Create volume from storage group "
"took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
return volume_dict
return do_create_volume_from_sg(storagegroup_name)
def delete_volume_from_srp(self, array, device_id, volume_name):
"""Delete a volume from the srp.
:param array: the array serial number
:param device_id: the volume device id
:param volume_name: the volume name
"""
start_time = time.time()
LOG.debug("Delete volume %(volume_name)s from srp.",
{'volume_name': volume_name})
self.rest.delete_volume(array, device_id)
LOG.debug("Delete volume took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(
start_time, time.time())})
def create_volume_snapvx(self, array, source_device_id,
snap_name, extra_specs, ttl=0):
"""Create a snapVx of a volume.
:param array: the array serial number
:param source_device_id: source volume device id
:param snap_name: the snapshot name
:param extra_specs: the extra specifications
:param ttl: time to live in hours, defaults to 0
"""
start_time = time.time()
LOG.debug("Create Snap Vx snapshot of: %(source)s.",
{'source': source_device_id})
self.rest.create_volume_snap(
array, snap_name, source_device_id, extra_specs, ttl)
LOG.debug("Create volume snapVx took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
def create_volume_replica(
self, array, source_device_id, target_device_id,
snap_name, extra_specs, create_snap=False):
"""Create a snap vx of a source and copy to a target.
:param array: the array serial number
:param source_device_id: source volume device id
:param target_device_id: target volume device id
:param snap_name: the name for the snap shot
:param extra_specs: extra specifications
:param create_snap: Flag for create snapvx
"""
start_time = time.time()
if create_snap:
# We are creating a temporary snapshot. Specify a ttl of 1 hour
self.create_volume_snapvx(array, source_device_id,
snap_name, extra_specs, ttl=1)
# Link source to target
self.rest.modify_volume_snap(
array, source_device_id, target_device_id, snap_name,
extra_specs, link=True)
LOG.debug("Create element replica took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
def break_replication_relationship(
self, array, target_device_id, source_device_id, snap_name,
extra_specs, generation=0):
"""Unlink a snapshot from its target volume.
:param array: the array serial number
:param source_device_id: source volume device id
:param target_device_id: target volume device id
:param snap_name: the name for the snap shot
:param extra_specs: extra specifications
:param generation: the generation number of the snapshot
"""
LOG.debug("Break snap vx link relationship between: %(src)s "
"and: %(tgt)s.",
{'src': source_device_id, 'tgt': target_device_id})
self._unlink_volume(array, source_device_id, target_device_id,
snap_name, extra_specs,
list_volume_pairs=None, generation=generation)
def _unlink_volume(
self, array, source_device_id, target_device_id, snap_name,
extra_specs, list_volume_pairs=None, generation=0):
"""Unlink a target volume from its source volume.
:param array: the array serial number
:param source_device_id: the source device id
:param target_device_id: the target device id
:param snap_name: the snap name
:param extra_specs: extra specifications
:param list_volume_pairs: list of volume pairs, optional
:param generation: the generation number of the snapshot
:return: return code
"""
def _unlink_vol():
"""Called at an interval until the synchronization is finished.
:raises: loopingcall.LoopingCallDone
"""
retries = kwargs['retries']
try:
kwargs['retries'] = retries + 1
if not kwargs['modify_vol_success']:
self.rest.modify_volume_snap(
array, source_device_id, target_device_id, snap_name,
extra_specs, unlink=True,
list_volume_pairs=list_volume_pairs,
generation=generation)
kwargs['modify_vol_success'] = True
except exception.VolumeBackendAPIException:
pass
if kwargs['retries'] > UNLINK_RETRIES:
LOG.error("_unlink_volume failed after %(retries)d "
"tries.", {'retries': retries})
raise loopingcall.LoopingCallDone(retvalue=30)
if kwargs['modify_vol_success']:
raise loopingcall.LoopingCallDone()
kwargs = {'retries': 0,
'modify_vol_success': False}
timer = loopingcall.FixedIntervalLoopingCall(_unlink_vol)
rc = timer.start(interval=UNLINK_INTERVAL).wait()
return rc
def delete_volume_snap(self, array, snap_name,
source_device_id, restored=False, generation=0):
"""Delete a snapVx snapshot of a volume.
:param array: the array serial number
:param snap_name: the snapshot name
:param source_device_id: the source device id
:param restored: Flag to indicate if restored session is being deleted
:param generation: the snapshot generation number
"""
LOG.debug("Delete SnapVx: %(snap_name)s for volume %(vol)s.",
{'vol': source_device_id, 'snap_name': snap_name})
self.rest.delete_volume_snap(
array, snap_name, source_device_id, restored, generation)
def is_restore_complete(self, array, source_device_id,
snap_name, extra_specs):
"""Check and wait for a restore to complete
:param array: the array serial number
:param source_device_id: source device id
:param snap_name: snapshot name
:param extra_specs: extra specification
:returns: bool
"""
def _wait_for_restore():
"""Called at an interval until the restore is finished.
:raises: loopingcall.LoopingCallDone
:raises: VolumeBackendAPIException
"""
retries = kwargs['retries']
try:
kwargs['retries'] = retries + 1
if not kwargs['wait_for_restore_called']:
if self._is_restore_complete(
array, source_device_id, snap_name):
kwargs['wait_for_restore_called'] = True
except Exception:
exception_message = (_("Issue encountered waiting for "
"restore."))
LOG.exception(exception_message)
raise exception.VolumeBackendAPIException(
message=exception_message)
if kwargs['wait_for_restore_called']:
raise loopingcall.LoopingCallDone()
if kwargs['retries'] > int(extra_specs[utils.RETRIES]):
LOG.error("_wait_for_restore failed after %(retries)d "
"tries.", {'retries': retries})
raise loopingcall.LoopingCallDone(
retvalue=int(extra_specs[utils.RETRIES]))
kwargs = {'retries': 0,
'wait_for_restore_called': False}
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_restore)
rc = timer.start(interval=int(extra_specs[utils.INTERVAL])).wait()
return rc
def _is_restore_complete(self, array, source_device_id, snap_name):
"""Helper function to check if restore is complete.
:param array: the array serial number
:param source_device_id: source device id
:param snap_name: the snapshot name
:returns: restored -- bool
"""
restored = False
snap_details = self.rest.get_volume_snap(
array, source_device_id, snap_name)
if snap_details:
linked_devices = snap_details.get("linkedDevices", [])
for linked_device in linked_devices:
if ('targetDevice' in linked_device and
source_device_id == linked_device['targetDevice']):
if ('state' in linked_device and
linked_device['state'] == "Restored"):
restored = True
return restored
def delete_temp_volume_snap(self, array, snap_name,
source_device_id, generation=0):
"""Delete the temporary snapshot created for clone operations.
There can be instances where the source and target both attempt to
delete a temp snapshot simultaneously, so we must lock the snap and
then double check it is on the array.
:param array: the array serial number
:param snap_name: the snapshot name
:param source_device_id: the source device id
:param generation: the generation number for the snapshot
"""
@coordination.synchronized("emc-snapvx-{snapvx_name}")
def do_delete_temp_snap(snapvx_name):
# Ensure snap has not been recently deleted
if self.rest.get_volume_snap(
array, source_device_id, snapvx_name, generation):
self.delete_volume_snap(
array, snapvx_name, source_device_id,
restored=False, generation=generation)
do_delete_temp_snap(snap_name)
def delete_volume_snap_check_for_links(
self, array, snap_name, source_devices, extra_specs, generation=0):
"""Check if a snap has any links before deletion.
If a snapshot has any links, break the replication relationship
before deletion.
:param array: the array serial number
:param snap_name: the snapshot name
:param source_devices: the source device ids
:param extra_specs: the extra specifications
:param generation: the generation number for the snapshot
"""
list_device_pairs = []
if not isinstance(source_devices, list):
source_devices = [source_devices]
for source_device in source_devices:
LOG.debug("Check for linked devices to SnapVx: %(snap_name)s "
"for volume %(vol)s.",
{'vol': source_device, 'snap_name': snap_name})
linked_list = self.rest.get_snap_linked_device_list(
array, source_device, snap_name, generation)
if len(linked_list) == 1:
target_device = linked_list[0]['targetDevice']
list_device_pairs.append((source_device, target_device))
else:
for link in linked_list:
# If a single source volume has multiple targets,
# we must unlink each target individually
target_device = link['targetDevice']
                    self._unlink_volume(array, source_device, target_device,
                                        snap_name, extra_specs,
                                        generation=generation)
if list_device_pairs:
self._unlink_volume(array, "", "", snap_name, extra_specs,
list_volume_pairs=list_device_pairs,
generation=generation)
self.delete_volume_snap(array, snap_name, source_devices,
restored=False, generation=generation)
def extend_volume(self, array, device_id, new_size, extra_specs,
rdf_group=None):
"""Extend a volume.
:param array: the array serial number
:param device_id: the volume device id
:param new_size: the new size (GB)
:param extra_specs: the extra specifications
:param rdf_group: the rdf group number, if required
:returns: status_code
"""
start_time = time.time()
if rdf_group:
@coordination.synchronized('emc-rg-{rdf_group}')
def _extend_replicated_volume(rdf_group):
self.rest.extend_volume(array, device_id,
new_size, extra_specs)
_extend_replicated_volume(rdf_group)
else:
self.rest.extend_volume(array, device_id, new_size, extra_specs)
LOG.debug("Extend VMAX volume took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
def get_srp_pool_stats(self, array, array_info):
"""Get the srp capacity stats.
:param array: the array serial number
:param array_info: the array dict
:returns: total_capacity_gb
:returns: remaining_capacity_gb
:returns: subscribed_capacity_gb
:returns: array_reserve_percent
"""
total_capacity_gb = 0
remaining_capacity_gb = 0
subscribed_capacity_gb = 0
array_reserve_percent = 0
srp = array_info['srpName']
LOG.debug(
"Retrieving capacity for srp %(srpName)s on array %(array)s.",
{'srpName': srp, 'array': array})
srp_details = self.rest.get_srp_by_name(array, srp)
if not srp_details:
LOG.error("Unable to retrieve srp instance of %(srpName)s on "
"array %(array)s.",
{'srpName': srp, 'array': array})
            return 0, 0, 0, 0
try:
total_capacity_gb = srp_details['total_usable_cap_gb']
try:
used_capacity_gb = srp_details['total_used_cap_gb']
remaining_capacity_gb = float(
total_capacity_gb - used_capacity_gb)
except KeyError:
remaining_capacity_gb = srp_details['fba_free_capacity']
subscribed_capacity_gb = srp_details['total_subscribed_cap_gb']
array_reserve_percent = srp_details['reserved_cap_percent']
except KeyError:
pass
return (total_capacity_gb, remaining_capacity_gb,
subscribed_capacity_gb, array_reserve_percent)
def verify_slo_workload(self, array, slo, workload, srp):
"""Check if SLO and workload values are valid.
:param array: the array serial number
:param slo: Service Level Object e.g bronze
:param workload: workload e.g DSS
:param srp: the storage resource pool name
        :returns: boolean, boolean -- is_valid_slo, is_valid_workload
"""
is_valid_slo, is_valid_workload = False, False
if workload and workload.lower() == 'none':
workload = None
if not workload:
is_valid_workload = True
if slo and slo.lower() == 'none':
slo = None
valid_slos = self.rest.get_slo_list(array)
valid_workloads = self.rest.get_workload_settings(array)
for valid_slo in valid_slos:
if slo == valid_slo:
is_valid_slo = True
break
for valid_workload in valid_workloads:
if workload == valid_workload:
is_valid_workload = True
break
if not slo:
is_valid_slo = True
if workload:
is_valid_workload = False
if not is_valid_slo:
LOG.error(
"SLO: %(slo)s is not valid. Valid values are: "
"%(valid_slos)s.", {'slo': slo, 'valid_slos': valid_slos})
if not is_valid_workload:
LOG.error(
"Workload: %(workload)s is not valid. Valid values are "
"%(valid_workloads)s. Note you cannot "
"set a workload without an SLO.",
{'workload': workload, 'valid_workloads': valid_workloads})
return is_valid_slo, is_valid_workload
def get_slo_workload_settings_from_storage_group(
self, array, sg_name):
"""Get slo and workload settings from a storage group.
:param array: the array serial number
:param sg_name: the storage group name
:returns: storage group slo settings
"""
slo = 'NONE'
workload = 'NONE'
storage_group = self.rest.get_storage_group(array, sg_name)
if storage_group:
try:
slo = storage_group['slo']
workload = storage_group['workload']
except KeyError:
pass
else:
exception_message = (_(
"Could not retrieve storage group %(sg_name)s. ") %
{'sg_name': sg_name})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
message=exception_message)
return '%(slo)s+%(workload)s' % {'slo': slo, 'workload': workload}
@coordination.synchronized('emc-rg-{rdf_group}')
def break_rdf_relationship(self, array, device_id, target_device,
rdf_group, rep_extra_specs, state):
"""Break the rdf relationship between a pair of devices.
:param array: the array serial number
:param device_id: the source device id
:param target_device: target device id
:param rdf_group: the rdf group number
:param rep_extra_specs: replication extra specs
:param state: the state of the rdf pair
"""
LOG.info("Suspending rdf pair: source device: %(src)s "
"target device: %(tgt)s.",
{'src': device_id, 'tgt': target_device})
if state.lower() == utils.RDF_SYNCINPROG_STATE:
self.rest.wait_for_rdf_consistent_state(
array, device_id, target_device,
rep_extra_specs, state)
if state.lower() == utils.RDF_SUSPENDED_STATE:
LOG.info("RDF pair is already suspended")
else:
self.rest.modify_rdf_device_pair(
array, device_id, rdf_group, rep_extra_specs, suspend=True)
self.delete_rdf_pair(array, device_id, rdf_group,
target_device, rep_extra_specs)
def break_metro_rdf_pair(self, array, device_id, target_device,
rdf_group, rep_extra_specs, metro_grp):
"""Delete replication for a Metro device pair.
Need to suspend the entire group before we can delete a single pair.
:param array: the array serial number
:param device_id: the device id
:param target_device: the target device id
:param rdf_group: the rdf group number
:param rep_extra_specs: the replication extra specifications
:param metro_grp: the metro storage group name
"""
# Suspend I/O on the RDF links...
LOG.info("Suspending I/O for all volumes in the RDF group: %(rdfg)s",
{'rdfg': rdf_group})
self.disable_group_replication(
array, metro_grp, rdf_group, rep_extra_specs)
self.delete_rdf_pair(array, device_id, rdf_group,
target_device, rep_extra_specs)
def delete_rdf_pair(
self, array, device_id, rdf_group, target_device, extra_specs):
"""Delete an rdf pairing.
If the replication mode is synchronous, only one attempt is required
to delete the pair. Otherwise, we need to wait until all the tracks
are cleared before the delete will be successful. As there is
currently no way to track this information, we keep attempting the
operation until it is successful.
:param array: the array serial number
:param device_id: source volume device id
:param rdf_group: the rdf group number
:param target_device: the target device
:param extra_specs: extra specifications
"""
LOG.info("Deleting rdf pair: source device: %(src)s "
"target device: %(tgt)s.",
{'src': device_id, 'tgt': target_device})
if (extra_specs.get(utils.REP_MODE) and
extra_specs.get(utils.REP_MODE) == utils.REP_SYNC):
return self.rest.delete_rdf_pair(array, device_id, rdf_group)
def _delete_pair():
"""Delete a rdf volume pair.
Called at an interval until all the tracks are cleared
and the operation is successful.
:raises: loopingcall.LoopingCallDone
"""
retries = kwargs['retries']
try:
kwargs['retries'] = retries + 1
if not kwargs['delete_pair_success']:
self.rest.delete_rdf_pair(
array, device_id, rdf_group)
kwargs['delete_pair_success'] = True
except exception.VolumeBackendAPIException:
pass
if kwargs['retries'] > UNLINK_RETRIES:
LOG.error("Delete volume pair failed after %(retries)d "
"tries.", {'retries': retries})
raise loopingcall.LoopingCallDone(retvalue=30)
if kwargs['delete_pair_success']:
raise loopingcall.LoopingCallDone()
kwargs = {'retries': 0,
'delete_pair_success': False}
timer = loopingcall.FixedIntervalLoopingCall(_delete_pair)
rc = timer.start(interval=UNLINK_INTERVAL).wait()
return rc
def get_or_create_volume_group(self, array, group, extra_specs):
"""Get or create a volume group.
Sometimes it may be necessary to recreate a volume group on the
backend - for example, when the last member volume has been removed
from the group, but the cinder group object has not been deleted.
:param array: the array serial number
:param group: the group object
:param extra_specs: the extra specifications
:return: group name
"""
vol_grp_name = self.utils.update_volume_group_name(group)
return self.get_or_create_group(array, vol_grp_name, extra_specs)
def get_or_create_group(self, array, group_name, extra_specs):
"""Get or create a generic volume group.
:param array: the array serial number
:param group_name: the group name
:param extra_specs: the extra specifications
:return: group name
"""
storage_group = self.rest.get_storage_group(array, group_name)
if not storage_group:
self.create_volume_group(array, group_name, extra_specs)
return group_name
def create_volume_group(self, array, group_name, extra_specs):
"""Create a generic volume group.
:param array: the array serial number
:param group_name: the name of the group
:param extra_specs: the extra specifications
:returns: volume_group
"""
return self.create_storage_group(array, group_name,
None, None, None, extra_specs)
def create_group_replica(
self, array, source_group, snap_name, extra_specs):
"""Create a replica (snapVx) of a volume group.
:param array: the array serial number
:param source_group: the source group name
:param snap_name: the name for the snap shot
:param extra_specs: extra specifications
"""
LOG.debug("Creating Snap Vx snapshot of storage group: %(srcGroup)s.",
{'srcGroup': source_group})
# Create snapshot
self.rest.create_storagegroup_snap(
array, source_group, snap_name, extra_specs)
def delete_group_replica(self, array, snap_name, source_group_name,
src_dev_ids, extra_specs):
"""Delete the snapshot.
:param array: the array serial number
:param snap_name: the name for the snap shot
:param source_group_name: the source group name
:param src_dev_ids: the list of source device ids
:param extra_specs: extra specifications
"""
# Delete snapvx snapshot
LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s "
"snapshot: %(snap_name)s.",
{'srcGroup': source_group_name, 'snap_name': snap_name})
self.delete_volume_snap_check_for_links(
array, snap_name, src_dev_ids, extra_specs)
def link_and_break_replica(self, array, source_group_name,
target_group_name, snap_name, extra_specs,
list_volume_pairs, delete_snapshot=False):
"""Links a group snap and breaks the relationship.
:param array: the array serial
:param source_group_name: the source group name
:param target_group_name: the target group name
:param snap_name: the snapshot name
:param extra_specs: extra specifications
:param list_volume_pairs: the list of volume pairs
:param delete_snapshot: delete snapshot flag
"""
LOG.debug("Linking Snap Vx snapshot: source group: %(srcGroup)s "
"targetGroup: %(tgtGroup)s.",
{'srcGroup': source_group_name,
'tgtGroup': target_group_name})
# Link the snapshot
self.rest.modify_volume_snap(
array, None, None, snap_name, extra_specs, link=True,
list_volume_pairs=list_volume_pairs)
# Unlink the snapshot
LOG.debug("Unlinking Snap Vx snapshot: source group: %(srcGroup)s "
"targetGroup: %(tgtGroup)s.",
{'srcGroup': source_group_name,
'tgtGroup': target_group_name})
self._unlink_volume(array, None, None, snap_name, extra_specs,
list_volume_pairs=list_volume_pairs)
# Delete the snapshot if necessary
if delete_snapshot:
LOG.debug("Deleting Snap Vx snapshot: source group: %(srcGroup)s "
"snapshot: %(snap_name)s.",
{'srcGroup': source_group_name,
'snap_name': snap_name})
source_devices = [a for a, b in list_volume_pairs]
self.delete_volume_snap(array, snap_name, source_devices)
def enable_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs, establish=False):
"""Resume rdf replication on a storage group.
Replication is enabled by default. This allows resuming
replication on a suspended group.
:param array: the array serial number
:param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
:param establish: flag to indicate 'establish' instead of 'resume'
"""
action = "Establish" if establish is True else "Resume"
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
def disable_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs):
"""Suspend rdf replication on a storage group.
This does not delete the rdf pairs, that can only be done
by deleting the group. This method suspends all i/o activity
on the rdf links.
:param array: the array serial number
:param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
"""
action = "Suspend"
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
def failover_group(self, array, storagegroup_name,
rdf_group_num, extra_specs, failover=True):
"""Failover or failback replication on a storage group.
:param array: the array serial number
:param storagegroup_name: the storagegroup name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
:param failover: flag to indicate failover/ failback
"""
action = "Failover" if failover else "Failback"
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
def delete_group_replication(self, array, storagegroup_name,
rdf_group_num, extra_specs):
"""Split replication for a group and delete the pairs.
:param array: the array serial number
:param storagegroup_name: the storage group name
:param rdf_group_num: the rdf group number
:param extra_specs: the extra specifications
"""
group_details = self.rest.get_storage_group_rep(
array, storagegroup_name)
if (group_details and group_details.get('rdf')
and group_details['rdf'] is True):
action = "Split"
LOG.debug("Splitting remote replication for group %(sg)s",
{'sg': storagegroup_name})
self.rest.modify_storagegroup_rdf(
array, storagegroup_name, rdf_group_num, action, extra_specs)
LOG.debug("Deleting remote replication for group %(sg)s",
{'sg': storagegroup_name})
self.rest.delete_storagegroup_rdf(
array, storagegroup_name, rdf_group_num)
def revert_volume_snapshot(self, array, source_device_id,
snap_name, extra_specs):
"""Revert a volume snapshot
:param array: the array serial number
:param source_device_id: device id of the source
:param snap_name: snapvx snapshot name
:param extra_specs: the extra specifications
"""
start_time = time.time()
self.rest.modify_volume_snap(
array, source_device_id, "", snap_name, extra_specs, restore=True)
LOG.debug("Restore volume snapshot took: %(delta)s H:MM:SS.",
{'delta': self.utils.get_time_delta(start_time,
time.time())})
|
import sys
import numpy as np
import unittest
from numba.core.compiler import compile_isolated, Flags
from numba import jit, njit
from numba.core import types
from numba.tests.support import TestCase, MemoryLeakMixin
from numba.core.datamodel.testing import test_factory
enable_pyobj_flags = Flags()
enable_pyobj_flags.enable_pyobject = True
forceobj_flags = Flags()
forceobj_flags.force_pyobject = True
no_pyobj_flags = Flags()
def make_consumer(gen_func):
def consumer(x):
res = 0.0
for y in gen_func(x):
res += y
return res
return consumer
def gen1(x):
for i in range(x):
yield i
def gen2(x):
for i in range(x):
yield i
for j in range(1, 3):
yield i + j
def gen3(x):
# Polymorphic yield types must be unified
yield x
yield x + 1.5
yield x + 1j
def gen4(x, y, z):
for i in range(3):
yield z
yield y + z
return
yield x
def gen5():
# The bytecode for this generator doesn't contain any YIELD_VALUE
# (it's optimized away). We fail typing it, since the yield type
# is entirely undefined.
if 0:
yield 1
def gen6(a, b):
# Infinite loop: exercise computation of state variables
x = a + 1
while True:
y = b + 2
yield x + y
def gen7(arr):
# Array variable in generator state
for i in range(arr.size):
yield arr[i]
# Optional arguments and boolean state members
def gen8(x=1, y=2, b=False):
bb = not b
yield x
if bb:
yield y
if b:
yield x + y
def genobj(x):
object()
yield x
def return_generator_expr(x):
return (i * 2 for i in x)
def gen_ndindex(shape):
for ind in np.ndindex(shape):
yield ind
def gen_flat(arr):
for val in arr.flat:
yield val
def gen_ndenumerate(arr):
for tup in np.ndenumerate(arr):
yield tup
def gen_bool():
yield True
def gen_unification_error():
yield None
yield 1j
def gen_optional_and_type_unification_error():
# yields complex and optional(literalint)
i = 0
yield 1j
while True:
i = yield i
class TestGenerators(MemoryLeakMixin, TestCase):
def check_generator(self, pygen, cgen):
self.assertEqual(next(cgen), next(pygen))
# Use list comprehensions to make sure we trash the generator's
# former C stack.
expected = [x for x in pygen]
got = [x for x in cgen]
self.assertEqual(expected, got)
with self.assertRaises(StopIteration):
next(cgen)
def check_gen1(self, flags=no_pyobj_flags):
pyfunc = gen1
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
pygen = pyfunc(8)
cgen = cr.entry_point(8)
self.check_generator(pygen, cgen)
def test_gen1(self):
self.check_gen1()
def test_gen1_objmode(self):
self.check_gen1(flags=forceobj_flags)
def check_gen2(self, flags=no_pyobj_flags):
pyfunc = gen2
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
pygen = pyfunc(8)
cgen = cr.entry_point(8)
self.check_generator(pygen, cgen)
def test_gen2(self):
self.check_gen2()
def test_gen2_objmode(self):
self.check_gen2(flags=forceobj_flags)
def check_gen3(self, flags=no_pyobj_flags):
pyfunc = gen3
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
pygen = pyfunc(8)
cgen = cr.entry_point(8)
self.check_generator(pygen, cgen)
def test_gen3(self):
self.check_gen3()
def test_gen3_objmode(self):
self.check_gen3(flags=forceobj_flags)
def check_gen4(self, flags=no_pyobj_flags):
pyfunc = gen4
cr = compile_isolated(pyfunc, (types.int32,) * 3, flags=flags)
pygen = pyfunc(5, 6, 7)
cgen = cr.entry_point(5, 6, 7)
self.check_generator(pygen, cgen)
def test_gen4(self):
self.check_gen4()
def test_gen4_objmode(self):
self.check_gen4(flags=forceobj_flags)
def test_gen5(self):
with self.assertTypingError() as cm:
compile_isolated(gen5, ())
self.assertIn("Cannot type generator: it does not yield any value",
str(cm.exception))
def test_gen5_objmode(self):
cr = compile_isolated(gen5, (), flags=forceobj_flags)
cgen = cr.entry_point()
self.assertEqual(list(cgen), [])
with self.assertRaises(StopIteration):
next(cgen)
def check_gen6(self, flags=no_pyobj_flags):
pyfunc = gen6
cr = compile_isolated(pyfunc, (types.int32,) * 2, flags=flags)
cgen = cr.entry_point(5, 6)
l = []
for i in range(3):
l.append(next(cgen))
self.assertEqual(l, [14] * 3)
def test_gen6(self):
self.check_gen6()
def test_gen6_objmode(self):
self.check_gen6(flags=forceobj_flags)
def check_gen7(self, flags=no_pyobj_flags):
pyfunc = gen7
cr = compile_isolated(pyfunc, (types.Array(types.float64, 1, 'C'),),
flags=flags)
arr = np.linspace(1, 10, 7)
pygen = pyfunc(arr.copy())
cgen = cr.entry_point(arr)
self.check_generator(pygen, cgen)
def test_gen7(self):
self.check_gen7()
def test_gen7_objmode(self):
self.check_gen7(flags=forceobj_flags)
def check_gen8(self, **jit_args):
pyfunc = gen8
cfunc = jit(**jit_args)(pyfunc)
def check(*args, **kwargs):
self.check_generator(pyfunc(*args, **kwargs),
cfunc(*args, **kwargs))
check(2, 3)
check(4)
check(y=5)
check(x=6, b=True)
def test_gen8(self):
self.check_gen8(nopython=True)
def test_gen8_objmode(self):
self.check_gen8(forceobj=True)
def check_gen9(self, flags=no_pyobj_flags):
pyfunc = gen_bool
cr = compile_isolated(pyfunc, (), flags=flags)
pygen = pyfunc()
cgen = cr.entry_point()
self.check_generator(pygen, cgen)
def test_gen9(self):
self.check_gen9(flags=no_pyobj_flags)
def test_gen9_objmode(self):
self.check_gen9(flags=forceobj_flags)
def check_consume_generator(self, gen_func):
cgen = jit(nopython=True)(gen_func)
cfunc = jit(nopython=True)(make_consumer(cgen))
pyfunc = make_consumer(gen_func)
expected = pyfunc(5)
got = cfunc(5)
self.assertPreciseEqual(got, expected)
def test_consume_gen1(self):
self.check_consume_generator(gen1)
def test_consume_gen2(self):
self.check_consume_generator(gen2)
def test_consume_gen3(self):
self.check_consume_generator(gen3)
# Check generator storage of some types
def check_ndindex(self, flags=no_pyobj_flags):
pyfunc = gen_ndindex
cr = compile_isolated(pyfunc, (types.UniTuple(types.intp, 2),),
flags=flags)
shape = (2, 3)
pygen = pyfunc(shape)
cgen = cr.entry_point(shape)
self.check_generator(pygen, cgen)
def test_ndindex(self):
self.check_ndindex()
def test_ndindex_objmode(self):
self.check_ndindex(flags=forceobj_flags)
def check_np_flat(self, pyfunc, flags=no_pyobj_flags):
cr = compile_isolated(pyfunc, (types.Array(types.int32, 2, "C"),),
flags=flags)
arr = np.arange(6, dtype=np.int32).reshape((2, 3))
self.check_generator(pyfunc(arr), cr.entry_point(arr))
cr = compile_isolated(pyfunc, (types.Array(types.int32, 2, "A"),),
flags=flags)
arr = arr.T
self.check_generator(pyfunc(arr), cr.entry_point(arr))
def test_np_flat(self):
self.check_np_flat(gen_flat)
def test_np_flat_objmode(self):
self.check_np_flat(gen_flat, flags=forceobj_flags)
def test_ndenumerate(self):
self.check_np_flat(gen_ndenumerate)
def test_ndenumerate_objmode(self):
self.check_np_flat(gen_ndenumerate, flags=forceobj_flags)
def test_type_unification_error(self):
pyfunc = gen_unification_error
with self.assertTypingError() as e:
compile_isolated(pyfunc, (), flags=no_pyobj_flags)
msg = ("Can't unify yield type from the following types: complex128, "
"none")
self.assertIn(msg, str(e.exception))
def test_optional_expansion_type_unification_error(self):
pyfunc = gen_optional_and_type_unification_error
with self.assertTypingError() as e:
compile_isolated(pyfunc, (), flags=no_pyobj_flags)
msg = ("Can't unify yield type from the following types: complex128, "
"int%s, none")
self.assertIn(msg % types.intp.bitwidth, str(e.exception))
def nrt_gen0(ary):
for elem in ary:
yield elem
def nrt_gen1(ary1, ary2):
for e1, e2 in zip(ary1, ary2):
yield e1
yield e2
class TestNrtArrayGen(MemoryLeakMixin, TestCase):
def test_nrt_gen0(self):
pygen = nrt_gen0
cgen = jit(nopython=True)(pygen)
py_ary = np.arange(10)
c_ary = py_ary.copy()
py_res = list(pygen(py_ary))
c_res = list(cgen(c_ary))
np.testing.assert_equal(py_ary, c_ary)
self.assertEqual(py_res, c_res)
# Check reference count
self.assertEqual(sys.getrefcount(py_ary),
sys.getrefcount(c_ary))
def test_nrt_gen1(self):
pygen = nrt_gen1
cgen = jit(nopython=True)(pygen)
py_ary1 = np.arange(10)
py_ary2 = py_ary1 + 100
c_ary1 = py_ary1.copy()
c_ary2 = py_ary2.copy()
py_res = list(pygen(py_ary1, py_ary2))
c_res = list(cgen(c_ary1, c_ary2))
np.testing.assert_equal(py_ary1, c_ary1)
np.testing.assert_equal(py_ary2, c_ary2)
self.assertEqual(py_res, c_res)
# Check reference count
self.assertEqual(sys.getrefcount(py_ary1),
sys.getrefcount(c_ary1))
self.assertEqual(sys.getrefcount(py_ary2),
sys.getrefcount(c_ary2))
def test_combine_gen0_gen1(self):
"""
        Issue #1163 is observed when two generators with NRT object arguments
        are run in sequence. The first one does an invalid free and corrupts
        the NRT memory subsystem. The second generator is likely to segfault
        due to a corrupted NRT data structure (an invalid MemInfo).
"""
self.test_nrt_gen0()
self.test_nrt_gen1()
def test_nrt_gen0_stop_iteration(self):
"""
Test cleanup on StopIteration
"""
pygen = nrt_gen0
cgen = jit(nopython=True)(pygen)
py_ary = np.arange(1)
c_ary = py_ary.copy()
py_iter = pygen(py_ary)
c_iter = cgen(c_ary)
py_res = next(py_iter)
c_res = next(c_iter)
with self.assertRaises(StopIteration):
py_res = next(py_iter)
with self.assertRaises(StopIteration):
c_res = next(c_iter)
del py_iter
del c_iter
np.testing.assert_equal(py_ary, c_ary)
self.assertEqual(py_res, c_res)
# Check reference count
self.assertEqual(sys.getrefcount(py_ary),
sys.getrefcount(c_ary))
def test_nrt_gen0_no_iter(self):
"""
        Test cleanup for an initialized but never iterated generator
        (next() is never called).
"""
pygen = nrt_gen0
cgen = jit(nopython=True)(pygen)
py_ary = np.arange(1)
c_ary = py_ary.copy()
py_iter = pygen(py_ary)
c_iter = cgen(c_ary)
del py_iter
del c_iter
np.testing.assert_equal(py_ary, c_ary)
# Check reference count
self.assertEqual(sys.getrefcount(py_ary),
sys.getrefcount(c_ary))
# TODO: fix nested generator and MemoryLeakMixin
class TestNrtNestedGen(TestCase):
def test_nrt_nested_gen(self):
def gen0(arr):
for i in range(arr.size):
yield arr
def factory(gen0):
def gen1(arr):
out = np.zeros_like(arr)
for x in gen0(arr):
out = out + x
return out, arr
return gen1
py_arr = np.arange(10)
c_arr = py_arr.copy()
py_res, py_old = factory(gen0)(py_arr)
c_gen = jit(nopython=True)(factory(jit(nopython=True)(gen0)))
c_res, c_old = c_gen(c_arr)
self.assertIsNot(py_arr, c_arr)
self.assertIs(py_old, py_arr)
self.assertIs(c_old, c_arr)
np.testing.assert_equal(py_res, c_res)
self.assertEqual(sys.getrefcount(py_res),
sys.getrefcount(c_res))
        # The check below would fail because the generator finalizer is not
        # invoked, which keeps an extra reference to c_old.
#
# self.assertEqual(sys.getrefcount(py_old),
# sys.getrefcount(c_old))
@unittest.expectedFailure
def test_nrt_nested_gen_refct(self):
def gen0(arr):
yield arr
def factory(gen0):
def gen1(arr):
for out in gen0(arr):
return out
return gen1
py_arr = np.arange(10)
c_arr = py_arr.copy()
py_old = factory(gen0)(py_arr)
c_gen = jit(nopython=True)(factory(jit(nopython=True)(gen0)))
c_old = c_gen(c_arr)
self.assertIsNot(py_arr, c_arr)
self.assertIs(py_old, py_arr)
self.assertIs(c_old, c_arr)
self.assertEqual(sys.getrefcount(py_old),
sys.getrefcount(c_old))
def test_nrt_nested_nopython_gen(self):
"""
Test nesting three generators
"""
def factory(decor=lambda x: x):
@decor
def foo(a, n):
for i in range(n):
yield a[i]
a[i] += i
@decor
def bar(n):
a = np.arange(n)
for i in foo(a, n):
yield i * 2
for i in range(a.size):
yield a[i]
@decor
def cat(n):
for i in bar(n):
yield i + i
return cat
py_gen = factory()
c_gen = factory(jit(nopython=True))
py_res = list(py_gen(10))
c_res = list(c_gen(10))
self.assertEqual(py_res, c_res)
class TestGeneratorWithNRT(MemoryLeakMixin, TestCase):
def test_issue_1254(self):
"""
Missing environment for returning array
"""
@jit(nopython=True)
def random_directions(n):
for i in range(n):
vec = np.empty(3)
vec[:] = 12
yield vec
outputs = list(random_directions(5))
self.assertEqual(len(outputs), 5)
expect = np.empty(3)
expect[:] = 12
for got in outputs:
np.testing.assert_equal(expect, got)
def test_issue_1265(self):
"""
Double-free for locally allocated, non escaping NRT objects
"""
def py_gen(rmin, rmax, nr):
a = np.linspace(rmin, rmax, nr)
yield a[0]
yield a[1]
c_gen = jit(nopython=True)(py_gen)
py_res = list(py_gen(-2, 2, 100))
c_res = list(c_gen(-2, 2, 100))
self.assertEqual(py_res, c_res)
def py_driver(args):
rmin, rmax, nr = args
points = np.empty(nr, dtype=np.complex128)
for i, c in enumerate(py_gen(rmin, rmax, nr)):
points[i] = c
return points
@jit(nopython=True)
def c_driver(args):
rmin, rmax, nr = args
points = np.empty(nr, dtype=np.complex128)
for i, c in enumerate(c_gen(rmin, rmax, nr)):
points[i] = c
return points
n = 2
patches = (-2, -1, n)
py_res = py_driver(patches)
# The error will cause a segfault here
c_res = c_driver(patches)
np.testing.assert_equal(py_res, c_res)
def test_issue_1808(self):
"""
Incorrect return data model
"""
magic = 0xdeadbeef
@njit
def generator():
yield magic
@njit
def get_generator():
return generator()
@njit
def main():
out = 0
for x in get_generator():
out += x
return out
self.assertEqual(main(), magic)
class TestGeneratorModel(test_factory()):
fe_type = types.Generator(gen_func=None, yield_type=types.int32,
arg_types=[types.int64, types.float32],
state_types=[types.intp, types.intp[::1]],
has_finalizer=False)
if __name__ == '__main__':
unittest.main()
|
from .modules.common import *
import numpy as np
import os
class Group(object):
def __init__(self,npart,index):
self.index = index
self.npart_total = npart
def readpstar(catdir,snapnum,groupIndex,**kwargs):
"""Read and return info from P-Star catalogues.
Parameters
----------
catdir : string
path to your PSTAR catalogues
snapnum : int
snapnum you are interested in
groupIndex : int
which group to return info for? (-1 for all)
Notes
-----
returns a Group class
"""
GROUPS = []
fcat = open('%s/catalogue_%03d' % (catdir,snapnum),'rb')
fprop = open('%s/properties_%03d' % (catdir,snapnum),'rb')
fpos = open('%s/pos_%03d' % (catdir,snapnum),'rb')
fptype = open('%s/type_%03d' % (catdir,snapnum),'rb')
findex = open('%s/index_%03d' % (catdir,snapnum),'rb')
ngroups = np.fromfile(fcat,dtype=np.uint32,count=1)[0]
nparttot = np.fromfile(fpos,dtype=np.uint32,count=1)[0]
fprop.seek(4,1)
fptype.seek(4,1)
findex.seek(4,1)
for i in range(0,ngroups):
gpids = []
spids = []
stypes = []
pids = []
nparts = np.fromfile(fcat,dtype=np.uint32,count=1)[0]
offset = np.fromfile(fcat,dtype=np.uint32,count=1)[0]
for j in range(0,nparts):
ppos = np.fromfile(fpos,dtype=np.float32,count=3)
ptype = np.fromfile(fptype,dtype=np.uint32,count=1)[0]
pid = np.fromfile(findex,dtype=np.uint32,count=1)[0]
if ptype == 0:
gpids.append(pid)
elif ptype == 4:
spids.append(pid)
stypes.append(ptype)
pmstars = np.fromfile(fprop,dtype=np.float32,count=1)[0]
mags = np.fromfile(fprop,dtype=np.float32,count=4)
pcm = np.fromfile(fprop,dtype=np.float32,count=3)
pmsfr = np.fromfile(fprop,dtype=np.float32,count=1)[0]
pmgas = np.fromfile(fprop,dtype=np.float32,count=1)[0]
pmmetals= np.fromfile(fprop,dtype=np.float32,count=1)[0]
pmgmetals=np.fromfile(fprop,dtype=np.float32,count=1)[0]
GROUPS.append(Group(nparts,i))
GROUPS[i].mstar = pmstars
GROUPS[i].mgas = pmgas
GROUPS[i].cm = pcm
GROUPS[i].metals= pmmetals
GROUPS[i].gmetals=pmgmetals
GROUPS[i].gpids = gpids
GROUPS[i].spids = spids
GROUPS[i].stypes = stypes
fcat.close()
fprop.close()
fpos.close()
fptype.close()
findex.close()
if groupIndex == -1:
        groupIndex = list(range(0,ngroups))
if isinstance(groupIndex,int):
grp = GROUPS[groupIndex]
return grp
elif isinstance(groupIndex,list):
grps = []
for i in range(0,len(groupIndex)):
grp = GROUPS[groupIndex[i]]
grps.append(grp)
return grps
"""
## group catelog
f = open('%s/fof_special_catalogue_%03d' % (catdir,snapnum),'rb')
ngroups = np.fromfile(f,dtype=np.int32,count=1)[0]
for i in range(0,ngroups):
nparts = np.fromfile(f,dtype=np.uint32,count=1)[0]
GROUPS.append(Group(nparts,i))
for i in range(0,ngroups):
cumnum = np.fromfile(f,dtype=np.uint32,count=1)[0]
GROUPS[i].cumcount = cumnum
for i in range(0,ngroups):
grp_mass = np.fromfile(f,dtype=np.float32,count=1)[0]
GROUPS[i].mass = grp_mass
for i in range(0,ngroups):
cmpos = np.fromfile(f,dtype=np.float32,count=3)
GROUPS[i].cm = cmpos
for i in range(0,ngroups):
ngas = np.fromfile(f,dtype=np.uint32,count=1)[0]
ndm = np.fromfile(f,dtype=np.uint32,count=1)[0]
nstar = np.fromfile(f,dtype=np.uint32,count=1)[0]
GROUPS[i].ngas = ngas
GROUPS[i].ndm = ndm
GROUPS[i].nstar = nstar
for i in range(0,ngroups):
gmass = np.fromfile(f,dtype=np.float32,count=1)[0]
dmmass = np.fromfile(f,dtype=np.float32,count=1)[0]
smass = np.fromfile(f,dtype=np.float32,count=1)[0]
GROUPS[i].gmass = gmass
GROUPS[i].dmmass = dmmass
GROUPS[i].smass = smass
f.close()
## index list
f = open('%s/fof_special_indexlist_%03d' % (catdir,snapnum),'rb')
nindexes = np.fromfile(f,dtype=np.uint32,count=1)[0]
indexList = np.fromfile(f,dtype=np.uint32,count=nindexes)
f.close()
if isinstance(groupIndex,int):
grp = GROUPS[groupIndex]
grp.indexes = np.zeros(grp.npart_total,dtype=np.uint32)
for j in range(0,grp.npart_total):
grp.indexes[j] = indexList[grp.cumcount + j] - 1
return grp
elif isinstance(groupIndex,list):
grps = []
for i in range(0,len(groupIndex)):
grp = GROUPS[groupIndex[i]]
grps.append(grp)
grp.indexes = np.zeros(grp.npart_total,dtype=np.uint32)
for j in range(0,grp.npart_total):
grp.indexes[j] = indexList[grp.cumcount + j] - 1
return grps
"""
|
from .dataset import EnvironmentDataset, NodeTypeDataset
from .preprocessing import collate, collate_sdc, collate_sdc_test, get_node_timestep_data, get_timesteps_data, restore, get_relative_robot_traj
|
#We will take categorical data and turn it into features
#Vectorization: converting arbitrary data into well-behaved vectors
#Categorical Features
#housing price data
data = [
{'price': 850000, 'rooms': 4, 'neighborhood': 'Queen Anne'},
{'price': 700000, 'rooms': 3, 'neighborhood': 'Fremont'},
{'price': 650000, 'rooms': 3, 'neighborhood': 'Wallingford'},
{'price': 600000, 'rooms': 2, 'neighborhood': 'Fremont'}
]
#will use one-hot encoding
#DictVectorizer can do this for us
from sklearn.feature_extraction import DictVectorizer
vec = DictVectorizer(sparse=False, dtype=int)
print(vec.fit_transform(data))
#can inspect the feature names
print(vec.get_feature_names())
#this can greatly increase the size of your dataset
#since most entries will be 0, though, sparse output can be efficient
vec = DictVectorizer(sparse=True, dtype=int)
print(vec.fit_transform(data))
#most estimators will accept sparse inputs
#Text Features
#commonly we need to convert text into a set of numerical values
#simple method is storing counts
sample = ['problem of evil',
'evil queen',
'horizon problem']
#CountVectorizer will take care of this for us
from sklearn.feature_extraction.text import CountVectorizer
vec = CountVectorizer()
X = vec.fit_transform(sample)
print(X) #sparse matrix
#we can convert to a labeled data frame for easier inspection
import pandas as pd
print(pd.DataFrame(X.toarray(), columns=vec.get_feature_names()))
#this can put too much weight on filler words
#instead we can use a term frequency-inverse document frequency (TF-IDF), which
#weighs word counts by a measure of how often they appear across the documents
from sklearn.feature_extraction.text import TfidfVectorizer
vec = TfidfVectorizer()
X = vec.fit_transform(sample)
print(pd.DataFrame(X.toarray(), columns=vec.get_feature_names()))
#See Naive Bayes classification for more
import matplotlib
matplotlib.use("TKagg")
import matplotlib.pyplot as plt
import numpy as np
x = np.array([1, 2, 3, 4, 5])
y = np.array([4, 2, 1, 3, 7])
plt.scatter(x, y);
plt.show()
plt.clf()
#this data clearly doesn't fit a straight line but you can fit one anyway
from sklearn.linear_model import LinearRegression
X = x[:, np.newaxis]
model = LinearRegression().fit(X, y)
yfit = model.predict(X)
plt.scatter(x, y)
plt.plot(x, yfit);
plt.show()
plt.clf()
#we can transform the data adding extra columns of features
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(degree=3, include_bias=False)
X2 = poly.fit_transform(X)
print(X2) # we added x^2 and x^3
#now we can fit a better linear regression
model = LinearRegression().fit(X2, y)
yfit = model.predict(X2)
plt.scatter(x,y)
plt.plot(x, yfit)
plt.show()
plt.clf()
#will expand on this in: In Depth: Linear Regression
#this basis-function trick is also known as kernel methods; see In-Depth: Support Vector Machines
#Imputation of missing data
from numpy import nan
X = np.array([[ nan, 0, 3 ],
[ 3, 7, 9 ],
[ 3, 5, 2 ],
[ 4, nan, 6 ],
[ 8, 8, 1 ]])
y = np.array([14, 16, -1, 8, -5])
#for basic imputation you can use the mean, median or most frequent value via SimpleImputer
from sklearn.impute import SimpleImputer
imp = SimpleImputer(strategy='mean')
X2 = imp.fit_transform(X)
print(X2)
#now we can feed it into a model
model = LinearRegression().fit(X2, y)
print(model.predict(X2))
#Feature Pipelines
#you can streamline this with a pipeline object
from sklearn.pipeline import make_pipeline
model = make_pipeline(SimpleImputer(strategy='mean'),
PolynomialFeatures(degree=2),
LinearRegression())
model.fit(X, y) #this X has the NaN values
print(y)
print(model.predict(X))
#more in in-depth: linear regression & support Vector Machines
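#a minimal sketch (my addition, not part of the original walkthrough): because the
#imputer and the polynomial expansion live inside the pipeline, cross-validation
#refits them on each training fold, so nothing leaks from the held-out rows;
#with only five rows the scores themselves are meaningless, the point is the mechanics
from sklearn.model_selection import cross_val_score
print(cross_val_score(model, X, y, cv=2))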
|
""" Advent of Code, 2015: Day 12, a """
import json
with open(__file__[:-5] + "_input") as f:
inputs = [line.strip() for line in f]
def find_numbers(data):
""" Recursively find numbers in JSON data except dicts with "red" value """
if isinstance(data, int):
return [data]
numbers = []
if isinstance(data, list):
for dat in data:
numbers.extend(find_numbers(dat))
elif isinstance(data, dict):
if "red" not in data.values():
for key in data:
if isinstance(key, int):
numbers.append(key)
numbers.extend(find_numbers(data[key]))
return numbers
def run():
""" Load JSON data and sum all numbers in it """
return sum(find_numbers(json.loads(inputs[0])))
if __name__ == "__main__":
print(run())
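# A quick illustration of the "red" rule on a made-up document (not the puzzle input):
# find_numbers(json.loads('[1, {"c": "red", "b": 2}, 3]')) returns [1, 3], so the sum is 4.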
|
"""
Test Epilog script.
"""
from unittest import mock
import pytest
from lm_agent.workload_managers.slurm.slurmctld_epilog import epilog
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_epilog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_epilog.update_report")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_epilog.get_required_licenses_for_job")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_epilog._remove_booking_for_job")
async def test_epilog(
remove_booking_for_job_mock,
get_required_licenses_for_job_mock,
update_report_mock,
get_job_context_mock,
):
bookings_mock = mock.MagicMock()
bookings_mock.product_feature = "test.feature"
bookings_mock.license_server_type = "flexlm"
bookings_mock.tokens = 10
get_required_licenses_for_job_mock.return_value = [bookings_mock]
get_job_context_mock.return_value = {
"job_id": "1",
"user_name": "user1",
"lead_host": "host1",
"cluster_name": "cluster1",
"job_licenses": "test.feature@flexlm:10",
}
await epilog()
update_report_mock.assert_awaited_once()
get_required_licenses_for_job_mock.assert_called_once_with("test.feature@flexlm:10")
remove_booking_for_job_mock.assert_awaited_once_with("1")
@pytest.mark.asyncio
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_epilog.settings")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_epilog.get_job_context")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_epilog.update_report")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_epilog.get_required_licenses_for_job")
@mock.patch("lm_agent.workload_managers.slurm.slurmctld_epilog._remove_booking_for_job")
async def test_epilog_without_triggering_reconcile(
remove_booking_for_job_mock,
get_required_licenses_for_job_mock,
update_report_mock,
get_job_context_mock,
settings_mock,
):
bookings_mock = mock.MagicMock()
bookings_mock.product_feature = "test.feature"
bookings_mock.license_server_type = "flexlm"
bookings_mock.tokens = 10
get_required_licenses_for_job_mock.return_value = [bookings_mock]
get_job_context_mock.return_value = {
"job_id": "1",
"user_name": "user1",
"lead_host": "host1",
"cluster_name": "cluster1",
"job_licenses": "test.feature@flexlm:10",
}
settings_mock.USE_RECONCILE_IN_PROLOG_EPILOG = False
await epilog()
get_required_licenses_for_job_mock.assert_called_once_with("test.feature@flexlm:10")
remove_booking_for_job_mock.assert_awaited_once_with("1")
update_report_mock.assert_not_called()
|
# function delay(n) {
# n = n || 2000;
# return new Promise(done => {
# setTimeout(() => {
# done();
# }, n);
# });
# }
# var sections = document.querySelectorAll('.report-section')
# for (var index = 0; index < sections.length; index++) {
# sections[index].querySelector('.wbic-ic-overflow').click()
# await delay(200)
# document.querySelectorAll('.ui.borderless.vertical.menu .item')[2].click()
# await delay(200)
# document.querySelectorAll('.ui.borderless.vertical.popup-submenu.menu .item')[1].click()
# await delay(2000)
# document.querySelectorAll('.ui.primary.button')[document.querySelectorAll('.ui.primary.button').length-1].click()
# await delay(1000)
# }
import subprocess
import time
import psutil
def kill(proc_pid):
process = psutil.Process(proc_pid)
for proc in process.children(recursive=True):
proc.kill()
process.kill()
width = 600
height = 600
port = "8086"
url_template = ""
server_proc = subprocess.Popen(
["python", "-m", "http.server", port], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
import glob, os
os.chdir(".")
for file in glob.glob("*.svg"):
new_file = file.replace(" ", "").replace("&", "")
os.rename(file, new_file)
url = f"http://localhost:9000/api/render?url=http://localhost:{port}/{new_file}&pdf.landscape=true&pdf.height={height}&pdf.width={width}"
proc = subprocess.Popen(
["curl", "-o", new_file.rstrip(".svg") + ".pdf", url],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
time.sleep(5)
kill(server_proc.pid)
|
"""
Copyright StrangeAI authors @2019
assumes two directories: to convert person A to person B, put all
face images of person A into directory A and all face images of
person B into directory B
"""
import torch
from torch.utils.data import Dataset
import glob
import os
from alfred.dl.torch.common import device
import cv2
from PIL import Image
from torchvision import transforms
import numpy as np
from utils.umeyama import umeyama
random_transform_args = {
'rotation_range': 10,
'zoom_range': 0.05,
'shift_range': 0.05,
'random_flip': 0.4,
}
def random_transform(image, rotation_range, zoom_range, shift_range, random_flip):
h, w = image.shape[0:2]
rotation = np.random.uniform(-rotation_range, rotation_range)
scale = np.random.uniform(1 - zoom_range, 1 + zoom_range)
tx = np.random.uniform(-shift_range, shift_range) * w
ty = np.random.uniform(-shift_range, shift_range) * h
mat = cv2.getRotationMatrix2D((w // 2, h // 2), rotation, scale)
mat[:, 2] += (tx, ty)
result = cv2.warpAffine(image, mat, (w, h), borderMode=cv2.BORDER_REPLICATE)
if np.random.random() < random_flip:
result = result[:, ::-1]
return result
def random_warp_128(image):
assert image.shape == (256, 256, 3), 'resize image to 256 256 first'
range_ = np.linspace(128 - 120, 128 + 120, 9)
mapx = np.broadcast_to(range_, (9, 9))
mapy = mapx.T
mapx = mapx + np.random.normal(size=(9, 9), scale=5)
mapy = mapy + np.random.normal(size=(9, 9), scale=5)
interp_mapx = cv2.resize(mapx, (144, 144))[8:136, 8:136].astype('float32')
interp_mapy = cv2.resize(mapy, (144, 144))[8:136, 8:136].astype('float32')
warped_image = cv2.remap(image, interp_mapx, interp_mapy, cv2.INTER_LINEAR)
src_points = np.stack([mapx.ravel(), mapy.ravel()], axis=-1)
dst_points = np.mgrid[0:129:16, 0:129:16].T.reshape(-1, 2)
mat = umeyama(src_points, dst_points, True)[0:2]
target_image = cv2.warpAffine(image, mat, (128, 128))
return warped_image, target_image
def random_warp_64(image):
assert image.shape == (256, 256, 3)
range_ = np.linspace(128 - 120, 128 + 120, 5)
mapx = np.broadcast_to(range_, (5, 5))
mapy = mapx.T
mapx = mapx + np.random.normal(size=(5, 5), scale=5)
mapy = mapy + np.random.normal(size=(5, 5), scale=5)
interp_mapx = cv2.resize(mapx, (80, 80))[8:72, 8:72].astype('float32')
interp_mapy = cv2.resize(mapy, (80, 80))[8:72, 8:72].astype('float32')
warped_image = cv2.remap(image, interp_mapx, interp_mapy, cv2.INTER_LINEAR)
src_points = np.stack([mapx.ravel(), mapy.ravel()], axis=-1)
dst_points = np.mgrid[0:65:16, 0:65:16].T.reshape(-1, 2)
mat = umeyama(src_points, dst_points, True)[0:2]
target_image = cv2.warpAffine(image, mat, (64, 64))
return warped_image, target_image
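# Illustrative sketch (not part of the original code): how random_transform and
# random_warp_64 are typically chained on a single face crop before being wrapped
# by the dataset classes below. The image path is a made-up placeholder.
def _example_augment_face(path="face_0001.png"):
    img = cv2.resize(cv2.imread(path), (256, 256))
    img = random_transform(img, **random_transform_args)
    # warped_image is the distorted network input, target_image the aligned ground truth
    warped_image, target_image = random_warp_64(img)
    return warped_image, target_image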
class FacePairDataset(Dataset):
def __init__(self, a_dir, b_dir, target_size, transform):
        super(FacePairDataset, self).__init__()
self.a_dir = a_dir
self.b_dir = b_dir
self.target_size = target_size
self.transform = transform
# extension can be changed here to png or others
self.a_images_list = glob.glob(os.path.join(a_dir, '*.png'))
self.b_images_list = glob.glob(os.path.join(b_dir, '*.png'))
def __getitem__(self, index):
# return 2 image pair, A and B
img_a = Image.open(self.a_images_list[index])
img_b = Image.open(self.b_images_list[index])
# align the face first
img_a = img_a.resize((self.target_size, self.target_size), Image.ANTIALIAS)
img_b = img_b.resize((self.target_size, self.target_size), Image.ANTIALIAS)
# transform
if self.transform:
img_a = self.transform(img_a)
img_b = self.transform(img_b)
# already resized, warp it
img_a = random_transform(np.array(img_a), **random_transform_args)
img_b = random_transform(np.array(img_b), **random_transform_args)
        # note: the original code referenced an undefined random_warp(); the 128px
        # warp defined above is used here instead (inputs are already 256x256)
        img_a_input, img_a = random_warp_128(np.array(img_a))
        img_b_input, img_b = random_warp_128(np.array(img_b))
img_a_tensor = torch.Tensor(img_a.transpose(2, 0, 1)/255.).float()
img_a_input_tensor = torch.Tensor(img_a_input.transpose(2, 0, 1)/255.).float()
img_b_tensor = torch.Tensor(img_b.transpose(2, 0, 1)/255.).float()
img_b_input_tensor = torch.Tensor(img_b_input.transpose(2, 0, 1)/255.).float()
return img_a_tensor, img_a_input_tensor, img_b_tensor, img_b_input_tensor
def __len__(self):
return min(len(self.a_images_list), len(self.b_images_list))
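# A minimal usage sketch (assumed directory names, not from the original code):
# wrapping the pair dataset in a torch DataLoader for training.
def _example_make_loader(a_dir="data/faces_a", b_dir="data/faces_b", batch_size=16):
    from torch.utils.data import DataLoader
    dataset = FacePairDataset(a_dir, b_dir, target_size=256, transform=None)
    return DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=2)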
class FacePairDataset64x64(Dataset):
def __init__(self, a_dir, b_dir, target_size, transform):
        super(FacePairDataset64x64, self).__init__()
self.a_dir = a_dir
self.b_dir = b_dir
self.target_size = target_size
self.transform = transform
# extension can be changed here to png or others
self.a_images_list = glob.glob(os.path.join(a_dir, '*.png'))
self.b_images_list = glob.glob(os.path.join(b_dir, '*.png'))
def __getitem__(self, index):
# return 2 image pair, A and B
img_a = Image.open(self.a_images_list[index])
img_b = Image.open(self.b_images_list[index])
# align the face first
img_a = img_a.resize((256, 256), Image.ANTIALIAS)
img_b = img_b.resize((256, 256), Image.ANTIALIAS)
# transform
if self.transform:
img_a = self.transform(img_a)
img_b = self.transform(img_b)
# # already resized, warp it
img_a = random_transform(np.array(img_a), **random_transform_args)
img_b = random_transform(np.array(img_b), **random_transform_args)
img_a_input, img_a = random_warp_64(np.array(img_a))
img_b_input, img_b = random_warp_64(np.array(img_b))
img_a = np.array(img_a)
img_b = np.array(img_b)
img_a_tensor = torch.Tensor(img_a.transpose(2, 0, 1)/255.).float()
img_a_input_tensor = torch.Tensor(img_a_input.transpose(2, 0, 1)/255.).float()
img_b_tensor = torch.Tensor(img_b.transpose(2, 0, 1)/255.).float()
img_b_input_tensor = torch.Tensor(img_b_input.transpose(2, 0, 1)/255.).float()
return img_a_tensor, img_a_input_tensor, img_b_tensor, img_b_input_tensor
def __len__(self):
return min(len(self.a_images_list), len(self.b_images_list))
class FacePairDataset128x128(Dataset):
def __init__(self, a_dir, b_dir, target_size, transform):
        super(FacePairDataset128x128, self).__init__()
self.a_dir = a_dir
self.b_dir = b_dir
self.target_size = target_size
self.transform = transform
self.a_images_list = glob.glob(os.path.join(a_dir, '*.png'))
self.b_images_list = glob.glob(os.path.join(b_dir, '*.png'))
def __getitem__(self, index):
# return 2 image pair, A and B
img_a = Image.open(self.a_images_list[index])
img_b = Image.open(self.b_images_list[index])
# align the face first
img_a = img_a.resize((256, 256), Image.ANTIALIAS)
img_b = img_b.resize((256, 256), Image.ANTIALIAS)
# transform
if self.transform:
img_a = self.transform(img_a)
img_b = self.transform(img_b)
img_a = random_transform(np.array(img_a), **random_transform_args)
img_b = random_transform(np.array(img_b), **random_transform_args)
img_a_input, img_a = random_warp_128(np.array(img_a))
img_b_input, img_b = random_warp_128(np.array(img_b))
img_a_tensor = torch.Tensor(img_a.transpose(2, 0, 1)/255.).float()
img_a_input_tensor = torch.Tensor(img_a_input.transpose(2, 0, 1)/255.).float()
img_b_tensor = torch.Tensor(img_b.transpose(2, 0, 1)/255.).float()
img_b_input_tensor = torch.Tensor(img_b_input.transpose(2, 0, 1)/255.).float()
return img_a_tensor, img_a_input_tensor, img_b_tensor, img_b_input_tensor
def __len__(self):
return min(len(self.a_images_list), len(self.b_images_list)) |
from flask import Flask, g, render_template, url_for, abort
from flask import make_response, request
from jinja2 import FileSystemLoader
from sqlalchemy.sql.expression import func
import itertools
import pickle
from trajectory import config as TRJ
from trajectory.utils.prereqs import get_prereq_graph
from trajectory.utils.vector import jaccard, topic_list, topic_vector
from trajectory.utils.vector import cosine_similarity, euclidean_distance
from trajectory.utils.knowledge_areas import predicted_knowledge_areas
from trajectory.utils.knowledge_areas import ground_truth_knowledge_areas
from trajectory.models import University, Department, Course, ResultSet
from trajectory.models import Topic, CourseTopicAssociation
from trajectory.models.meta import session
#####################
# Application Setup #
#####################
app = Flask(__name__)
# config.from_object() ignores plain dict instances, so apply the settings directly.
app.config.update(
    DEBUG=True,
    THREADS_PER_PAGE=8,
)
app.jinja_loader = FileSystemLoader(TRJ.TEMPLATES)
app.db = session
###################
# View Definition #
###################
# Manage error handling.
@app.errorhandler(404)
def not_found(error):
return render_template('404.html'), 404
# Manage error handling.
@app.errorhandler(500)
def internal_error(error):
return render_template('500.html'), 500
# Define routing for dashboard page.
@app.route('/')
def dashboard():
universities = app.db.query(University).all()
result_sets = app.db.query(ResultSet).all()
return render_template("index.html",
universities=universities,
result_sets=result_sets)
# Define routing for university index pages.
@app.route('/universities/')
@app.route('/universities/<string:u>/')
def university(u=None):
# If no university is requested, just serve up the uni list page.
if u is None:
universities = app.db.query(University).all()
return render_template("university_list.html",
universities=universities)
# If a university is requested, try to find it.
university = app.db.query(University) \
.filter(University.abbreviation==u) \
.first()
if university is None:
abort(404) # university not found
return render_template("university.html",
university=university)
# Define routing for departmental pages.
@app.route('/universities/<string:u>/<string:d>/')
def department(u=None, d=None):
department = app.db.query(Department).join(University) \
.filter(University.abbreviation==u) \
.filter(Department.abbreviation==d) \
.first()
if department is None:
abort(404) # department not found
departments = app.db.query(Department).all()
return render_template("department.html",
department=department,
departments=departments)
# Define routing for a course.
@app.route('/universities/<string:u>/<string:d>/id<string:cid>/')
def course(u=None, d=None, cid=None):
course = app.db.query(Course).join(Department).join(University) \
.filter(University.abbreviation==u) \
.filter(Department.abbreviation==d) \
.filter(Course.id==cid).first()
if course is None:
abort(404)
return render_template("course.html",
course=course,
topics=topic_list(course, result_set=g.result_set_raw))
# Define routing for topic list.
@app.route('/topics/')
def topics():
topics = app.db.query(Topic) \
.join(CourseTopicAssociation) \
.join(ResultSet) \
.group_by(Topic.id, ResultSet.id) \
.order_by(func.count().desc()) \
.all()
return render_template("topics.html",
topics=topics)
# Define routing for about page.
@app.route('/about/')
def about():
return render_template("about.html")
# Define routing for course prerequisite tree API endpoint.
@app.route('/prereqs/<string:cid>/<string:format>')
def prereq_tree(cid, format="node"):
    # Attempt to retrieve data in the requested format.
try:
data = get_prereq_graph(cid, format=format)
except RuntimeError:
abort(404)
if data is None:
abort(404)
response = make_response(data)
response.headers["Content-Type"] = "text/plain"
return response
# Define routing for department comparison page.
@app.route('/compare/')
@app.route('/compare/<string:daid>/<string:dbid>/')
def compare_departments(daid=None, dbid=None):
if None in [daid, dbid]:
departments = app.db.query(Department).all()
return render_template("compare_departments_landing.html",
departments=departments)
# Look up references to requested departments.
department_a = app.db.query(Department).get(daid)
department_b = app.db.query(Department).get(dbid)
# If either department isn't found, or if there is no result set
# (meaning no topics to infer) then simply 404.
if department_a is None or department_b is None or g.result_set_raw is None:
abort(404)
# Identify a set of topics for each department.
department_a_topics = set(topic_list(department_a, g.result_set_raw))
department_b_topics = set(topic_list(department_b, g.result_set_raw))
# Generate topic vectors for the two departments.
a_vector = topic_vector(department_a, g.result_set_raw)
b_vector = topic_vector(department_b, g.result_set_raw)
a_vector_string = a_vector.unpack(one=b'1', zero=b'0').decode('utf-8')
b_vector_string = b_vector.unpack(one=b'1', zero=b'0').decode('utf-8')
# Run similarity metrics.
similarity = dict()
similarity['jaccard'] = {
'name': 'Jaccard Index',
'range': '[0, 1]',
'description': 'Comparative set cardinality.',
'value': jaccard(department_a_topics, department_b_topics),
}
similarity['cosine'] = {
'name': 'Cosine Similarity',
'range': '[-1, 1]',
'description': 'Geometric cosine distance.',
'value': cosine_similarity(a_vector, b_vector),
}
similarity['euclidean'] = {
'name': 'Euclidean Distance',
'description': 'Geometric vector distance.',
'value': euclidean_distance(a_vector, b_vector),
}
# Remove common topics from the topic sets.
intersection = department_a_topics & department_b_topics
department_a_topics = department_a_topics - intersection
department_b_topics = department_b_topics - intersection
# Number of courses in each department.
num_courses_a = app.db.query(Course).join(Department) \
.filter(Department.id==daid).count()
num_courses_b = app.db.query(Course).join(Department) \
.filter(Department.id==dbid).count()
# Global list of departments for switching over.
departments = app.db.query(Department).all()
return render_template("compare_departments.html",
da=department_a,
db=department_b,
da_topics=department_a_topics,
db_topics=department_b_topics,
num_courses_a=num_courses_a,
num_courses_b=num_courses_b,
common_topics=intersection,
departments=departments,
similarity_metrics=similarity,
da_vector=a_vector_string,
db_vector=b_vector_string,
)
# Define routing for departmental evaluation tool.
@app.route('/evaluate/')
@app.route('/evaluate/<string:u>/<string:d>/')
def evaluation(u=None, d=None):
if u is None or d is None:
return render_template("evaluate_landing.html")
import numpy
department = app.db.query(Department).join(University) \
.filter(University.abbreviation==u) \
.filter(Department.abbreviation==d) \
.first()
if department is None:
abort(404) # department not found
# Retrieve the set of predicted and ground truth knowledge area labels
# for each course.
try:
knowledge_areas = {
'predicted': {
course.id: predicted_knowledge_areas(
course,
result_set=g.result_set_raw)
for course in department.courses
},
'truth': {
course.id: ground_truth_knowledge_areas(course)
for course in department.courses
},
}
except RuntimeError:
# Return empty knowledge area lists if an error is encountered.
knowledge_areas = {
'predicted': {course.id: [] for course in department.courses},
'truth': {course.id: [] for course in department.courses},
}
# Calculate the jaccard coefficient and percentage correct of the
# prediction/truth sets, use these as 'correctness' metrics.
knowledge_areas['jaccard'] = {
course.id: float(jaccard(
knowledge_areas['predicted'][course.id],
knowledge_areas['truth'][course.id]
)) for course in department.courses
if knowledge_areas['truth'][course.id]
}
knowledge_areas['percent'] = {
course.id:
float(len(set(knowledge_areas['predicted'][course.id])\
.intersection(set(knowledge_areas['truth'][course.id])))\
/ len(knowledge_areas['truth'][course.id]))
for course in department.courses
if knowledge_areas['truth'][course.id]
}
return render_template("evaluate_department.html",
department=department,
knowledge_areas=knowledge_areas,)
################################
# Custom Filters and Functions #
################################
@app.template_filter('ka_parse')
def knowledge_area_abbreviation(ka):
"""
Given an ACM knowledge area, split it up by its Abbreviation (eg. AL)
and its Title (eg. Algorithms and Complexity). This is done by
isolating the location of the abbreviation in the title string (where
the first left paren occurs) and only including the subsequent
characters.
"""
return {
'abbr': ka.title,
'title': ka.title[ka.title.find('(')+1:-1]
}
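# Note (added for clarity; assumes knowledge-area titles embed their abbreviation in
# parentheses, e.g. "Algorithms and Complexity (AL)"): as written, the filter returns
#     {'abbr': 'Algorithms and Complexity (AL)', 'title': 'AL'}
# i.e. 'abbr' carries the full title string and 'title' the text inside the parentheses.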
@app.template_filter('course_count')
def course_count(data):
if type(data) == University:
return app.db.query(Course).join(Department).join(University) \
.filter(University.id==data.id).count()
elif type(data) == Department:
return len(data.courses)
else:
return 0
# Make the deserialize function available to templates.
@app.context_processor
def utility_processor():
def unpickle(data):
try:
import binascii
unhex = binascii.unhexlify(data)
return pickle.loads(unhex)
except TypeError:
return None
return dict(unpickle=unpickle)
#####################
# Request Callbacks #
#####################
# Identify the requested result_set from the user. If none is currently
# selected, just default to the first one.
@app.before_request
def get_result_set():
# Load the requested and known result set ids.
# Warning: do NOT call loads on the requested data.
import binascii
result_set = request.cookies.get('result_set')
result_sets = app.db.query(ResultSet).all()
legal_result_sets = [binascii.hexlify(pickle.dumps(rs)).decode('utf-8')
for rs in result_sets]
# Check if the requested id is legal. If not, default it.
if result_set is None or result_set not in legal_result_sets:
if len(legal_result_sets) > 0:
result_set = legal_result_sets[0]
else:
result_set = None
# Look up the raw result set for server-side storage.
if result_set is not None:
result_set_index = legal_result_sets.index(result_set)
result_set_raw = result_sets[result_set_index]
else:
result_set_raw = None
# Set the global current- and legal- resultsets.
g.result_set = result_set
g.result_set_raw = result_set_raw
g.result_sets = legal_result_sets
# Set the resultset cookie after this request.
@app.after_request
def set_result_set(response):
import binascii
if g.result_set is not None:
response.set_cookie('result_set', g.result_set, path='/')
return response
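# Illustrative sketch (not part of the original app): exercising the result-set cookie
# round trip with Flask's test client. The value passed in must be one of the
# hex-encoded pickles built in get_result_set() above.
def _example_switch_result_set(result_set_hex):
    with app.test_client() as client:
        return client.get('/', headers={'Cookie': 'result_set=' + result_set_hex})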
|
from Katna.video import Video
from Katna.image import Image
from .version import __version__
|
from datetime import datetime
import csv
import re
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
from jumia.database import DatabaseSession, ListeMaison
class BaseScrapper:
"""Base scrapper object with methods to retrieve and save data
"""
def __init__(self, browser='firefox', **kwargs):
"""initialise the web driver instance
:param browser: The web browser used
:param kwargs:
"""
options = webdriver.IeOptions() if browser == 'ie' else webdriver.ChromeOptions() if browser == 'chrome' \
else webdriver.FirefoxOptions()
# add additional optional arguments
options_args = []
if 'headless' in kwargs and kwargs.get('headless'):
options_args.append('--headless')
for op in options_args:
options.add_argument(op)
self.driver = webdriver.Ie(ie_options=options) if browser == 'ie' else webdriver.Chrome(chrome_options=options)\
if browser == 'chrome' else webdriver.Firefox(firefox_options=options)
def connect_to_website(self, website_url):
"""Connects to the website to pull data from
:param website_url:
:return: Boolean
"""
_attempts = 0
while _attempts < 3:
try:
self.driver.get(website_url)
self.driver.fullscreen_window()
#self.driver.implicitly_wait(3)
return True
except Exception as e:
_attempts += 1
print(f'Error while connecting to {website_url}', f'Attempt #{_attempts}', end='\n')
return False
def get_deals(self, category='appartements-a-vendre', website_url='https://deals.jumia.sn/', **kwargs):
""" Execute custom search on the scrapped website
:param category: Sting - Terms to search
:param kwargs:
:return:
"""
_connected = self.connect_to_website(website_url+category)
WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable(
(By.ID, "nav")))
try:
self.driver.find_element_by_class_name('-close_popup').click()
except Exception as e:
pass
html = self.driver.page_source
soup = BeautifulSoup(html, 'html.parser')
annonces = soup.find_all('div', class_='post')
results = []
for a in annonces:
results.append(
{
'titre': re.sub(r"[\n\t]*", "", a.find(class_='address').text.split(',')[0]).strip(),
'ville': re.sub(r"[\n\t]*", "", a.find(class_='address').text.split(',')[1]).strip(),
'description': re.sub(r"[\n\t]*", "", a.find(class_='announcement-infos').a.span.text).strip(),
'date': a.find(class_='price-date').time.text,
'prix': a.find(class_='price-date').span.text.strip(),
'image': a.img['data-src'] if 'data-src' in a.img.attrs else a.img['src'],
'lien': website_url+a.find(class_='announcement-infos').a['href'],
'type': 'location' if 'louer' in category else 'vente'
}
)
return results
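    # Usage sketch (assumed values, not part of the original class):
    #
    #     scrapper = BaseScrapper(browser='firefox', headless=True)
    #     deals = scrapper.get_deals(category='appartements-a-vendre')
    #     scrapper.save_results_to_file(deals, output='annonces.csv')
    #     scrapper.disconnect()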
def disconnect(self):
self.driver.quit()
def process_results(self, html, type='location', **kwargs):
"""Process the downloaded scrapping results.
:param html:
:param kwargs:
:return:
"""
# Create beautifulsoup object
soup = BeautifulSoup(html, 'html.parser')
annonces = soup.find_all('li', class_='highlight-box')
results = []
for a in annonces:
results.append({'title': a.div.h3.text.strip(), 'type': type, 'link': 'https://house.jumia.sn' + a.div.h3.a.get('href'),
'address': a.div.p.text, 'price': a.div.find(class_='listing-price').text,
'image': a.find(class_='listing-image').img.get('src')})
return results
def save_results_to_file(self, input, output='annonces.csv'):
"""
:param input:
:return:
"""
fieldnames = ['type', 'titre', 'description', 'ville', 'prix', 'date', 'lien', 'image']
with open(output, 'a') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writerow({'type': 'Type', 'titre': 'Titre', 'description': 'Description', 'ville': 'Ville', 'prix': 'Prix', 'date': 'Date',
'lien': 'Lien','image': 'Image'})
writer.writerows(input)
def reset_database(self):
with DatabaseSession() as session:
print("Removing previous records")
try:
session.query(ListeMaison).delete()
session.commit()
except:
session.rollback()
def save_results_to_database(self, input, **kwargs):
"""
:param input:
:param kwargs:
:return:
"""
with DatabaseSession() as session:
print("Inserting new records")
for i in input:
m = ListeMaison(titre=i.get('titre', 'Test'), description=i.get('description', 'Test'),
                                image=i.get('image', 'test'), lien=i.get('lien', 'Test'), pays='SN',
ville=i.get('ville', ''), quartier=i.get('ville', ''), superficie=50,
prix=i.get('prix', 0), chambres=2, type=i.get('type', 'location'), date=datetime.now().date())
try:
session.add(m)
except Exception as e:
print(e)
session.rollback()
session.commit() |
# Generated by Django 3.0.7 on 2020-07-22 11:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('contr_clienti', '0009_remove_contract_document'),
]
operations = [
migrations.AddField(
model_name='contractscan',
name='actaditional',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='contr_clienti.ActAditional'),
),
]
|
# Advent of Code 2021 - Day 2 Part 1
# 2 Dec 2021 Brian Green
#
# Problem:
# Calculate the horizontal position and depth you would have after following the planned course.
# What do you get if you multiply your final horizontal position by your final depth?
#
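# Worked example (the small sample course from the puzzle description):
#   forward 5, down 5, forward 8, up 3, down 8, forward 2
#   -> position 15, depth 10, answer 15 * 10 = 150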
import os
filename = "data" + os.sep + "brian_aoc202102.dat"
with open(filename) as data_file:
data_set = [direction.strip() for direction in data_file.readlines()]
depth = 0
position = 0
for d in data_set:
direction, movement = d.split()
moves = int(movement)
if direction == "up":
depth -= moves
elif direction == "down":
depth += moves
elif direction == "forward":
position += moves
else:
print(direction)
if depth < 0:
print("HELP!")
print(f"p={position} d={depth} t={position * depth}")
|
"""
Segment level tests.
"""
from fontTools.misc import bezierTools as ftBezierTools
import defcon
from .tools import (
roundPoint,
unwrapPoint,
calculateAngle,
calculateAngleOffset,
calculateLineLineIntersection,
calculateLineCurveIntersection,
calculateLineLength,
calculateLineThroughPoint
)
from . import registry
from .wrappers import *
# Straight Lines
def testForAngleNearMiss(contour):
"""
Lines shouldn't be just shy of vertical or horizontal.
Data structure:
set(
(pt1, pt2),
...
)
"""
contour = wrapContour(contour)
segments = contour.segments
prev = unwrapPoint(segments[-1].onCurve)
slightlyOffLines = set()
for segment in segments:
point = unwrapPoint(segment.onCurve)
if segment[-1].type == "line":
x = abs(prev[0] - point[0])
y = abs(prev[1] - point[1])
if x > 0 and x <= 5 and prev[1] != point[1]:
slightlyOffLines.add((prev, point))
if y > 0 and y <= 5 and prev[0] != point[0]:
slightlyOffLines.add((prev, point))
prev = point
return slightlyOffLines
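# For example (illustrative coordinates): a nearly vertical line from (100, 0) to
# (103, 700) is flagged, since its x coordinates differ by 3 (non-zero but <= 5).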
registry.registerTest(
identifier="angleNearMiss",
level="segment",
title="Angle Near Miss",
description="One or more lines are nearly at important angles.",
testFunction=testForAngleNearMiss,
defconClass=defcon.Contour,
destructiveNotifications=["Contour.PointsChanged"]
)
# Segments Near Vertical Metrics
def testForSegmentsNearVerticalMetrics(contour):
"""
Points shouldn't be just off a vertical metric or blue zone.
Data structure:
{
vertical metric y value : set(pt, ...),
...
}
"""
font = wrapFont(contour.font)
glyph = wrapGlyph(contour.glyph)
contour = wrapContour(contour)
threshold = 5
# gather the blues into top and bottom groups
topZones = _makeZonePairs(font.info.postscriptBlueValues)
bottomZones = _makeZonePairs(font.info.postscriptOtherBlues)
if topZones:
t = topZones[0]
if t[0] <= 0 and t[1] == 0:
bottomZones.append(topZones.pop(0))
# insert vertical metrics into the zones
topMetrics = [getattr(font.info, attr) for attr in "xHeight capHeight ascender".split(" ") if getattr(font.info, attr) is not None]
bottomMetrics = [getattr(font.info, attr) for attr in "descender".split(" ") if getattr(font.info, attr) is not None] + [0]
for value in topMetrics:
found = False
for b, t in topZones:
if b <= value and t >= value:
found = True
break
if not found:
topZones.append((value, value))
for value in bottomMetrics:
found = False
for b, t in bottomZones:
if b <= value and t >= value:
found = True
break
if not found:
bottomZones.append((value, value))
# find points
found = {}
if len(contour) >= 3:
for segmentIndex, segment in enumerate(contour):
prev = segmentIndex - 1
next = segmentIndex + 1
if next == len(contour):
next = 0
prevSegment = contour[prev]
nextSegment = contour[next]
pt = (segment.onCurve.x, segment.onCurve.y)
prevPt = (prevSegment.onCurve.x, prevSegment.onCurve.y)
nextPt = (nextSegment.onCurve.x, nextSegment.onCurve.y)
pY = prevPt[1]
x, y = pt
nY = nextPt[1]
# top point
if y >= pY and y >= nY:
for b, t in topZones:
test = None
# point is above zone
if y > t and abs(t - y) <= threshold:
test = t
# point is below zone
elif y < b and abs(b - y) <= threshold:
test = b
if test is not None:
if contour.pointInside((x, y - 1)):
if test not in found:
found[test] = set()
found[test].add((x, y))
# bottom point
if y <= pY and y <= nY:
for b, t in bottomZones:
test = None
# point is above zone
if y > t and abs(t - y) <= threshold:
test = t
# point is below zone
elif y < b and abs(b - y) <= threshold:
test = b
if test is not None:
if contour.pointInside((x, y + 1)):
if test not in found:
found[test] = set()
found[test].add((x, y))
return found
def _makeZonePairs(blues):
blues = list(blues)
pairs = []
if not len(blues) % 2:
while blues:
bottom = blues.pop(0)
top = blues.pop(0)
pairs.append((bottom, top))
return pairs
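# For example (illustrative values): postscriptBlueValues of [-10, 0, 470, 480]
# pair up as [(-10, 0), (470, 480)]; an odd-length list yields no pairs at all.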
registry.registerTest(
identifier="pointsNearVerticalMetrics",
level="segment",
title="Near Vertical Metrics",
description="Two or more points are just off a vertical metric.",
testFunction=testForSegmentsNearVerticalMetrics,
defconClass=defcon.Contour,
destructiveNotifications=["Contour.PointsChanged"]
)
# Unsmooth Smooths
def testUnsmoothSmooths(contour):
"""
Smooth segments should have bcps in the right places.
Data structure:
[
(offcurvePoint, point, offcurvePoint),
...
]
"""
contour = wrapContour(contour)
unsmoothSmooths = []
prev = contour[-1]
for segment in contour:
if prev.type == "curve" and segment.type == "curve":
if prev.smooth:
angle1 = calculateAngle(prev.offCurve[1], prev.onCurve, r=0)
angle2 = calculateAngle(prev.onCurve, segment.offCurve[0], r=0)
if angle1 != angle2:
pt1 = unwrapPoint(prev.offCurve[1])
pt2 = unwrapPoint(prev.onCurve)
pt3 = unwrapPoint(segment.offCurve[0])
unsmoothSmooths.append((pt1, pt2, pt3))
prev = segment
return unsmoothSmooths
registry.registerTest(
identifier="unsmoothSmooths",
level="segment",
title="Unsmooth Smooths",
description="One or more smooth points do not have handles that are properly placed.",
testFunction=testUnsmoothSmooths,
defconClass=defcon.Contour,
destructiveNotifications=["Contour.PointsChanged"]
)
# Complex Curves
def testForComplexCurves(contour):
"""
S curves are suspicious.
Data structure:
[
(onCurve, offCurve, offCurve, onCurve),
...
]
"""
contour = wrapContour(contour)
impliedS = []
prev = unwrapPoint(contour[-1].onCurve)
for segment in contour:
if segment.type == "curve":
pt0 = prev
pt1, pt2 = [unwrapPoint(p) for p in segment.offCurve]
pt3 = unwrapPoint(segment.onCurve)
line1 = (pt0, pt3)
line2 = (pt1, pt2)
if calculateLineLineIntersection(line1, line2):
impliedS.append((prev, pt1, pt2, pt3))
prev = unwrapPoint(segment.onCurve)
return impliedS
registry.registerTest(
identifier="complexCurves",
level="segment",
title="Complex Curves",
description="One or more curves is suspiciously complex.",
testFunction=testForComplexCurves,
defconClass=defcon.Contour,
destructiveNotifications=["Contour.PointsChanged"]
)
# Crossed Handles
def testForCrossedHandles(contour):
"""
Handles shouldn't intersect.
Data structure:
[
{
points : (pt1, pt2, pt3, pt4),
intersection : pt
},
...
]
"""
contour = wrapContour(contour)
crossedHandles = []
pt0 = unwrapPoint(contour[-1].onCurve)
for segment in contour:
pt3 = unwrapPoint(segment.onCurve)
if segment.type == "curve":
pt1, pt2 = [unwrapPoint(p) for p in segment.offCurve]
# direct intersection
direct = calculateLineLineIntersection((pt0, pt1), (pt2, pt3))
if direct:
if _crossedHanldeWithNoOtherOptions(direct, pt0, pt1, pt2, pt3):
pass
else:
crossedHandles.append(dict(points=(pt0, pt1, pt2, pt3), intersection=direct))
# indirect intersection
else:
while 1:
# bcp1 = ray, bcp2 = segment
angle = calculateAngle(pt0, pt1)
if angle in (0, 180.0):
t1 = (pt0[0] + 1000, pt0[1])
t2 = (pt0[0] - 1000, pt0[1])
else:
yOffset = calculateAngleOffset(angle, 1000)
t1 = (pt0[0] + 1000, pt0[1] + yOffset)
t2 = (pt0[0] - 1000, pt0[1] - yOffset)
indirect = calculateLineLineIntersection((t1, t2), (pt2, pt3))
if indirect:
if _crossedHanldeWithNoOtherOptions(indirect, pt0, pt1, pt2, pt3):
pass
else:
crossedHandles.append(dict(points=(pt0, indirect, pt2, pt3), intersection=indirect))
break
# bcp1 = segment, bcp2 = ray
angle = calculateAngle(pt3, pt2)
if angle in (90.0, 270.0):
t1 = (pt3[0], pt3[1] + 1000)
t2 = (pt3[0], pt3[1] - 1000)
else:
yOffset = calculateAngleOffset(angle, 1000)
t1 = (pt3[0] + 1000, pt3[1] + yOffset)
t2 = (pt3[0] - 1000, pt3[1] - yOffset)
indirect = calculateLineLineIntersection((t1, t2), (pt0, pt1))
if indirect:
if _crossedHanldeWithNoOtherOptions(indirect, pt0, pt1, pt2, pt3):
pass
else:
crossedHandles.append(dict(points=(pt0, pt1, indirect, pt3), intersection=indirect))
break
break
pt0 = pt3
return crossedHandles
def _crossedHanldeWithNoOtherOptions(hit, pt0, pt1, pt2, pt3):
hitWidth = max((abs(hit[0] - pt0[0]), abs(hit[0] - pt3[0])))
hitHeight = max((abs(hit[1] - pt0[1]), abs(hit[1] - pt3[1])))
w = abs(pt0[0] - pt3[0])
h = abs(pt0[1] - pt3[1])
bw = max((abs(pt0[0] - pt1[0]), abs(pt3[0] - pt2[0])))
bh = max((abs(pt0[1] - pt1[1]), abs(pt3[1] - pt2[1])))
if w == 1 and bw == 1 and not bh > h:
return True
elif h == 1 and bh == 1 and not bw > w:
return True
return False
registry.registerTest(
identifier="crossedHandles",
level="segment",
title="Crossed Handles",
description="One or more curves contain crossed handles.",
testFunction=testForCrossedHandles,
defconClass=defcon.Contour,
destructiveNotifications=["Contour.PointsChanged"]
)
# Unnecessary Handles
def testForUnnecessaryHandles(contour):
"""
Handles shouldn't be used if they aren't doing anything.
Data structure:
[
(pt1, pt2),
...
]
"""
contour = wrapContour(contour)
unnecessaryHandles = []
prevPoint = contour[-1].onCurve
for segment in contour:
if segment.type == "curve":
pt0 = prevPoint
pt1, pt2 = segment.offCurve
pt3 = segment.onCurve
lineAngle = calculateAngle(pt0, pt3, 0)
bcpAngle1 = bcpAngle2 = None
if (pt0.x, pt0.y) != (pt1.x, pt1.y):
bcpAngle1 = calculateAngle(pt0, pt1, 0)
if (pt2.x, pt2.y) != (pt3.x, pt3.y):
bcpAngle2 = calculateAngle(pt2, pt3, 0)
if bcpAngle1 == lineAngle and bcpAngle2 == lineAngle:
unnecessaryHandles.append((unwrapPoint(pt1), unwrapPoint(pt2)))
prevPoint = segment.onCurve
return unnecessaryHandles
registry.registerTest(
identifier="unnecessaryHandles",
level="segment",
title="Unnecessary Handles",
description="One or more curves has unnecessary handles.",
testFunction=testForUnnecessaryHandles,
defconClass=defcon.Contour,
destructiveNotifications=["Contour.PointsChanged"]
)
# Uneven Handles
def testForUnevenHandles(contour):
"""
Handles should share the workload as evenly as possible.
Data structure:
[
(off1, off2, off1Shape, off2Shape),
...
]
"""
contour = wrapContour(contour)
unevenHandles = []
prevPoint = contour[-1].onCurve
for segment in contour:
if segment.type == "curve":
# create rays perpendicular to the
# angle between the on and off
# through the on
on1 = unwrapPoint(prevPoint)
off1, off2 = [unwrapPoint(pt) for pt in segment.offCurve]
on2 = unwrapPoint(segment.onCurve)
curve = (on1, off1, off2, on2)
off1Angle = calculateAngle(on1, off1) - 90
on1Ray = calculateLineThroughPoint(on1, off1Angle)
off2Angle = calculateAngle(off2, on2) - 90
on2Ray = calculateLineThroughPoint(on2, off2Angle)
# find the intersection of the rays
rayIntersection = calculateLineLineIntersection(on1Ray, on2Ray)
if rayIntersection is not None:
# draw a line between the off curves and the intersection
# and find out where these lines intersect the curve
off1Intersection = calculateLineCurveIntersection((off1, rayIntersection), curve)
off2Intersection = calculateLineCurveIntersection((off2, rayIntersection), curve)
if off1Intersection is not None and off2Intersection is not None:
if off1Intersection.points and off2Intersection.points:
off1IntersectionPoint = (off1Intersection.points[0].x, off1Intersection.points[0].y)
off2IntersectionPoint = (off2Intersection.points[0].x, off2Intersection.points[0].y)
# assemble the off curves and their intersections into lines
off1Line = (off1, off1IntersectionPoint)
off2Line = (off2, off2IntersectionPoint)
# measure and compare these
# if they are not both very short calculate the ratio
length1, length2 = sorted((calculateLineLength(*off1Line), calculateLineLength(*off2Line)))
if length1 >= 3 and length2 >= 3:
ratio = length2 / float(length1)
# if outside acceptable range, flag
if ratio > 1.5:
off1Shape = _getUnevenHandleShape(on1, off1, off2, on2, off1Intersection, on1, off1IntersectionPoint, off1)
off2Shape = _getUnevenHandleShape(on1, off1, off2, on2, off2Intersection, off2IntersectionPoint, on2, off2)
unevenHandles.append((off1, off2, off1Shape, off2Shape))
prevPoint = segment.onCurve
return unevenHandles
def _getUnevenHandleShape(pt0, pt1, pt2, pt3, intersection, start, end, off):
splitSegments = ftBezierTools.splitCubicAtT(pt0, pt1, pt2, pt3, *intersection.t)
curves = []
for segment in splitSegments:
if roundPoint(segment[0]) != roundPoint(start) and not curves:
continue
curves.append(segment[1:])
if roundPoint(segment[-1]) == roundPoint(end):
break
return curves + [off, start]
registry.registerTest(
identifier="unevenHandles",
level="segment",
title="Uneven Handles",
description="One or more curves has uneven handles.",
testFunction=testForUnevenHandles,
defconClass=defcon.Contour,
destructiveNotifications=["Contour.PointsChanged"]
) |
from __future__ import annotations
import datetime
import enum
from typing import Any, Dict, List, Literal, NewType, Optional, TYPE_CHECKING, Union
from pydantic import (
Extra,
Field,
PrivateAttr,
StrictBool,
ValidationError,
root_validator,
validator,
)
from server import logger
from tarkov.inventory.helpers import generate_item_id
from tarkov.inventory.prop_models import (
AnyProp,
BaseItemProps,
FilterProperty,
props_models_map,
)
from tarkov.inventory.types import ItemId, TemplateId
from tarkov.models import Base
if TYPE_CHECKING:
# pylint: disable=cyclic-import
from tarkov.inventory.inventory import MutableInventory
class NodeTemplateBase(Base):
class Config:
extra = Extra.allow
allow_mutation = False
fields = {
"id": "_id",
"name": "_name",
"parent": "_parent",
"type": "_type",
"props": "_props",
"proto": "_proto",
}
id: TemplateId
name: str
parent: TemplateId
proto: Optional[str] = None
class NodeTemplate(NodeTemplateBase):
type: Literal["Node"]
class ItemTemplate(NodeTemplateBase):
type: Literal["Item"]
props: AnyProp
@root_validator(pre=True)
def assign_prop( # pylint: disable=no-self-argument, no-self-use
cls, values: dict
) -> dict:
if values["_type"] != "Item":
return values
if isinstance(values["_props"], BaseItemProps):
return values
props = values["_props"]
try:
model = props_models_map[values["_parent"]]
except KeyError as e:
raise KeyError(
f"Props class for node with id {values['_parent']} was not found"
) from e
try:
values["_props"] = model.parse_obj(props)
except ValidationError as e:
logger.debug(values["_id"])
logger.debug(e)
raise
return values
def has_in_slots(self, template_id: TemplateId) -> bool:
"""
        Checks if template has template_id in one of its filters
"""
props = self.props
for slot_filter in ("Cartridges", "Chambers", "Slots"):
if not hasattr(props, slot_filter):
continue
filters: List[FilterProperty] = getattr(props, slot_filter)
for slot in filters:
for filter_group in slot.props.filters:
if template_id in filter_group.Filter:
return True
return False
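# Usage sketch (the template id below is a made-up placeholder):
#
#     if weapon_template.has_in_slots(TemplateId("some-cartridge-template-id")):
#         ...  # the template's Cartridges/Chambers/Slots filters accept that id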
class ItemUpdDogtag(Base):
AccountId: str
ProfileId: str
Nickname: str
Side: str
Level: int
Time: datetime.datetime
Status: str
KillerAccountId: str
KillerProfileId: str
KillerName: str
WeaponName: str
class ItemUpdTag(Base):
Name: Optional[str]
Color: Optional[int]
class ItemUpdTogglable(Base):
On: bool
class ItemUpdFaceShield(Base):
Hits: int
HitSeed: int
class ItemUpdLockable(Base):
Locked: bool
class ItemUpdRepairable(Base):
MaxDurability: Optional[
float
] = None # TODO: Some items in bot inventories don't have MaxDurability
Durability: float
class ItemUpdFoldable(Base):
Folded: bool
class ItemUpdFireMode(Base):
FireMode: str
class ItemUpdResource(Base):
Value: float
class ItemUpdFoodDrink(Base):
HpPercent: int
class ItemUpdKey(Base):
NumberOfUsages: int
class ItemUpdMedKit(Base):
HpResource: int
class ItemUpd(Base):
StackObjectsCount: int = 1
SpawnedInSession: bool = False
Repairable: Optional[ItemUpdRepairable] = None
Foldable: Optional[ItemUpdFoldable] = None
FireMode: Optional[ItemUpdFireMode] = None
Resource: Optional[ItemUpdResource] = None
FoodDrink: Optional[ItemUpdFoodDrink] = None
Key: Optional[ItemUpdKey] = None
MedKit: Optional[ItemUpdMedKit] = None
Lockable: Optional[ItemUpdLockable] = None
Sight: Optional[Any] = None
Light: Optional[Any] = None
FaceShield: Optional[ItemUpdFaceShield] = None
Togglable: Optional[ItemUpdTogglable] = None
Tag: Optional[ItemUpdTag] = None
Dogtag: Optional[ItemUpdDogtag] = None
UnlimitedCount: StrictBool = False
def folded(self) -> bool:
return self.Foldable is not None and self.Foldable.Folded
def toggled(self) -> bool:
return self.Togglable is not None and self.Togglable.On
ItemAmmoStackPosition = NewType("ItemAmmoStackPosition", int)
ItemOrientation = Literal["Horizontal", "Vertical"]
class ItemOrientationEnum(enum.Enum):
Horizontal = "Horizontal"
Vertical = "Vertical"
class ItemInventoryLocation(Base):
x: int
y: int
r: str = ItemOrientationEnum.Vertical.value
isSearched: Optional[bool] = None
@validator("r", pre=True)
def validate_rotation( # pylint: disable=no-self-argument, no-self-use
cls, value: Any
) -> Any:
if value == 1:
return ItemOrientationEnum.Vertical.value
if value == 0:
return ItemOrientationEnum.Horizontal.value
return value
AnyItemLocation = Union[ItemInventoryLocation, ItemAmmoStackPosition]
class Item(Base):
class Config:
extra = Extra.forbid
__inventory__: Optional["MutableInventory"] = PrivateAttr(
default=None
) # Link to the inventory
id: ItemId = Field(alias="_id", default_factory=generate_item_id)
tpl: TemplateId = Field(alias="_tpl")
slot_id: Optional[str] = Field(alias="slotId")
parent_id: Optional[ItemId] = Field(alias="parentId", default=None)
location: Optional[AnyItemLocation] = None
upd: ItemUpd = Field(default_factory=ItemUpd)
def get_inventory(self) -> "MutableInventory":
if self.__inventory__ is None:
raise ValueError("Item does not have inventory")
return self.__inventory__
# @root_validator(pre=False, skip_on_failure=True)
# def validate_medkit_hp(cls, values: dict) -> dict: # pylint: disable=no-self-argument,no-self-use
# if "id" not in values:
# return values
#
# item_tpl_id: TemplateId = cast(TemplateId, values.get("tpl"))
# item_template = tarkov.inventory.item_templates_repository.get_template(item_tpl_id)
# if item_template.parent == "5448f39d4bdc2d0a728b4568": # Medkit Id
# assert isinstance(item_template.props, MedsProps)
# upd: ItemUpd = cast(ItemUpd, values.get("upd"))
# if not isinstance(item_template.props.MaxHpResource, int):
# raise ResourceWarning(
# f"""Item template that inherits directly form MedKit does not have MaxHpResource property
# template id: {item_template.id}
# """
# )
# upd.MedKit = (
# upd.MedKit if upd.MedKit else ItemUpdMedKit(HpResource=item_template.props.MaxHpResource)
# )
#
# return values
#
# @root_validator(pre=False, skip_on_failure=True)
# def validate_upd_none(cls, values: dict) -> dict: # pylint: disable=no-self-argument,no-self-use
# if "upd" in values and values["upd"] is None:
# values["upd"] = ItemUpd()
#
# return values
def copy(self: Item, **kwargs: Any) -> Item:
item_inventory = self.__inventory__
# Avoid copying inventory
self.__inventory__ = None
item_copy: Item = super().copy(**kwargs)
self.__inventory__ = item_inventory
return item_copy
def __hash__(self) -> int:
return hash(self.id)
class InventoryModel(Base):
class Config(Base.Config):
pass
equipment: ItemId
stash: ItemId
questRaidItems: ItemId
questStashItems: ItemId
fastPanel: Dict[str, ItemId]
items: List[Item]
class MoveLocation(Base):
id: ItemId
container: str
location: Optional[ItemInventoryLocation]
class CartridgesMoveLocation(Base):
id: ItemId # Magazine id
container: Literal["cartridges"]
AnyMoveLocation = Union[
CartridgesMoveLocation,
MoveLocation,
]
|
#!/usr/bin/env python
'''
CUDA-accelerated Computer Vision functions
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import os
from tests_common import NewOpenCVTests, unittest
class cuda_test(NewOpenCVTests):
def setUp(self):
super(cuda_test, self).setUp()
if not cv.cuda.getCudaEnabledDeviceCount():
self.skipTest("No CUDA-capable device is detected")
def test_cuda_upload_download(self):
npMat = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
cuMat = cv.cuda_GpuMat()
cuMat.upload(npMat)
self.assertTrue(np.allclose(cuMat.download(), npMat))
def test_cudaarithm_arithmetic(self):
npMat1 = np.random.random((128, 128, 3)) - 0.5
npMat2 = np.random.random((128, 128, 3)) - 0.5
cuMat1 = cv.cuda_GpuMat()
cuMat2 = cv.cuda_GpuMat()
cuMat1.upload(npMat1)
cuMat2.upload(npMat2)
cuMatDst = cv.cuda_GpuMat(cuMat1.size(),cuMat1.type())
self.assertTrue(np.allclose(cv.cuda.add(cuMat1, cuMat2).download(),
cv.add(npMat1, npMat2)))
cv.cuda.add(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.add(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.subtract(cuMat1, cuMat2).download(),
cv.subtract(npMat1, npMat2)))
cv.cuda.subtract(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.subtract(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.multiply(cuMat1, cuMat2).download(),
cv.multiply(npMat1, npMat2)))
cv.cuda.multiply(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.multiply(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.divide(cuMat1, cuMat2).download(),
cv.divide(npMat1, npMat2)))
cv.cuda.divide(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.divide(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.absdiff(cuMat1, cuMat2).download(),
cv.absdiff(npMat1, npMat2)))
cv.cuda.absdiff(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.absdiff(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.compare(cuMat1, cuMat2, cv.CMP_GE).download(),
cv.compare(npMat1, npMat2, cv.CMP_GE)))
cuMatDst1 = cv.cuda_GpuMat(cuMat1.size(),cv.CV_8UC3)
cv.cuda.compare(cuMat1, cuMat2, cv.CMP_GE, cuMatDst1)
self.assertTrue(np.allclose(cuMatDst1.download(),cv.compare(npMat1, npMat2, cv.CMP_GE)))
self.assertTrue(np.allclose(cv.cuda.abs(cuMat1).download(),
np.abs(npMat1)))
cv.cuda.abs(cuMat1, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),np.abs(npMat1)))
self.assertTrue(np.allclose(cv.cuda.sqrt(cv.cuda.sqr(cuMat1)).download(),
cv.cuda.abs(cuMat1).download()))
cv.cuda.sqr(cuMat1, cuMatDst)
cv.cuda.sqrt(cuMatDst, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.cuda.abs(cuMat1).download()))
self.assertTrue(np.allclose(cv.cuda.log(cv.cuda.exp(cuMat1)).download(),
npMat1))
cv.cuda.exp(cuMat1, cuMatDst)
cv.cuda.log(cuMatDst, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),npMat1))
self.assertTrue(np.allclose(cv.cuda.pow(cuMat1, 2).download(),
cv.pow(npMat1, 2)))
cv.cuda.pow(cuMat1, 2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.pow(npMat1, 2)))
def test_cudaarithm_logical(self):
npMat1 = (np.random.random((128, 128)) * 255).astype(np.uint8)
npMat2 = (np.random.random((128, 128)) * 255).astype(np.uint8)
cuMat1 = cv.cuda_GpuMat()
cuMat2 = cv.cuda_GpuMat()
cuMat1.upload(npMat1)
cuMat2.upload(npMat2)
cuMatDst = cv.cuda_GpuMat(cuMat1.size(),cuMat1.type())
self.assertTrue(np.allclose(cv.cuda.bitwise_or(cuMat1, cuMat2).download(),
cv.bitwise_or(npMat1, npMat2)))
cv.cuda.bitwise_or(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.bitwise_or(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.bitwise_and(cuMat1, cuMat2).download(),
cv.bitwise_and(npMat1, npMat2)))
cv.cuda.bitwise_and(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.bitwise_and(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.bitwise_xor(cuMat1, cuMat2).download(),
cv.bitwise_xor(npMat1, npMat2)))
cv.cuda.bitwise_xor(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.bitwise_xor(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.bitwise_not(cuMat1).download(),
cv.bitwise_not(npMat1)))
cv.cuda.bitwise_not(cuMat1, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.bitwise_not(npMat1)))
self.assertTrue(np.allclose(cv.cuda.min(cuMat1, cuMat2).download(),
cv.min(npMat1, npMat2)))
cv.cuda.min(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.min(npMat1, npMat2)))
self.assertTrue(np.allclose(cv.cuda.max(cuMat1, cuMat2).download(),
cv.max(npMat1, npMat2)))
cv.cuda.max(cuMat1, cuMat2, cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),cv.max(npMat1, npMat2)))
    # renamed from a second test_cudaarithm_arithmetic, which silently shadowed the
    # arithmetic test defined above
    def test_cudaarithm_splitmerge(self):
npMat1 = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
cuMat1 = cv.cuda_GpuMat(npMat1)
cuMatDst = cv.cuda_GpuMat(cuMat1.size(),cuMat1.type())
cuMatB = cv.cuda_GpuMat(cuMat1.size(),cv.CV_8UC1)
cuMatG = cv.cuda_GpuMat(cuMat1.size(),cv.CV_8UC1)
cuMatR = cv.cuda_GpuMat(cuMat1.size(),cv.CV_8UC1)
self.assertTrue(np.allclose(cv.cuda.merge(cv.cuda.split(cuMat1)),npMat1))
cv.cuda.split(cuMat1,[cuMatB,cuMatG,cuMatR])
cv.cuda.merge([cuMatB,cuMatG,cuMatR],cuMatDst)
self.assertTrue(np.allclose(cuMatDst.download(),npMat1))
def test_cudabgsegm_existence(self):
#Test at least the existence of wrapped functions for now
_bgsub = cv.cuda.createBackgroundSubtractorMOG()
_bgsub = cv.cuda.createBackgroundSubtractorMOG2()
self.assertTrue(True) #It is sufficient that no exceptions have been there
@unittest.skipIf('OPENCV_TEST_DATA_PATH' not in os.environ,
"OPENCV_TEST_DATA_PATH is not defined")
def test_cudacodec(self):
#Test the functionality but not the results of the video reader
vid_path = os.environ['OPENCV_TEST_DATA_PATH'] + '/cv/video/1920x1080.avi'
try:
reader = cv.cudacodec.createVideoReader(vid_path)
ret, gpu_mat = reader.nextFrame()
self.assertTrue(ret)
self.assertTrue('GpuMat' in str(type(gpu_mat)), msg=type(gpu_mat))
#TODO: print(cv.utils.dumpInputArray(gpu_mat)) # - no support for GpuMat
            # not checking output, therefore separate tests for different signatures are unnecessary
ret, _gpu_mat2 = reader.nextFrame(gpu_mat)
#TODO: self.assertTrue(gpu_mat == gpu_mat2)
self.assertTrue(ret)
except cv.error as e:
notSupported = (e.code == cv.Error.StsNotImplemented or e.code == cv.Error.StsUnsupportedFormat or e.code == cv.Error.GPU_API_CALL_ERROR)
self.assertTrue(notSupported)
if e.code == cv.Error.StsNotImplemented:
self.skipTest("NVCUVID is not installed")
elif e.code == cv.Error.StsUnsupportedFormat:
self.skipTest("GPU hardware video decoder missing or video format not supported")
            elif e.code == cv.Error.GPU_API_CALL_ERROR:
self.skipTest("GPU hardware video decoder is missing")
else:
self.skipTest(e.err)
def test_cudacodec_writer_existence(self):
#Test at least the existence of wrapped functions for now
try:
_writer = cv.cudacodec.createVideoWriter("tmp", (128, 128), 30)
except cv.error as e:
self.assertEqual(e.code, cv.Error.StsNotImplemented)
self.skipTest("NVCUVENC is not installed")
self.assertTrue(True) #It is sufficient that no exceptions have been there
def test_cudafeatures2d(self):
npMat1 = self.get_sample("samples/data/right01.jpg")
npMat2 = self.get_sample("samples/data/right02.jpg")
cuMat1 = cv.cuda_GpuMat()
cuMat2 = cv.cuda_GpuMat()
cuMat1.upload(npMat1)
cuMat2.upload(npMat2)
cuMat1 = cv.cuda.cvtColor(cuMat1, cv.COLOR_RGB2GRAY)
cuMat2 = cv.cuda.cvtColor(cuMat2, cv.COLOR_RGB2GRAY)
fast = cv.cuda_FastFeatureDetector.create()
_kps = fast.detectAsync(cuMat1)
orb = cv.cuda_ORB.create()
_kps1, descs1 = orb.detectAndComputeAsync(cuMat1, None)
_kps2, descs2 = orb.detectAndComputeAsync(cuMat2, None)
bf = cv.cuda_DescriptorMatcher.createBFMatcher(cv.NORM_HAMMING)
matches = bf.match(descs1, descs2)
self.assertGreater(len(matches), 0)
matches = bf.knnMatch(descs1, descs2, 2)
self.assertGreater(len(matches), 0)
matches = bf.radiusMatch(descs1, descs2, 0.1)
self.assertGreater(len(matches), 0)
self.assertTrue(True) #It is sufficient that no exceptions have been there
def test_cudafilters_existence(self):
#Test at least the existence of wrapped functions for now
_filter = cv.cuda.createBoxFilter(cv.CV_8UC1, -1, (3, 3))
_filter = cv.cuda.createLinearFilter(cv.CV_8UC4, -1, np.eye(3))
_filter = cv.cuda.createLaplacianFilter(cv.CV_16UC1, -1, ksize=3)
_filter = cv.cuda.createSeparableLinearFilter(cv.CV_8UC1, -1, np.eye(3), np.eye(3))
_filter = cv.cuda.createDerivFilter(cv.CV_8UC1, -1, 1, 1, 3)
_filter = cv.cuda.createSobelFilter(cv.CV_8UC1, -1, 1, 1)
_filter = cv.cuda.createScharrFilter(cv.CV_8UC1, -1, 1, 0)
_filter = cv.cuda.createGaussianFilter(cv.CV_8UC1, -1, (3, 3), 16)
_filter = cv.cuda.createMorphologyFilter(cv.MORPH_DILATE, cv.CV_32FC1, np.eye(3))
_filter = cv.cuda.createBoxMaxFilter(cv.CV_8UC1, (3, 3))
_filter = cv.cuda.createBoxMinFilter(cv.CV_8UC1, (3, 3))
_filter = cv.cuda.createRowSumFilter(cv.CV_8UC1, cv.CV_32FC1, 3)
_filter = cv.cuda.createColumnSumFilter(cv.CV_8UC1, cv.CV_32FC1, 3)
_filter = cv.cuda.createMedianFilter(cv.CV_8UC1, 3)
self.assertTrue(True) #It is sufficient that no exceptions have been there
def test_cudafilters_laplacian(self):
npMat = (np.random.random((128, 128)) * 255).astype(np.uint16)
cuMat = cv.cuda_GpuMat()
cuMat.upload(npMat)
self.assertTrue(np.allclose(cv.cuda.createLaplacianFilter(cv.CV_16UC1, -1, ksize=3).apply(cuMat).download(),
cv.Laplacian(npMat, cv.CV_16UC1, ksize=3)))
def test_cudaimgproc(self):
npC1 = (np.random.random((128, 128)) * 255).astype(np.uint8)
npC3 = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
npC4 = (np.random.random((128, 128, 4)) * 255).astype(np.uint8)
cuC1 = cv.cuda_GpuMat()
cuC3 = cv.cuda_GpuMat()
cuC4 = cv.cuda_GpuMat()
cuC1.upload(npC1)
cuC3.upload(npC3)
cuC4.upload(npC4)
cv.cuda.cvtColor(cuC3, cv.COLOR_RGB2HSV)
cv.cuda.demosaicing(cuC1, cv.cuda.COLOR_BayerGR2BGR_MHT)
cv.cuda.gammaCorrection(cuC3)
cv.cuda.alphaComp(cuC4, cuC4, cv.cuda.ALPHA_XOR)
cv.cuda.calcHist(cuC1)
cv.cuda.equalizeHist(cuC1)
cv.cuda.evenLevels(3, 0, 255)
cv.cuda.meanShiftFiltering(cuC4, 10, 5)
cv.cuda.meanShiftProc(cuC4, 10, 5)
cv.cuda.bilateralFilter(cuC3, 3, 16, 3)
cv.cuda.blendLinear
cv.cuda.meanShiftSegmentation(cuC4, 10, 5, 5).download()
clahe = cv.cuda.createCLAHE()
clahe.apply(cuC1, cv.cuda_Stream.Null())
histLevels = cv.cuda.histEven(cuC3, 20, 0, 255)
cv.cuda.histRange(cuC1, histLevels)
detector = cv.cuda.createCannyEdgeDetector(0, 100)
detector.detect(cuC1)
detector = cv.cuda.createHoughLinesDetector(3, np.pi / 180, 20)
detector.detect(cuC1)
detector = cv.cuda.createHoughSegmentDetector(3, np.pi / 180, 20, 5)
detector.detect(cuC1)
detector = cv.cuda.createHoughCirclesDetector(3, 20, 10, 10, 20, 100)
detector.detect(cuC1)
detector = cv.cuda.createGeneralizedHoughBallard()
#BUG: detect accept only Mat!
#Even if generate_gpumat_decls is set to True, it only wraps overload CUDA functions.
#The problem is that Mat and GpuMat are not fully compatible to enable system-wide overloading
#detector.detect(cuC1, cuC1, cuC1)
detector = cv.cuda.createGeneralizedHoughGuil()
#BUG: same as above..
#detector.detect(cuC1, cuC1, cuC1)
detector = cv.cuda.createHarrisCorner(cv.CV_8UC1, 15, 5, 1)
detector.compute(cuC1)
detector = cv.cuda.createMinEigenValCorner(cv.CV_8UC1, 15, 5, 1)
detector.compute(cuC1)
detector = cv.cuda.createGoodFeaturesToTrackDetector(cv.CV_8UC1)
detector.detect(cuC1)
matcher = cv.cuda.createTemplateMatching(cv.CV_8UC1, cv.TM_CCOEFF_NORMED)
matcher.match(cuC3, cuC3)
self.assertTrue(True) #It is sufficient that no exceptions have been there
def test_cudaimgproc_cvtColor(self):
npMat = (np.random.random((128, 128, 3)) * 255).astype(np.uint8)
cuMat = cv.cuda_GpuMat()
cuMat.upload(npMat)
self.assertTrue(np.allclose(cv.cuda.cvtColor(cuMat, cv.COLOR_BGR2HSV).download(),
cv.cvtColor(npMat, cv.COLOR_BGR2HSV)))
if __name__ == '__main__':
NewOpenCVTests.bootstrap()
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server import util
# ExpertiseLevel is referenced in openapi_types below; the import path follows the
# usual OpenAPI Generator layout (adjust if the generated model lives elsewhere).
from openapi_server.models.expertise_level import ExpertiseLevel  # noqa: E501
class ExpertiseLevels(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, expertise_levels=None, n_expertise_levels=None): # noqa: E501
"""ExpertiseLevels - a model defined in OpenAPI
:param expertise_levels: The expertise_levels of this ExpertiseLevels. # noqa: E501
:type expertise_levels: List[ExpertiseLevel]
:param n_expertise_levels: The n_expertise_levels of this ExpertiseLevels. # noqa: E501
:type n_expertise_levels: int
"""
self.openapi_types = {
'expertise_levels': List[ExpertiseLevel],
'n_expertise_levels': int
}
self.attribute_map = {
'expertise_levels': 'expertise_levels',
'n_expertise_levels': 'n_expertise_levels'
}
self._expertise_levels = expertise_levels
self._n_expertise_levels = n_expertise_levels
@classmethod
def from_dict(cls, dikt) -> 'ExpertiseLevels':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The ExpertiseLevels of this ExpertiseLevels. # noqa: E501
:rtype: ExpertiseLevels
"""
return util.deserialize_model(dikt, cls)
@property
def expertise_levels(self):
"""Gets the expertise_levels of this ExpertiseLevels.
:return: The expertise_levels of this ExpertiseLevels.
:rtype: List[ExpertiseLevel]
"""
return self._expertise_levels
@expertise_levels.setter
def expertise_levels(self, expertise_levels):
"""Sets the expertise_levels of this ExpertiseLevels.
:param expertise_levels: The expertise_levels of this ExpertiseLevels.
:type expertise_levels: List[ExpertiseLevel]
"""
self._expertise_levels = expertise_levels
@property
def n_expertise_levels(self):
"""Gets the n_expertise_levels of this ExpertiseLevels.
:return: The n_expertise_levels of this ExpertiseLevels.
:rtype: int
"""
return self._n_expertise_levels
@n_expertise_levels.setter
def n_expertise_levels(self, n_expertise_levels):
"""Sets the n_expertise_levels of this ExpertiseLevels.
:param n_expertise_levels: The n_expertise_levels of this ExpertiseLevels.
:type n_expertise_levels: int
"""
self._n_expertise_levels = n_expertise_levels
|
from spaceone.api.cost_analysis.v1 import job_pb2, job_pb2_grpc
from spaceone.core.pygrpc import BaseAPI
class Job(BaseAPI, job_pb2_grpc.JobServicer):
pb2 = job_pb2
pb2_grpc = job_pb2_grpc
def cancel(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('JobService', metadata) as job_service:
return self.locator.get_info('JobInfo', job_service.cancel(params))
def get(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('JobService', metadata) as job_service:
return self.locator.get_info('JobInfo', job_service.get(params))
def list(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('JobService', metadata) as job_service:
job_vos, total_count = job_service.list(params)
return self.locator.get_info('JobsInfo',
job_vos,
total_count,
minimal=self.get_minimal(params))
def stat(self, request, context):
params, metadata = self.parse_request(request, context)
with self.locator.get_service('JobService', metadata) as job_service:
return self.locator.get_info('StatisticsInfo', job_service.stat(params))
|
from utils.solution_base import SolutionBase
class Solution(SolutionBase):
def solve(self, part_num: int):
self.test_runner(part_num)
func = getattr(self, f"part{part_num}")
result = func(self.data)
return result
def test_runner(self, part_num):
test_inputs = self.get_test_input()
test_results = self.get_test_result(part_num)
test_counter = 1
func = getattr(self, f"part{part_num}")
for i, r in zip(test_inputs, test_results):
if len(r):
if (tr := str(func(i))) == r[0]:
print(f"test {test_counter} passed")
else:
print(f"your result: {tr}")
print(f"test answer: {r[0]}")
print(f"test {test_counter} NOT passed")
test_counter += 1
print()
def part1(self, data):
amp = Amp(data[0], 0)
amp.inputs = [1]
signals = []
while 1:
amp.status = Status.RUNNING
outputs = [*amp.run()]
if amp.status == Status.HALT:
break
signals += [str(outputs[-1])]
return ",".join(signals)
def part2(self, data):
amp = Amp(data[0], 0)
amp.inputs = [2]
signals = []
while 1:
amp.status = Status.RUNNING
outputs = [*amp.run()]
if amp.status == Status.HALT:
break
signals += [str(outputs[-1])]
return ",".join(signals)
class Status:
RUNNING = 1
WAIT = 2
HALT = 0
class Amp:
def __init__(self, insts_raw, phase):
self.insts = [*map(int, insts_raw.split(","))] + [0] * 1000
self.phase = phase
self.status = Status.RUNNING
self.pntr = 0
self.inputs = [phase]
self.base = 0
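    # The interpreter below follows the Intcode convention: each instruction is
    # a two-digit opcode plus up to three parameter-mode digits, where mode "0"
    # is position (value at the given address), "1" is immediate (the literal
    # value) and "2" is relative (address offset by self.base).  That is what
    # the long conditional expressions in run() decode.  run() is a generator
    # that pauses (Status.WAIT) after emitting an output or when input runs dry.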
def run(self):
while self.status == Status.RUNNING:
opcode = f"{self.insts[self.pntr]:05}"
modes = list(opcode[:-2][::-1])
opcode = int(opcode[-2:])
if opcode == 1:
val1 = self.insts[self.insts[self.pntr + 1]] if modes[0] == "0" else self.insts[self.insts[self.pntr + 1] + self.base] if modes[0] == "2" else self.insts[self.pntr + 1]
val2 = self.insts[self.insts[self.pntr + 2]] if modes[1] == "0" else self.insts[self.insts[self.pntr + 2] + self.base] if modes[1] == "2" else self.insts[self.pntr + 2]
address = self.insts[self.pntr + 3] if modes[2] == "0" else self.insts[self.pntr + 3] + self.base if modes[2] == "2" else self.pntr + 3
if address >= 0:
self.insts[address] = val1 + val2
self.pntr += 4
elif opcode == 2:
val1 = self.insts[self.insts[self.pntr + 1]] if modes[0] == "0" else self.insts[self.insts[self.pntr + 1] + self.base] if modes[0] == "2" else self.insts[self.pntr + 1]
val2 = self.insts[self.insts[self.pntr + 2]] if modes[1] == "0" else self.insts[self.insts[self.pntr + 2] + self.base] if modes[1] == "2" else self.insts[self.pntr + 2]
address = self.insts[self.pntr + 3] if modes[2] == "0" else self.insts[self.pntr + 3] + self.base if modes[2] == "2" else self.pntr + 3
if address >= 0:
self.insts[address] = val1 * val2
self.pntr += 4
elif opcode == 3:
if len(self.inputs):
val = self.inputs.pop(0)
address = self.insts[self.pntr + 1] if modes[0] == "0" else self.insts[self.pntr + 1] + self.base if modes[0] == "2" else self.pntr + 1
if address >= 0:
self.insts[address] = val
self.pntr += 2
else:
self.status = Status.WAIT
elif opcode == 4:
address = self.insts[self.pntr + 1] if modes[0] == "0" else self.insts[self.pntr + 1] + self.base if modes[0] == "2" else self.pntr + 1
if address >= 0:
yield self.insts[address]
self.pntr += 2
self.status = Status.WAIT
elif opcode == 5:
val1 = self.insts[self.insts[self.pntr + 1]] if modes[0] == "0" else self.insts[self.insts[self.pntr + 1] + self.base] if modes[0] == "2" else self.insts[self.pntr + 1]
val2 = self.insts[self.insts[self.pntr + 2]] if modes[1] == "0" else self.insts[self.insts[self.pntr + 2] + self.base] if modes[1] == "2" else self.insts[self.pntr + 2]
if val1 != 0:
self.pntr = val2
else:
self.pntr += 3
elif opcode == 6:
val1 = self.insts[self.insts[self.pntr + 1]] if modes[0] == "0" else self.insts[self.insts[self.pntr + 1] + self.base] if modes[0] == "2" else self.insts[self.pntr + 1]
val2 = self.insts[self.insts[self.pntr + 2]] if modes[1] == "0" else self.insts[self.insts[self.pntr + 2] + self.base] if modes[1] == "2" else self.insts[self.pntr + 2]
if val1 == 0:
self.pntr = val2
else:
self.pntr += 3
elif opcode == 7:
val1 = self.insts[self.insts[self.pntr + 1]] if modes[0] == "0" else self.insts[self.insts[self.pntr + 1] + self.base] if modes[0] == "2" else self.insts[self.pntr + 1]
val2 = self.insts[self.insts[self.pntr + 2]] if modes[1] == "0" else self.insts[self.insts[self.pntr + 2] + self.base] if modes[1] == "2" else self.insts[self.pntr + 2]
address = self.insts[self.pntr + 3] if modes[2] == "0" else self.insts[self.pntr + 3] + self.base if modes[2] == "2" else self.pntr + 3
if address >= 0:
self.insts[address] = 1 if val1 < val2 else 0
self.pntr += 4
elif opcode == 8:
val1 = self.insts[self.insts[self.pntr + 1]] if modes[0] == "0" else self.insts[self.insts[self.pntr + 1] + self.base] if modes[0] == "2" else self.insts[self.pntr + 1]
val2 = self.insts[self.insts[self.pntr + 2]] if modes[1] == "0" else self.insts[self.insts[self.pntr + 2] + self.base] if modes[1] == "2" else self.insts[self.pntr + 2]
address = self.insts[self.pntr + 3] if modes[2] == "0" else self.insts[self.pntr + 3] + self.base if modes[2] == "2" else self.pntr + 3
if address >= 0:
self.insts[address] = 1 if val1 == val2 else 0
self.pntr += 4
elif opcode == 9:
val = self.insts[self.insts[self.pntr + 1]] if modes[0] == "0" else self.insts[self.insts[self.pntr + 1] + self.base] if modes[0] == "2" else self.insts[self.pntr + 1]
self.base += val
self.pntr += 2
elif opcode == 99:
self.status = Status.HALT
self.pntr += 1
else:
raise Exception(f"unknown opcode: {opcode}")
|
import sys
from lexer import CalcLexer
from parser import CalcParser
def repl(lexer, parser):
print('Custom language v0.0.1')
print('Type "exit" to quit the REPL')
linecount = 0
while True:
try:
text = input(f'λ({linecount}) ⇒ ')
except EOFError:
break
if text:
if text == 'exit':
break
run(lexer, parser, text)
print(parser.last_item_on_stack)
linecount = linecount + 1
def run(lexer, parser, text):
parser.parse(lexer.tokenize(text))
def runFile(lexer, parser, fileName):
with open(fileName) as f:
content = f.readlines()
for line in content:
run(lexer, parser, line)
print(parser.last_item_on_stack)
# run(lexer, parser, "".join(content))
if __name__ == '__main__':
lexer = CalcLexer()
parser = CalcParser()
if len(sys.argv) > 1:
runFile(lexer, parser, sys.argv[1])
else:
repl(lexer, parser)
|
# encoding: utf8
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import logging
logger = logging.getLogger(__name__)
from collections import defaultdict
from lxml import etree, html
from lxml.html.builder import E
from spyne.util import coroutine, Break
from spyne.util.oset import oset
from spyne.protocol.cloth import XmlCloth
from spyne.protocol.cloth._base import XmlClothProtocolContext
def parse_html_fragment_file(T_FILES):
elt = html.fromstring(open(T_FILES).read())
elt.getparent().remove(elt)
return elt
class HtmlClothProtocolContext(XmlClothProtocolContext):
def __init__(self, parent, transport, type=None):
super(HtmlClothProtocolContext, self).__init__(parent, transport, type)
self.assets = []
self.eltstack = defaultdict(list)
self.ctxstack = defaultdict(list)
self.rootstack = oset()
self.tags = set()
self.objcache = dict()
# these are supposed to be for neurons.base.screen.ScreenBase subclasses
self.screen = None
self.prev_view = None
self.next_view = None
class HtmlCloth(XmlCloth):
mime_type = 'text/html; charset=UTF-8'
def __init__(self, app=None, mime_type=None, ignore_uncap=False,
ignore_wrappers=False, cloth=None, cloth_parser=None,
polymorphic=True, hier_delim='.', doctype=None):
super(HtmlCloth, self).__init__(app=app, mime_type=mime_type,
ignore_uncap=ignore_uncap, ignore_wrappers=ignore_wrappers,
cloth=cloth, cloth_parser=cloth_parser, polymorphic=polymorphic)
self.hier_delim = hier_delim
self.doctype = doctype
def _parse_file(self, file_name, cloth_parser):
if cloth_parser is None:
cloth_parser = html.HTMLParser(remove_comments=True)
cloth = html.parse(file_name, parser=cloth_parser)
return cloth.getroot()
def docfile(self, *args, **kwargs):
return etree.htmlfile(*args, **kwargs)
def write_doctype(self, ctx, parent, cloth=None):
if self.doctype is not None:
dt = self.doctype
elif cloth is not None:
dt = cloth.getroottree().docinfo.doctype
elif self._root_cloth is not None:
dt = self._root_cloth.getroottree().docinfo.doctype
elif self._cloth is not None:
dt = self._cloth.getroottree().docinfo.doctype
else:
return
parent.write_doctype(dt)
ctx.protocol.doctype_written = True
logger.debug("Doctype written as: '%s'", dt)
def get_context(self, parent, transport):
return HtmlClothProtocolContext(parent, transport)
@staticmethod
def get_class_cloth(cls):
return cls.Attributes._html_cloth
@staticmethod
def get_class_root_cloth(cls):
return cls.Attributes._html_root_cloth
def dict_to_parent(self, ctx, cls, inst, parent, name, **kwargs):
parent.write(str(inst))
@staticmethod
def add_html_attr(attr_name, attr_dict, class_name):
if attr_name in attr_dict:
            attr_dict[attr_name] = ' '.join(
                (attr_dict.get(attr_name, ''), class_name))
else:
attr_dict[attr_name] = class_name
@staticmethod
def add_style(attr_dict, data):
style = attr_dict.get('style', None)
if style is not None:
            attr_dict['style'] = ';'.join((style, data))
else:
attr_dict['style'] = data
def null_to_parent(self, ctx, cls, inst, parent, name, **kwargs):
cls_attrs = self.get_cls_attrs(cls)
if cls_attrs.min_occurs >= 1:
parent.write(E(name))
@coroutine
def complex_to_parent(self, ctx, cls, inst, parent, name, use_ns=False,
**kwargs):
inst = cls.get_serialization_instance(inst)
# TODO: Put xml attributes as well in the below element() call.
with parent.element(name):
ret = self._write_members(ctx, cls, inst, parent, use_ns=False,
**kwargs)
if ret is not None:
try:
while True:
sv2 = (yield) # may throw Break
ret.send(sv2)
except Break:
try:
ret.throw(Break())
except StopIteration:
pass
# FIXME: Deprecated
HtmlBase = HtmlCloth
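# Usage note (sketch): write_doctype() resolves the doctype in the order shown
# above: the explicit `doctype=` constructor argument first, then the cloth
# passed to the call, the root cloth, and finally the class-level cloth.  For
# example, forcing HTML5 output regardless of what the cloth file declares:
#
#   proto = HtmlCloth(doctype="<!DOCTYPE html>")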
|
# Copyright 2013, Michael H. Goldwasser
#
# Developed for use with the book:
#
# Data Structures and Algorithms in Python
# Michael T. Goodrich, Roberto Tamassia, and Michael H. Goldwasser
# John Wiley & Sons, 2013
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
age = -1 # an initially invalid choice
while age <= 0:
try:
age = int(input('Enter your age in years: '))
if age <= 0:
print('Your age must be positive')
except (ValueError, EOFError):
print('Invalid response')
|
'''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| ___| | | / _ \ | ___ \_ _| _
| |_ ___ __ _| |_ _ _ _ __ ___ ___ / /_\ \| |_/ / | | (_)
| _/ _ \/ _` | __| | | | '__/ _ \/ __| | _ || __/ | |
| || __/ (_| | |_| |_| | | | __/\__ \ | | | || | _| |_ _
\_| \___|\__,_|\__|\__,_|_| \___||___/ \_| |_/\_| \___/ (_)
___ _ _
/ _ \ | (_)
/ /_\ \_ _ __| |_ ___
| _ | | | |/ _` | |/ _ \
| | | | |_| | (_| | | (_) |
\_| |_/\__,_|\__,_|_|\___/
This featurizes folders of audio files when default_audio_features = ['librosa_features'].
It extracts acoustic features using the LibROSA library and
saves them as the mean, standard deviation, max, min, and median
of each feature, grouped into onset, rhythm, spectral, and power categories.
Note this is quite a powerful audio feature set that can be used
for a variety of purposes.
For more information, check out LibROSA's documentation: https://librosa.org/
'''
import librosa, os
if librosa.__version__ != '0.6.2':
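    # NOTE: this feature extractor targets the librosa 0.6.2 API; a bare
    # re-import below does not swap an already-loaded module, so a fresh
    # interpreter (or importlib.reload) is needed to pick up the pinned version.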
os.system('pip3 install librosa==0.6.2')
import librosa
import numpy as np
# get statistical features in numpy
def stats(matrix):
mean=np.mean(matrix)
std=np.std(matrix)
maxv=np.amax(matrix)
minv=np.amin(matrix)
median=np.median(matrix)
output=np.array([mean,std,maxv,minv,median])
return output
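# Example: stats() flattens any array to five summary values in a fixed order
# (mean, std, max, min, median), e.g.
#
#   stats(np.array([1, 2, 3, 4]))  ->  array([2.5, 1.118..., 4.0, 1.0, 2.5])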
# get labels for later
def stats_labels(label, sample_list):
mean=label+'_mean'
std=label+'_std'
maxv=label+'_maxv'
minv=label+'_minv'
median=label+'_median'
sample_list.append(mean)
sample_list.append(std)
sample_list.append(maxv)
sample_list.append(minv)
sample_list.append(median)
return sample_list
# featurize with librosa following documentation
# https://librosa.github.io/librosa/feature.html
def librosa_featurize(filename, categorize):
# if categorize == True, output feature categories
print('librosa featurizing: %s'%(filename))
# initialize lists
onset_labels=list()
y, sr = librosa.load(filename)
# FEATURE EXTRACTION
######################################################
# extract major features using librosa
mfcc=librosa.feature.mfcc(y)
poly_features=librosa.feature.poly_features(y)
chroma_cens=librosa.feature.chroma_cens(y)
chroma_cqt=librosa.feature.chroma_cqt(y)
chroma_stft=librosa.feature.chroma_stft(y)
tempogram=librosa.feature.tempogram(y)
spectral_centroid=librosa.feature.spectral_centroid(y)[0]
spectral_bandwidth=librosa.feature.spectral_bandwidth(y)[0]
spectral_contrast=librosa.feature.spectral_contrast(y)[0]
spectral_flatness=librosa.feature.spectral_flatness(y)[0]
spectral_rolloff=librosa.feature.spectral_rolloff(y)[0]
onset=librosa.onset.onset_detect(y)
onset=np.append(len(onset),stats(onset))
# append labels
onset_labels.append('onset_length')
onset_labels=stats_labels('onset_detect', onset_labels)
tempo=librosa.beat.tempo(y)[0]
onset_features=np.append(onset,tempo)
# append labels
onset_labels.append('tempo')
onset_strength=librosa.onset.onset_strength(y)
onset_labels=stats_labels('onset_strength', onset_labels)
zero_crossings=librosa.feature.zero_crossing_rate(y)[0]
rmse=librosa.feature.rmse(y)[0]
# FEATURE CLEANING
######################################################
# onset detection features
onset_features=np.append(onset_features,stats(onset_strength))
# rhythm features (384) - take the first 13
rhythm_features=np.concatenate(np.array([stats(tempogram[0]),
stats(tempogram[1]),
stats(tempogram[2]),
stats(tempogram[3]),
stats(tempogram[4]),
stats(tempogram[5]),
stats(tempogram[6]),
stats(tempogram[7]),
stats(tempogram[8]),
stats(tempogram[9]),
stats(tempogram[10]),
stats(tempogram[11]),
stats(tempogram[12])]))
rhythm_labels=list()
for i in range(13):
rhythm_labels=stats_labels('rhythm_'+str(i), rhythm_labels)
# spectral features (first 13 mfccs)
spectral_features=np.concatenate(np.array([stats(mfcc[0]),
stats(mfcc[1]),
stats(mfcc[2]),
stats(mfcc[3]),
stats(mfcc[4]),
stats(mfcc[5]),
stats(mfcc[6]),
stats(mfcc[7]),
stats(mfcc[8]),
stats(mfcc[9]),
stats(mfcc[10]),
stats(mfcc[11]),
stats(mfcc[12]),
stats(poly_features[0]),
stats(poly_features[1]),
stats(spectral_centroid),
stats(spectral_bandwidth),
stats(spectral_contrast),
stats(spectral_flatness),
stats(spectral_rolloff)]))
spectral_labels=list()
for i in range(13):
spectral_labels=stats_labels('mfcc_'+str(i), spectral_labels)
for i in range(2):
spectral_labels=stats_labels('poly_'+str(i), spectral_labels)
spectral_labels=stats_labels('spectral_centroid', spectral_labels)
spectral_labels=stats_labels('spectral_bandwidth', spectral_labels)
spectral_labels=stats_labels('spectral_contrast', spectral_labels)
spectral_labels=stats_labels('spectral_flatness', spectral_labels)
spectral_labels=stats_labels('spectral_rolloff', spectral_labels)
# power features
power_features=np.concatenate(np.array([stats(zero_crossings),
stats(rmse)]))
power_labels=list()
power_labels=stats_labels('zero_crossings',power_labels)
power_labels=stats_labels('RMSE', power_labels)
# you can also concatenate the features
if categorize == True:
# can output feature categories if true
features={'onset':onset_features,
'rhythm':rhythm_features,
'spectral':spectral_features,
'power':power_features}
labels={'onset':onset_labels,
'rhythm':rhythm_labels,
'spectral':spectral_labels,
'power': power_labels}
else:
# can output numpy array of everything if we don't need categorizations
features = np.concatenate(np.array([onset_features,
rhythm_features,
spectral_features,
power_features]))
labels=onset_labels+rhythm_labels+spectral_labels+power_labels
return features, labels
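# A minimal driver sketch for the function above (hypothetical file path;
# assumes the librosa 0.6.2 API pinned at the top of this module):
def _demo_librosa_featurize(wavfile='example.wav'):
    # flat feature vector plus the matching label list
    features, labels = librosa_featurize(wavfile, False)
    print(len(features), len(labels))
    # or grouped into onset / rhythm / spectral / power dictionaries
    grouped, grouped_labels = librosa_featurize(wavfile, True)
    print(list(grouped.keys()))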
|
# Generated by Django 3.2.7 on 2021-11-14 13:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('school_management', '0007_student_otherschool'),
]
operations = [
migrations.RemoveField(
model_name='student',
name='otherschool',
),
]
|
from creds import *
import tweepy
import markovify
import os
import argparse
# Execute in script directory
abspath = os.path.abspath(__file__)
dname = os.path.dirname(abspath)
os.chdir(dname)
def generate_tweet(test_mode=False):
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# Read content to feed Markov model
with open("tweet-content.txt", 'r') as f:
text = f.read()
text_model = markovify.NewlineText(text)
# Generate the tweet text (use Twitter regular form of 140 characters)
tweet = text_model.make_short_sentence(140)
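    # NOTE: make_short_sentence() returns None when it cannot build a sentence
    # within the length budget; a caller posting directly may want to retry.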
if test_mode:
print(tweet)
else:
api.update_status(tweet)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate a random tweet using Markov chain generation and post it.')
parser.add_argument('--test', action='store_true', help='Test the functionality by printing the tweet')
args = parser.parse_args()
generate_tweet(args.test)
|
#!/usr/bin/env python3
import os, re, sys
class Validator( object ):
    def __init__( self, vmap = None, **opt ):
        # avoid a shared mutable default: each instance gets its own dict
        self._validator_map = vmap if vmap is not None else dict()
if __name__ == "__main__":
pass |
import os
from setuptools import setup, find_packages
from setuptools.command.install import install
from setuptools.command.develop import develop
import pathlib
NAME = 'git-scan'
DESCRIPTION = "Scan local or remote git repositories for history divergent from origin"
URL = 'https://github.com/johnaparker/git-scan'
EMAIL = '[email protected]'
AUTHOR = 'John Parker'
KEYWORDS = 'git scan ssh tmux repositories'
VERSION = '0.2.1'
LICENSE = 'MIT'
REQUIRED = [
'termcolor',
'toml',
'libtmux',
'paramiko',
]
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
def post_install():
config_str = """
repositories = [
]""".strip()
config_path = pathlib.Path('~/.config/git-scan/git-scan.conf').expanduser()
if not os.path.exists(config_path):
os.makedirs(config_path.parent, exist_ok=True)
with open(config_path, 'w') as f:
f.write(config_str)
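# A populated config might look like the following (hypothetical paths; the
# file at ~/.config/git-scan/git-scan.conf is read as TOML, per the `toml`
# dependency above):
#
#   repositories = [
#       "/home/user/projects/project-a",
#       "/home/user/projects/project-b",
#   ]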
class PostInstallCommand(install):
"""Post-installation for installation mode."""
def run(self):
post_install()
install.run(self)
class PostDevelopCommand(develop):
"""Post-installation for installation mode."""
def run(self):
post_install()
develop.run(self)
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=EMAIL,
description=DESCRIPTION,
license=LICENSE,
keywords=KEYWORDS,
url=URL,
scripts=['git-scan/git-scan'],
long_description=read('README.md'),
long_description_content_type='text/markdown',
install_requires=REQUIRED,
cmdclass={
'develop': PostDevelopCommand,
'install': PostInstallCommand,
},
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Operating System :: POSIX',
'Operating System :: MacOS',
'Development Status :: 3 - Alpha',
'Topic :: Software Development :: Version Control',
'Topic :: Software Development :: Version Control :: Git',
],
)
|
import sys
import inspect
import pytest
from botorum.servicecatalog.models.portfolio import Portfolio
from botorum.servicecatalog.models.tagoption import TagOption
@pytest.fixture(scope="module")
def tagoption_config():
return {
'Key': 'test-portfolio-tagoption',
'Value': 'arbitrary'
}
@pytest.fixture(scope="module")
def portfolio_config():
return {
'DisplayName': 'arbitrary',
'Description': 'arbitrary',
'ProviderName': 'arbitrary',
'Tags': [
{
'Key': 'arbitrary',
'Value': 'arbitrary'
},
]
}
def test_classes_exist():
assert inspect.isclass(Portfolio)
@pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6")
def test_001_methods_exist():
assert inspect.isfunction(Portfolio.__init__)
assert inspect.isfunction(Portfolio.__getattr__)
assert inspect.isfunction(Portfolio.__eq__)
assert inspect.isfunction(Portfolio.__ne__)
assert inspect.isfunction(Portfolio.__str__)
assert inspect.isfunction(Portfolio.__unicode__)
assert isinstance(Portfolio.client, property)
assert isinstance(Portfolio.tag_options, property)
assert inspect.isfunction(Portfolio._set_attrs)
assert inspect.isfunction(Portfolio._flatten_object_details)
assert inspect.isgeneratorfunction(Portfolio.list)
assert inspect.ismethod(Portfolio.create)
assert inspect.ismethod(Portfolio.get)
assert inspect.ismethod(Portfolio.search)
assert inspect.isfunction(Portfolio.update)
assert inspect.isfunction(Portfolio.delete)
assert inspect.isfunction(Portfolio.add_tag_option)
assert inspect.isfunction(Portfolio.remove_tag_option)
def test_002_list_generator(portfolio_config):
Portfolio.create(**portfolio_config)
all_portfolios = [item for item in Portfolio.list()]
assert len(all_portfolios) > 0
assert isinstance(all_portfolios[0], Portfolio)
def test_002a_search(portfolio_config):
search_term = portfolio_config['DisplayName']
search_attr = 'DisplayName'
results = Portfolio.search(search_attr, [search_term])
assert len(results) >= 1
assert results[0].display_name == search_term
def test_003_instance_creation(portfolio_config):
test_portfolio = Portfolio.create(**portfolio_config)
assert str(test_portfolio) == str(test_portfolio.id)
assert test_portfolio.__unicode__() == str(test_portfolio.id)
def test_004_instance_attributes(portfolio_config):
test_portfolio = Portfolio.create(**portfolio_config)
assert test_portfolio.Id == test_portfolio.id
assert test_portfolio.ARN == test_portfolio.arn
assert test_portfolio.CreatedTime == test_portfolio.created_time
assert test_portfolio.DisplayName == test_portfolio.display_name
assert test_portfolio.Description == test_portfolio.description
assert test_portfolio.ProviderName == test_portfolio.provider_name
assert test_portfolio.DisplayName == portfolio_config['DisplayName']
assert test_portfolio.Description == portfolio_config['Description']
assert test_portfolio.ProviderName == portfolio_config['ProviderName']
with pytest.raises(AttributeError):
assert test_portfolio.ArbitraryAttr
tag_list = portfolio_config['Tags']
assert test_portfolio.Tags == {x['Key']: x['Value'] for x in tag_list}
def test_005_instance_load(portfolio_config):
test_portfolio = Portfolio.create(**portfolio_config)
arbitrary_portfolio = Portfolio.get(test_portfolio.Id)
assert test_portfolio is not arbitrary_portfolio
assert test_portfolio == arbitrary_portfolio
assert not test_portfolio != arbitrary_portfolio
def test_006_instance_update(portfolio_config):
test_portfolio = Portfolio.create(**portfolio_config)
assert test_portfolio.display_name == portfolio_config['DisplayName']
assert test_portfolio.description == portfolio_config['Description']
assert test_portfolio.provider_name == portfolio_config['ProviderName']
assert test_portfolio.tags == {x['Key']: x['Value'] for x in portfolio_config['Tags']}
update_params = {
"DisplayName": "NewName",
"Description": "NewDescription",
"ProviderName": "NewProvider",
"AddTags": [
{
'Key': 'example',
'Value': 'example'
},
],
"RemoveTags": [
"arbitrary"
]
}
test_portfolio.update(**update_params)
assert test_portfolio.display_name == update_params["DisplayName"]
assert test_portfolio.description == update_params["Description"]
assert test_portfolio.provider_name == update_params["ProviderName"]
assert test_portfolio.tags == {x['Key']: x['Value'] for x in update_params['AddTags']}
def test_007_add_tagoption(portfolio_config, tagoption_config):
test_portfolio = Portfolio.create(**portfolio_config)
test_tagoption = TagOption.get_or_create(**tagoption_config)
test_portfolio.add_tag_option(test_tagoption)
test_portfolio.add_tag_option(test_tagoption)
assert len(test_portfolio.tag_options) == 1
assert test_portfolio.get_tag_option(test_tagoption.key)
with pytest.raises(LookupError):
assert test_portfolio.get_tag_option('NonExistantTagOptionKey')
def test_008_remove_tagoption(portfolio_config, tagoption_config):
test_portfolio = Portfolio.create(**portfolio_config)
test_tagoption = TagOption.get_or_create(**tagoption_config)
test_portfolio.add_tag_option(test_tagoption)
assert len(test_portfolio.tag_options) == 1
test_portfolio.remove_tag_option(test_tagoption)
test_portfolio.remove_tag_option(test_tagoption)
assert len(test_portfolio.tag_options) == 0
def test_999_teardown():
for p in Portfolio.list():
portfolio = Portfolio(id=p.Id)
portfolio.delete()
assert len([item for item in Portfolio.list()]) == 0
|
#!/usr/bin/env python3
# coding: utf-8
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# Copyright (C) 2015-2020 European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"test suite for OpenCL code"
__author__ = "Jérôme Kieffer"
__contact__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "20/01/2021"
import unittest
import os
import time
import fabio
import numpy
import logging
import shutil
import platform
logger = logging.getLogger(__name__)
from .. import ocl
if ocl is not None:
from .. import pyopencl, read_cl_file
import pyopencl.array
from ... import load
from ...test import utilstest
from ... import load_integrators
from ...method_registry import IntegrationMethod
from ...test.utilstest import test_options
from ...utils import mathutil
from ...utils.decorators import depreclog
@unittest.skipIf(test_options.opencl is False, "User request to skip OpenCL tests")
@unittest.skipIf(ocl is None, "OpenCL is not available")
class TestMask(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tmp_dir = os.path.join(test_options.tempdir, "opencl")
if not os.path.isdir(cls.tmp_dir):
os.makedirs(cls.tmp_dir)
cls.N = 500
cls.datasets = [{"img": test_options.getimage("Pilatus1M.edf"),
"poni": test_options.getimage("Pilatus1M.poni"),
"spline": None},
# {"img": test_options.getimage("halfccd.edf"),
# "poni": test_options.getimage("halfccd.poni"),
# "spline": test_options.getimage("halfccd.spline")},
# {"img": test_options.getimage("Frelon2k.edf"),
# "poni": test_options.getimage("Frelon2k.poni"),
# "spline": test_options.getimage("frelon.spline")},
# {"img": test_options.getimage("Pilatus6M.cbf"),
# "poni": test_options.getimage("Pilatus6M.poni"),
# "spline": None},
]
for ds in cls.datasets:
if ds["spline"] is not None:
with open(ds["poni"], "r") as ponifile:
data = ponifile.read()
# spline = os.path.basename(ds["spline"])
with open(ds["poni"]) as f:
data = []
for line in f:
if line.startswith("SplineFile:"):
data.append("SplineFile: " + ds["spline"])
else:
data.append(line.strip())
ds["poni"] = os.path.join(cls.tmp_dir, os.path.basename(ds["poni"]))
with open(ds["poni"], "w") as f:
f.write(os.linesep.join(data))
@classmethod
def tearDownClass(cls):
super(TestMask, cls).tearDownClass()
shutil.rmtree(cls.tmp_dir)
cls.tmp_dir = cls.N = cls.datasets = None
@unittest.skipIf(test_options.low_mem, "test using >200M")
def test_histogram(self):
logger.info("Testing histogram-based algorithm (forward-integration)")
ids = ocl.select_device("ALL", extensions=["cl_khr_int64_base_atomics"], memory=1e8)
to_test = [v for k, v in IntegrationMethod._registry.items() if k.target == ids and k.split == "no" and k.algo == "histogram" and k.dim == 1]
for ds in self.datasets:
ai = load(ds["poni"])
data = fabio.open(ds["img"]).data
ref = ai.integrate1d_ng(data, self.N, method=("no", "histogram", "cython"), unit="2th_deg")
for method in to_test:
res = ai.integrate1d_ng(data, self.N, method=method, unit="2th_deg")
r = mathutil.rwp(ref, res)
logger.info(f"OpenCL {method} has R={r} (vs cython) for dataset {ds}")
self.assertLess(r, 3, "Rwp=%.3f for OpenCL histogram processing of %s" % (r, ds))
@unittest.skipIf(test_options.low_mem, "test using >500M")
def test_OpenCL_sparse(self):
logger.info("Testing LUT-based algorithm (backward-integration)")
ids = ocl.select_device("ALL", best=True, memory=1e8)
to_test = [v for k, v in IntegrationMethod._registry.items() if k.target == ids and k.split == "bbox" and k.algo in ("lut", "csr") and k.dim == 1]
for ds in self.datasets:
ai = load(ds["poni"])
data = fabio.open(ds["img"]).data
ref = ai.integrate1d_ng(data, self.N, method=("bbox", "histogram", "cython"), unit="2th_deg")
for method in to_test:
res = ai.integrate1d_ng(data, self.N, method=method, unit="2th_deg")
r = mathutil.rwp(ref, res)
logger.info(f"OpenCL {method} has R={r} (vs cython) for dataset {ds}")
self.assertLess(r, 3, "Rwp=%.3f for OpenCL histogram processing of %s" % (r, ds))
@unittest.skipIf(test_options.low_mem, "test using >200M")
def test_OpenCL_sigma_clip(self):
logger.info("Testing OpenCL sigma-clipping")
ids = ocl.select_device("ALL", best=True, memory=1e8)
# print(ids)
to_test = [v for k, v in IntegrationMethod._registry.items() if k.target == ids and k.split == "no" and k.algo == "csr" and k.dim == 1]
N = 100
# print(to_test)
for ds in self.datasets:
ai = load(ds["poni"])
data = fabio.open(ds["img"]).data
ref = ai.integrate1d_ng(data, N, method=("no", "histogram", "cython"), unit="2th_deg")
for method in to_test:
# print(method)
try:
res = ai.sigma_clip_ng(data, N, method=method, unit="2th_deg")
except (pyopencl.MemoryError, MemoryError, pyopencl.RuntimeError, RuntimeError) as error:
logger.warning("Memory error on %s dataset %s: %s%s. Converted into Warning: device may not have enough memory.", method, os.path.basename(ds["img"]), os.linesep, error)
break
else:
# This is not really a precise test.
r = mathutil.rwp(ref, res)
logger.info("OpenCL sigma clipping has R= %.3f for dataset %s", r, ds)
# print(r)
self.assertLess(r, 10, "Rwp=%.3f for OpenCL CSR processing of %s" % (r, ds))
@unittest.skipIf(test_options.opencl is False, "User request to skip OpenCL tests")
@unittest.skipIf(ocl is None, "OpenCL is not available")
class TestSort(unittest.TestCase):
"""
Test the kernels for vector and image sorting
"""
@classmethod
def setUpClass(cls):
super(TestSort, cls).setUpClass()
cls.N = 1024
cls.ws = cls.N // 8
cls.h_data = numpy.random.random(cls.N).astype("float32")
cls.h2_data = numpy.random.random((cls.N, cls.N)).astype("float32").reshape((cls.N, cls.N))
cls.ctx = ocl.create_context(devicetype="GPU")
device = cls.ctx.devices[0]
try:
devtype = pyopencl.device_type.to_string(device.type).upper()
except ValueError:
# pocl does not describe itself as a CPU !
devtype = "CPU"
workgroup = device.max_work_group_size
if (devtype == "CPU") and (device.platform.vendor == "Apple"):
logger.info("For Apple's OpenCL on CPU: enforce max_work_goup_size=1")
workgroup = 1
cls.ws = min(workgroup, cls.ws)
cls.queue = pyopencl.CommandQueue(cls.ctx, properties=pyopencl.command_queue_properties.PROFILING_ENABLE)
cls.local_mem = pyopencl.LocalMemory(cls.ws * 32) # 2float4 = 2*4*4 bytes per workgroup size
src = read_cl_file("pyfai:openCL/bitonic.cl")
cls.prg = pyopencl.Program(cls.ctx, src).build()
@classmethod
def tearDownClass(cls):
super(TestSort, cls).tearDownClass()
cls.h_data = None
cls.queue = None
cls.ctx = None
cls.local_mem = None
cls.h2_data = None
@staticmethod
def extra_skip(ctx):
"This is a known buggy configuration"
device = ctx.devices[0]
if ("apple" in device.platform.name.lower() and
"cpu" in pyopencl.device_type.to_string(device.type).lower()):
logger.info("Apple CPU driver spotted, skipping")
return True
if ("portable" in device.platform.name.lower() and
"cpu" in pyopencl.device_type.to_string(device.type).lower()):
logger.info("PoCL CPU driver spotted, skipping")
return True
return False
def test_reference_book(self):
if self.extra_skip(self.ctx):
self.skipTest("known buggy configuration")
d_data = pyopencl.array.to_device(self.queue, self.h_data)
t0 = time.perf_counter()
hs_data = numpy.sort(self.h_data)
t1 = time.perf_counter()
time_sort = 1e3 * (t1 - t0)
evt = self.prg.bsort_book(self.queue, (self.ws,), (self.ws,), d_data.data, self.local_mem)
evt.wait()
err = abs(hs_data - d_data.get()).max()
logger.info("test_reference_book")
logger.info("Numpy sort on %s element took %s ms", self.N, time_sort)
logger.info("Reference sort time: %s ms, err=%s ", 1e-6 * (evt.profile.end - evt.profile.start), err)
# this test works under linux:
if platform.system() == "Linux":
self.assertTrue(err == 0.0)
else:
logger.warning("Measured error on %s is %s", platform.system(), err)
def test_reference_file(self):
if self.extra_skip(self.ctx):
self.skipTest("known buggy configuration")
d_data = pyopencl.array.to_device(self.queue, self.h_data)
t0 = time.perf_counter()
hs_data = numpy.sort(self.h_data)
t1 = time.perf_counter()
time_sort = 1e3 * (t1 - t0)
evt = self.prg.bsort_file(self.queue, (self.ws,), (self.ws,), d_data.data, self.local_mem)
evt.wait()
err = abs(hs_data - d_data.get()).max()
logger.info("test_reference_file")
logger.info("Numpy sort on %s element took %s ms", self.N, time_sort)
logger.info("Reference sort time: %s ms, err=%s", 1e-6 * (evt.profile.end - evt.profile.start), err)
# this test works anywhere !
self.assertEqual(err, 0.0)
def test_sort_all(self):
if self.extra_skip(self.ctx):
self.skipTest("known buggy configuration")
d_data = pyopencl.array.to_device(self.queue, self.h_data)
t0 = time.perf_counter()
hs_data = numpy.sort(self.h_data)
t1 = time.perf_counter()
time_sort = 1e3 * (t1 - t0)
evt = self.prg.bsort_all(self.queue, (self.ws,), (self.ws,), d_data.data, self.local_mem)
evt.wait()
err = abs(hs_data - d_data.get()).max()
logger.info("test_sort_all")
logger.info("Numpy sort on %s element took %s ms", self.N, time_sort)
logger.info("modified function execution time: %s ms, err=%s", 1e-6 * (evt.profile.end - evt.profile.start), err)
self.assertEqual(err, 0.0)
def test_sort_horizontal(self):
if self.extra_skip(self.ctx):
self.skipTest("known buggy configuration")
d2_data = pyopencl.array.to_device(self.queue, self.h2_data)
t0 = time.perf_counter()
h2s_data = numpy.sort(self.h2_data, axis=-1)
t1 = time.perf_counter()
time_sort = 1e3 * (t1 - t0)
evt = self.prg.bsort_horizontal(self.queue, (self.N, self.ws), (1, self.ws), d2_data.data, self.local_mem)
evt.wait()
err = abs(h2s_data - d2_data.get()).max()
logger.info("Numpy horizontal sort on %sx%s elements took %s ms", self.N, self.N, time_sort)
logger.info("Horizontal execution time: %s ms, err=%s", 1e-6 * (evt.profile.end - evt.profile.start), err)
self.assertEqual(err, 0.0)
def test_sort_vertical(self):
if self.extra_skip(self.ctx):
self.skipTest("known buggy configuration")
d2_data = pyopencl.array.to_device(self.queue, self.h2_data)
t0 = time.perf_counter()
h2s_data = numpy.sort(self.h2_data, axis=0)
t1 = time.perf_counter()
time_sort = 1e3 * (t1 - t0)
evt = self.prg.bsort_vertical(self.queue, (self.ws, self.N), (self.ws, 1), d2_data.data, self.local_mem)
evt.wait()
err = abs(h2s_data - d2_data.get()).max()
logger.info("Numpy vertical sort on %sx%s elements took %s ms", self.N, self.N, time_sort)
logger.info("Vertical execution time: %s ms, err=%s ", 1e-6 * (evt.profile.end - evt.profile.start), err)
self.assertEqual(err, 0.0)
@unittest.skipIf(test_options.opencl is False, "User request to skip OpenCL tests")
@unittest.skipIf(ocl is None, "OpenCL is not available")
class TestKahan(unittest.TestCase):
"""
Test the kernels for compensated math in OpenCL
"""
@classmethod
def setUpClass(cls):
super(TestKahan, cls).setUpClass()
cls.ctx = ocl.create_context()
cls.queue = pyopencl.CommandQueue(cls.ctx, properties=pyopencl.command_queue_properties.PROFILING_ENABLE)
# this is running 32 bits OpenCL with POCL
if (platform.machine() in ("i386", "i686", "x86_64") and (tuple.__itemsize__ == 4) and
cls.ctx.devices[0].platform.name == 'Portable Computing Language'):
cls.args = "-DX87_VOLATILE=volatile"
else:
cls.args = ""
@classmethod
def tearDownClass(cls):
cls.queue = None
cls.ctx = None
@staticmethod
def dummy_sum(ary, dtype=None):
"perform the actual sum in a dummy way "
if dtype is None:
dtype = ary.dtype.type
sum_ = dtype(0)
for i in ary:
sum_ += i
return sum_
def test_kahan(self):
# simple test
N = 26
data = (1 << (N - 1 - numpy.arange(N))).astype(numpy.float32)
ref64 = numpy.sum(data, dtype=numpy.float64)
ref32 = self.dummy_sum(data)
if (ref64 == ref32):
logger.warning("Kahan: invalid tests as float32 provides the same result as float64")
# Dummy kernel to evaluate
src = """
kernel void summation(global float* data,
int size,
global float* result)
{
float2 acc = (float2)(0.0f, 0.0f);
for (int i=0; i<size; i++)
{
acc = kahan_sum(acc, data[i]);
}
result[0] = acc.s0;
result[1] = acc.s1;
}
"""
prg = pyopencl.Program(self.ctx, read_cl_file("pyfai:openCL/kahan.cl") + src).build(self.args)
ones_d = pyopencl.array.to_device(self.queue, data)
res_d = pyopencl.array.zeros(self.queue, 2, numpy.float32)
evt = prg.summation(self.queue, (1,), (1,), ones_d.data, numpy.int32(N), res_d.data)
evt.wait()
res = res_d.get().sum(dtype=numpy.float64)
self.assertEqual(ref64, res, "test_kahan")
def test_dot16(self):
# simple test
N = 16
data = (1 << (N - 1 - numpy.arange(N))).astype(numpy.float32)
ref64 = numpy.dot(data.astype(numpy.float64), data.astype(numpy.float64))
ref32 = numpy.dot(data, data)
if (ref64 == ref32):
logger.warning("dot16: invalid tests as float32 provides the same result as float64")
# Dummy kernel to evaluate
src = """
kernel void test_dot16(global float* data,
int size,
global float* result)
{
float2 acc = (float2)(0.0f, 0.0f);
float16 data16 = (float16) (data[0],data[1],data[2],data[3],data[4],
data[5],data[6],data[7],data[8],data[9],
data[10],data[11],data[12],data[13],data[14],data[15]);
acc = comp_dot16(data16, data16);
result[0] = acc.s0;
result[1] = acc.s1;
}
kernel void test_dot8(global float* data,
int size,
global float* result)
{
float2 acc = (float2)(0.0f, 0.0f);
float8 data0 = (float8) (data[0],data[2],data[4],data[6],data[8],data[10],data[12],data[14]);
float8 data1 = (float8) (data[1],data[3],data[5],data[7],data[9],data[11],data[13],data[15]);
acc = comp_dot8(data0, data1);
result[0] = acc.s0;
result[1] = acc.s1;
}
kernel void test_dot4(global float* data,
int size,
global float* result)
{
float2 acc = (float2)(0.0f, 0.0f);
float4 data0 = (float4) (data[0],data[4],data[8],data[12]);
float4 data1 = (float4) (data[3],data[7],data[11],data[15]);
acc = comp_dot4(data0, data1);
result[0] = acc.s0;
result[1] = acc.s1;
}
kernel void test_dot3(global float* data,
int size,
global float* result)
{
float2 acc = (float2)(0.0f, 0.0f);
float3 data0 = (float3) (data[0],data[4],data[12]);
float3 data1 = (float3) (data[3],data[11],data[15]);
acc = comp_dot3(data0, data1);
result[0] = acc.s0;
result[1] = acc.s1;
}
kernel void test_dot2(global float* data,
int size,
global float* result)
{
float2 acc = (float2)(0.0f, 0.0f);
float2 data0 = (float2) (data[0],data[14]);
float2 data1 = (float2) (data[1],data[15]);
acc = comp_dot2(data0, data1);
result[0] = acc.s0;
result[1] = acc.s1;
}
"""
prg = pyopencl.Program(self.ctx, read_cl_file("pyfai:openCL/kahan.cl") + src).build(self.args)
ones_d = pyopencl.array.to_device(self.queue, data)
res_d = pyopencl.array.zeros(self.queue, 2, numpy.float32)
evt = prg.test_dot16(self.queue, (1,), (1,), ones_d.data, numpy.int32(N), res_d.data)
evt.wait()
res = res_d.get().sum(dtype="float64")
self.assertEqual(ref64, res, "test_dot16")
res_d.fill(0)
data0 = data[0::2]
data1 = data[1::2]
ref64 = numpy.dot(data0.astype(numpy.float64), data1.astype(numpy.float64))
ref32 = numpy.dot(data0, data1)
if (ref64 == ref32):
logger.warning("dot8: invalid tests as float32 provides the same result as float64")
evt = prg.test_dot8(self.queue, (1,), (1,), ones_d.data, numpy.int32(N), res_d.data)
evt.wait()
res = res_d.get().sum(dtype="float64")
self.assertEqual(ref64, res, "test_dot8")
res_d.fill(0)
data0 = data[0::4]
data1 = data[3::4]
ref64 = numpy.dot(data0.astype(numpy.float64), data1.astype(numpy.float64))
ref32 = numpy.dot(data0, data1)
if (ref64 == ref32):
logger.warning("dot4: invalid tests as float32 provides the same result as float64")
evt = prg.test_dot4(self.queue, (1,), (1,), ones_d.data, numpy.int32(N), res_d.data)
evt.wait()
res = res_d.get().sum(dtype="float64")
self.assertEqual(ref64, res, "test_dot4")
res_d.fill(0)
data0 = numpy.array([data[0], data[4], data[12]])
data1 = numpy.array([data[3], data[11], data[15]])
ref64 = numpy.dot(data0.astype(numpy.float64), data1.astype(numpy.float64))
ref32 = numpy.dot(data0, data1)
if (ref64 == ref32):
logger.warning("dot3: invalid tests as float32 provides the same result as float64")
evt = prg.test_dot3(self.queue, (1,), (1,), ones_d.data, numpy.int32(N), res_d.data)
evt.wait()
res = res_d.get().sum(dtype="float64")
self.assertEqual(ref64, res, "test_dot3")
res_d.fill(0)
data0 = numpy.array([data[0], data[14]])
data1 = numpy.array([data[1], data[15]])
ref64 = numpy.dot(data0.astype(numpy.float64), data1.astype(numpy.float64))
ref32 = numpy.dot(data0, data1)
if (ref64 == ref32):
logger.warning("dot2: invalid tests as float32 provides the same result as float64")
evt = prg.test_dot2(self.queue, (1,), (1,), ones_d.data, numpy.int32(N), res_d.data)
evt.wait()
res = res_d.get().sum(dtype="float64")
self.assertEqual(ref64, res, "test_dot2")
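# For reference, a plain-Python sketch of the compensated (Kahan) summation that
# the kahan.cl kernels implement: rounding is forced to float32 at each step so
# the compensation term `c` captures the low-order bits a naive sum would drop.
def _kahan_sum_reference(values):
    total = numpy.float32(0.0)
    c = numpy.float32(0.0)
    for v in numpy.asarray(values, dtype=numpy.float32):
        y = numpy.float32(v - c)
        t = numpy.float32(total + y)
        c = numpy.float32((t - total) - y)
        total = t
    return total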
def suite():
testsuite = unittest.TestSuite()
loader = unittest.defaultTestLoader.loadTestsFromTestCase
testsuite.addTest(loader(TestMask))
testsuite.addTest(loader(TestSort))
testsuite.addTest(loader(TestKahan))
return testsuite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
|
"""
The :mod:`sklearn.model_selection._validation` module includes classes and
functions to validate the model.
"""
#
# Authors of sklearn.model_selection._validation:
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Olivier Grisel <[email protected]>
# Raghav RV <[email protected]>
# Author: Marcell Stippinger 2018
# License: BSD 3 clause
#
# Notes:
# This module was based on sklearn.model_selection._validation
# We extend its functionality by allowing different (but identically shaped)
# data to be used for testing. It is useful when training and test data have
# related samples, e.g. when testing morning vs afternoon sessions over the
# same period of the year.
# We found it useful to move the parallel loop out of cross_validate because
# it allows more efficient parallelization if there are only a few cv steps
# but several estimators, data sets or feature selections.
# We also implemented nested cross-validation similar to the one found in
# LogisticRegressionCV. This allows to test models which have many hyper-
# parameters and the user needs independent validation and test results.
# Validation in this case is transparent and requires post-processing, i.e.,
# all results for all possible hyperparameters are reported, the best is not
# selected here.
# Ref for nested cross-validation:
# https://stats.stackexchange.com/questions/65128/nested-cross-validation-for-model-selection
# https://sites.google.com/site/dtclabdcv/
# https://www.researchgate.net/post/What_is_double_cross_validation
# https://www.r-project.org/conferences/useR-2006/Abstracts/Francois+Langrognet.pdf
from __future__ import print_function
from __future__ import division
import warnings
import numbers
import time
import numpy as np
import scipy.sparse as sp
from sklearn.base import is_classifier, clone
from sklearn.utils import indexable, check_random_state, safe_indexing
from sklearn.utils.deprecation import DeprecationDict
from sklearn.utils.validation import _is_arraylike, _num_samples
from sklearn.utils.metaestimators import _safe_split
from sklearn.externals.joblib import Parallel, delayed, logger
from sklearn.externals.six.moves import zip
from sklearn.metrics.scorer import check_scoring, _check_multimetric_scoring
from sklearn.exceptions import FitFailedWarning
from sklearn.model_selection._split import check_cv
from sklearn.preprocessing import LabelEncoder
# own
from sklearn.utils.validation import check_consistent_length
from itertools import product as iter_product
from sklearn.exceptions import DataConversionWarning
try:
from .timeout_decorator import _Timeout
except (ModuleNotFoundError, ImportError):
def _Timeout(fun, timeout_exception, exception_message, limit):
del timeout_exception, exception_message, limit
return fun
__all__ = ['cross_validate', 'cross_val_score', 'cross_val_predict', 'nested_cross_validate',
'concatenate_score_dicts']
def cross_validate(estimator, X, y=None, groups=None, scoring=None, cv=None,
X_for_test=None, n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs', return_train_score="warn"):
"""Evaluate metric(s) by cross-validation and also record fit/score times.
Read more in the :ref:`User Guide <multimetric_cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be for example a list, or an array.
X_for_test : array-like
The data to test. When omitted, X is used. It is assumed that
the samples in X are related pairwisely to these samples.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable, list/tuple, dict or None, default: None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
If None, the estimator's default scorer (if available) is used.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
return_train_score : boolean, optional
Whether to include train scores.
Current default is ``'warn'``, which behaves as ``True`` in addition
to raising a warning when a training score is looked up.
That default will be changed to ``False`` in 0.21.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
Returns
-------
scores : dict of float arrays of shape=(n_splits,)
Array of scores of the estimator for each run of the cross validation.
A dict of arrays containing the score/time arrays for each scorer is
returned. The possible keys for this ``dict`` are:
``test_score``
The score array for test scores on each cv split.
``train_score``
The score array for train scores on each cv split.
This is available only if ``return_train_score`` parameter
is ``True``.
``fit_time``
The time for fitting the estimator on the train
set for each cv split.
``score_time``
The time for scoring the estimator on the test set for each
cv split. (Note time for scoring on the train set is not
            included even if ``return_train_score`` is set to ``True``.)
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_validate
>>> from sklearn.metrics.scorer import make_scorer
>>> from sklearn.metrics import confusion_matrix
>>> from sklearn.svm import LinearSVC
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
Single metric evaluation using ``cross_validate``
>>> cv_results = cross_validate(lasso, X, y, return_train_score=False)
>>> sorted(cv_results.keys()) # doctest: +ELLIPSIS
['fit_time', 'score_time', 'test_score']
>>> cv_results['test_score'] # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    array([ 0.33..., 0.08..., 0.03...])
    Multiple metric evaluation using ``cross_validate``
    (please refer the ``scoring`` parameter doc for more information)
    >>> scores = cross_validate(lasso, X, y,
    ...                         scoring=('r2', 'neg_mean_squared_error'))
    >>> print(scores['test_neg_mean_squared_error']) # doctest: +ELLIPSIS
    [-3635.5... -3573.3... -6114.7...]
    >>> print(scores['train_r2']) # doctest: +ELLIPSIS
    [ 0.28... 0.39... 0.22...]
See Also
---------
:func:`sklearn.model_selection.cross_val_score`:
Run cross-validation for single metric evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y, groups = indexable(X, y, groups)
if X_for_test is None:
X_for_test = X
else:
X_for_test, y = indexable(X_for_test, y)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorers, _ = _check_multimetric_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(
delayed(_fit_and_score)(
clone(estimator), X, X_for_test, y, scorers, train, test, verbose, None,
fit_params, return_train_score=return_train_score,
return_times=True)
for train, test in cv.split(X, y, groups))
if return_train_score:
train_scores, test_scores, fit_times, score_times = zip(*scores)
train_scores = _aggregate_score_dicts(train_scores)
else:
test_scores, fit_times, score_times = zip(*scores)
test_scores = _aggregate_score_dicts(test_scores)
# TODO: replace by a dict in 0.21
ret = DeprecationDict() if return_train_score == 'warn' else {}
ret['fit_time'] = np.array(fit_times)
ret['score_time'] = np.array(score_times)
for name in scorers:
ret['test_%s' % name] = np.array(test_scores[name])
if return_train_score:
key = 'train_%s' % name
ret[key] = np.array(train_scores[name])
if return_train_score == 'warn':
message = (
'You are accessing a training score ({!r}), '
'which will not be available by default '
'any more in 0.21. If you need training scores, '
'please set return_train_score=True').format(key)
# warn on key access
ret.add_warning(key, message, FutureWarning)
return ret
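# A minimal usage sketch of the X_for_test extension described in the module
# notes (hypothetical paired arrays X_morning / X_afternoon of identical shape):
#
#   from sklearn.linear_model import LogisticRegression
#   scores = cross_validate(LogisticRegression(), X_morning, y,
#                           X_for_test=X_afternoon, cv=5,
#                           scoring=('accuracy',), return_train_score=False)
#   scores['test_accuracy']   # trained on morning samples, scored on the
#                             # paired afternoon samples of each test fold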
def nested_cross_validate(estimator, X, y=None, groups=None, scoring=None, cv=None,
X_for_test=None, n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs', return_train_score=True):
"""Evaluate metric(s) by cross-validation and also record fit/score times.
Read more in the :ref:`User Guide <multimetric_cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be for example a list, or an array.
X_for_test : array-like
The data to test. When omitted, X is used. It is assumed that
the samples in X are related pairwisely to these samples.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable, list/tuple, dict or None, default: None
A single string (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
If None, the estimator's default scorer (if available) is used.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : dict of float arrays of shape=(n_splits,)
Array of scores of the estimator for each run of the cross validation.
A dict of arrays containing the score/time arrays for each scorer is
returned. The possible keys for this ``dict`` are:
``test_score``
The score array for test scores on each cv split.
            ``validation_score``
                The score array of validation scores (mean over the inner-CV
                test folds) for each outer cv split.
            ``calibration_score``
                The score array of calibration scores (mean over the inner-CV
                train folds) for each outer cv split.
``fit_time``
The time for fitting the estimator on the train
set for each cv split.
``score_time``
The time for scoring the estimator on the test set for each
                cv split. (Note that the time for scoring on the train set is
                not included even if ``return_train_score`` is set to ``True``.)
See Also
---------
:func:`sklearn.model_selection.cross_val_score`:
Run cross-validation for single metric evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
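    Examples
    --------
    A minimal usage sketch, mirroring the ``cross_val_score`` example below
    (the estimator, data and exact keys/values shown are illustrative only):
    >>> from sklearn import datasets, linear_model
    >>> diabetes = datasets.load_diabetes()
    >>> X, y = diabetes.data[:150], diabetes.target[:150]
    >>> lasso = linear_model.Lasso()
    >>> results = nested_cross_validate(lasso, X, y, cv=3)    # doctest: +SKIP
    >>> sorted(results.keys())                                # doctest: +SKIP
    ['calibration_score', 'fit_time', 'score_time', 'test_score', 'validation_score']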
"""
X, y, groups = indexable(X, y, groups)
if X_for_test is None:
X_for_test = X
else:
X_for_test, y = indexable(X_for_test, y)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
scorers, _ = _check_multimetric_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(
delayed(_nested_fit_and_score)(
clone(estimator), X, X_for_test, y, groups, scorers, calibrate_validate, test, cv, verbose, None,
fit_params, return_train_score=True, return_times=True)
for calibrate_validate, test in cv.split(X, y, groups))
calibration_scores, validation_scores, test_scores, fit_times, score_times = zip(*scores)
validation_scores = _aggregate_score_dicts(validation_scores)
calibration_scores = _aggregate_score_dicts(calibration_scores)
test_scores = _aggregate_score_dicts(test_scores)
# TODO: replace by a dict in 0.21
ret = DeprecationDict() if return_train_score == 'warn' else {}
ret['fit_time'] = np.array(fit_times)
ret['score_time'] = np.array(score_times)
for name in scorers:
ret['test_%s' % name] = np.array(test_scores[name])
ret['validation_%s' % name] = np.array(validation_scores[name])
ret['calibration_%s' % name] = np.array(calibration_scores[name])
return ret
def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None,
X_for_test=None, n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be for example a list, or an array.
X_for_test : array-like
        The data to test. When omitted, X is used. It is assumed that
        the samples in X correspond pairwise (one-to-one) to these samples.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.model_selection.cross_validate`:
To run cross-validation on multiple metrics and also to return
train scores, fit times and score times.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
# To ensure multimetric format is not supported
scorer = check_scoring(estimator, scoring=scoring)
cv_results = cross_validate(estimator=estimator, X=X, y=y, groups=groups,
scoring={'score': scorer}, cv=cv,
return_train_score=False,
n_jobs=n_jobs, verbose=verbose,
fit_params=fit_params,
pre_dispatch=pre_dispatch)
return cv_results['test_score']
def _fit_and_score(estimator, X, X_for_test, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
X_for_test : array-like
        The data to test. When omitted, X is used. It is assumed that
        the samples in X correspond pairwise (one-to-one) to these samples.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / fn should have signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
return_n_test_samples : boolean, optional, default: False
Whether to return the ``n_test_samples``
return_times : boolean, optional, default: False
Whether to return the fit/score times.
Returns
-------
train_scores : dict of scorer name -> float, optional
Score on training set (for all the scorers),
returned only if `return_train_score` is `True`.
test_scores : dict of scorer name -> float, optional
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
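    Examples
    --------
    A hypothetical single-split call; ``clf``, ``X``, ``y``, ``train_idx`` and
    ``test_idx`` are placeholders and the returned numbers are illustrative:
    >>> from sklearn.metrics import get_scorer
    >>> scorers = {'acc': get_scorer('accuracy')}
    >>> _fit_and_score(clone(clf), X, X, y, scorers, train_idx, test_idx,
    ...                verbose=0, parameters=None, fit_params=None,
    ...                return_times=True)                     # doctest: +SKIP
    [{'acc': 0.9}, 1.2, 0.05]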
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
test_scores = {}
train_scores = {}
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X_for_test, y, test, train)
is_multimetric = not callable(scorer)
n_scorers = len(scorer.keys()) if is_multimetric else 1
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
if is_multimetric:
test_scores = dict(zip(scorer.keys(),
[error_score, ] * n_scorers))
if return_train_score:
train_scores = dict(zip(scorer.keys(),
[error_score, ] * n_scorers))
else:
test_scores = error_score
if return_train_score:
train_scores = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
fit_time = time.time() - start_time
# _score will return dict if is_multimetric is True
test_scores = _score(estimator, X_test, y_test, scorer, is_multimetric)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_scores = _score(estimator, X_train, y_train, scorer,
is_multimetric)
if verbose > 2:
if is_multimetric:
for scorer_name, score in test_scores.items():
msg += ", %s=%s" % (scorer_name, score)
else:
msg += ", score=%s" % test_scores
if verbose > 1:
total_time = score_time + fit_time
end_msg = "%s, total=%s" % (msg, logger.short_format_time(total_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_scores, test_scores] if return_train_score else [test_scores]
if return_n_test_samples:
ret.append(_num_samples(X_test))
if return_times:
ret.extend([fit_time, score_time])
if return_parameters:
ret.append(parameters)
return ret
def _nested_fit_and_score(estimator, X, X_for_test, y, groups, scorer, calibrate_validate, test, cv, verbose,
parameters, fit_params, return_train_score=True,
return_parameters=False, return_n_test_samples=False,
return_times=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
X_for_test : array-like
        The data to test. When omitted, X is used. It is assumed that
        the samples in X correspond pairwise (one-to-one) to these samples.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / fn should have signature
``scorer(estimator, X, y)``.
calibrate_validate : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
return_n_test_samples : boolean, optional, default: False
Whether to return the ``n_test_samples``
return_times : boolean, optional, default: False
Whether to return the fit/score times.
Returns
-------
    calibration_scores : dict of scorer name -> float, optional
        Score on the calibration folds (the inner-CV train folds), averaged
        over the inner splits (for all the scorers).
    validation_scores : dict of scorer name -> float, optional
        Score on the validation folds (the inner-CV test folds), averaged
        over the inner splits (for all the scorers).
test_scores : dict of scorer name -> float, optional
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
nested_estimator = clone(estimator)
results = _fit_and_score(estimator, X, X_for_test, y, scorer, calibrate_validate, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=return_parameters, return_n_test_samples=return_n_test_samples,
return_times=return_times, error_score=error_score)
X_ = safe_indexing(X, calibrate_validate)
y_ = safe_indexing(y, calibrate_validate)
g_ = groups if groups is None else safe_indexing(groups, calibrate_validate)
Xt_ = X_for_test if X_for_test is None else safe_indexing(X_for_test, calibrate_validate)
    # TODO: decide how X_for_test should be handled here
nested_scores = cross_validate(nested_estimator, X_, y=y_, groups=g_, scoring=scorer, cv=cv,
X_for_test=Xt_, n_jobs=1, verbose=0, fit_params=fit_params,
return_train_score=True)
calibration_scores = {k[6:]: np.nanmean(v) for k, v in nested_scores.items() if k.startswith('train_')}
validation_scores = {k[5:]: np.nanmean(v) for k, v in nested_scores.items() if k.startswith('test_')}
return [calibration_scores, validation_scores, *results]
def _score(estimator, X_test, y_test, scorer, is_multimetric=False):
"""Compute the score(s) of an estimator on a given test set.
Will return a single float if is_multimetric is False and a dict of floats,
if is_multimetric is True
"""
if is_multimetric:
return _multimetric_score(estimator, X_test, y_test, scorer)
else:
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) "
"instead. (scorer=%r)"
% (str(score), type(score), scorer))
return score
def _multimetric_score(estimator, X_test, y_test, scorers):
"""Return a dict of score for multimetric scoring"""
scores = {}
for name, scorer in scorers.items():
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
scores[name] = score
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) "
"instead. (scorer=%s)"
% (str(score), type(score), name))
return scores
def cross_val_predict(estimator, X, y=None, groups=None, cv=None,
X_for_test=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs',
method='predict'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
X_for_test : array-like
        The data to test. When omitted, X is used. It is assumed that
        the samples in X correspond pairwise (one-to-one) to these samples.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
method : string, optional, default: 'predict'
Invokes the passed method name of the passed estimator. For
method='predict_proba', the columns correspond to the classes
in sorted order.
Returns
-------
predictions : ndarray
This is the result of calling ``method``
Notes
-----
In the case that one or more classes are absent in a training portion, a
default score needs to be assigned to all instances for that class if
``method`` produces columns per class, as in {'decision_function',
'predict_proba', 'predict_log_proba'}. For ``predict_proba`` this value is
0. In order to ensure finite output, we approximate negative infinity by
the minimum finite float value for the dtype in other cases.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.model_selection import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y, groups = indexable(X, y, groups)
if X_for_test is None:
X_for_test = X
else:
X_for_test, y = indexable(X_for_test, y)
cv = check_cv(cv, y, classifier=is_classifier(estimator))
if method in ['decision_function', 'predict_proba', 'predict_log_proba']:
le = LabelEncoder()
y = le.fit_transform(y)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
prediction_blocks = parallel(delayed(_fit_and_predict)(
clone(estimator), X, X_for_test, y, train, test, verbose, fit_params, method)
for train, test in cv.split(X, X_for_test, y, groups))
# Concatenate the predictions
predictions = [pred_block_i for pred_block_i, _ in prediction_blocks]
test_indices = np.concatenate([indices_i
for _, indices_i in prediction_blocks])
if not _check_is_permutation(test_indices, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_test_indices = np.empty(len(test_indices), dtype=int)
inv_test_indices[test_indices] = np.arange(len(test_indices))
# Check for sparse predictions
if sp.issparse(predictions[0]):
predictions = sp.vstack(predictions, format=predictions[0].format)
else:
predictions = np.concatenate(predictions)
return predictions[inv_test_indices]
def _fit_and_predict(estimator, X, X_for_test, y, train, test, verbose, fit_params,
method):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
X_for_test : array-like
        The data to test. When omitted, X is used. It is assumed that
        the samples in X correspond pairwise (one-to-one) to these samples.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
method : string
Invokes the passed method name of the passed estimator.
Returns
-------
predictions : sequence
Result of calling 'estimator.method'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X_for_test, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
func = getattr(estimator, method)
predictions = func(X_test)
if method in ['decision_function', 'predict_proba', 'predict_log_proba']:
n_classes = len(set(y))
if n_classes != len(estimator.classes_):
recommendation = (
'To fix this, use a cross-validation '
'technique resulting in properly '
'stratified folds')
warnings.warn('Number of classes in training fold ({}) does '
'not match total number of classes ({}). '
'Results may not be appropriate for your use case. '
'{}'.format(len(estimator.classes_),
n_classes, recommendation),
RuntimeWarning)
if method == 'decision_function':
if (predictions.ndim == 2 and
predictions.shape[1] != len(estimator.classes_)):
# This handles the case when the shape of predictions
# does not match the number of classes used to train
# it with. This case is found when sklearn.svm.SVC is
# set to `decision_function_shape='ovo'`.
raise ValueError('Output shape {} of {} does not match '
'number of classes ({}) in fold. '
'Irregular decision_function outputs '
'are not currently supported by '
'cross_val_predict'.format(
predictions.shape, method,
len(estimator.classes_),
recommendation))
if len(estimator.classes_) <= 2:
# In this special case, `predictions` contains a 1D array.
raise ValueError('Only {} class/es in training fold, this '
'is not supported for decision_function '
'with imbalanced folds. {}'.format(
len(estimator.classes_),
recommendation))
float_min = np.finfo(predictions.dtype).min
default_values = {'decision_function': float_min,
'predict_log_proba': float_min,
'predict_proba': 0}
predictions_for_all_classes = np.full((_num_samples(predictions),
n_classes),
default_values[method])
predictions_for_all_classes[:, estimator.classes_] = predictions
predictions = predictions_for_all_classes
return predictions, test
def _check_is_permutation(indices, n_samples):
"""Check whether indices is a reordering of the array np.arange(n_samples)
Parameters
----------
indices : ndarray
integer array to test
n_samples : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(indices) is np.arange(n)
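    Examples
    --------
    Small illustrations (values chosen for clarity):
    >>> _check_is_permutation(np.array([2, 0, 1]), 3)         # doctest: +SKIP
    True
    >>> _check_is_permutation(np.array([0, 0, 1]), 3)         # doctest: +SKIP
    False
    >>> _check_is_permutation(np.array([0, 1]), 3)            # doctest: +SKIP
    False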
"""
if len(indices) != n_samples:
return False
hit = np.zeros(n_samples, dtype=bool)
hit[indices] = True
if not np.all(hit):
return False
return True
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def _aggregate_score_dicts(scores):
"""Aggregate the list of dict to dict of np ndarray
The aggregated output of _fit_and_score will be a list of dict
    of form [{'prec': 0.1, 'acc': 1.0}, {'prec': 0.2, 'acc': 0.8}, ...].
    Convert it to a dict of arrays {'prec': np.array([0.1, 0.2, ...]), ...}.
Parameters
----------
scores : list of dict
List of dicts of the scores for all scorers. This is a flat list,
assumed originally to be of row major order.
Example
-------
    >>> scores = [{'a': 1, 'b': 10}, {'a': 2, 'b': 2}, {'a': 3, 'b': 3},
    ...           {'a': 10, 'b': 10}]                         # doctest: +SKIP
    >>> _aggregate_score_dicts(scores)                        # doctest: +SKIP
    {'a': array([ 1,  2,  3, 10]),
     'b': array([10,  2,  3, 10])}
    >>> scores = [{'a': [1, 11], 'b': 10}, {'a': [2, 22], 'b': 2}, {'a': [3, 33], 'b': 3},
    ...           {'a': [10, 100], 'b': 10}]                  # doctest: +SKIP
    >>> _aggregate_score_dicts(scores)                        # doctest: +SKIP
    {'a': array([[  1,  11],
           [  2,  22],
           [  3,  33],
           [ 10, 100]]),
     'b': array([10,  2,  3, 10])}
"""
out = {}
if len(scores):
for key in scores[0]:
out[key] = np.asarray([score[key] for score in scores])
return out
def concatenate_score_dicts(scores):
"""Concatenate the list of dict to dict of np ndarray
The aggregated output of _fit_and_score will be a list of dict
    of form [{'prec': 0.1, 'acc': 1.0}, {'prec': 0.1, 'acc': 1.0}, ...].
    Convert it to a dict of arrays {'prec': np.array([0.1, ...]), ...}.
Parameters
----------
scores : list of dict
List of dicts of the scores for all scorers. This is a flat list,
assumed originally to be of row major order.
Example
-------
    >>> scores = [{'a': [1, 11], 'b': 10}, {'a': [2, 22], 'b': 2}, {'a': [3, 33], 'b': 3},
    ...           {'a': [10, 100], 'b': 10}]                  # doctest: +SKIP
    >>> concatenate_score_dicts(scores)                       # doctest: +SKIP
    {'a': array([  1,  11,   2,  22,   3,  33,  10, 100]),
     'b': array([10,  2,  3, 10])}
"""
out = {}
for key in scores[0]:
out[key] = np.concatenate([np.atleast_1d(score[key]) for score in scores])
return out
def to_indexable(X):
"""Make indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-interable objects to arrays.
Parameters
----------
X : list, dataframe, array, sparse matrix
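    Examples
    --------
    Behaviour sketch (inputs are illustrative):
    >>> to_indexable([1, 2, 3])                               # doctest: +SKIP
    [1, 2, 3]
    >>> to_indexable(5)                                       # doctest: +SKIP
    array([5])
    >>> to_indexable(None) is None                            # doctest: +SKIP
    True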
"""
if sp.issparse(X):
return X.tocsr()
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
return X
elif X is None:
return X
else:
return np.array(X, ndmin=1)
def to_dict(X):
"""Make dict-like.
Parameters
----------
X : dict
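    Examples
    --------
    Behaviour sketch (inputs are illustrative):
    >>> to_dict({'a': 1})                                     # doctest: +SKIP
    {'a': 1}
    >>> to_dict(5)                                            # doctest: +SKIP
    {None: 5}
    >>> to_dict(None)                                         # doctest: +SKIP
    {None: None}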
"""
if hasattr(X, "items"):
return X
elif X is None:
return {None: None}
else:
return {None: X}
def dict_indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
    non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if hasattr(X, "items"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length([v for d in result for k, v in d.items()])
return result
def safe_features(X, indices):
    # Like safe_indexing in sklearn.utils, but indexes columns (axis 1) instead of rows.
    """Return columns (features) from X using indices.
    Allows simple column indexing of lists or arrays.
    Parameters
    ----------
    X : array-like, sparse-matrix, list, pandas.DataFrame, pandas.Series.
        Data from which to select columns or items.
    indices : array-like of int
        Indices of the columns (features) of X to select.
    Returns
    -------
    subset
        Subset of X on the second (feature) axis.
Notes
-----
CSR, CSC, and LIL sparse matrices are supported. COO sparse matrices are
not supported.
Plain python list of indices won't work with pandas.DataFrame as it does
not have flags, convert to numpy.ndarray instead.
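    Examples
    --------
    Selecting feature columns 0 and 2 from a small array (illustrative):
    >>> import numpy as np
    >>> X = np.arange(12).reshape(3, 4)
    >>> safe_features(X, np.array([0, 2]))                    # doctest: +SKIP
    array([[ 0,  2],
           [ 4,  6],
           [ 8, 10]])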
"""
if hasattr(X, "iloc"):
# Work-around for indexing with read-only indices in pandas
indices = indices if indices.flags.writeable else indices.copy()
# Pandas Dataframes and Series
try:
return X.iloc[:, indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[:, indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=1)
else:
return X[:, indices]
else:
return [[row[idx] for idx in indices] for row in X]
def _cross_validate_inner(est_key, train_key, test_key, feat, est, train, y, groups, scoring, cv, test, cv_params,
nested, on_failure, timeout):
"""
Inner loop for cross-validating a selected classifier.
Parameters
----------
est_key, train_key, test_key:
Identifiers to be passed through unchanged.
feat: array of bools or ordinals
Selected features
est: estimator object implementing ‘fit’
The object to use to fit the data.
train, test: array-like, shape (n_samples, n_features)
The data to fit and test, n_samples allowed to differ for train and test
y: array-like, shape (n_samples,)
The target variable to try to predict in the case of supervised learning.
groups: array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into train/test set.
scoring: string, callable, list/tuple, dict or None
See `cross_validate`.
cv: int, cross-validation generator or an iterable
Determines the cross-validation splitting strategy.
cv_params: dict
Arguments to pass to `cross_validate`.
nested: bool
Whether to perform nested cross-validation. Typically used in hyperparameter selection.
on_failure: str('ignore' | 'report' | 'warn' | 'raise'), default: 'raise'
How to handle cross-validation errors
timeout: float
        Timeout in seconds for the task to complete. No timeout is applied if None.
Returns
-------
est_key, train_key, test_key:
Just like the input
n_features: int,
len(feat)
feat: array-like
The feature selector used
result: dict of float arrays of shape=(n_splits,)
The output of `cross_validate` or `nested_cross_validate`
"""
# delay feature selection and cloning as it may require a lot of memory
train, test = safe_features(train, feat), (test if test is None else safe_features(test, feat))
cloned_est = clone(est)
try:
my_function = nested_cross_validate if nested else cross_validate
if timeout is not None:
my_function = _Timeout(my_function, TimeoutError, None, timeout)
result = my_function(cloned_est, train, y, groups, scoring, cv, test, **cv_params)
except Exception as e:
msg = 'Failure to cross-validate %s' % est
reason = 'For the following reason: %s: %s' % (type(e), e)
if on_failure == 'ignore':
pass
elif on_failure == 'report':
print(msg, reason)
elif on_failure == 'warn':
            warnings.warn(msg + '. ' + reason)
else:
raise ValueError(msg) from e
result = {}
return est_key, train_key, test_key, len(feat), feat, result
def _no_progress_bar(x, *args, **kwargs):
return x
def _default_feature_selection(n_feature):
return [np.arange(n_feature)]
def _first_of_dict(d):
return d[next(iter(d.keys()))]
class random_feature_selector():
def __init__(self, n_total, random_state):
self.random_state = check_random_state(random_state)
self.n_total = int(n_total)
def __call__(self, n_select):
return [self.random_state.choice(self.n_total, n_select, replace=False)]
def cross_validate_iterator(estimator, X, y=None, groups=None, scoring=None, cv=None, X_for_test=None,
feature_selection_generator=None, n_feature=0, how='product',
progress_bar=None, progress_params=None, n_jobs=1, verbose=0, fit_params=None,
pre_dispatch='2*n_jobs', return_train_score="warn", nested=False,
on_failure='raise', timeout=None):
"""
Iterate over inputs to cross-validation to keep the parallel execution queue filled.
Parameters
----------
estimator: estimator or dict of any to estimator implementing ‘fit’
The object(s) to use to fit the data.
X: array-like or dict of any to array-like
The data to fit. Can be for example a list, or an array.
y: array-like or dict of any to array_like
The target variable to try to predict in the case of supervised learning.
    groups: array-like  # TODO: handle groups as dicts when the other inputs are dicts
Group labels for the samples used while splitting the dataset into train/test set.
scoring: string, callable, list/tuple, dict or None
Defines the scorer.
cv: int, cross-validation generator or an iterable
Determines the cross-validation splitting strategy.
X_for_test: array-like or dict of any to array-like
The data to test. Can be for example a list, or an array.
feature_selection_generator: callable
Must take one variable: the number of features to select.
n_feature: int, array-like
The number of features to fit and test. 0 means use them all.
how: str('product')|str('zip')
How to iterate over the training and test data:
* 'zip' uses them in parallel by key
* 'product' forms a Cartesian product of the keys
progress_bar: callable
tqdm-like
progress_params: dict
Parameters to pass to the progress bar
n_jobs: int
The number of CPUs to use to do the computation. -1 means ‘all CPUs’.
verbose: int
The verbosity level.
fit_params: dict
Parameters to pass to the fit method of the estimator.
pre_dispatch: int, or string
Controls the number of jobs that get dispatched during parallel execution.
return_train_score: bool
Whether to include train scores.
nested: bool
Whether to perform nested cross-validation. Typically used in hyperparameter selection.
on_failure: str('ignore' | 'report' | 'warn' | 'raise'), default: 'raise'
How to handle errors raised by estimators
Returns
-------
results: dict
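    Examples
    --------
    A hypothetical comparison of two estimators on two feature sets; every name
    below (``X_raw``, ``X_scaled``, ``y``, the dict keys) is illustrative and not
    part of the API. With ``how='product'`` all four estimator/dataset
    combinations are cross-validated:
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.svm import SVC
    >>> estimators = {'logreg': LogisticRegression(), 'svc': SVC()}
    >>> datasets_ = {'raw': X_raw, 'scaled': X_scaled}
    >>> results = cross_validate_iterator(estimators, datasets_, y, cv=5,
    ...                                   how='product')      # doctest: +SKIP
    >>> results['test_score'].shape                           # doctest: +SKIP
    (4, 5)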
"""
    # Limitation:
    # * since the Cartesian product of X and X_for_test is taken (if provided), the number of samples
    #   in all datasets must be the same, and only one set of y labels can be provided
# This is not the place for generating synthetic data, because:
# * the iteration structure requires a lot of extra memory (instead of just duplicating X)
# * not clear what is going to be the train and the test window
estimator, X, X_for_test = to_dict(estimator), to_dict(X), to_dict(X_for_test)
n_feature = to_indexable(n_feature)
if type(y) is dict:
same_y = False
cv = check_cv(cv, _first_of_dict(y), classifier=is_classifier(estimator))
else:
same_y = True
cv = check_cv(cv, y, classifier=is_classifier(estimator))
# scorers, _ = _check_multimetric_scoring(estimator, scoring=scoring)
if how == 'product':
conditions = (estimator.items(), X.items(), X_for_test.items(), n_feature)
conditions_iterator = iter_product(*conditions)
total = np.product([len(x) for x in conditions])
else:
conditions = (estimator.items(), X.items(), n_feature)
conditions_iterator = ((est, (train_key, train), (train_key, X_for_test[train_key]), n_feat) for
est, (train_key, train), n_feat in iter_product(*conditions))
total = np.product([len(x) for x in conditions])
if progress_bar is None:
progress_bar = _no_progress_bar
    # TODO: random feature selection draws a different set of features for each classifier,
    #       not only when n_feature changes; in addition, it does not play well with the purpose of repeated K-fold
if feature_selection_generator is None:
feature_selection_generator = _default_feature_selection
inner_progress_bar = _no_progress_bar
else:
def inner_progress_bar(arr, *args, **kwargs):
if len(arr) > 1:
return progress_bar(arr, *args, **kwargs)
else:
return arr
progress_params = {**({} if progress_params is None else progress_params), 'total': total}
cv_params = {'verbose': verbose, 'fit_params': fit_params, 'return_train_score': return_train_score}
if on_failure not in ['ignore', 'report', 'warn', 'raise']:
raise ValueError('Unsupported failure handling.')
# We need to clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch, timeout=None)
results = parallel(
(delayed(_cross_validate_inner)(
est_key, train_key, test_key, feat, est, train, (y if same_y else y[train_key]),
groups, scoring, cv, test, cv_params, nested, on_failure, timeout))
for (est_key, est), (train_key, train), (test_key, test), n_feat in
progress_bar(conditions_iterator, **progress_params)
        # no harm to convert to list, this is going to be stored in memory anyway
for feat in inner_progress_bar(list(feature_selection_generator(train.shape[1] if n_feat == 0 else n_feat)),
desc='Feat', leave=False))
# Make a dict of arrays from "results.T"
keys = ['estimator', 'train', 'test', 'n_feature', 'feature', 'scores_stats']
results = {k: np.asarray(v) for k, v in zip(keys, zip(*results))}
scores_stats = results.pop('scores_stats')
# Filter results and format scores: shape (..., n_fold)
success = [bool(len(v)) for v in scores_stats]
results = {k: v[success] for k, v in results.items()}
results.update(_aggregate_score_dicts(scores_stats[success]))
return results
|
"""Test suite for the notion_paperpile_ package."""
|
from collections import OrderedDict
from toml import TomlEncoder
from toml import TomlDecoder
class TomlOrderedDecoder(TomlDecoder):
def __init__(self):
super(self.__class__, self).__init__(_dict=OrderedDict)
class TomlOrderedEncoder(TomlEncoder):
def __init__(self):
super(self.__class__, self).__init__(_dict=OrderedDict)
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import pprint
import sys
from typing import Any, List
import torch
from omegaconf import DictConfig, OmegaConf
from vissl.config import AttrDict, check_cfg_version
from vissl.utils.io import save_file
def save_attrdict_to_disk(cfg: AttrDict):
from vissl.utils.checkpoint import get_checkpoint_folder
yaml_output_file = f"{get_checkpoint_folder(cfg)}/train_config.yaml"
save_file(cfg.to_dict(), yaml_output_file)
def convert_to_attrdict(cfg: DictConfig, cmdline_args: List[Any] = None):
"""
Given the user input Hydra Config, and some command line input options
to override the config file:
1. merge and override the command line options in the config
2. Convert the Hydra OmegaConf to AttrDict structure to make it easy
to access the keys in the config file
3. Also check the config version used is compatible and supported in vissl.
In future, we would want to support upgrading the old config versions if
we make changes to the VISSL default config structure (deleting, renaming keys)
4. We infer values of some parameters in the config file using the other
parameter values.
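    Example (hypothetical; the config path and override key below are
    illustrative only)::
        cfg = OmegaConf.load("configs/my_experiment.yaml")
        overrides = ["config.OPTIMIZER.num_epochs=10"]
        cfg, config = convert_to_attrdict(cfg, cmdline_args=overrides)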
"""
if cmdline_args:
# convert the command line args to DictConfig
sys.argv = cmdline_args
cli_conf = OmegaConf.from_cli(cmdline_args)
# merge the command line args with config
cfg = OmegaConf.merge(cfg, cli_conf)
# convert the config to AttrDict
cfg = OmegaConf.to_container(cfg)
cfg = AttrDict(cfg)
# check the cfg has valid version
check_cfg_version(cfg)
# assert the config and infer
config = cfg.config
infer_and_assert_hydra_config(config)
save_attrdict_to_disk(config)
convert_fsdp_dtypes(config)
return cfg, config
def convert_fsdp_dtypes(config: AttrDict):
"""
Transform configuration types (primitive types) to VISSL specific types
"""
# TODO (Quentin) - remove this once FSDP accepts a boolean
if config["MODEL"]["FSDP_CONFIG"]["compute_dtype"] == "float32":
config["MODEL"]["FSDP_CONFIG"]["compute_dtype"] = torch.float32
else:
config["MODEL"]["FSDP_CONFIG"]["compute_dtype"] = torch.float16
def is_hydra_available():
"""
Check if Hydra is available. Simply python import to test.
"""
try:
import hydra # NOQA
hydra_available = True
except ImportError:
hydra_available = False
return hydra_available
def print_cfg(cfg):
"""
Supports printing both Hydra DictConfig and also the AttrDict config
"""
logging.info("Training with config:")
if isinstance(cfg, DictConfig):
if hasattr(cfg, "pretty"):
# Backward compatibility
logging.info(cfg.pretty())
else:
# Newest version of OmegaConf
logging.info(OmegaConf.to_yaml(cfg))
else:
logging.info(pprint.pformat(cfg))
def resolve_linear_schedule(cfg, param_schedulers):
"""
    For each linear schedule in the given composite scheduler, check whether the
    linear warmup rule from https://arxiv.org/abs/1706.02677 is applicable and
    necessary. The end_value is already set to scaled_lr (assuming the linear
    schedule is a warmup). When training on a single node, the warmup start value
    already reaches scaled_lr (the end_value), so the linear warmup is not needed
    and is removed.
"""
# compute what should be the linear warmup start LR value.
# this depends on batchsize per node.
num_nodes = cfg.DISTRIBUTED.NUM_NODES
num_gpus_per_node = cfg.DISTRIBUTED.NUM_PROC_PER_NODE
bs_per_gpu = cfg.DATA.TRAIN.BATCHSIZE_PER_REPLICA
batch_size_per_node = bs_per_gpu * num_gpus_per_node
base_lr = param_schedulers.auto_lr_scaling.base_value
base_lr_batch_size = param_schedulers.auto_lr_scaling.base_lr_batch_size
scale_factor = float(batch_size_per_node) / base_lr_batch_size
start_value = base_lr * scale_factor
remove_linear_idx = -1
for idx in range(len(param_schedulers["schedulers"])):
if param_schedulers["schedulers"][idx]["name"] == "linear":
param_schedulers["schedulers"][idx]["start_value"] = start_value
if num_nodes == 1:
end_value = param_schedulers["schedulers"][idx]["end_value"]
if start_value <= end_value:
# linear schedule is not meaningful as linear warmup is not needed.
remove_linear_idx = idx
    # check if the linear warmup should be removed as it's not meaningful
if remove_linear_idx >= 0:
del param_schedulers["schedulers"][remove_linear_idx]
# if after removing linear warmup, there's only one scheduler, then a composite
# schedule is no longer needed. The remaining scheduler becomes the primary
# scheduler
if len(param_schedulers["schedulers"]) == 1:
for key, value in param_schedulers["schedulers"][0].items():
param_schedulers[key] = value
return param_schedulers
def get_scaled_lr_scheduler(cfg, param_schedulers, scaled_lr):
"""
Scale learning rate value for different Learning rate types. See infer_learning_rate()
for how the scaled LR is calculated.
Values changed for learning rate schedules:
1. cosine:
        start_value = scaled_lr
        end_value = scaled_lr * (end_value / start_value)
2. multistep:
gamma = values[1] / values[0]
values = [scaled_lr * pow(gamma, idx) for idx in range(len(values))]
3. step_with_fixed_gamma
base_value = scaled_lr
4. linear:
end_value = scaled_lr
5. inverse_sqrt:
start_value = scaled_lr
6. constant:
value = scaled_lr
7. composite:
recursively call to scale each composition. If the composition consists of a linear
schedule, we assume that a linear warmup is applied. If the linear warmup is
applied, it's possible the warmup is not necessary if the global batch_size is smaller
than the base_lr_batch_size and in that case, we remove the linear warmup from the
schedule.
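    Worked example (hypothetical numbers): for a "multistep" schedule with
    values = [0.1, 0.01, 0.001] and scaled_lr = 0.2, gamma = 0.01 / 0.1 = 0.1 and
    the rescaled values become [0.2, 0.02, 0.002].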
"""
if "cosine" in param_schedulers["name"]:
start_value = param_schedulers["start_value"]
end_value = param_schedulers["end_value"]
decay_multiplier = end_value / start_value
param_schedulers["start_value"] = float(scaled_lr)
param_schedulers["end_value"] = float(scaled_lr * decay_multiplier)
elif param_schedulers["name"] == "multistep" or param_schedulers["name"] == "step":
values = param_schedulers["values"]
gamma = 1.0
if len(values) > 1:
gamma = round(values[1] / values[0], 6)
new_values = []
for idx in range(len(values)):
new_values.append(round(float(scaled_lr * pow(gamma, idx)), 8))
param_schedulers["values"] = new_values
elif param_schedulers["name"] == "step_with_fixed_gamma":
param_schedulers["base_value"] = scaled_lr
elif param_schedulers["name"] == "composite":
has_linear_warmup = False
for idx in range(len(param_schedulers["schedulers"])):
if param_schedulers["schedulers"][idx]["name"] == "linear":
has_linear_warmup = True
scheduler = get_scaled_lr_scheduler(
cfg, param_schedulers["schedulers"][idx], scaled_lr
)
param_schedulers["schedulers"][idx] = scheduler
# in case of composite LR schedule, if there's linear warmup specified,
# we check if the warmup is meaningful or not. If not, we simplify the
# schedule.
if has_linear_warmup:
resolve_linear_schedule(cfg, param_schedulers)
elif param_schedulers["name"] == "linear":
param_schedulers["end_value"] = scaled_lr
elif param_schedulers["name"] == "inverse_sqrt":
param_schedulers["start_value"] = scaled_lr
elif param_schedulers["name"] == "constant":
param_schedulers["value"] = scaled_lr
else:
raise RuntimeError(
f"Unknow param_scheduler: {param_schedulers['name']}. NOT scaling linearly"
)
return param_schedulers
def infer_learning_rate(cfg):
"""
    1) Assert the learning rate here. The LR is scaled as per https://arxiv.org/abs/1706.02677.
    To turn this automatic scaling off, set
    config.OPTIMIZER.param_schedulers.lr.auto_lr_scaling.auto_scale=false
scaled_lr is calculated:
given base_lr_batch_size = batch size for which the base learning rate is specified,
base_value = base learning rate value that will be scaled,
The current batch size is used to determine how to scale the base learning rate
value.
scale_factor = (batchsize_per_gpu * world_size) / base_lr_batch_size
if scaling_type is sqrt, scale factor = sqrt(scale_factor)
scaled_lr = scale_factor * base_value
We perform this auto-scaling for head learning rate as well if user wants to use a different
learning rate for the head
2) infer the model head params weight decay: if the head should use a different weight
decay value than the trunk.
    If a different weight decay value should be used for the head, it is set here.
    Otherwise, the same value as for the trunk is used automatically.
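    Worked example (hypothetical numbers): with base_value = 0.3,
    base_lr_batch_size = 256, BATCHSIZE_PER_REPLICA = 32 and world_size = 16,
    the global batch size is 32 * 16 = 512, so scale_factor = 512 / 256 = 2 and
    scaled_lr = 0.3 * 2 = 0.6 for linear scaling (or 0.3 * sqrt(2) ~= 0.42 for
    sqrt scaling).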
"""
if cfg.OPTIMIZER.param_schedulers.lr.auto_lr_scaling.auto_scale:
world_size = cfg.DISTRIBUTED.NUM_NODES * cfg.DISTRIBUTED.NUM_PROC_PER_NODE
batch_size = cfg.DATA.TRAIN.BATCHSIZE_PER_REPLICA * world_size
param_schedulers = cfg.OPTIMIZER.param_schedulers.lr
base_lr = param_schedulers.auto_lr_scaling.base_value
base_lr_batch_size = param_schedulers.auto_lr_scaling.base_lr_batch_size
scaling_type = param_schedulers.auto_lr_scaling.scaling_type
assert scaling_type in [
"sqrt",
"linear",
], "Only linear | sqrt scaling_types are supported"
scale_factor = float(batch_size) / base_lr_batch_size
if scaling_type == "sqrt":
scale_factor = scale_factor ** 0.5
scaled_lr = base_lr * scale_factor
cfg.OPTIMIZER.param_schedulers.lr = get_scaled_lr_scheduler(
cfg, param_schedulers, scaled_lr
)
if not cfg.OPTIMIZER.head_optimizer_params.use_different_lr:
# if not using the different value for the head, we set the weight decay and LR
# param scheduler same as the trunk.
cfg.OPTIMIZER.param_schedulers.lr_head = cfg.OPTIMIZER.param_schedulers.lr
elif (
cfg.OPTIMIZER.head_optimizer_params.use_different_lr
and cfg.OPTIMIZER.param_schedulers.lr_head
and cfg.OPTIMIZER.param_schedulers.lr_head.auto_lr_scaling.auto_scale
):
# if the user wants a different LR value for the head, then we
# automatically infer the LR values for the head as well (similar to
# trunk above)
world_size = cfg.DISTRIBUTED.NUM_NODES * cfg.DISTRIBUTED.NUM_PROC_PER_NODE
batch_size = cfg.DATA.TRAIN.BATCHSIZE_PER_REPLICA * world_size
param_schedulers = cfg.OPTIMIZER.param_schedulers.lr_head
base_lr = param_schedulers.auto_lr_scaling.base_value
base_lr_batch_size = param_schedulers.auto_lr_scaling.base_lr_batch_size
scaling_type = param_schedulers.auto_lr_scaling.scaling_type
assert scaling_type in [
"sqrt",
"linear",
], "Only linear | sqrt scaling_types are supported"
scale_factor = float(batch_size) / base_lr_batch_size
if scaling_type == "sqrt":
scale_factor = scale_factor ** 0.5
scaled_lr = base_lr * scale_factor
cfg.OPTIMIZER.param_schedulers.lr_head = get_scaled_lr_scheduler(
cfg, param_schedulers, scaled_lr
)
# for the head, if we want to use a different weight decay value,
# we verify that the specified weight decay value is valid. Otherwise,
# we do the inference and set the weight decay value same as the trunk.
if not cfg.OPTIMIZER.head_optimizer_params.use_different_wd:
cfg.OPTIMIZER.head_optimizer_params.weight_decay = cfg.OPTIMIZER.weight_decay
else:
assert (
cfg.OPTIMIZER.head_optimizer_params.weight_decay >= 0.0
), "weight decay for head should be >=0"
return cfg
def infer_losses_config(cfg):
"""
Infer settings for various self-supervised losses. Takes care of setting various loss
parameters correctly like world size, batch size per gpu, effective global batch size,
collator etc.
    Each loss has an additional set of parameters that can be inferred to ensure smooth
    training in case the user forgets to adjust all of them.
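    Example (hypothetical numbers): for simclr_info_nce_loss with 2 nodes of
    8 GPUs each and BATCHSIZE_PER_REPLICA = 32, world_size is set to 16 and the
    effective_batch_size becomes 2 * 32 * 16 = 1024 (2 positives per image).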
"""
train_transforms = cfg.DATA.TRAIN.TRANSFORMS
total_num_crops = next(
(
transform["total_num_crops"]
for transform in train_transforms
if "total_num_crops" in transform
),
None,
)
# some inference for the Info-NCE loss.
if "simclr_info_nce_loss" in cfg.LOSS.name:
cfg.LOSS[cfg.LOSS.name]["buffer_params"]["world_size"] = (
cfg.DISTRIBUTED.NUM_NODES * cfg.DISTRIBUTED.NUM_PROC_PER_NODE
)
world_size = cfg.LOSS[cfg.LOSS.name]["buffer_params"]["world_size"]
batch_size = cfg.DATA.TRAIN.BATCHSIZE_PER_REPLICA
num_positives = 2 # simclr uses 2 copies per image
cfg.LOSS[cfg.LOSS.name]["buffer_params"]["effective_batch_size"] = (
num_positives * batch_size * world_size
)
# bce_logits_multiple_output_single_target
if cfg.LOSS.name == "bce_logits_multiple_output_single_target":
world_size = cfg.DISTRIBUTED.NUM_NODES * cfg.DISTRIBUTED.NUM_PROC_PER_NODE
cfg.LOSS.bce_logits_multiple_output_single_target.world_size = world_size
# multicrop version of simclr loss
if cfg.LOSS.name == "multicrop_simclr_info_nce_loss":
world_size = cfg.LOSS.multicrop_simclr_info_nce_loss.buffer_params.world_size
batch_size = cfg.DATA.TRAIN.BATCHSIZE_PER_REPLICA
cfg.LOSS.multicrop_simclr_info_nce_loss.buffer_params.world_size = world_size
cfg.LOSS.multicrop_simclr_info_nce_loss.buffer_params.effective_batch_size = (
batch_size * world_size
)
cfg.LOSS.multicrop_simclr_info_nce_loss.num_crops = (
total_num_crops or cfg.LOSS.multicrop_simclr_info_nce_loss.num_crops
)
cfg.DATA.TRAIN.COLLATE_FUNCTION = "multicrop_collator"
# some inference for the DeepCluster-v2 loss.
if cfg.LOSS.name == "deepclusterv2_loss":
cfg.LOSS.deepclusterv2_loss.DROP_LAST = cfg.DATA.TRAIN.DROP_LAST
cfg.LOSS.deepclusterv2_loss.BATCHSIZE_PER_REPLICA = (
cfg.DATA.TRAIN.BATCHSIZE_PER_REPLICA
)
cfg.LOSS.deepclusterv2_loss.num_crops = (
total_num_crops or cfg.LOSS.deepclusterv2_loss.num_crops
)
cfg.DATA.TRAIN.COLLATE_FUNCTION = "multicrop_collator"
# some inference for the SwAV loss.
if cfg.LOSS.name == "swav_loss":
assert len(cfg.MODEL.HEAD.PARAMS) == 1
assert cfg.MODEL.HEAD.PARAMS[0][0] in {"swav_head", "swav_head_fsdp"}
assert cfg.DATA.TRAIN.COLLATE_FUNCTION in [
"multicrop_collator",
"multicrop_mixup_collator",
"cutmixup_collator",
], (
"for swav loss, use either a collator from "
"[multicrop_collator, multicrop_mixup_collator]"
)
cfg.LOSS.swav_loss.num_prototypes = cfg.MODEL.HEAD.PARAMS[0][1]["num_clusters"]
cfg.LOSS.swav_loss.embedding_dim = cfg.MODEL.HEAD.PARAMS[0][1]["dims"][-1]
cfg.LOSS.swav_loss.num_crops = total_num_crops or cfg.LOSS.swav_loss.num_crops
from vissl.utils.checkpoint import get_checkpoint_folder
cfg.LOSS.swav_loss.output_dir = get_checkpoint_folder(cfg)
world_size = cfg.DISTRIBUTED.NUM_NODES * cfg.DISTRIBUTED.NUM_PROC_PER_NODE
batch_size = cfg.DATA.TRAIN.BATCHSIZE_PER_REPLICA
batch_size *= world_size
queue_length = cfg.LOSS.swav_loss.queue.queue_length
queue_length -= queue_length % batch_size
cfg.LOSS.swav_loss.queue.queue_length = queue_length
cfg.LOSS.swav_loss.queue.local_queue_length = queue_length // world_size
# some inference for the SwAV momentum loss.
if cfg.LOSS.name == "swav_momentum_loss":
assert len(cfg.MODEL.HEAD.PARAMS) == 1
assert cfg.MODEL.HEAD.PARAMS[0][0] == "swav_head"
cfg.LOSS.swav_momentum_loss.num_prototypes = cfg.MODEL.HEAD.PARAMS[0][1][
"num_clusters"
]
cfg.LOSS.swav_momentum_loss.embedding_dim = cfg.MODEL.HEAD.PARAMS[0][1]["dims"][
-1
]
cfg.LOSS.swav_momentum_loss.num_crops = (
total_num_crops or cfg.LOSS.swav_momentum_loss.num_crops
)
cfg.DATA.TRAIN.COLLATE_FUNCTION = "multicrop_collator"
world_size = cfg.DISTRIBUTED.NUM_NODES * cfg.DISTRIBUTED.NUM_PROC_PER_NODE
batch_size = cfg.DATA.TRAIN.BATCHSIZE_PER_REPLICA
batch_size *= world_size
queue_length = cfg.LOSS.swav_momentum_loss.queue.queue_length
queue_length -= queue_length % batch_size
cfg.LOSS.swav_momentum_loss.queue.queue_length = queue_length
cfg.LOSS.swav_momentum_loss.queue.local_queue_length = (
queue_length // world_size
)
    # some inference for the DINO loss.
if cfg.LOSS.name == "dino_loss":
assert len(cfg.MODEL.HEAD.PARAMS) == 1
assert cfg.MODEL.HEAD.PARAMS[0][0] == "swav_head"
cfg.LOSS.dino_loss.output_dim = cfg.MODEL.HEAD.PARAMS[0][1]["num_clusters"][0]
cfg.LOSS.dino_loss.num_crops = total_num_crops or cfg.LOSS.dino_loss.num_crops
cfg.DATA.TRAIN.COLLATE_FUNCTION = "multicrop_collator"
return cfg
def infer_and_assert_hydra_config(cfg):
"""
Infer values of few parameters in the config file using the value of other config parameters
1. Inferring losses
2. Auto scale learning rate if user has specified auto scaling to be True.
3. Infer meter names (model layer name being evaluated) since we support list meters
that have multiple output and same target. This is very common in self-supervised
learning where we want to evaluate metric for several layers of the models. VISSL
supports running evaluation for multiple model layers in a single training run.
4. Support multi-gpu DDP eval model by attaching a dummy parameter. This is particularly
helpful for the multi-gpu feature extraction especially when the dataset is large for
which features are being extracted.
5. Infer what kind of labels are being used. If user has specified a labels source, we set
LABEL_TYPE to "standard" (also vissl default), otherwise if no label is specified, we
set the LABEL_TYPE to "sample_index".
"""
cfg = infer_losses_config(cfg)
cfg = infer_learning_rate(cfg)
# pass the seed to cfg["MODEL"] so that model init on different nodes can
# use the same seed.
# TODO (Min): once FSDP supports sync'ing weights from rank 0, we don't need
# this anymore.
cfg["MODEL"]["_MODEL_INIT_SEED"] = cfg.SEED_VALUE
# in case of linear evaluation, we often evaluate several layers at a time. For each
# layer, there's a separate accuracy meter. In such case, we want to output the layer
# name in the meters output to make it easy to interpret the results. This is
# currently only supported for cases where we have linear evaluation.
if cfg.METERS is not None:
from vissl.models import is_feature_extractor_model
meter_name = cfg.METERS.get("name", "")
valid_meters = ["accuracy_list_meter", "mean_ap_list_meter"]
if meter_name:
if meter_name in valid_meters and is_feature_extractor_model(cfg.MODEL):
cfg.METERS[meter_name]["num_meters"] = len(
cfg.MODEL.FEATURE_EVAL_SETTINGS.LINEAR_EVAL_FEAT_POOL_OPS_MAP
)
cfg.METERS[meter_name]["meter_names"] = [
item[0]
for item in cfg.MODEL.FEATURE_EVAL_SETTINGS.LINEAR_EVAL_FEAT_POOL_OPS_MAP
]
# in case of feature evaluation mode, we freeze the trunk. The Feature evaluation mode
# is used for the feature extraction of trunk as well. VISSL supports distributed feature
# extraction to speed up the extraction time. Since the model needs to be DDP for the
# distributed extraction, we need some dummy parameters in the model otherwise model
# can't be converted to DDP. So we attach some dummy head to the model.
world_size = cfg.DISTRIBUTED.NUM_NODES * cfg.DISTRIBUTED.NUM_PROC_PER_NODE
if (
cfg.MODEL.FEATURE_EVAL_SETTINGS.EVAL_MODE_ON
and cfg.MODEL.FEATURE_EVAL_SETTINGS.FREEZE_TRUNK_ONLY
and cfg.MODEL.FEATURE_EVAL_SETTINGS.EXTRACT_TRUNK_FEATURES_ONLY
and world_size > 1
and len(cfg.MODEL.HEAD.PARAMS) == 0
):
cfg.MODEL.HEAD.PARAMS = [["mlp", {"dims": [2048, 1000]}]]
# in SSL, during pre-training we don't want to use annotated labels or during feature
# extraction, we don't have annotated labels for some datasets. In such cases, we set
# the label type to be just the image index in the dataset, unless the
# user has specifically provided "zero" as the label type, which is
# necessary when the CutMixUp collator is being used for self-supervised
# training.
if len(cfg.DATA.TRAIN.LABEL_SOURCES) == 0 and cfg.DATA.TRAIN.LABEL_TYPE != "zero":
cfg.DATA.TRAIN.LABEL_TYPE = "sample_index"
if len(cfg.DATA.TEST.LABEL_SOURCES) == 0 and cfg.DATA.TEST.LABEL_TYPE != "zero":
cfg.DATA.TEST.LABEL_TYPE = "sample_index"
# if the user has specified the model initialization from a params_file, we check if
# the params_file is a url. If it is, we download the file to a local cache directory
# and use that instead
from vissl.utils.checkpoint import get_checkpoint_folder
from vissl.utils.io import cache_url, is_url
if is_url(cfg.MODEL.WEIGHTS_INIT.PARAMS_FILE):
checkpoint_dir = get_checkpoint_folder(cfg)
cache_dir = f"{checkpoint_dir}/params_file_cache/"
cached_url_path = cache_url(
url=cfg.MODEL.WEIGHTS_INIT.PARAMS_FILE, cache_dir=cache_dir
)
cfg.MODEL.WEIGHTS_INIT.PARAMS_FILE = cached_url_path
    # ZeRO2: Infer the settings for ShardedDDP, which shards the optimizer state
    # and the model weights. For ShardedDDP, we must use the OSS optimizer,
    # set the right task name, and use PyTorch AMP if AMP is used.
if cfg.MODEL.SHARDED_DDP_SETUP.USE_SDP:
cfg.OPTIMIZER.use_zero = True
cfg.TRAINER.TASK_NAME = "self_supervision_sdp_task"
if cfg.MODEL.AMP_PARAMS.USE_AMP:
cfg.MODEL.AMP_PARAMS.AMP_TYPE = "pytorch"
# if we use a zero optimizer, we nest the optimizer related settings under the
# base_optimizer.
if cfg.OPTIMIZER.use_zero:
cfg.OPTIMIZER["base_optimizer"] = cfg.OPTIMIZER.copy()
cfg.OPTIMIZER.name = "zero"
del cfg.OPTIMIZER.base_optimizer["param_schedulers"]
del cfg.OPTIMIZER.base_optimizer["regularize_bn"]
del cfg.OPTIMIZER.base_optimizer["regularize_bias"]
del cfg.OPTIMIZER.base_optimizer["num_epochs"]
del cfg.OPTIMIZER.base_optimizer["use_zero"]
del cfg.OPTIMIZER.base_optimizer["head_optimizer_params"]
    # inference for the FSDP settings. Conditions are:
    # 1) use the FSDP task
    # 2) use a single param group in the optimizer
    # 3) if AMP is used, it must be PyTorch AMP
    # 4) if training SwAV, automatically switch the head to the SwAV FSDP head
    # 5) infer the FSDP parameters to ensure good convergence
if cfg.MODEL.FSDP_CONFIG.AUTO_SETUP_FSDP:
cfg.TRAINER.TASK_NAME = "self_supervision_fsdp_task"
cfg.OPTIMIZER.construct_single_param_group_only = True
        # safely set flatten_parameters=True for FSDP training.
        cfg["MODEL"]["FSDP_CONFIG"]["flatten_parameters"] = True
        # recommended FSDP settings below for good convergence
cfg["MODEL"]["FSDP_CONFIG"]["compute_dtype"] = "float32"
# Inference of optimizer configuration
if cfg["OPTIMIZER"]["use_larc"]:
cfg["OPTIMIZER"]["name"] = "sgd_fsdp"
# AMP based inference
if cfg["MODEL"]["AMP_PARAMS"]["USE_AMP"]:
cfg["MODEL"]["AMP_PARAMS"]["AMP_TYPE"] = "pytorch"
cfg["MODEL"]["FSDP_CONFIG"]["mixed_precision"] = True
cfg["MODEL"]["FSDP_CONFIG"]["fp32_reduce_scatter"] = True
else:
# if not using AMP, we can't use mixed_precision as it requires PyTorch AMP
cfg["MODEL"]["FSDP_CONFIG"]["mixed_precision"] = False
# if mixed_precision=False, FSDP mandates setting fp32_reduce_scatter=False
cfg["MODEL"]["FSDP_CONFIG"]["fp32_reduce_scatter"] = False
# Inference of the head in case of training with FSDP
for i, head_param in enumerate(cfg.MODEL.HEAD.PARAMS):
if head_param[0] == "swav_head":
cfg.MODEL.HEAD.PARAMS[i][0] = "swav_head_fsdp"
if head_param[0] == "eval_mlp":
cfg.MODEL.HEAD.PARAMS[i][0] = "eval_mlp_fsdp"
if head_param[0] == "mlp":
cfg.MODEL.HEAD.PARAMS[i][0] = "mlp_fsdp"
# Inference of the trunk in case of training with FSDP
if cfg.MODEL.TRUNK.NAME == "regnet":
cfg.MODEL.TRUNK.NAME = "regnet_fsdp"
# Profiling the communication requires some setup for FSDP
if cfg.PROFILING.MEMORY_PROFILING.TRACK_BY_LAYER_MEMORY:
cfg["MODEL"]["FSDP_CONFIG"]["_TRACK_COMMUNICATIONS"] = True
logging.info(f"Using the FSDP config: {cfg.MODEL.FSDP_CONFIG}")
# Delete the AUTO_SETUP_FSDP key since we send the FSDP_CONFIG
# to FSDP from fairscale which doesn't know about AUTO_SETUP_FSDP
del cfg.MODEL.FSDP_CONFIG["AUTO_SETUP_FSDP"]
if cfg.DATA.TRAIN.BASE_DATASET == "generic_ssl":
assert (
cfg.DATA.TRAIN.get("TRAIN_PHASES_PER_EPOCH", 1) == 1
), "When using the generic_ssl, we must set TRAIN_PHASES_PER_EPOCH = 1."
|
from datetime import datetime
def greetingTime():
current_hour = datetime.now().hour
if current_hour < 12:
return "Buenos días"
elif 12 <= current_hour < 18:
return "Buenas tardes"
else:
return "Buenas noches" |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-08-18 06:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('account', '0011_auto_20190818_0653'),
]
operations = [
migrations.CreateModel(
name='BanksMerch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('bank_name', models.CharField(max_length=70)),
],
),
migrations.AddField(
model_name='historymerch',
name='access',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='account.Accesses'),
),
migrations.AddField(
model_name='historymerch',
name='bank',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='account.Banks'),
),
migrations.AddField(
model_name='historymerch',
name='transaction',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='account.Types'),
),
]
|
import os
import shutil
import tempfile
import unittest
from lobster.core import Dataset
from lobster import fs, se, util
class TestDataset(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = os.path.expandvars(
os.environ.get('LOBSTER_STORAGE', '/hadoop/store/user/') +
os.environ.get('LOBSTER_USER', os.environ['USER']) + '/')
if not os.path.exists(path):
os.makedirs(path)
cls.workdir = tempfile.mkdtemp(prefix=path)
os.chmod(cls.workdir, 0777)
os.makedirs(os.path.join(cls.workdir, 'eggs'))
for i in range(10):
with open(os.path.join(cls.workdir, 'eggs', str(i) + '.txt'), 'w') as f:
f.write('stir-fry')
os.makedirs(os.path.join(cls.workdir, 'ham'))
for i in range(5):
with open(os.path.join(cls.workdir, 'ham', str(i) + '.txt'), 'w') as f:
f.write('bacon')
os.makedirs(os.path.join(cls.workdir, 'spam'))
os.makedirs(os.path.join(cls.workdir, 'spam', 'log'))
for i in range(5):
with open(os.path.join(cls.workdir, 'spam', str(i) + '.txt'), 'w') as f:
f.write('mail')
for i in range(2):
with open(os.path.join(cls.workdir, 'spam', str(i) + '.trash'), 'w') as f:
f.write('mail')
for i in range(3):
with open(os.path.join(cls.workdir, 'spam', 'log', str(i) + '.log'), 'w') as f:
f.write('thing')
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.workdir)
def test_basics(self):
with util.PartiallyMutable.unlock():
s = se.StorageConfiguration(
output=[], input=['file://' + self.workdir])
s.activate()
with fs.alternative():
info = Dataset(files='eggs').get_info()
assert len(info.files) == 10
info = Dataset(files=['eggs', 'ham']).get_info()
assert len(info.files) == 15
info = Dataset(files='eggs/1.txt').get_info()
assert len(info.files) == 1
def test_flatten(self):
with util.PartiallyMutable.unlock():
s = se.StorageConfiguration(
output=[], input=['file://' + self.workdir])
s.activate()
with fs.alternative():
info = Dataset(files=['spam']).get_info()
assert len(info.files) == 8
info = Dataset(files=['spam'], patterns=['*.txt']).get_info()
assert len(info.files) == 5
info = Dataset(files=['spam'], patterns=['[12].txt']).get_info()
assert len(info.files) == 2
|
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
### Hyperparameters ###
IMG_X = 28
IMG_Y = 28
INPUT_DIM = IMG_X * IMG_Y
OUTPUT_DIM = 10
LR = 0.1
MAX_LOOP = 20000
BATCH_SIZE = 100
### Hyperparameters ###
# load data
mnist = input_data.read_data_sets('D:/data/ml_data/MNIST_data', one_hot=True)
X_train = mnist.train.images
y_train = mnist.train.labels
X_test = mnist.test.images
y_test = mnist.test.labels
def get_batch_data(X, y, batch_size):
ix = np.random.randint(0, len(X), batch_size)
X_batch = X[ix]
y_batch = y[ix]
return X_batch, y_batch
# build a neural network layer
def nn_layer(inputs, in_dim, out_dim, act=None):
weights = tf.Variable(tf.random_normal(shape=[in_dim, out_dim]), dtype=tf.float32)
biases = tf.Variable(tf.zeros(shape=[out_dim]) + 0.1)
z = tf.matmul(inputs, weights) + biases
if act is None:
return z
else:
return act(z)
# set placeholder
xs = tf.placeholder(dtype=tf.float32, shape=[None, INPUT_DIM])
ys = tf.placeholder(dtype=tf.float32, shape=[None, OUTPUT_DIM])
# build the neural network: a single fully-connected layer producing raw logits
logits = nn_layer(xs, INPUT_DIM, OUTPUT_DIM, act=None)
# apply softmax separately for predictions; the loss below needs the raw logits
y_pred = tf.nn.softmax(logits)
# loss and train (softmax_cross_entropy_with_logits expects logits, not softmax output)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=ys, logits=logits))
train_step = tf.train.AdamOptimizer(learning_rate=LR).minimize(loss)
# evaluate model
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(ys, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# training the model
#with tf.Session() as sess:
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(MAX_LOOP):
X_train_batch, y_train_batch = get_batch_data(X_train, y_train, BATCH_SIZE)
#X_train_batch, y_train_batch = mnist.train.next_batch(BATCH_SIZE)
sess.run(train_step, feed_dict={xs: X_train_batch, ys: y_train_batch})
if i % 50 == 0:
print('train error:\t', sess.run(loss, feed_dict={xs: X_train, ys: y_train}))
        print('train accuracy:\t', sess.run(accuracy, feed_dict={xs: X_train, ys: y_train}))
        print('test error:\t', sess.run(loss, feed_dict={xs: X_test, ys: y_test}))
        print('test accuracy:\t', sess.run(accuracy, feed_dict={xs: X_test, ys: y_test}))
print('-----------------------------------')
print('---------------FINAL-------------')
print('train error:\t', sess.run(loss, feed_dict={xs: X_train, ys: y_train}))
print('train accuracy:\t', sess.run(accuracy, feed_dict={xs: X_train, ys: y_train}))
print('test error:\t', sess.run(loss, feed_dict={xs: X_test, ys: y_test}))
print('test accuracy:\t', sess.run(accuracy, feed_dict={xs: X_test, ys: y_test}))
print('---------------FINAL-------------')
import img_proc
filename = 'my_9_28x28.jpg'
#img_proc.resizeImg('E:/data/ml_data/my_9.jpg', 'my_9_28x28.jpg', 28, 28)
digit_test = img_proc.getImgAsMatFromFile(filename)
digit_test = digit_test.reshape((-1))
print('predict:\t', sess.run(y_pred, feed_dict={xs: digit_test[np.newaxis, :]}))
print('\n--- DONE! ---')
|
import demes
import numpy as np
import matplotlib
import matplotlib.lines      # the submodules used below are imported explicitly,
import matplotlib.patches    # since `import matplotlib` alone does not load them
import matplotlib.patheffects
import matplotlib.path
from demesdraw import utils
def size_history(
graph: demes.Graph,
ax: matplotlib.axes.Axes = None,
colours: utils.ColourOrColourMapping = None,
log_time: bool = False,
log_size: bool = False,
title: str = None,
inf_ratio: float = 0.1,
inf_label: bool = False,
invert_x: bool = False,
annotate_epochs: bool = False,
num_points: int = 100,
) -> matplotlib.axes.Axes:
"""
Plot population size as a function of time for each deme in the graph.
:param demes.Graph graph:
The demes graph to plot.
:param ax:
The matplotlib axes onto which the figure
will be drawn. If None, an empty axes will be created for the figure.
:type ax: Optional[matplotlib.axes.Axes]
:param colours:
A mapping from deme name to matplotlib colour. Alternately,
``colours`` may be a named colour that will be used for all demes.
:type colours: Optional[dict or str]
:param log_time:
If True, use a log-10 scale for the time axis.
If False (*default*), a linear scale will be used.
:param bool log_size:
If True, use a log-10 scale for the size axis.
If False (*default*), a linear scale will be used.
:param title:
The title of the figure.
:param inf_ratio:
The proportion of the time axis that will be
used for the time interval which stretches towards infinity.
:param inf_label:
Write "inf" by the arrow that points towards infinity.
:param invert_x:
If True, the horizontal axis will have infinity on the left and
zero on the right, and the vertical axis will be drawn on the right.
If False (*default*), the horizontal axis will have zero on the left
and infinity on the right, and the vertical axis will be drawn
on the left.
:param annotate_epochs:
If True, annotate the figure with epoch indices over the relevant
parts of the lines. This may be useful as a pedagogical tool.
If False (*default*), do not annotate the epochs.
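    :param num_points:
        The number of points used to approximate each exponential or linear
        size-change curve when plotting an epoch.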
:return: The matplotlib axes onto which the figure was drawn.
"""
if ax is None:
_, ax = utils.get_fig_axes()
if invert_x:
arrowhead = "<"
else:
arrowhead = ">"
colours = utils._get_colours(graph, colours)
inf_start_time = utils._inf_start_time(graph, inf_ratio, log_time)
linestyles = ["solid"] # , "dashed", "dashdot"]
linewidths = [2, 4, 8, 1]
legend_handles = []
# Top of the z order stacking.
z_top = 1 + len(graph.demes) + max(linewidths)
for j, deme in enumerate(graph.demes):
colour = colours[deme.name]
linestyle = linestyles[j % len(linestyles)]
linewidth = linewidths[j % len(linewidths)]
plot_kwargs = dict(
color=colour,
linestyle=linestyle,
linewidth=linewidth,
label=deme.name,
alpha=0.7,
zorder=z_top - linewidth,
capstyle="butt",
fill=False,
path_effects=[
matplotlib.patheffects.withStroke(linewidth=3, foreground="white")
],
)
discontinuity_kwargs = plot_kwargs.copy()
discontinuity_kwargs.update(linestyle=":")
legend_kwargs = plot_kwargs.copy()
legend_kwargs.pop("fill")
# Line2D and Patch use different keywords for the capstyle. *Sigh*
legend_kwargs.update(solid_capstyle=legend_kwargs.pop("capstyle"))
legend_handles.append(matplotlib.lines.Line2D([], [], **legend_kwargs))
# Path for the main line (solid).
vertices_main = []
codes_main = []
# Path for the discontinuity lines (dashed).
vertices_discontinuity = []
codes_discontinuity = []
for k, epoch in enumerate(deme.epochs):
start_time = epoch.start_time
if np.isinf(start_time):
start_time = inf_start_time
end_time = epoch.end_time
if log_time:
end_time = max(1, end_time)
if epoch.size_function == "constant":
x = np.array([start_time, end_time])
y = np.array([epoch.start_size, epoch.end_size])
elif epoch.size_function == "exponential":
x = np.linspace(start_time, end_time, num=num_points)
dt = np.linspace(0, 1, num=num_points)
r = np.log(epoch.end_size / epoch.start_size)
y = epoch.start_size * np.exp(r * dt)
elif epoch.size_function == "linear":
x = np.linspace(start_time, end_time, num=num_points)
dt = np.linspace(0, 1, num=num_points)
y = epoch.start_size + (epoch.end_size - epoch.start_size) * dt
else:
raise ValueError(
f"Don't know how to plot epoch {k} with "
f'"{epoch.size_function}" size_function.'
)
vertices_main.extend(list(zip(x, y)))
if k == 0 or deme.epochs[k - 1].end_size != epoch.start_size:
codes_main.append(matplotlib.path.Path.MOVETO)
else:
codes_main.append(matplotlib.path.Path.LINETO)
codes_main.extend([matplotlib.path.Path.LINETO] * (len(x) - 1))
if k > 0 and deme.epochs[k - 1].end_size != epoch.start_size:
# Size discontinuity.
vertices_discontinuity.extend(
[
(deme.epochs[k - 1].end_time, deme.epochs[k - 1].end_size),
(epoch.start_time, epoch.start_size),
]
)
codes_discontinuity.extend(
[matplotlib.path.Path.MOVETO, matplotlib.path.Path.LINETO]
)
if annotate_epochs:
if log_time:
text_x = np.exp((np.log(start_time) + np.log(end_time)) / 2)
else:
text_x = (start_time + end_time) / 2
if log_size:
text_y = np.exp(
(np.log(epoch.start_size) + np.log(max(1, epoch.end_size))) / 2
)
else:
text_y = (epoch.start_size + epoch.end_size) / 2
ax.annotate(
f"epoch {k}",
(text_x, text_y),
ha="center",
va="bottom",
xytext=(0, 4 + linewidth / 2), # vertical offset
textcoords="offset points",
# Give the text some contrast with its background.
bbox=dict(
boxstyle="round", fc="white", ec="none", alpha=0.6, pad=0
),
# This is only really a useful feature with 1 deme,
# but at least try to do something reasonable for more demes.
color="black" if len(graph.demes) == 1 else colour,
zorder=z_top,
)
# Indicate population size discontinuities from ancestor demes.
for ancestor_id in deme.ancestors:
anc = graph[ancestor_id]
anc_N = anc.size_at(deme.start_time)
deme_N = deme.epochs[0].start_size
if anc_N != deme_N:
vertices_discontinuity.extend(
[(deme.start_time, anc_N), (deme.start_time, deme_N)]
)
codes_discontinuity.extend(
[matplotlib.path.Path.MOVETO, matplotlib.path.Path.LINETO]
)
size_path_patch = matplotlib.patches.PathPatch(
matplotlib.path.Path(vertices_main, codes_main), **plot_kwargs
)
ax.add_patch(size_path_patch)
if len(vertices_discontinuity) > 0:
discontinuity_path_patch = matplotlib.patches.PathPatch(
matplotlib.path.Path(vertices_discontinuity, codes_discontinuity),
**discontinuity_kwargs,
)
ax.add_patch(discontinuity_path_patch)
if np.isinf(deme.start_time):
# Plot an arrow at the end of the line, to indicate this
# line extends towards infinity.
ax.plot(
inf_start_time,
deme.epochs[0].start_size,
arrowhead,
color=colour,
clip_on=False,
zorder=z_top,
)
if inf_label:
ax.annotate(
"inf",
(inf_start_time, deme.epochs[0].start_size),
xytext=(0, -6), # vertical offset
textcoords="offset points",
clip_on=False,
ha="center",
va="top",
)
# Update the axes view. ax.add_patch() doesn't do this itself.
ax.autoscale_view()
if len(graph.demes) > 1:
leg = ax.legend(handles=legend_handles, ncol=len(graph.demes) // 2)
leg.set_zorder(z_top)
if title is not None:
ax.set_title(title)
# Arrange the axes spines, ticks and labels.
ax.set_xlim(1 if log_time else 0, inf_start_time)
if not log_size:
ax.set_ylim(bottom=0)
for spine in ax.spines.values():
spine.set_zorder(z_top)
ax.spines["top"].set_visible(False)
if invert_x:
ax.spines["left"].set_visible(False)
ax.invert_xaxis()
ax.yaxis.tick_right()
ax.yaxis.set_label_position("right")
else:
ax.spines["right"].set_visible(False)
ax.set_xlabel(f"time ago ({graph.time_units})")
# ax.set_ylabel("N", rotation=0, ha="left" if invert_x else "right")
ax.set_ylabel("deme\nsize", rotation=0, labelpad=20)
if log_time:
ax.set_xscale("log", base=10)
if log_size:
ax.set_yscale("log", base=10)
return ax
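

# A minimal usage sketch (not part of the original module); the model file name
# below is a hypothetical placeholder for any demes YAML description.
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    example_graph = demes.load("example_model.yaml")  # hypothetical input file
    example_ax = size_history(example_graph, log_time=True, annotate_epochs=True)
    example_ax.figure.savefig("size_history.png")
    plt.show()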
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'chengzhi'
from tqsdk import TqApi, TargetPosTask
'''
If the latest price is above the MA15 of the 10-second K-lines, open a long position;
if it is below, close the position.
'''
api = TqApi()
# get a reference to the 10-second K-line serial for m1909
klines = api.get_kline_serial("DCE.m1909", 10)
# create the target-position task for m1909; this task adjusts the m1909 position to the requested target
target_pos = TargetPosTask(api, "DCE.m1909")
while True:
api.wait_update()
if api.is_changing(klines):
ma = sum(klines.close.iloc[-15:]) / 15
print("最新价", klines.close.iloc[-1], "MA", ma)
if klines.close.iloc[-1] > ma:
print("最新价大于MA: 目标多头5手")
# 设置目标持仓为多头5手
target_pos.set_target_volume(5)
elif klines.close.iloc[-1] < ma:
print("最新价小于MA: 目标空仓")
# 设置目标持仓为空仓
target_pos.set_target_volume(0)
|
import glob
import json
import anyconfig
import scalpl
from datetime import datetime
from .logger_setup import logger
# Class: ConfigManager
# Function: To store and load settings for BoxBot 2.0
# :
# : - Register a setting to the bot's config model
# : - Retrieve a value from the bot's config model
# : - Load a config file into the bot's config model
# : - Save the model into a file
class ConfigManager:
def __init__(self, filepath="./config/settings.toml", max_backups=10):
self._settings_file = filepath
self._max_backups = max_backups
self.config = scalpl.Cut(self.loadConfig())
# To do: Limit total number of backups
# : Check to see if backup is redundant
# : Add code to maintain up to max_backups number of backup configs
def __backupConfigFile(self):
'''Backup existing config file to a new timestamped file'''
        anyconfig.dump(self.config.data, f"./config/config.backup_{datetime.now()}".replace(":", "_") + ".toml")
def saveConfig(self):
'''Save config model to a file'''
self.__backupConfigFile()
anyconfig.dump(self.config.data, self._settings_file)
    def loadConfig(self) -> dict:
'''Load config model from a file'''
try:
config = anyconfig.load(self._settings_file)
except FileNotFoundError:
config = {}
return config
## Accessors ##
def put(self, keys:str, value):
'''Register a value in config to a period-delimited property string'''
try:
self.config[keys] = value
except KeyError as e:
logger.warning(f"No value for property {keys}!\n{e}")
return None
return self.config.data
def get(self, keys:str, default=None):
'''Retrieve the value in config registered to a period-delimited property string'''
return self.config.get(keys, default=default)
    def __unfold(self, properties: str) -> list:
'''Turns a period-delimited property name into a list of property strings'''
return properties.strip().split(".")
boxconfig = ConfigManager()
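
# A short usage sketch (illustrative only; the "bot.prefix" key below is a
# hypothetical example, not a setting BoxBot requires).
if __name__ == "__main__":
    boxconfig.put("bot", {})             # create the parent mapping first
    boxconfig.put("bot.prefix", "!")     # then register a nested value via a dotted key
    print(boxconfig.get("bot.prefix"))   # -> "!"
    print(boxconfig.get("bot.missing", default="n/a"))  # falls back to the default
    boxconfig.saveConfig()               # writes the file plus a timestamped backup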
|
class ThemeConfig:
def __init__(self, colors, extras, base_text_states):
self._light_scheme=f"""[ColorEffects:Disabled]
Color={extras['LightSurface1']}
ColorAmount=0.55
ColorEffect=3
ContrastAmount=0.65
ContrastEffect=0
IntensityAmount=0.1
IntensityEffect=0
[ColorEffects:Inactive]
ChangeSelectionColor=false
Color={colors['light']['SurfaceVariant']}
ColorAmount=1
ColorEffect=0
ContrastAmount=1
ContrastEffect=0
Enable=false
IntensityAmount=10
IntensityEffect=10
[Colors:Button]
BackgroundAlternate={colors['light']['SurfaceVariant']}
BackgroundNormal={colors['light']['Surface']}
DecorationFocus={colors['light']['Primary']}
DecorationHover={colors['light']['Primary']}
ForegroundActive={colors['light']['OnSurface']}
ForegroundInactive={colors['light']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['light']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
ForegroundNormal={colors['light']['OnSurface']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Visited']}
[Colors:Header]
BackgroundNormal={colors['light']['SurfaceVariant']}
[Colors:Selection]
BackgroundAlternate={colors['light']['Primary']}
BackgroundNormal={colors['light']['Primary']}
DecorationFocus={colors['light']['Primary']}
DecorationHover={colors['light']['Primary']}
ForegroundActive={colors['light']['OnPrimary']}
ForegroundInactive={colors['light']['OnPrimary']}
ForegroundLink={extras['LinkOnPrimaryLight']}
ForegroundNegative={extras['NegativeOnPrimaryLight']}
ForegroundNeutral={extras['NeutralOnPrimaryLight']}
ForegroundNormal={colors['light']['OnPrimary']}
ForegroundPositive={extras['PositiveOnPrimaryLight']}
ForegroundVisited={extras['LinkVisitedOnPrimaryLight']}
[Colors:Tooltip]
BackgroundAlternate={colors['light']['SurfaceVariant']}
BackgroundNormal={colors['light']['Surface']}
DecorationFocus={colors['light']['Primary']}
DecorationHover={colors['light']['Primary']}
ForegroundActive={colors['light']['OnSurface']}
ForegroundInactive={colors['light']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['light']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
ForegroundNormal={colors['light']['OnSurface']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Visited']}
[Colors:View]
BackgroundAlternate={colors['light']['Surface']}
BackgroundNormal={extras['LightSurface1']}
DecorationFocus={colors['light']['Primary']}
#-----------------------------------------------
DecorationHover={colors['light']['Primary']}
ForegroundActive={colors['light']['InverseSurface']}
ForegroundInactive={colors['light']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['light']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
ForegroundNormal={colors['light']['OnSurfaceVariant']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Visited']}
[Colors:Window]
BackgroundAlternate={colors['light']['Surface']}
BackgroundNormal={colors['light']['SurfaceVariant']}
DecorationFocus={colors['light']['Primary']}
DecorationHover={colors['light']['Primary']}
ForegroundActive={colors['light']['InverseSurface']}
ForegroundInactive={colors['light']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['light']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
#--- Window titles, context icons
ForegroundNormal={colors['light']['OnSurfaceVariant']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Negative']}
[General]
ColorScheme=MaterialYouLight
Name=Material You Light
shadeSortColumn=false
[KDE]
contrast=4
[WM]
activeBackground={colors['light']['SurfaceVariant']}
activeBlend=#ff0000
activeForeground={colors['light']['OnSurface']}
inactiveBackground={colors['light']['SecondaryContainer']}
inactiveBlend=#ff0000
inactiveForeground={colors['light']['OnSurfaceVariant']}
"""
self._dark_scheme=f"""[ColorEffects:Disabled]
Color={extras['DarkSurface1']}
ColorAmount=0.55
ColorEffect=3
ContrastAmount=0.65
ContrastEffect=0
IntensityAmount=0.1
IntensityEffect=0
[ColorEffects:Inactive]
ChangeSelectionColor=false
Color={colors['dark']['SurfaceVariant']}
ColorAmount=-0.9
ColorEffect=0
ContrastAmount=0.1
ContrastEffect=0
Enable=true
IntensityAmount=0
IntensityEffect=0
[Colors:Button]
BackgroundAlternate={colors['dark']['SurfaceVariant']}
BackgroundNormal={extras['DarkSelectionAlt']}
DecorationFocus={colors['dark']['Primary']}
DecorationHover={colors['dark']['Primary']}
ForegroundActive={colors['dark']['OnSurface']}
ForegroundInactive={colors['dark']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['dark']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
ForegroundNormal={colors['dark']['OnSurface']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Visited']}
[Colors:Header]
BackgroundNormal={colors['dark']['SurfaceVariant']}
[Colors:Selection]
BackgroundAlternate={colors['dark']['Primary']}
BackgroundNormal={colors['dark']['Primary']}
DecorationFocus={colors['dark']['Primary']}
DecorationHover={colors['dark']['Primary']}
ForegroundActive={colors['dark']['OnPrimary']}
ForegroundInactive={colors['dark']['OnPrimary']}
ForegroundLink={extras['LinkOnPrimaryDark']}
ForegroundNegative={extras['NegativeOnPrimaryDark']}
ForegroundNeutral={extras['NeutralOnPrimaryDark']}
ForegroundNormal={colors['dark']['OnPrimary']}
ForegroundPositive={extras['PositiveOnPrimaryDark']}
ForegroundVisited={extras['LinkVisitedOnPrimaryDark']}
[Colors:Tooltip]
BackgroundAlternate={colors['dark']['SurfaceVariant']}
BackgroundNormal={colors['dark']['Surface']}
DecorationFocus={colors['dark']['Primary']}
DecorationHover={colors['dark']['Primary']}
ForegroundActive={colors['dark']['OnSurface']}
ForegroundInactive={colors['dark']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['dark']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
ForegroundNormal={colors['dark']['OnSurface']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Visited']}
[Colors:View]
BackgroundAlternate={colors['dark']['Surface']}
BackgroundNormal={extras['DarkSurface1']}
DecorationFocus={colors['dark']['Primary']}
#-----------------------------------------------
DecorationHover={colors['dark']['Primary']}
ForegroundActive={colors['dark']['InverseSurface']}
ForegroundInactive={colors['dark']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['dark']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
ForegroundNormal={colors['dark']['OnSurfaceVariant']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Visited']}
[Colors:Window]
BackgroundAlternate={colors['dark']['Surface']}
BackgroundNormal={colors['dark']['SurfaceVariant']}
DecorationFocus={colors['dark']['Primary']}
DecorationHover={colors['dark']['Primary']}
ForegroundActive={colors['dark']['InverseSurface']}
ForegroundInactive={colors['dark']['Outline']}
ForegroundLink={base_text_states['Link']}
ForegroundNegative={colors['dark']['Error']}
ForegroundNeutral={base_text_states['Neutral']}
#--- Window titles, context icons
ForegroundNormal={colors['dark']['OnSurfaceVariant']}
ForegroundPositive={base_text_states['Positive']}
ForegroundVisited={base_text_states['Negative']}
[General]
ColorScheme=MaterialYouDark
Name=Material You Dark
shadeSortColumn=true
[KDE]
contrast=4
[WM]
activeBackground={colors['dark']['SurfaceVariant']}
activeBlend=#ff0000
activeForeground={colors['dark']['OnSurface']}
inactiveBackground={colors['dark']['SecondaryContainer']}
inactiveBlend=#ff0000
inactiveForeground={colors['dark']['OnSecondaryContainer']}
"""
def get_light_scheme(self):
return(self._light_scheme)
def get_dark_scheme(self):
return(self._dark_scheme) |
def maior(a, b):
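    # (a + b + |a - b|) / 2 equals max(a, b): adding |a - b| cancels the smaller
    # operand and doubles the larger one, e.g. maior(2, 5) = (2 + 5 + 3) / 2 = 5.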
return (a + b + abs(a - b))/2
inp = list(map(int, input().split()))
a, b, c = inp
print('%d eh o maior' % (maior(a, maior(b, c))))
|
##
# Copyright (c) 2016, Microsoft Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##
# This is a sample tool and should not be used in production environments.
#
# This tool takes in sample certificates (.cer files) and outputs a .h file containing the
# certificates.
###
import re
import sys
print "This is a sample tool and should not be used in production environments\n"
raw_input('Press any key to continue . . .\n')
### Parse input parameters ###
if len(sys.argv) == 1 or sys.argv[1] == "-h" or sys.argv[1] == "-H" or sys.argv[1] == "-?":
print "This tool creates Certs.h with one or more certificates\n"
print "usage: ConvertCerToH.py <CertFiles...>"
print "example: ConvertCerToH.py SAMPLE_DEVELOPMENT.cer SAMPLE_PRODUCTION.cer"
print "example: ConvertCerToH.py SAMPLE_DEVELOPMENT1.cer SAMPLE_DEVELOPMENT2.cer SAMPLE_PRODUCTION.cer"
print "example: ConvertCerToH.py SAMPLE_PRODUCTION.cer"
sys.exit(-1)
if len(sys.argv) > 11:
print "Error: Currently limiting number of certificates to 10"
print "usage: ConvertCerToH.py <CertFiles...>"
sys.exit(-1)
### Process Certificates ###
Certs = []
sys.argv.remove(sys.argv[0])
for fileName in sys.argv:
print "Processing", fileName
# Open cert file
file = open(fileName, "rb")
# Read binary file
Cert = file.read()
# Close cert file
file.close()
CertHex = map(hex,map(ord,Cert))
Cert = re.sub(r'\'|\[|\]', "", str(CertHex))
Certs.append(Cert)
### Write certificates to Certs.h ###
# Open header file
HeaderFile = open("Certs.h", "w")
HeaderFile.write("//\n")
HeaderFile.write("// Certs.h\n")
HeaderFile.write("//\n\n")
HeaderFile.write("//\n")
HeaderFile.write("// These are the binary DER encoded Product Key certificates \n")
HeaderFile.write("// used to sign the UEFI capsule payload.\n")
HeaderFile.write("//\n\n")
index = 1
for Cert in Certs:
HeaderFile.write("CONST UINT8 CapsulePublicKeyCert"+str(index)+"[] =\n")
HeaderFile.write("{\n")
HeaderFile.write(Cert)
HeaderFile.write("\n};\n\n")
index = index + 1
HeaderFile.write("CONST CAPSULE_VERIFICATION_CERTIFICATE CapsuleVerifyCertificates[] = {\n")
index = 1
for Cert in Certs:
HeaderFile.write(" {CapsulePublicKeyCert"+str(index)+", sizeof(CapsulePublicKeyCert"+str(index)+")},\n")
index = index + 1
HeaderFile.write("};\n\n")
HeaderFile.write("CONST CAPSULE_VERIFICATION_CERTIFICATE_LIST CapsuleVerifyCertificateList = {\n")
HeaderFile.write(" sizeof(CapsuleVerifyCertificates)/sizeof(CAPSULE_VERIFICATION_CERTIFICATE),\n")
HeaderFile.write(" CapsuleVerifyCertificates\n")
HeaderFile.write("};\n\n")
# Close header file
HeaderFile.close()
print "\nCopy the output file Certs.h to folder MsSampleFmpDevicePkg\Library\CapsuleKeyBaseLib"
|
"""
Import data from dataset, and preprocess it.
To use take advantage of the preprocess in this file, simply import this file to
your python code and call `train_table, valid_table = dataset.preprocess()` to get
all images in one table with their labels and metadata.
Once you have the training and validation table, you pass them into the utility
methods to select the dataset you need, and use `dataset.load_images(result_table)`
to output the actual images and labels as ndarray. You can also use
`dataset.resize_img(imgs, img_size)` to resize your images to desired image size.
"""
import imghdr
import math
import os
import re
import keras
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import util
IMG_SIZE = 512
DATA_VIR = "1.1"
DATA_DIR = os.path.abspath(__file__) # ?/dataset.py
ROOT_DIR = os.path.dirname(DATA_DIR) # ?/
DATA_DIR = os.path.join(
ROOT_DIR,
"dataset",
"MURA-v" + DATA_VIR,
) # ?/dataset/MURA-v1.1/
TRAIN_DIR = os.path.join(DATA_DIR, "train")
VALID_DIR = os.path.join(DATA_DIR, "valid")
BPARTS = ["elbow", "finger", "forearm", "hand", "humerus", "shoulder", "wrist"]
def load_dataframe():
"""
Import csv files into Dataframes.
:return:
"""
train_labeled = pd.read_csv(
os.path.join(DATA_DIR, "train_labeled_studies.csv"),
names=["patient", "label"]
)
valid_labeled = pd.read_csv(
os.path.join(DATA_DIR, "valid_labeled_studies.csv"),
names=["patient", "label"]
)
# import image paths
train_path = pd.read_csv(
os.path.join(DATA_DIR, "train_image_paths.csv"),
names=["path"]
)
valid_path = pd.read_csv(
os.path.join(DATA_DIR, "valid_image_paths.csv"),
names=["path"]
)
return train_labeled, valid_labeled, train_path, valid_path
def classify_bpart(data):
"""
    Classify each image by the body part that appears in its path.
    Also add the body part as a new feature of the dataset.
:param data: dataset to process.
:return:
"""
for bpart in BPARTS:
data.loc[data["path"].str.contains(bpart.upper()), "body_part"] = bpart
def complete_path(data, column):
"""
Convert relative image path to absolute path so that the execution does not depend
on working directory. Also clean up the patient name
:param data: dataset to process.
:param column: column to perform the operation.
:return:
"""
data[column] = np.where(
data[column].str.startswith("MURA-v" + DATA_VIR),
data[column].str.replace("MURA-v" + DATA_VIR, DATA_DIR),
data[column]
)
def extract_study(row):
"""
Callback function to generate a column for unique patient-study combo.
:param row: a row from processing table
:return:
"""
match = re.search("study\d+", row["path"])
if match:
study = match.group()
return "{}-{}-{}".format(row["patient"], row["body_part"], study)
else:
raise ValueError("study not found in " + row["path"])
def get_patient(row):
"""
    Callback function to check if the image column is a valid path,
and grab the parent directory if it is.
:param row: a row from processing table
:return:
"""
try:
img_type = imghdr.what(row["path"])
except IsADirectoryError:
img_type = None
if img_type:
return os.path.dirname(row["path"]) + "/"
return row["patient"]
def build_dataframe(df_label, df_path):
"""
Build datasets by combining image paths with labels, so that we have a dataframe
where each row is an image and has the patient it belongs to, as well as the label
:param df_label: labeled dataset.
:param df_path: image paths.
:return: training table, validation table
"""
df_label = df_label.copy(deep=True)
df_path = df_path.copy(deep=True)
complete_path(df_path, "path")
complete_path(df_label, "patient")
# Apply a transformation over each row to save image directory as a new column
df_path["patient"] = df_path.apply(get_patient, axis=1)
# Merge two table on patient column
result = df_path.merge(df_label, on="patient")
classify_bpart(result)
# change .../patient00001/... to patient00001
result["patient"] = result["patient"].str.extract("(patient\d{5})")
# Apply a transformation over each row to create a column for unique
# patient-bpart-study combo
result["study"] = result.apply(extract_study, axis=1)
return result
def preprocess():
"""
Preprocess datasets.
:return: training set, validation set
"""
train_labeled, valid_labeled, train_path, valid_path = load_dataframe()
df_train = build_dataframe(train_labeled, train_path)
df_valid = build_dataframe(valid_labeled, valid_path)
return df_train, df_valid
#################################
#       Utility Functions       #
#################################
def pick_bpart(df, bpart):
"""
Create a sub dataset of particular body part.
:param df: dataframe to process
:param bpart: body part to extract
:return: trimmed dataframe
"""
if bpart == "all":
return df
return df[df["body_part"] == bpart].reset_index()
def pick_n_per_patient(df, num):
"""
    Create a sub dataset that picks the first n images from each patient. Will raise a
    ValueError if num is greater than the minimal count.
:param df: dataframe to process
:param num: number of images to pick from each patient. if set to 0, then pick all.
:return: trimmed dataframe
"""
if num == 0:
return df
min_count = df.groupby("study")["path"].count().min()
if num > min_count:
raise ValueError("num is greater than minimum count of images per patient: {}".format(
min_count
))
result = pd.DataFrame()
for study in df["study"].unique():
result = result.append(df[df["study"] == study][:num])
return result.reset_index()
def zero_pad(img):
"""
Add black padding to the image.
for each side of the image, each colour channel shall be padded with 0s of size
(512 - image_width/height)/2 on each end, so that the image stays in the center,
and is surrounded with black.
:param img: Image to process in nparray.
:return: Processed image.
"""
result = np.zeros((IMG_SIZE, IMG_SIZE, img.shape[2]))
horz_start = int((IMG_SIZE - img.shape[0]) / 2)
horz_cord = range(horz_start, horz_start + img.shape[0])
vert_start = int((IMG_SIZE - img.shape[1]) / 2)
vert_cord = range(vert_start, vert_start + img.shape[1])
result[np.ix_(horz_cord, vert_cord, range(img.shape[2]))] = img.reshape(
(img.shape[0], img.shape[1], img.shape[2])
)
return result
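# Worked example of the padding arithmetic above (illustrative numbers): for a
# 320x400 RGB image, horz_start = (512 - 320) // 2 = 96 and vert_start =
# (512 - 400) // 2 = 56, so the image occupies rows 96-415 and columns 56-455
# of the 512x512 result, and every other pixel stays 0 (black).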
def load_image(img_path, is_grayscale=False):
"""
Load a single image into a ndarray.
Args:
img_path:
path to the image
is_grayscale:
if load the image to grayscale or RGB
Returns: image as ndarray
"""
im = keras.preprocessing.image.load_img(img_path, grayscale=is_grayscale)
im = keras.preprocessing.image.img_to_array(im) # converts image to numpy array
return zero_pad(im)
def plot_first_n_img(imgs, num=9):
"""
Plot first n images from the given list.
:param imgs: ndarry of images
:param num: number of images to show
:return:
"""
n_row = int(math.sqrt(num))
n_col = math.ceil(math.sqrt(num))
plt.figure(1)
plt.tight_layout()
for i in range(num):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(imgs[i, :, :, 0], cmap='gray')
def save_img(img, filename):
"""
Utility method that convert a ndarray into image and save to a image file.
Args:
img: image in ndarray
filename: target filename including path
Returns:
"""
img = keras.preprocessing.image.array_to_img(img)
try:
img.save(filename)
except FileNotFoundError:
util.create_dir(os.path.dirname(filename))
img.save(filename)
def resize_img(img, size):
"""
Given a list of images in ndarray, resize them into target size.
Args:
img: Input image in ndarray
size: Target image size
Returns: Resized images in ndarray
"""
img = scipy.misc.imresize(img, (size, size))
if len(img.shape) == 2:
img = img.reshape((size, size, 1))
return img
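
# A minimal end-to-end sketch of the workflow described in the module docstring.
# The body part and target size below are illustrative choices, not defaults.
if __name__ == "__main__":
    df_train, df_valid = preprocess()
    wrist_train = pick_bpart(df_train, "wrist")       # keep only wrist studies
    sample = pick_n_per_patient(wrist_train, 1)       # first image of each study
    first_img = load_image(sample["path"].iloc[0])
    small = resize_img(first_img, 256)                # downsample to 256x256
    print(sample.shape, first_img.shape, small.shape)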
|
import asyncio
from playwright.async_api import async_playwright
from urllib.parse import urlparse
from pathlib import Path
'''
popup
page.onDialog(dialog -> {
assertEquals("alert", dialog.type());
assertEquals("", dialog.defaultValue());
assertEquals("yo", dialog.message());
dialog.accept();
});
page.evaluate("alert('yo')");
https://python.plainenglish.io/handling-new-windows-with-python-and-playwright-c223a1e846d9
'''
async def handle_popup(popup):
await popup.wait_for_load_state()
print(await popup.title())
async def handle_dialog(dialog):
print(f' dialog is {dialog.message}, {dialog.type}')
await dialog.accept()
def dump_frame_tree(frame, indent):
print(indent + frame.name + '@' + frame.url)
for child in frame.child_frames:
dump_frame_tree(child, indent + " ")
def handle_page(page):
page.wait_for_load_state()
print(f'handle page {page.url}')
async def run(*, browser, url_list: list, output_dir: Path) -> None:
for url in url_list:
url_p = urlparse(url)
screenshot_file = url_p.netloc + '_' + url_p.path.replace('/', '_') + '.png'
abs_path = output_dir / screenshot_file
context = await browser.new_context()
page = await context.new_page()
page.on("request", lambda request: print(f'req url={request.url}'))
        page.set_default_timeout(300 * 60)  # it's in ms
await page.goto(url, wait_until="networkidle")
await page.click('text="Continue"')
await page.click('text="Continue"')
await page.click('text="Continue"')
await page.click('text="Continue"')
page.on("popup", handle_popup)
page.on("dialog", handle_dialog)
page.on("page", handle_page)
dump_frame_tree(page.main_frame, "--- ")
print(f'page opening is {await page.opener()}')
# for commerce.gov.in await page.click("img[alt=\"nav-closed\"]")
await page.screenshot(path=str(abs_path), full_page=True)
await context.close()
# https://github.com/microsoft/playwright/issues/3151
# https://playwright.dev/python/docs/api/class-browser/#browser-new-context
# cnn.com takes looong time to load
# espncricinfo.com - nothing found
done_url_list = [
'https://gst.gov.in',
'https://unifiedportal-mem.epfindia.gov.in/',
]
async def main():
cur = Path.cwd()
output_dir = cur / 'screenshots'
output_dir.mkdir(exist_ok=True)
async with async_playwright() as p:
browser = await p.chromium.launch(headless=False)
url_list = [
'https://www.tin-nsdl.com/',
'https://dol.ny.gov/unemployment-insurance-rate-information',
]
await run(browser = browser, url_list = url_list, output_dir = output_dir)
await browser.close()
asyncio.run(main())
|
LEFT = complex(0, 1)
RIGHT = complex(0, -1)
def read_infect_map(filename):
with open(filename) as f:
infect_map = dict()
for i, line in enumerate(f):
for j, c in enumerate(line):
if c == '#':
infect_map[complex(i, j)] = 'I'
return infect_map
def activity(initial_infection, steps=10000000):
infection = dict(initial_infection)
direction = complex(-1, 0)
position = complex(12, 12)
infections = 0
for i in range(steps):
status = infection.get(position, '')
        if status == '':
            # clean node: turn left
            direction *= LEFT
        elif status == 'W':
            # weakened node: keep moving in the same direction
            pass
        elif status == 'I':
            # infected node: turn right
            direction *= RIGHT
        else:
            # flagged node: reverse direction (two right turns)
            direction *= RIGHT
            direction *= RIGHT
if status == '':
infection[position] = 'W'
elif status == 'W':
infection[position] = 'I'
infections += 1
elif status == 'I':
infection[position] = 'F'
else:
del infection[position]
position += direction
return infections
infect_map = read_infect_map('input-22.txt')
print(activity(infect_map))
|