import numpy as np
import torch.nn as nn
import torch
from .shared import Conv_Block
from collections import OrderedDict
class Conv_LSTM_Cell(nn.Module):
def __init__(self, input_dim, num_conv_layers, num_conv_layers_mem, hidden_dim, kernel_size, dilation_rate):
"""
Initialize ConvLSTM cell.
Parameters
----------
input_dim: int
Number of channels of input tensor.
num_conv_layers: int
Number of convolutional blocks within the cell
num_conv_layers_mem: int
Number of convolutional blocks for the weight matrices that perform a Hadamard product with the current memory
(should be much lower than num_conv_layers)
hidden_dim: int
Number of channels of hidden state.
kernel_size: (int, int)
Size of the convolutional kernel.
dilation_rate: int
Dilation rate used in the convolutional blocks.
"""
super(Conv_LSTM_Cell, self).__init__()
self.input_dim = input_dim
self.hidden_dim = hidden_dim
self.dilation_rate = dilation_rate
self.num_conv_layers = num_conv_layers
self.num_conv_layers_mem = num_conv_layers_mem
self.kernel_size = kernel_size
self.conv_block = Conv_Block(in_channels=self.input_dim + self.hidden_dim,
out_channels=4*self.hidden_dim,
dilation_rate=self.dilation_rate,
num_conv_layers=self.num_conv_layers,
kernel_size=self.kernel_size)
self.conv_block_mem = Conv_Block(in_channels=self.input_dim + 2*self.hidden_dim,
out_channels=3*self.hidden_dim,
dilation_rate=self.dilation_rate,
num_conv_layers=self.num_conv_layers_mem,
kernel_size=self.kernel_size)
def forward(self, input_tensor, cur_state):
h_cur, c_cur = cur_state
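# Peephole-style ConvLSTM update: the i/f/o gates receive an extra term, namely convolutionally
# computed weights (w_i, w_f, w_o) multiplied elementwise with the current memory c_cur,
# while g is the candidate cell update.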
combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis
combined_conv = self.conv_block(combined)
combined_conv_weights = self.conv_block_mem(torch.concat([combined, c_cur], dim=1))
cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
w_i, w_f, w_o = torch.split(combined_conv_weights, self.hidden_dim, dim=1)
i = torch.sigmoid(cc_i + w_i * c_cur)
f = torch.sigmoid(cc_f + w_f * c_cur)
o = torch.sigmoid(cc_o + w_o * c_cur)
g = torch.tanh(cc_g)
c_next = f * c_cur + i * g
h_next = o * torch.tanh(c_next)
return h_next, c_next
def init_hidden(self, batch_size, image_size):
height, width = image_size
return (torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv_block_mem.in_mid_conv.weight.device),
torch.zeros(batch_size, self.hidden_dim, height, width, device=self.conv_block_mem.in_mid_conv.weight.device))
class Conv_LSTM(nn.Module):
"""
Parameters:
input_dim: Number of channels in input
hidden_dim: Number of hidden channels
kernel_size: Size of kernel in convolutions
num_conv_layers: Number of convolutional layers within the cell
num_conv_layers_mem: Number of convolutional blocks for the weight matrices that perform a
Hadamard product with the current memory (should be much lower than num_conv_layers)
dilation_rate: Dilation rate (spacing between kernel elements) used in the convolutions
num_layers: Number of LSTM layers stacked on each other
batch_first: Whether or not dimension 0 is the batch dimension
Note: Will do same padding.
Input:
A tensor of shape (b, c, w, h, t)
Output:
The residual from the mean cube
"""
def __init__(self, input_dim, hidden_dim, kernel_size, num_conv_layers, num_conv_layers_mem,
num_layers, dilation_rate, batch_first=False, baseline="mean_cube"):
super(Conv_LSTM, self).__init__()
self._check_kernel_size_consistency(kernel_size)
# Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
if not len(kernel_size) == len(hidden_dim) == num_layers:
raise ValueError('Inconsistent list length.')
self.input_dim = input_dim # n of channels in input pics
self.hidden_dim = hidden_dim # n of channels that go through hidden layers
self.kernel_size = kernel_size # n kernel size (no magic here)
self.num_layers = num_layers # n of cells in time
self.batch_first = batch_first # True if dimension 0 of the input is the batch dimension
self.dilation_rate = dilation_rate
self.num_conv_layers = num_conv_layers
self.num_conv_layers_mem = num_conv_layers_mem
cell_list = []
for i in range(0, self.num_layers):
cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i - 1]
cell_list.append(Conv_LSTM_Cell(input_dim=cur_input_dim,
hidden_dim=self.hidden_dim[i],
kernel_size=self.kernel_size[i],
num_conv_layers=self.num_conv_layers,
num_conv_layers_mem=self.num_conv_layers_mem,
dilation_rate=self.dilation_rate))
self.cell_list = nn.ModuleList(cell_list)
self.baseline = baseline
def forward(self, input_tensor, baseline, non_pred_feat=None, prediction_count=1):
"""
Parameters
----------
input_tensor:
5-D tensor of shape (b, c, w, h, t), where b is the batch size, c the number of channels,
w the width, h the height and t the number of time steps.
non_pred_feat:
non-predictive features for the future frames
baseline:
baseline computed on the input variables; it is added to the predicted delta to form each prediction.
Returns
-------
pred_deltas
"""
#TODO: Make code slimmer (there are some redundancies)
b, _, w, h, _ = input_tensor.size()
hidden_state = self._init_hidden(batch_size=b, image_size=(w, h)) # hidden states must share the spatial dims of the input frames
layer_output_list = []
last_state_list = []
last_memory_list = []
seq_len = input_tensor.size(-1)
cur_layer_input = input_tensor
for layer_idx in range(self.num_layers):
h, c = hidden_state[layer_idx]
output_inner = []
for t in range(seq_len):
h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, :, :, :, t], cur_state=[h, c])
output_inner.append(h)
layer_output = torch.stack(output_inner, dim=-1)
cur_layer_input = layer_output
layer_output_list.append(layer_output)
last_state_list.append(h)
last_memory_list.append(c)
pred_deltas = [layer_output_list[-1][:, :, :, :, -1]]
baselines = [baseline]
predictions = [baseline + layer_output_list[-1][:, :, :, :, -1]]
# allow for multiple pred_deltas in a self feedback manner
if prediction_count > 1:
if non_pred_feat is None:
raise ValueError('If prediction_count > 1, you need to provide non-prediction features for the '
'future time steps!')
non_pred_feat = torch.cat((torch.zeros((non_pred_feat.shape[0],
1,
non_pred_feat.shape[2],
non_pred_feat.shape[3],
non_pred_feat.shape[4]), device=non_pred_feat.device), non_pred_feat), dim = 1)
# output from layer beneath which for the lowest layer is the prediction from the previous time step
prev = predictions[0]
# update the baseline & glue together predicted + given channels
if self.baseline == "mean_cube":
baseline = 1/(seq_len + 1) * (prev + (baseline * seq_len))
else:
baseline = prev # We don't predict image quality, so we just feed in the last prediction
prev = torch.cat((prev, non_pred_feat[:,:,:,:,0]), axis=1)
for counter in range(prediction_count - 1):
for layer_idx in range(self.num_layers):
h, c = self.cell_list[layer_idx](input_tensor=prev, cur_state=[last_state_list[layer_idx],
last_memory_list[layer_idx]])
prev = h
last_state_list[layer_idx] = h
last_memory_list[layer_idx] = c
# in the last layer, make prediction
if layer_idx == (self.num_layers - 1):
pred_deltas.append(h)
baselines.append(baseline)
# next predicted entire image
prediction = baseline + h
predictions.append(prediction)
# update the baseline & glue together predicted + given channels
if self.baseline == "mean_cube":
baseline = 1/(seq_len + 1) * (prev + (baseline * seq_len))
else:
baseline = prev # We don't predict image quality, so we just feed in the last prediction
prev = torch.cat((prediction, non_pred_feat[:, :, :, :, counter]), axis=1)
return predictions, pred_deltas, baselines
def _init_hidden(self, batch_size, image_size):
init_states = []
for i in range(self.num_layers):
init_states.append(self.cell_list[i].init_hidden(batch_size, image_size))
return init_states
@staticmethod
def _check_kernel_size_consistency(kernel_size):
if not (isinstance(kernel_size, tuple) or
(isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
raise ValueError('`kernel_size` must be tuple or list of tuples')
@staticmethod
def _extend_for_multilayer(param, num_layers):
if not isinstance(param, list):
param = [param] * num_layers
return param |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 04/01/2020
"""
import gym
import numpy
from gym.spaces import Discrete
__all__ = ["NormalisedActions"]
class NormalisedActions(gym.ActionWrapper):
def reverse_action(self, a: numpy.ndarray) -> numpy.ndarray:
""" """
if isinstance(self.env.action_space, Discrete):
return a.item()
low = self.env.action_space.low
high = self.env.action_space.high
a = 2 * (a - low) / (high - low) - 1
# after rescaling into [-1, 1], clip to that range (not to the env's raw bounds)
return numpy.clip(a, -1.0, 1.0)
def action(self, a: numpy.ndarray) -> numpy.ndarray:
""" """
if isinstance(self.env.action_space, Discrete):
return a.item()
low = self.env.action_space.low
high = self.env.action_space.high
a = low + (a + 1.0) * 0.5 * (high - low)
return numpy.clip(a, low, high)
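# Usage sketch (assumes a classic-gym continuous-action env such as "Pendulum-v1"):
#   env = NormalisedActions(gym.make("Pendulum-v1"))
#   env.reset()
#   env.step(numpy.array([0.0]))  # agent acts in [-1, 1]; action() rescales into the env's range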
|
print(__file__)
import psutil
import nslsii
# warn the user if there is another bsui running
def get_bsui_processes():
bsui_processes = []
for process in psutil.process_iter():
if "bsui" in process.name():
bsui_processes.append(process)
return bsui_processes
bsui_processes = get_bsui_processes()
if len(bsui_processes) > 1:
print("WARNING: more than one bsui process is running!")
print("\n".join([str(process) for process in bsui_processes]))
input("Press CTRL-C to quit (recommended) or ENTER to continue")
import ophyd
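# older ophyd releases do not have set_default_timeout; the AttributeError is ignored below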
try:
ophyd.signal.EpicsSignalBase.set_default_timeout(timeout=60, connection_timeout=60)
except AttributeError:
pass
beamline_id = 'qas'
from databroker.v0 import Broker
db = Broker.named(beamline_id)
nslsii.configure_base(get_ipython().user_ns, db, bec=False)
# nslsii.configure_base(get_ipython().user_ns, beamline_id, bec=False)
# At the end of every run, verify that files were saved and
# print a confirmation message.
from bluesky.callbacks.broker import verify_files_saved
# RE.subscribe(post_run(verify_files_saved), 'stop')
# Optional: set any metadata that rarely changes.
# convenience imports
from bluesky.callbacks import *
from bluesky.callbacks.broker import *
from bluesky.simulators import *
from bluesky.plans import *
import numpy as np
from pyOlog.ophyd_tools import *
import os
import sys
from datetime import datetime
import functools
from bluesky.utils import ts_msg_hook
# The logs will be saved to the profile dir.
profile_startup_dir = get_ipython().profile_dir.startup_dir
# The name of the log file consists of the beamline id and the timestamp at the
# startup of bsui, so that log file names do not collide.
#log_filename = f'{beamline_id}-bsui-{datetime.now().strftime("%Y%m%d%H%M%S")}.log'
#log_filename = os.path.join(profile_startup_dir, log_filename)
#print(f'\n!!! The logs will be written to {log_filename} !!!\n')
#file = open(log_filename, 'a')
#func = functools.partial(ts_msg_hook, file=file)
#RE.msg_hook = func
RE.msg_hook = ts_msg_hook
#import caproto
import logging
logging.getLogger('caproto').setLevel('ERROR')
logging.getLogger('caproto.ch').setLevel('ERROR')
# caproto_log = os.path.join(profile_startup_dir, f'{beamline_id}-caproto-{datetime.now().strftime("%Y%m%d%H%M%S")}.log')
# caproto.set_handler(file=caproto_log)
# logging.getLogger('bluesky').setLevel('NOTSET')
# import bluesky
# bluesky_log = os.path.join(profile_startup_dir, f'{beamline_id}-bluesky-{datetime.now().strftime("%Y%m%d%H%M%S")}.log')
# bluesky.set_handler(file=bluesky_log)
# print(f'\nThe caproto logs will be written to {caproto_log}')
# print(f'The bluesky logs will be written to {bluesky_log}\n')
ROOT_PATH = '/nsls2/xf07bm'
RAW_FILEPATH = 'data'
USER_FILEPATH = 'users'
#def print_to_gui(string, stdout=sys.stdout):
# print(string, file=stdout, flush=True)
from pathlib import Path
import appdirs
try:
from bluesky.utils import PersistentDict
except ImportError:
import msgpack
import msgpack_numpy
import zict
class PersistentDict(zict.Func):
"""
A MutableMapping which syncs its contents to disk.
The contents are stored as msgpack-serialized files, with one file per item
in the mapping.
Note that when an item is *mutated* it is not immediately synced:
>>> d['sample'] = {"color": "red"} # immediately synced
>>> d['sample']['shape'] = 'bar' # not immediately synced
but that the full contents are synced to disk when the PersistentDict
instance is garbage collected.
"""
def __init__(self, directory):
self._directory = directory
self._file = zict.File(directory)
self._cache = {}
super().__init__(self._dump, self._load, self._file)
self.reload()
# Similar to flush() or _do_update(), but without reference to self
# to avoid circular reference preventing collection.
# NOTE: This still doesn't guarantee call on delete or gc.collect()!
# Explicitly call flush() if immediate write to disk required.
def finalize(zfile, cache, dump):
zfile.update((k, dump(v)) for k, v in cache.items())
import weakref
self._finalizer = weakref.finalize(
self, finalize, self._file, self._cache, PersistentDict._dump)
@property
def directory(self):
return self._directory
def __setitem__(self, key, value):
self._cache[key] = value
super().__setitem__(key, value)
def __getitem__(self, key):
return self._cache[key]
def __delitem__(self, key):
del self._cache[key]
super().__delitem__(key)
def __repr__(self):
return f"<{self.__class__.__name__} {dict(self)!r}>"
@staticmethod
def _dump(obj):
"Encode as msgpack using numpy-aware encoder."
# See https://github.com/msgpack/msgpack-python#string-and-binary-type
# for more on use_bin_type.
return msgpack.packb(
obj,
default=msgpack_numpy.encode,
use_bin_type=True)
@staticmethod
def _load(file):
return msgpack.unpackb(
file,
object_hook=msgpack_numpy.decode,
raw=False)
def flush(self):
"""Force a write of the current state to disk"""
for k, v in self.items():
super().__setitem__(k, v)
def reload(self):
"""Force a reload from disk, overwriting current cache"""
self._cache = dict(super().items())
runengine_metadata_dir = appdirs.user_data_dir(appname="bluesky") / Path("runengine-metadata")
# PersistentDict will create the directory if it does not exist
RE.md = PersistentDict(runengine_metadata_dir)
# these should *always* be QAS
RE.md['group'] = beamline_id
RE.md['beamline_id'] = beamline_id.upper()
RE.md['Facility'] = 'NSLS-II'
# RE.md['Mono_pulses_per_deg']=
# isstools reads these
# check these keys exist, if not set to default
keys = ["PI", "PROPOSAL", "SAF", "year", "cycle", "proposal_id"]
defaults = ["No PI", None, None, 2018, 1, None]
for key, default in zip(keys, defaults):
if key not in RE.md:
print("Warning {} not in RE.md.".format(key))
print("Set to default : {}".format(default))
RE.md[key] = default
RE.is_aborted = False
|
ticker = {
'children': 3,
'cats': 7,
'samoyeds': 2,
'pomeranians': 3,
'akitas': 0,
'vizslas': 0,
'goldfish': 5,
'trees': 3,
'cars': 2,
'perfumes': 1
}
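# Advent of Code 2015, day 16 ("Aunt Sue"): find which aunt matches the MFCSAM readout above.
# Each input line looks like "Sue 1: children: 1, cars: 8, vizslas: 7" and is parsed into
# {aunt_number: {property: count}} below.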
aunts = {}
for line in open('input.txt').readlines():
l = line.split()
x = {}
for i in range(2, 8, 2):
x[l[i].replace(':', '')] = int(l[i + 1].replace(',', ''))
aunts[int(l[1].replace(':', ''))] = x
# Part 1
tests = {}
for i in range(1, 501):
j = 0
for element in aunts[i].keys():
if ticker[element] == aunts[i][element]:
j += 1
tests[i] = j
print(max(tests, key=tests.get))
# Part 2
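# In part 2 the cats and trees readings are lower bounds ("greater than") and the
# pomeranians and goldfish readings are upper bounds ("fewer than").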
tests = {}
for i in range(1, 501):
j = 0
for element in aunts[i].keys():
if element == 'cats' or element == 'trees':
if ticker[element] < aunts[i][element]:
j += 1
elif element == 'pomeranians' or element == 'goldfish':
if ticker[element] > aunts[i][element]:
j += 1
else:
if ticker[element] == aunts[i][element]:
j += 1
tests[i] = j
print(max(tests, key=tests.get)) |
#!/usr/bin/env python3
# Read a symbol file and a source file.
# Replace symbol addresses with names in the source file.
# XXX does not handle addresses split into parts, like:
# lui $a0, xxxx
# ori $a0, xxxx
import sys
def readSyms(path):
syms = {}
with open(path) as file:
while True:
line = file.readline()
if line == '': break
line = line.split('=', 1)
if len(line) != 2: continue
name = line[0].strip()
addr = line[1].strip()
addr = addr.split(';')[0]
syms[name] = addr
return syms
def doReplace(syms, asmPath):
with open(asmPath) as file:
while True:
line = file.readline()
if line == '': break
for name, addr in syms.items():
line = line.replace(addr, name)
print(line, end='')
def generateCodFile(syms):
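# Emit one "0xADDRESS,name" line per symbol (a simple .cod-style symbol listing).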
for name, addr in syms.items():
print("0x%08X,%s" % (int(addr, 16), name))
def main(symPath, asmPath=None):
syms = readSyms(symPath)
#doReplace(syms, asmPath)
generateCodFile(syms)
if __name__ == '__main__':
sys.exit(main(*sys.argv[1:]))
|
#
# Autogenerated by Thrift Compiler (0.11.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:new_style,no_utf8strings
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import impala._thrift_gen.ExecStats.ttypes
import impala._thrift_gen.Metrics.ttypes
import impala._thrift_gen.Types.ttypes
from thrift.transport import TTransport
all_structs = []
class TRuntimeProfileFormat(object):
STRING = 0
BASE64 = 1
THRIFT = 2
_VALUES_TO_NAMES = {
0: "STRING",
1: "BASE64",
2: "THRIFT",
}
_NAMES_TO_VALUES = {
"STRING": 0,
"BASE64": 1,
"THRIFT": 2,
}
class TCounter(object):
"""
Attributes:
- name
- unit
- value
"""
def __init__(self, name=None, unit=None, value=None,):
self.name = name
self.unit = unit
self.value = value
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.unit = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.value = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('TCounter')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.unit is not None:
oprot.writeFieldBegin('unit', TType.I32, 2)
oprot.writeI32(self.unit)
oprot.writeFieldEnd()
if self.value is not None:
oprot.writeFieldBegin('value', TType.I64, 3)
oprot.writeI64(self.value)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.name is None:
raise TProtocolException(message='Required field name is unset!')
if self.unit is None:
raise TProtocolException(message='Required field unit is unset!')
if self.value is None:
raise TProtocolException(message='Required field value is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TEventSequence(object):
"""
Attributes:
- name
- timestamps
- labels
"""
def __init__(self, name=None, timestamps=None, labels=None,):
self.name = name
self.timestamps = timestamps
self.labels = labels
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.timestamps = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in range(_size0):
_elem5 = iprot.readI64()
self.timestamps.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.labels = []
(_etype9, _size6) = iprot.readListBegin()
for _i10 in range(_size6):
_elem11 = iprot.readString()
self.labels.append(_elem11)
iprot.readListEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('TEventSequence')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.timestamps is not None:
oprot.writeFieldBegin('timestamps', TType.LIST, 2)
oprot.writeListBegin(TType.I64, len(self.timestamps))
for iter12 in self.timestamps:
oprot.writeI64(iter12)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.labels is not None:
oprot.writeFieldBegin('labels', TType.LIST, 3)
oprot.writeListBegin(TType.STRING, len(self.labels))
for iter13 in self.labels:
oprot.writeString(iter13)
oprot.writeListEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.name is None:
raise TProtocolException(message='Required field name is unset!')
if self.timestamps is None:
raise TProtocolException(message='Required field timestamps is unset!')
if self.labels is None:
raise TProtocolException(message='Required field labels is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TTimeSeriesCounter(object):
"""
Attributes:
- name
- unit
- period_ms
- values
- start_index
"""
def __init__(self, name=None, unit=None, period_ms=None, values=None, start_index=None,):
self.name = name
self.unit = unit
self.period_ms = period_ms
self.values = values
self.start_index = start_index
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.unit = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I32:
self.period_ms = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.LIST:
self.values = []
(_etype17, _size14) = iprot.readListBegin()
for _i18 in range(_size14):
_elem19 = iprot.readI64()
self.values.append(_elem19)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I64:
self.start_index = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('TTimeSeriesCounter')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.unit is not None:
oprot.writeFieldBegin('unit', TType.I32, 2)
oprot.writeI32(self.unit)
oprot.writeFieldEnd()
if self.period_ms is not None:
oprot.writeFieldBegin('period_ms', TType.I32, 3)
oprot.writeI32(self.period_ms)
oprot.writeFieldEnd()
if self.values is not None:
oprot.writeFieldBegin('values', TType.LIST, 4)
oprot.writeListBegin(TType.I64, len(self.values))
for iter20 in self.values:
oprot.writeI64(iter20)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.start_index is not None:
oprot.writeFieldBegin('start_index', TType.I64, 5)
oprot.writeI64(self.start_index)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.name is None:
raise TProtocolException(message='Required field name is unset!')
if self.unit is None:
raise TProtocolException(message='Required field unit is unset!')
if self.period_ms is None:
raise TProtocolException(message='Required field period_ms is unset!')
if self.values is None:
raise TProtocolException(message='Required field values is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TSummaryStatsCounter(object):
"""
Attributes:
- name
- unit
- sum
- total_num_values
- min_value
- max_value
"""
def __init__(self, name=None, unit=None, sum=None, total_num_values=None, min_value=None, max_value=None,):
self.name = name
self.unit = unit
self.sum = sum
self.total_num_values = total_num_values
self.min_value = min_value
self.max_value = max_value
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.unit = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.sum = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.total_num_values = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I64:
self.min_value = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I64:
self.max_value = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('TSummaryStatsCounter')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.unit is not None:
oprot.writeFieldBegin('unit', TType.I32, 2)
oprot.writeI32(self.unit)
oprot.writeFieldEnd()
if self.sum is not None:
oprot.writeFieldBegin('sum', TType.I64, 3)
oprot.writeI64(self.sum)
oprot.writeFieldEnd()
if self.total_num_values is not None:
oprot.writeFieldBegin('total_num_values', TType.I64, 4)
oprot.writeI64(self.total_num_values)
oprot.writeFieldEnd()
if self.min_value is not None:
oprot.writeFieldBegin('min_value', TType.I64, 5)
oprot.writeI64(self.min_value)
oprot.writeFieldEnd()
if self.max_value is not None:
oprot.writeFieldBegin('max_value', TType.I64, 6)
oprot.writeI64(self.max_value)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.name is None:
raise TProtocolException(message='Required field name is unset!')
if self.unit is None:
raise TProtocolException(message='Required field unit is unset!')
if self.sum is None:
raise TProtocolException(message='Required field sum is unset!')
if self.total_num_values is None:
raise TProtocolException(message='Required field total_num_values is unset!')
if self.min_value is None:
raise TProtocolException(message='Required field min_value is unset!')
if self.max_value is None:
raise TProtocolException(message='Required field max_value is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TRuntimeProfileNodeMetadata(object):
"""
Attributes:
- plan_node_id
- data_sink_id
"""
def __init__(self, plan_node_id=None, data_sink_id=None,):
self.plan_node_id = plan_node_id
self.data_sink_id = data_sink_id
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.plan_node_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.data_sink_id = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('TRuntimeProfileNodeMetadata')
if self.plan_node_id is not None:
oprot.writeFieldBegin('plan_node_id', TType.I32, 1)
oprot.writeI32(self.plan_node_id)
oprot.writeFieldEnd()
if self.data_sink_id is not None:
oprot.writeFieldBegin('data_sink_id', TType.I32, 2)
oprot.writeI32(self.data_sink_id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TRuntimeProfileNode(object):
"""
Attributes:
- name
- num_children
- counters
- metadata
- indent
- info_strings
- info_strings_display_order
- child_counters_map
- event_sequences
- time_series_counters
- summary_stats_counters
- node_metadata
"""
def __init__(self, name=None, num_children=None, counters=None, metadata=None, indent=None, info_strings=None, info_strings_display_order=None, child_counters_map=None, event_sequences=None, time_series_counters=None, summary_stats_counters=None, node_metadata=None,):
self.name = name
self.num_children = num_children
self.counters = counters
self.metadata = metadata
self.indent = indent
self.info_strings = info_strings
self.info_strings_display_order = info_strings_display_order
self.child_counters_map = child_counters_map
self.event_sequences = event_sequences
self.time_series_counters = time_series_counters
self.summary_stats_counters = summary_stats_counters
self.node_metadata = node_metadata
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.num_children = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.counters = []
(_etype24, _size21) = iprot.readListBegin()
for _i25 in range(_size21):
_elem26 = TCounter()
_elem26.read(iprot)
self.counters.append(_elem26)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.metadata = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.indent = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.MAP:
self.info_strings = {}
(_ktype28, _vtype29, _size27) = iprot.readMapBegin()
for _i31 in range(_size27):
_key32 = iprot.readString()
_val33 = iprot.readString()
self.info_strings[_key32] = _val33
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.LIST:
self.info_strings_display_order = []
(_etype37, _size34) = iprot.readListBegin()
for _i38 in range(_size34):
_elem39 = iprot.readString()
self.info_strings_display_order.append(_elem39)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.MAP:
self.child_counters_map = {}
(_ktype41, _vtype42, _size40) = iprot.readMapBegin()
for _i44 in range(_size40):
_key45 = iprot.readString()
_val46 = set()
(_etype50, _size47) = iprot.readSetBegin()
for _i51 in range(_size47):
_elem52 = iprot.readString()
_val46.add(_elem52)
iprot.readSetEnd()
self.child_counters_map[_key45] = _val46
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.LIST:
self.event_sequences = []
(_etype56, _size53) = iprot.readListBegin()
for _i57 in range(_size53):
_elem58 = TEventSequence()
_elem58.read(iprot)
self.event_sequences.append(_elem58)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 10:
if ftype == TType.LIST:
self.time_series_counters = []
(_etype62, _size59) = iprot.readListBegin()
for _i63 in range(_size59):
_elem64 = TTimeSeriesCounter()
_elem64.read(iprot)
self.time_series_counters.append(_elem64)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 11:
if ftype == TType.LIST:
self.summary_stats_counters = []
(_etype68, _size65) = iprot.readListBegin()
for _i69 in range(_size65):
_elem70 = TSummaryStatsCounter()
_elem70.read(iprot)
self.summary_stats_counters.append(_elem70)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 12:
if ftype == TType.STRUCT:
self.node_metadata = TRuntimeProfileNodeMetadata()
self.node_metadata.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('TRuntimeProfileNode')
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
if self.num_children is not None:
oprot.writeFieldBegin('num_children', TType.I32, 2)
oprot.writeI32(self.num_children)
oprot.writeFieldEnd()
if self.counters is not None:
oprot.writeFieldBegin('counters', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.counters))
for iter71 in self.counters:
iter71.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.metadata is not None:
oprot.writeFieldBegin('metadata', TType.I64, 4)
oprot.writeI64(self.metadata)
oprot.writeFieldEnd()
if self.indent is not None:
oprot.writeFieldBegin('indent', TType.BOOL, 5)
oprot.writeBool(self.indent)
oprot.writeFieldEnd()
if self.info_strings is not None:
oprot.writeFieldBegin('info_strings', TType.MAP, 6)
oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.info_strings))
for kiter72, viter73 in self.info_strings.items():
oprot.writeString(kiter72)
oprot.writeString(viter73)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.info_strings_display_order is not None:
oprot.writeFieldBegin('info_strings_display_order', TType.LIST, 7)
oprot.writeListBegin(TType.STRING, len(self.info_strings_display_order))
for iter74 in self.info_strings_display_order:
oprot.writeString(iter74)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.child_counters_map is not None:
oprot.writeFieldBegin('child_counters_map', TType.MAP, 8)
oprot.writeMapBegin(TType.STRING, TType.SET, len(self.child_counters_map))
for kiter75, viter76 in self.child_counters_map.items():
oprot.writeString(kiter75)
oprot.writeSetBegin(TType.STRING, len(viter76))
for iter77 in viter76:
oprot.writeString(iter77)
oprot.writeSetEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.event_sequences is not None:
oprot.writeFieldBegin('event_sequences', TType.LIST, 9)
oprot.writeListBegin(TType.STRUCT, len(self.event_sequences))
for iter78 in self.event_sequences:
iter78.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.time_series_counters is not None:
oprot.writeFieldBegin('time_series_counters', TType.LIST, 10)
oprot.writeListBegin(TType.STRUCT, len(self.time_series_counters))
for iter79 in self.time_series_counters:
iter79.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.summary_stats_counters is not None:
oprot.writeFieldBegin('summary_stats_counters', TType.LIST, 11)
oprot.writeListBegin(TType.STRUCT, len(self.summary_stats_counters))
for iter80 in self.summary_stats_counters:
iter80.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.node_metadata is not None:
oprot.writeFieldBegin('node_metadata', TType.STRUCT, 12)
self.node_metadata.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.name is None:
raise TProtocolException(message='Required field name is unset!')
if self.num_children is None:
raise TProtocolException(message='Required field num_children is unset!')
if self.counters is None:
raise TProtocolException(message='Required field counters is unset!')
if self.metadata is None:
raise TProtocolException(message='Required field metadata is unset!')
if self.indent is None:
raise TProtocolException(message='Required field indent is unset!')
if self.info_strings is None:
raise TProtocolException(message='Required field info_strings is unset!')
if self.info_strings_display_order is None:
raise TProtocolException(message='Required field info_strings_display_order is unset!')
if self.child_counters_map is None:
raise TProtocolException(message='Required field child_counters_map is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TRuntimeProfileTree(object):
"""
Attributes:
- nodes
- exec_summary
"""
def __init__(self, nodes=None, exec_summary=None,):
self.nodes = nodes
self.exec_summary = exec_summary
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.nodes = []
(_etype84, _size81) = iprot.readListBegin()
for _i85 in range(_size81):
_elem86 = TRuntimeProfileNode()
_elem86.read(iprot)
self.nodes.append(_elem86)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.exec_summary = impala._thrift_gen.ExecStats.ttypes.TExecSummary()
self.exec_summary.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('TRuntimeProfileTree')
if self.nodes is not None:
oprot.writeFieldBegin('nodes', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.nodes))
for iter87 in self.nodes:
iter87.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.exec_summary is not None:
oprot.writeFieldBegin('exec_summary', TType.STRUCT, 2)
self.exec_summary.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.nodes is None:
raise TProtocolException(message='Required field nodes is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TRuntimeProfileForest(object):
"""
Attributes:
- profile_trees
- host_profile
"""
def __init__(self, profile_trees=None, host_profile=None,):
self.profile_trees = profile_trees
self.host_profile = host_profile
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.LIST:
self.profile_trees = []
(_etype91, _size88) = iprot.readListBegin()
for _i92 in range(_size88):
_elem93 = TRuntimeProfileTree()
_elem93.read(iprot)
self.profile_trees.append(_elem93)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.host_profile = TRuntimeProfileTree()
self.host_profile.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('TRuntimeProfileForest')
if self.profile_trees is not None:
oprot.writeFieldBegin('profile_trees', TType.LIST, 1)
oprot.writeListBegin(TType.STRUCT, len(self.profile_trees))
for iter94 in self.profile_trees:
iter94.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.host_profile is not None:
oprot.writeFieldBegin('host_profile', TType.STRUCT, 2)
self.host_profile.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.profile_trees is None:
raise TProtocolException(message='Required field profile_trees is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
all_structs.append(TCounter)
TCounter.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.I32, 'unit', None, None, ), # 2
(3, TType.I64, 'value', None, None, ), # 3
)
all_structs.append(TEventSequence)
TEventSequence.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.LIST, 'timestamps', (TType.I64, None, False), None, ), # 2
(3, TType.LIST, 'labels', (TType.STRING, None, False), None, ), # 3
)
all_structs.append(TTimeSeriesCounter)
TTimeSeriesCounter.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.I32, 'unit', None, None, ), # 2
(3, TType.I32, 'period_ms', None, None, ), # 3
(4, TType.LIST, 'values', (TType.I64, None, False), None, ), # 4
(5, TType.I64, 'start_index', None, None, ), # 5
)
all_structs.append(TSummaryStatsCounter)
TSummaryStatsCounter.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.I32, 'unit', None, None, ), # 2
(3, TType.I64, 'sum', None, None, ), # 3
(4, TType.I64, 'total_num_values', None, None, ), # 4
(5, TType.I64, 'min_value', None, None, ), # 5
(6, TType.I64, 'max_value', None, None, ), # 6
)
all_structs.append(TRuntimeProfileNodeMetadata)
TRuntimeProfileNodeMetadata.thrift_spec = (
None, # 0
(1, TType.I32, 'plan_node_id', None, None, ), # 1
(2, TType.I32, 'data_sink_id', None, None, ), # 2
)
all_structs.append(TRuntimeProfileNode)
TRuntimeProfileNode.thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
(2, TType.I32, 'num_children', None, None, ), # 2
(3, TType.LIST, 'counters', (TType.STRUCT, [TCounter, None], False), None, ), # 3
(4, TType.I64, 'metadata', None, None, ), # 4
(5, TType.BOOL, 'indent', None, None, ), # 5
(6, TType.MAP, 'info_strings', (TType.STRING, None, TType.STRING, None, False), None, ), # 6
(7, TType.LIST, 'info_strings_display_order', (TType.STRING, None, False), None, ), # 7
(8, TType.MAP, 'child_counters_map', (TType.STRING, None, TType.SET, (TType.STRING, None, False), False), None, ), # 8
(9, TType.LIST, 'event_sequences', (TType.STRUCT, [TEventSequence, None], False), None, ), # 9
(10, TType.LIST, 'time_series_counters', (TType.STRUCT, [TTimeSeriesCounter, None], False), None, ), # 10
(11, TType.LIST, 'summary_stats_counters', (TType.STRUCT, [TSummaryStatsCounter, None], False), None, ), # 11
(12, TType.STRUCT, 'node_metadata', [TRuntimeProfileNodeMetadata, None], None, ), # 12
)
all_structs.append(TRuntimeProfileTree)
TRuntimeProfileTree.thrift_spec = (
None, # 0
(1, TType.LIST, 'nodes', (TType.STRUCT, [TRuntimeProfileNode, None], False), None, ), # 1
(2, TType.STRUCT, 'exec_summary', [impala._thrift_gen.ExecStats.ttypes.TExecSummary, None], None, ), # 2
)
all_structs.append(TRuntimeProfileForest)
TRuntimeProfileForest.thrift_spec = (
None, # 0
(1, TType.LIST, 'profile_trees', (TType.STRUCT, [TRuntimeProfileTree, None], False), None, ), # 1
(2, TType.STRUCT, 'host_profile', [TRuntimeProfileTree, None], None, ), # 2
)
fix_spec(all_structs)
del all_structs
|
# To use this, install the PyGithub package, otherwise you get
# "ModuleNotFoundError: No module named 'github'":
# pip install PyGithub
from github import Github
import requests
# Remove the extra characters ('===') from the key before use.
# You can hard-code a token in your local copy, just don't commit it,
# or use an API key that only has access to your own repo.
g = Github("57327b6f6a7fc5603ac601050b6f1a0b1ff6b14===7")
for repo in g.get_user().get_repos():
print(repo.name)
#repo.edit(has_wiki=False)
# to see all the available attributes and methods
#print(dir(repo))
repo = g.get_repo("ClodaghMurphy/aPrivateOne")
#print(repo.clone_url)
|
class Quiz:
def __init__(self, question, alt1, alt2, alt3, alt4, correct):
self.question = question
self.alt1 = alt1
self.alt2 = alt2
self.alt3 = alt3
self.alt4 = alt4
self.correct = correct
def check_answer(self, answer):
self.answer = answer
score = 0
if answer == self.correct:
score += 1
print("Correct \n")
print("score:", score)
elif answer > 4 or answer < 1:
print("You can only answer 1, 2, 3 or 4")
else:
print("Wrong \n")
def __str__(self):
return " {self.question} \n 1. {self.alt1} \n 2. {self.alt2} \n 3. {self.alt3} \n 4. {self.alt4}".format(self=self)
QST1 = Quiz("What is the capital of Norway?", "Bergen", "Oslo", "Stavanger", "Trondheim", 2)
QST2 = Quiz("What is the binary number 10001 in the decimal system?", "17", "14", "5", "21", 1)
print("Question 1")
print(QST1)
answer1 = int(input("Select an answer from 1 to 4 \n"))
QST1.check_answer(answer1)
print("Question 2")
print(QST2)
answer2 = int(input("Select an answer from 1 to 4 \n"))
QST2.check_answer(answer2)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2020, Brian Scholer (@briantist)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
module: vault_test_auth
author:
- Brian Scholer (@briantist)
short_description: A module for testing centralized auth methods
description: Test auth methods by performing a login to Vault and returning token information.
extends_documentation_fragment:
- community.hashi_vault.connection
- community.hashi_vault.auth
options:
want_exception:
type: bool
default: False
"""
import json
from ansible_collections.community.hashi_vault.plugins.module_utils._hashi_vault_module import HashiVaultModule
def dictify(thing):
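# Round-trip through JSON so arbitrary objects (e.g. exceptions) become plain dicts/strings
# that can be returned from the module.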
return json.loads(
json.dumps(
thing,
skipkeys=True,
default=lambda o: getattr(o, '__dict__', str(o)),
)
)
# this module is for running tests only; no_log can interfere with return values
# and/or make it harder to troubleshoot test failures.
def strip_no_log(spec):
for key in list(spec.keys()):
if 'no_log' in spec[key]:
spec[key]['no_log'] = False
def run_module():
argspec = HashiVaultModule.generate_argspec(
want_exception=dict(type='bool'),
)
strip_no_log(argspec)
module = HashiVaultModule(
argument_spec=argspec,
supports_check_mode=False
)
options = module.adapter
module.connection_options.process_connection_options()
client_args = module.connection_options.get_hvac_connection_options()
client = module.helper.get_vault_client(**client_args)
err = msg = response = None
try:
try:
module.authenticator.validate()
response = module.authenticator.authenticate(client)
except NotImplementedError as e:
module.fail_json(msg=str(e), exception=e)
except Exception as e:
msg = str(e)
if options.get_option('want_exception'):
err = dictify(e)
else:
module.fail_json(msg=msg, exception=e)
rob = {
'login': response,
'failed': False,
'inner': {'failed': False}
}
if err is not None:
rob['inner']['failed'] = True
rob['exception'] = err
rob['msg'] = msg
module.exit_json(**rob)
def main():
run_module()
if __name__ == '__main__':
main()
|
from google.oauth2 import id_token
from google.auth.transport import requests
from datetime import datetime
CLIENT_ID_1 = '242715444435-ss081r800k4ib43o4cusd3au76bktfb3.apps.googleusercontent.com'
CLIENT_ID_2 = '593881631501-9ah6is5851aass4lonh1lptc69slfo0e.apps.googleusercontent.com'
def has_authorization(token):
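# Verify the Google ID token, then check the audience, issuer and hosted domain.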
try:
idinfo = id_token.verify_oauth2_token(token, requests.Request())
if idinfo['aud'] not in [CLIENT_ID_1, CLIENT_ID_2]:
raise ValueError('Could not verify audience.')
if idinfo['iss'] not in ['accounts.google.com', 'https://accounts.google.com']:
raise ValueError('Wrong issuer.')
return isUserValid(idinfo)
except ValueError as e:
print(e)
return False
def isUserValid(idinfo):
valid = idinfo.get('hd') == 'motorola.com' # use .get(): 'hd' is absent for non-Workspace accounts
return valid |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 19 14:03:15 2018
@author: Sudhanva
"""
from tkinter import *
root = Tk()
frame = Frame(root)
frame.pack()
bottomframe = Frame(root)
bottomframe.pack(side = BOTTOM)
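# All the colored buttons go into the top frame: red/green/blue packed LEFT, yellow/black/pink packed RIGHT;
# the bottom frame is created but left empty.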
redbutton = Button(frame, text = 'Red', fg = 'red')
redbutton.pack(side = LEFT)
greenbutton = Button(frame, text = 'Green', fg = 'green')
greenbutton.pack(side = LEFT)
bluebutton = Button(frame, text = 'Blue', fg = 'blue')
bluebutton.pack(side = LEFT)
yellowbutton = Button(frame, text = 'Yellow', fg = 'yellow')
yellowbutton.pack(side = RIGHT)
blackbutton = Button(frame, text = 'Black', fg = 'black')
blackbutton.pack(side = RIGHT)
pinkbutton = Button(frame, text = 'Pink', fg = 'pink')
pinkbutton.pack(side = RIGHT)
mainloop()
|
#
# Copyright Michael Groys, 2012-2014
#
import miner_globals
from base import *
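# PLY grammar actions: each p_* function's docstring is a production rule, and p[0]/p[n]
# hold the value of the left-hand side and of the matched symbols.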
def p_for_command(p):
'''command : for_command'''
p[0] = p[1]
def p_for_select_command(p):
'''for_command : FOR SELECT aggregated_named_expression_list'''
p[0] = ForSelectCommand(p[3])
def p_for_distinct_select_command(p):
'''for_command : FOR DISTINCT named_expression_list SELECT aggregated_named_expression_list'''
p[0] = ForDistinctSelectCommand(p[3], p[5])
def p_for_distinct_select_ordered_command(p):
'''for_command : FOR DISTINCT named_expression_list ascending SELECT aggregated_named_expression_list'''
p[0] = ForDistinctSelectCommand(p[3], p[6], isOrdered=True, isAscending=p[4])
def p_for_in_select_command(p):
'''for_command : FOR named_expression UC_IN expression SELECT aggregated_named_expression_list'''
p[0] = ForInSelectCommand(p[2], p[4], p[6])
def p_for_each_of_select_command(p):
'''for_command : FOR EACH expression OF named_expression SELECT aggregated_named_expression_list'''
p[0] = ForEachOfSelectCommand(p[3], p[5], p[7])
def p_for_each_of_from_to_select_command(p):
'''for_command : FOR EACH expression UC_IN expression ',' expression OF named_expression SELECT aggregated_named_expression_list'''
p[0] = ForEachOfSelectCommand(p[3], p[9], p[11], p[5], p[7])
def p_for_each_of_from_select_command(p):
'''for_command : FOR EACH expression UC_IN expression ',' OF named_expression SELECT aggregated_named_expression_list'''
p[0] = ForEachOfSelectCommand(p[3], p[8], p[10], fromExpr=p[5])
def p_for_each_of_to_select_command(p):
'''for_command : FOR EACH expression UC_IN ',' expression OF named_expression SELECT aggregated_named_expression_list'''
p[0] = ForEachOfSelectCommand(p[3], p[8], p[10], toExpr=p[6])
def p_for_each_select_command(p):
'''command : FOR EACH named_expression SELECT aggregated_named_expression_list'''
p[0] = ForEachSelectCommand(p[3], p[5])
##############
# implementation classes
##############
class ForSelectCommand(TypicalCommand):
NAME = "FOR SELECT"
SHORT_HELP = "FOR SELECT <agg> exp1 [as name1], <agg> exp2 [as name2] ... - generates aggregated records"
@staticmethod
def LONG_HELP():
s = """FOR SELECT <agg> exp1 [as name1], <agg> exp2 [as name2]
FOR SELECT command performs an aggregation query over the generated record set.
""" + ForSelectCommand.getAggregatorsHelp()
return s
@staticmethod
def MORE_SYMBOLS_FOR_COMPLETION():
return ["DISTINCT", "SELECT", "IN", "EACH", "OF"] + miner_globals.getAggregators()
@staticmethod
def getAggregatorsHelp():
s = "Available aggregators are:\n"
for agg in miner_globals.getAggregators():
s += " %-12s - %s\n" % (agg, miner_globals.getAggregatorHelp(agg))
return s
def __init__(self, aggregatedExpressions, aggregatedExpressionsOffset=0, preparationList=None):
TypicalCommand.__init__(self, preparationList)
self.myAggregatedExpressions = []
i = aggregatedExpressionsOffset
for (agg, exp, name, constructorArgs) in aggregatedExpressions:
if not name:
name = "_%d" % (i+1)
self.myAggregatedExpressions.append( (agg, exp, name, constructorArgs) )
i += 1
def getVariableNames(self):
return [e[2] for e in self.myAggregatedExpressions]
def getAggregateDictionary(self):
entries = []
for aggregated in self.myAggregatedExpressions:
constructorArgs = ""
if aggregated[3]:
constructorArgs = ", ".join(e.getValue() for e in aggregated[3])
if aggregated[0].find('.') == -1:
constructor = "%s(%s)" % (miner_globals.getAggregatorClass(aggregated[0]), constructorArgs)
else:
constructor = "%s(%s)" % (aggregated[0], constructorArgs)
entries.append("'%s': %s" %(aggregated[2], constructor))
return "{ %s }" % ", ".join(entries)
def getAddValuesSection(self, dictionaryName):
s = ""
for aggregated in self.myAggregatedExpressions:
s += " %s['%s'].add(%s)\n" % (dictionaryName, aggregated[2],
", ".join(e.getValue() for e in aggregated[1]))
return s
def getDictionaryValues(self):
return "".join(("_d['%s'].getValue(),"%aggregated[2]) for aggregated in self.myAggregatedExpressions)
def getStart(self):
s = TypicalCommand.getStart(self)
return s+" _d = %s\n" % self.getAggregateDictionary()
def getBody(self):
return self.getAddValuesSection("_d")
def getEnd(self):
return " yield (%s)\n" % self.getDictionaryValues()
def getGlobalExpressions(self):
globalExps = TypicalCommand.getGlobalExpressions(self)
for aggregated in self.myAggregatedExpressions:
for e in aggregated[1]:
globalExps.extend(e.getGlobalExpressions())
if aggregated[3]:
for e in aggregated[3]:
globalExps.extend(e.getGlobalExpressions())
return globalExps
def getStateForReduce(self):
return "_d"
def getReduceCommand(self, queueListStr, queueList):
s = """
_d = reduce(_runtime.mergeDictionaryItems, (q.get() for q in %s))
""" % (queueListStr, )
return s
class ForDistinctSelectCommand(ForSelectCommand):
NAME = "FOR DISTINCT"
SHORT_HELP = "FOR DISTINCT expr1, ... [ASC|DESC] SELECT <agg> exp1 [as name1], ... - generates aggregated records for distinct sets"
@staticmethod
def LONG_HELP():
s = """FOR DISTINCT expr1 [as name1], ... SELECT <agg> expN [as nameN], ...
FOR DISTINCT expr1 [as name1], ... (ASC|DESC) SELECT <agg> expN [as nameN], ...
FOR DISTINCT command performs selective aggregation over each distinct combination of the listed expressions.
If ASC or DESC is specified then the results are sorted accordingly, otherwise the order of the results is undefined.
For example, to total the traffic generated to each distinct host:
FOR DISTINCT request.hosts SELECT sum response.length
""" + ForSelectCommand.getAggregatorsHelp()
return s
def __init__(self, distinctExpressions, aggregatedExpressions, isOrdered=False, isAscending=True, preparationList=None):
ForSelectCommand.__init__(self, aggregatedExpressions, len(distinctExpressions), preparationList)
self.myDistinctExpressions = distinctExpressions
self.isOrdered = isOrdered
self.isAscending = isAscending
i = 0
for exp in self.myDistinctExpressions:
if not exp.getName():
name = "_%d" % (i+1)
exp.setName(name)
i += 1
def getVariableNames(self):
return [e.getName() for e in self.myDistinctExpressions] + ForSelectCommand.getVariableNames(self)
def getStart(self):
s = ForSelectCommand.getStart(self)
return s+" _distincts = {}\n"
def getDistinctTuple(self):
tupleStr = createTupleString( [e.getValue() for e in self.myDistinctExpressions] )
return tupleStr
def getBody(self):
s = """
_t = %s
if not _t in _distincts:
_distincts[_t] = %s
_d = _distincts[_t]
%s
""" % ( self.getDistinctTuple(), self.getAggregateDictionary(), self.getAddValuesSection("_d"))
return s
def getEnd(self):
dictValues = self.getDictionaryValues()
if self.isOrdered:
reverseStr = "" if self.isAscending else ", reverse=True"
itemsStr = "sorted(_distincts.items()%s)" % reverseStr
else:
itemsStr = "_distincts.iteritems()"
return """
for t, _d in %s:
yield t + (%s)
""" % (itemsStr, dictValues)
def getGlobalExpressions(self):
globalExps = ForSelectCommand.getGlobalExpressions(self)
for e in self.myDistinctExpressions:
globalExps += e.getGlobalExpressions()
return globalExps
def getStateForReduce(self):
return "_distincts"
def getReduceCommand(self, queueListStr, queueList):
s = """
_distinctsList = [q.get() for q in %s]
_distinctsMerged = _runtime.mergeDictionaryItemsToList(_distinctsList)
_distincts = {}
for key, dlist in _distinctsMerged.iteritems():
d = reduce(_runtime.mergeDictionaryItems, dlist)
_distincts[key] = d
""" % (queueListStr, )
return s
class ForInSelectCommand(ForSelectCommand):
NAME = "FOR IN"
SHORT_HELP = "FOR expr [as name] IN sequenceExpr SELECT <agg> exp1 [as name1], ... - FOR IN - generates aggregated records for predefined sequence of values"
@staticmethod
def LONG_HELP():
s = """FOR expr [as name] IN sequenceExpr SELECT <agg> exp1 [as name1], ...
FOR IN command performs selective aggregation on the predefined sequence of values:
For example to count requests to hosts youtube.com and xvideos.com:
FOR request.host.split(".")[-2] as name in ["youtube", "xvideos"] SELECT count True as numRecords
""" + ForSelectCommand.getAggregatorsHelp()
return s
def __init__(self, forExpression, sequenceExpression, aggregatedExpressions):
ForSelectCommand.__init__(self, aggregatedExpressions, 1)
self.myForExpression = forExpression
if not self.myForExpression.getName():
self.myForExpression.setName("_1")
self.mySequenceExpression = sequenceExpression
def getVariableNames(self):
return [self.myForExpression.getName()] + ForSelectCommand.getVariableNames(self)
def getStart(self):
s = ForSelectCommand.getStart(self)
return s+""" _distincts = {}
_keys = [ k for k in %s ]
for k in _keys:
_distincts[k] = %s
""" % (self.mySequenceExpression.getValue(), self.getAggregateDictionary())
def getBody(self):
s = """
_key = %s
if not _key in _distincts:
continue
_d = _distincts[_key]
%s
""" % ( self.myForExpression.getValue(), self.getAddValuesSection("_d"))
return s
def getEnd(self):
dictValues = self.getDictionaryValues()
return """
for key in %s:
_d = _distincts[key]
yield (key, %s)
""" % (self.mySequenceExpression.getValue(), dictValues)
def getGlobalExpressions(self):
return ForSelectCommand.getGlobalExpressions(self) + self.myForExpression.getGlobalExpressions()
def getStateForReduce(self):
return "_distincts"
def getReduceCommand(self, queueListStr, queueList):
s = """
_distinctsList = [q.get() for q in %s]
_distinctsMerged = _runtime.mergeDictionaryItemsToList(_distinctsList)
_distincts = {}
for key, dlist in _distinctsMerged.iteritems():
d = reduce(_runtime.mergeDictionaryItems, dlist)
_distincts[key] = d
""" % (queueListStr, )
return s
class ForEachOfSelectCommand(ForSelectCommand):
NAME = "FOR EACH"
SHORT_HELP = "FOR EACH <bin-size> [IN <from>, <to>] OF <expr> [as <name] SELECT <agg> exp1 [as name1], ... - generates aggregated records for specified ranges"
@staticmethod
def LONG_HELP():
s = """FOR EACH <bin-size> [IN <from>, <to>] OF <expr> [as <name] SELECT <agg> exp1 [as name1], ...
FOR EACH command aggregates data in numeric ranges of specified by <bin-size>
If <from> and <to> are not specified then from is defined by as min(exprMin, 0)
and <to> is define by the expression maximum.
For example: to count requests to sum volume at 1 hour time intervals run:
FOR EACH 1h OF frecord.timeofday SELECT sum(frecord.downloadedContentBytes)
""" + ForSelectCommand.getAggregatorsHelp()
return s
def __init__(self, binSizeExpr, forExpression, aggregatedExpressions, fromExpr=None, toExpr=None):
ForSelectCommand.__init__(self, aggregatedExpressions, 1)
self.myForExpression = forExpression
self.myBinSizeExpr = binSizeExpr
self.myFromExpr = fromExpr
self.myToExpr = toExpr
if not self.myForExpression.getName():
self.myForExpression.setName("_1")
def getVariableNames(self):
return [self.myForExpression.getName()] + ForSelectCommand.getVariableNames(self)
def getStart(self):
s = ForSelectCommand.getStart(self)
s += """ _distincts = {}
_step = %s
_max = None
_min = None
""" % self.myBinSizeExpr.getValue()
if self.myFromExpr:
s += " _fromExpr = %s\n _start = _fromExpr\n" % self.myFromExpr.getValue()
else:
s += " _start = 0\n"
if self.myToExpr:
s += " _toExpr = %s\n _toExprKey = runtime.floor(_toExpr,_step, _start)\n" % self.myToExpr.getValue()
return s
def getBody(self):
s = " _key = runtime.floor(%s, _step, _start)\n" % self.myForExpression.getValue()
if self.myFromExpr:
s += """
if _key < _fromExpr:
continue
"""
if self.myToExpr:
s += """
if _key > _toExprKey:
continue
"""
s += """
if _key not in _distincts:
_distincts[_key] = %s
_d = _distincts[_key]
if _min is None or _key < _min:
_min = _key
if _max is None or _key > _max:
_max = _key
%s
""" % ( self.getAggregateDictionary(), self.getAddValuesSection("_d"))
return s
def getEnd(self):
dictValues = self.getDictionaryValues()
if self.myFromExpr:
fromExprStr = "_from = _start"
else:
fromExprStr = "_from = runtime.floor(_min, _step, _start)"
if self.myToExpr:
toExprStr = "_to = runtime.floor(_toExpr, _step, _start)"
else:
toExprStr = "_to = runtime.floor(_max, _step, _start)"
return """
if _max is None:
return
%s
%s
_key = _from
while True:
_lookup = runtime.floor(_key,_step,_start)
if _lookup not in _distincts:
_d = %s
else:
_d = _distincts[_lookup]
yield (_lookup, %s)
if _key >= _to:
break
_key += _step
""" % (fromExprStr, toExprStr, self.getAggregateDictionary(), dictValues)
def getGlobalExpressions(self):
return ForSelectCommand.getGlobalExpressions(self) + self.myForExpression.getGlobalExpressions()
def getStateForReduce(self):
toExp = "_toExp" if self.myToExpr else "None"
return "({'min':_min, 'max':_max, 'step':_step, 'start':_start, 'toExp' : %s}, _distincts)" % toExp
def getReduceCommand(self, queueListStr, queueList):
s = """
_resultsList = [q.get() for q in %s]
_min = min( [r[0]['min'] for r in _resultsList])
_max = max( [r[0]['max'] for r in _resultsList])
_step = _resultsList[0][0]['step']
_toExp = _resultsList[0][0]['toExp']
""" % (queueListStr, )
if self.myFromExpr:
s += " _start = _resultsList[0][0]['start']\n"
else:
s += " _start = min( [r[0]['start'] for r in _resultsList])\n"
s += """
_distinctsMerged = _runtime.mergeDictionaryItemsToList( [t[1] for t in _resultsList] )
_distincts = {}
for key, dlist in _distinctsMerged.iteritems():
d = reduce(_runtime.mergeDictionaryItems, dlist)
_distincts[key] = d
"""
return s
class ForEachSelectCommand(ForSelectCommand):
NAME = "FOR EACH SELECT"
SHORT_HELP = "FOR EACH <num> [as <name>] SELECT <agg> exp1 [as name1], ... - generates aggregated records for number of input records"
@staticmethod
def LONG_HELP():
s = """FOR EACH <num> [as <name>] SELECT <agg> exp1 [as name1], ...
Calculates aggregated expressions for every <num> input records:
FOR EACH 1000 as recordId SELECT sum(volume)
""" + ForSelectCommand.getAggregatorsHelp()
return s
def __init__(self, numberInputsExp, aggregationExpressions, preparationList=None):
ForSelectCommand.__init__(self, aggregationExpressions, 1, preparationList)
self.numberInputsExp = numberInputsExp
if not self.numberInputsExp.getName():
self.numberInputsExp.setName("_1")
def getVariableNames(self):
return [self.numberInputsExp.getName()] + ForSelectCommand.getVariableNames(self)
def getStart(self):
s = ForSelectCommand.getStart(self)
return s+"""
_d = %s
_id = 0
_last = 0
_mod = %s
""" % (self.getAggregateDictionary(), self.numberInputsExp.getValue())
def getBody(self):
s = """
if _id - _last >= _mod:
yield (_id,) + (%s)
_last = _id
_d = %s
_id += 1
%s
""" % ( self.getDictionaryValues(), self.getAggregateDictionary(), self.getAddValuesSection("_d"))
return s
def getEnd(self):
dictValues = self.getDictionaryValues()
return """
if _last != _id:
yield (_id,) + (%s)
""" % dictValues
def getGlobalExpressions(self):
globalExps = ForSelectCommand.getGlobalExpressions(self)
return globalExps + self.numberInputsExp.getGlobalExpressions()
miner_globals.addHelpClass(ForSelectCommand)
miner_globals.addHelpClass(ForDistinctSelectCommand)
miner_globals.addHelpClass(ForInSelectCommand)
miner_globals.addHelpClass(ForEachOfSelectCommand)
miner_globals.addHelpClass(ForEachSelectCommand)
miner_globals.addKeyWord(command="FOR")
miner_globals.addKeyWord(keyword="EACH")
miner_globals.addKeyWord(keyword="OF")
miner_globals.addKeyWord(keyword="SELECT")
miner_globals.addKeyWord(keyword="DISTINCT")
|
import multiprocessing as mp
import tqdm
import numpy as np
import json
import sys
import pycocotools.mask as maskUtils
def read_annot(ann, h, w):
segm = ann['inmodal_seg']
if isinstance(segm, list):
modal = maskUtils.decode(maskUtils.frPyObjects(segm, h, w))
else:
modal = maskUtils.decode(segm)
amodal = maskUtils.decode(maskUtils.frPyObjects(ann['segmentation'], h, w)).squeeze()
return modal, amodal
def task(ann, data, size_dict):
    # compare one predicted amodal mask against its ground-truth annotation
    w, h = size_dict[ann['image_id']]
    amp = maskUtils.decode(data['segmentation']).astype(bool)  # predicted amodal mask
    m, amg = read_annot(ann, h, w)  # ground-truth modal and amodal masks
    # [amodal intersection, amodal union, modal area, amodal GT area]
    return [((amp == 1) & (amg == 1)).sum(),
            ((amp == 1) | (amg == 1)).sum(),
            m.sum(), amg.sum()]
def helper(args):
return task(*args)
def compute(data, annot_data, size_dict):
num = len(data)
pool = mp.Pool(16)
args = zip(annot_data, data, [size_dict] * num)
ret = list(tqdm.tqdm(pool.imap(helper, args), total=num))
return np.array(ret) # Nx4
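# Note (illustrative): each row of the returned Nx4 array is
# [amodal intersection, amodal union, modal GT area, amodal GT area], so a
# per-instance amodal IoU can be derived afterwards, e.g. iou = ret[:, 0] / ret[:, 1]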
if __name__ == "__main__":
method = 'std_no_rgb_mumford_shah'
test_set = 'val'
res_data = json.load(open(f'experiments/COCOA/pcnet_m_{method}/amodal_results/amodalcomp_{test_set}_ours.json', 'r'))
annot_data = json.load(open(f'data/COCOA/annotations/amodal_{test_set}2014_new.json', 'r'))
size_dict = dict([(a['id'], (a['width'], a['height'])) for a in annot_data['images']])
ret = compute(res_data, annot_data['annotations'], size_dict)
np.save(f"experiments/COCOA/stats/stat_{method}_{test_set}.npy", ret) |
# coding=UTF-8
import os
import json
import importlib
from getgauge.python import before_spec, data_store, step, Messages
from testlib.infrustructure.match.Match import Match
from testlib.infrustructure.devices.Device import Device
@before_spec
def before_spec_hook(context):
specificationPath = os.path.dirname(context.specification.file_name.encode('utf-8'))
data_store.spec['specificationPath'] = str(specificationPath, encoding='utf-8')
data_store.spec['deviceRepository'] = {}
data_store.spec['linkRepository'] = {}
@step(["Init env <reuqestPath>", "加载配置 <reuqestPath>"])
def init_env(reuqestPath):
config = read_config()
request = read_json_file(os.path.join(data_store.spec['specificationPath'], reuqestPath))
matchResult = do_match(config, request)
if matchResult is None:
return
Messages.write_message(matchResult)
fill_repository(config, matchResult)
def read_config():
specExecPath = data_store.spec['specificationPath']
rootPath = specExecPath.split("testcases")[0]
configPath = os.path.join(rootPath, 'env', 'envs', os.getenv('envConfig'))
return read_json_file(configPath)
def read_json_file(filePath):
with open(filePath, 'r') as f:
return json.load(f)
def do_match(config, request):
return Match(config, request).match()
def fill_repository(config, matchResult):
for requestDeviceId in matchResult:
if type(requestDeviceId) is tuple:
# todo: will fill data to linkRepository
continue
create_device(config, requestDeviceId, matchResult[requestDeviceId])
def create_device(config, requestId, configId):
matchDeviceFilter = list(filter(lambda device: device['id'] == configId, config['devices']))
if len(matchDeviceFilter) == 0:
return
deviceObj = create_device_with_reflect(matchDeviceFilter[0])
data_store.spec['deviceRepository'][requestId] = deviceObj
def create_device_with_reflect(device):
try:
classType = device['type']
if 'classType' in device:
classType = device['classType']
deviceModule = importlib.import_module('testlib.infrustructure.devices.' + classType)
deviceClass = getattr(deviceModule, classType)
return deviceClass(device['id'], device['attributes'])
except Exception as e:
return Device(device['id'], device)
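# Illustrative example (config shape assumed from the lookups above): a device
# entry such as {"id": "switch-1", "type": "Switch", "attributes": {...}} would be
# instantiated as testlib.infrustructure.devices.Switch.Switch("switch-1", {...});
# if that import or attribute lookup fails, the generic Device wrapper is returned.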
|
def application(env, start_response):
start_response('200', [('Content-Length', '1')])
exit()
return [b'X']
|
# ----------------------------
# Class to implement the SIA DC03 message
# (c) 2018 van Ovost Automatisering b.v.
# Author : Jacq. van Ovost
# ----------------------------
import time
from dc09_spt.param import *
import logging
"""
Copyright (c) 2018 van Ovost Automatisering b.v.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class dc03_codes:
"""
Some special codes
"""
@staticmethod
def dc03_is_user(code):
"""
Codes that have the user number following the code.
Note that there is no way to transfer a zone in the message
"""
codes_with_user = {"BC", "CE", "CF", "CJ", "CK", "CL", "CP", "CQ", "CR", "DA", "DB", "EE",
"JD", "JH", "JK", "JP", "JS", "JT", "JV", "JX", "JY", "JZ", "OC", "OH", "OJ", "OK", "OL",
"OP", "OQ", "OR", "OT", "RX"}
return code in codes_with_user
@staticmethod
def dc03_is_door(code):
"""
Codes that have the door number following the code.
Note that there is no way to transfer a zone in the message
"""
codes_with_door = {"DC", "DD", "DE", "DF", "DG", "DH", "DI", "DJ", "DK", "DL", "DM", "DN",
"DO", "DP", "DQ", "DR", "DS", "DV", "DW", "DX", "DY", "DZ"}
return code in codes_with_door
@staticmethod
def dc03_is_area(code):
"""
Codes that have the area number following the code.
Note that there is no way to transfer a zone in the message
"""
codes_with_area = {"BV", "CA", "CD", "CG", "CI", "CT", "CW", "FI", "FK", "JA", "JR", "NF",
"NL", "NM", "OA", "OG", "OI"}
return code in codes_with_area
class dc03_msg:
"""
construct a SIA DC03 message block
This static function builds a message DC03 block for packing into a SIA-DC09 message.
The DC09 standard document : SIA DC-09-2007 SIA-IP standaard.pdf,
Refers to the DC07 standard document : SIA DC-07-2001.04 SIA-CIS.pdf,
Which refers to the DC03 standard document : SIA DC-03-1990.01 (R2000.11)
All of these documents are available from the shop of https://services.securityindustry.org
The static method, dc03event, builds a DC03 message from a map with the various values.
"""
@staticmethod
def dc03event(spt_account, params={}):
"""
Construct a DC03 message
Parameters
spt_account
the account of the alarm transceiver.
in most situations this will be used in the alarm message too, but for situations like
a cloud based receiver, there will be a different account id in the params map.
params
a map with key-value pairs.
at this moment only the more commonly used fields are used.
the currently handled keys are:
account
the account number.
most receivers expect 4 to 8 numeric digits
area
the area number in which the event happened
(area is a part of an installation that can arm and disarm independently)
areaname
the name of the area.
zone
the alarm zone number.
the alarm zone is not always transferred. with some events the user number will be used.
zonename
the name of the zone
user
the user number doing the action (if available)
username
the name of the user
code
the event code in 2 upper case characters according to the DC03 standard.
text
                a descriptive text explaining the event
time
                a time string in the format 'hh:mm:ss' or the word 'now'
        all name and text fields can only use ASCII characters in the range space to '~', except [ ] | ^ and /
"""
account = param.strpar(params, 'account', spt_account)
area = param.numpar(params, 'area')
zone = param.numpar(params, 'zone')
user = param.numpar(params, 'user')
msg = ''
if account is None:
msg += '#0000|'
else:
msg += '#' + account + '|'
code = param.strpar(params, 'code', None)
text = param.strpar(params, 'text', None)
flavor = param.strpar(params, 'flavor', None)
if (code is None or code == 'A') and text is not None:
msg += 'A' + text
            if zone is not None or area is not None or user is not None:
logging.warning("Text message can not contain zone, area or user id's")
else:
msg += 'N'
if code is None:
code = 'RP'
if area is not None:
if not dc03_codes.dc03_is_area(code):
msg += 'ri' + area
if 'areaname' in params:
msg += '^' + params['areaname'] + '^'
if user is not None:
if not dc03_codes.dc03_is_user(code):
msg += 'id' + user
if 'username' in params:
msg += '^' + params['username'] + '^'
if 'time' in params:
timep = params['time']
if timep == 'now':
timep = time.strftime('%H:%M:%S')
msg += 'ti' + timep
msg += code
if dc03_codes.dc03_is_user(code):
if user is not None:
msg += user
if zone is not None:
logging.warning('Zone %s not included in message because code %s is user related', zone, code)
elif dc03_codes.dc03_is_area(code) and area is not None:
if area is not None:
msg += area
if zone is not None:
logging.warning('Zone %s not included in message because code %s is area related', zone, code)
else:
if zone is not None:
msg += zone
if 'zonename' in params:
msg += '^' + params['zonename'] + '^'
if text is not None:
if flavor == 'xsia':
msg += '*"' + text + '"NM'
else:
msg += '|A' + text
return msg + ']'
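# Example (illustrative; the exact output depends on the param.strpar/numpar helpers):
#     dc03_msg.dc03event('1234', {'code': 'BA', 'zone': '3', 'zonename': 'Front door'})
# would produce something like
#     '#1234|NBA3^Front door^]'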
|
from __future__ import unicode_literals
import frappe
from frappe.utils import getdate, validate_email_add, today
import datetime
from planning.planning.myfunction import mail_format_pms,actual_date_update,close_task_update
@frappe.whitelist()
def report_in_out(doctype=None,date1=None,date2=None):
filter = ""
date1="2015-08-01"
date2="2015-08-31"
if date1 and not date2:
date2=date1
if date2 and not date1:
date1=date2;
if not date1 and not date2:
date1=frappe.utils.data.nowdate ()
date2=frappe.utils.data.nowdate ()
user_name=frappe.session.user
if date1==date2:
no_days=1
else:
no_days=frappe.utils.data.date_diff(date2,date1)+1
outer_loop={}
return_list=[];
for num in range(0,no_days):
date_loop=frappe.utils.data.add_days(date1,num)
date_lists=frappe.db.sql("""select t1.name as name,t1.expected_start_date as date from `tabNNTask` t1,`tabNNAssign` t2 where t1.name=t2.parent and t2.employee_name=%s and date(t1.expected_start_date)=%s """+filter,(user_name,date_loop),as_dict=True)
i=0
innner_loop=[]
for date_list in date_lists:
task=date_list['name']
task_list_value=['task','schedule_time','worked_time','status','']
innner_loop.append(task_list_value)
i=i+i;
outer_loop[date_loop]=innner_loop
#return_list.append(outer_loop) #date_lists=frappe.db.sql("""select t1.name as name,t1.expected_start_date as date from `tabNNTask` t1,`tabNNAssign` t2 where t1.name=t2.parent and t2.employee_name=%s and date(t1.expected_start_date) between %s and %s """+filter,(user_name,date1,date2),as_dict=True)
return outer_loop |
# coding=utf-8
"""Function generators
.. moduleauthor:: Dieter Moser <[email protected]>
"""
|
from xbos import get_client
from xbos.services.mdal import *
from xbos.services.hod import HodClient
import pandas as pd
import pytz
from sklearn.metrics import mean_squared_error
from dateutil import rrule
from datetime import datetime, timedelta
# data clients
mdal = MDALClient("xbos/mdal")
hod = HodClient("xbos/hod")
# temporal parameters
SITE = "ciee"
def predict_day(targetday="2018-01-30 00:00:00 PST", WINDOW="30m", N_DAYS=10):
T0 = "2017-09-18 00:00:00 PST"
day = datetime.strptime(targetday, "%Y-%m-%d %H:%M:%S %Z")
day = pytz.timezone('US/Pacific').localize(day)
T1 = (day - timedelta(days=1)).strftime("%Y-%m-%d %H:%M:%S %Z")
tomorrow = (day + timedelta(days=1)).strftime("%Y-%m-%d %H:%M:%S %Z")
today_start = targetday
today_end = (day + timedelta(days=1)).strftime("%Y-%m-%d %H:%M:%S %Z")
print today_start, today_end
# retrieve data
weather_query = {
"Composition": ["1c467b79-b314-3c1e-83e6-ea5e7048c37b"],
"Variables": [],
"Selectors": [MEAN],
"Time": {
"T0": T0, "T1": T1,
"WindowSize": WINDOW,
"Aligned": True,
}
}
resp = mdal.do_query(weather_query)
df = resp['df']
weather_today_sofar = {
"Composition": ["1c467b79-b314-3c1e-83e6-ea5e7048c37b"],
"Variables": [],
"Selectors": [MEAN],
"Time": {
"T0": today_start,
"T1": today_end,
"WindowSize": WINDOW,
"Aligned": True,
}
}
resp = mdal.do_query(weather_today_sofar)
sample = resp['df']
# Similarity-based estimation implementation
begin = df.index[0].to_pydatetime()
# convert to midnight of the next day
begin = datetime(begin.year,begin.month,begin.day, tzinfo=begin.tzinfo) + timedelta(days=1)
end = df.index[-1].to_pydatetime()
# convert to midnight of previous day
end = datetime(end.year, end.month, end.day, tzinfo=end.tzinfo)
weather = df.columns[0]
hop = rrule.DAILY
hop_day = 1
errors = []
for dt in rrule.rrule(hop, dtstart=begin, until=end):
# data for the current day
day_weatherdata = df[dt:dt+timedelta(days=hop_day)]
# avoids indexing errors by making sure the # of data points aligns
num_sample = len(sample)
num_weatherdata = len(day_weatherdata)
num_use = min(num_sample, num_weatherdata)
today_data = sample.copy()[:num_use]
use_weather = day_weatherdata[:num_use]
today_data.index = use_weather.index # move them onto the same day to aid subtraction
sample_weather = today_data.columns[0]
# compare MSE error of today compared with the historical day
use_weather.dropna(inplace=True)
today_data[sample_weather] = today_data[sample_weather].dropna()
common = use_weather.join(today_data[sample_weather], how='inner', lsuffix='_').index
if len(common) == 0:
continue
mse = mean_squared_error(today_data[sample_weather].ix[common], use_weather.ix[common])
errors.append(mse)
d = pd.DataFrame(errors)
    # sort errors ascending and take the N_DAYS smallest values
best_10_days = d.sort_values(0, ascending=True).head(N_DAYS)
    # use the index of the value to figure out how many days since the first date ("begin", above)
best_10_days_dates = [begin+timedelta(days=hop_day*x) for x in best_10_days.index]
# grab the daily data for each of those days and put it into a new dataframe
best_10_days_data = [df[weather][x:x+timedelta(days=hop_day)].values for x in best_10_days_dates]
predictor_days_df = pd.DataFrame(best_10_days_data)
predicted_day = predictor_days_df.mean(axis=0)
predicted_day.index = pd.date_range(targetday, tomorrow, freq="30min")
return predicted_day
if __name__ == '__main__':
print predict_day("2017-10-06 00:00:00 PST")
|
# Generated by Django 3.0.6 on 2021-08-03 10:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('users', '0015_auto_20210803_0931'),
]
operations = [
migrations.RenameField(
model_name='medicine',
old_name='schedule',
new_name='sch',
),
migrations.RemoveField(
model_name='medicine',
name='disease',
),
migrations.AddField(
model_name='medicine',
name='disease_name',
field=models.CharField(max_length=200, null=True),
),
migrations.AlterField(
model_name='medicine',
name='image',
field=models.ImageField(blank=True, upload_to='medicine_pics'),
),
migrations.CreateModel(
name='Schedule',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('schedule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.Medicine')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Doctor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('doctor_name', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='users.Medicine')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Disease',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('disease_name', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='users.Medicine')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
CaesarCypher.py
This module implements simple writing and reading methods for files based on the Caesar cypher.
It's a quite simple encryption method: every character is converted to its numerical value, a value is added to it and then it is reconverted to a character and printed to the file.
It is not meant to be secure or particularly good, it only outputs a file that is not human-readable. Output file is encoded in utf-8 (just for info).
Objects support the with statement.
See the end of the module for examples.
This module is distributed under the MIT License
Copyright (c) 2014 Glenderin/Elnath
Version of July 2014, Revision of July 2015
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Except as contained in this notice, the name(s) of (the) Author shall not be used in advertising or
otherwise to promote the sale, use or other dealings in this Software without
prior written authorization from (the )Author.
"""
import codecs
import random
class CaesarWriter():
def __init__(self, filename, offset = None):
"""
Creates an object used to write the encrypted file.
*filename (string): the name of the file that will be written.
*offset (int): the number that is added to the value of every character. Must be between 0 and 255. If None, a random number is generated.
You can access the file opened by this object with his file attribute (but it is not recommended).
The offset value is stored in the offset attribute
Don't forget to close the object at the end. Or you can use a with statement, that's better.
It supports the with statement.
How it works:
A file is created with codecs.open(filename, "wb", encoding = "utf-8"), the first character written to the file is the offset(so we can read it later).
When you use the write method, every character is converted to its numeric value, offset is added, and then it is converted to the character value and written to the file.
"""
if offset == None:
offset = random.randint(1, 254) #We don't include 0 and 255, because that will mean characters are not converted so the file is human-readable.
elif not isinstance(offset, int): #offset is passed: we verify if it is a integer
try:
offset = int(offset)
except ValueError:
raise TypeError("Offset must be a int, castable to int or None")
if offset < 0:
offset = -offset
if offset > 255:
raise ValueError("Offset must be between 0 and 255")
self.offset = offset
if isinstance(filename, str):
self.file = codecs.open(filename, "wb", encoding = "utf-8")
else:
raise TypeError("Filename must be a string")
self.file.write(unichr(self.offset)) #The first char in the file will be the offset so we can retrieve it when we read the file
def write(self, text):
"""Writes some text to the file"""
if not isinstance(text, str):
text = str(text)
for c in text:
x = (ord(c) + self.offset) % 255 #If > 255, then it will be its value-255
self.file.write(unichr(x))
def writelines(self, lines):
"""Writes multiple lines to the file."""
for element in lines:
self.write(element)
def close(self):
"""Close the file attached to the object. """
self.file.close()
    def __enter__(self):
        return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
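# Worked example of the shift (illustrative): with offset 5, 'A' (ord 65) is written
# as unichr((65 + 5) % 255) == unichr(70) == u'F'; CaesarReader reverses this with
# ord(u'F') - 5 == 65, i.e. 'A' again.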
class CaesarReader():
def __init__(self, filename):
"""
Creates an object used to read the encrypted file.
*filename (string): the name of the file to open.
Returns unicode encoded strings.
You can access the file opened by this object with his file attribute (but it is not recommended).
You can access the offset with the offset attribute.
Don't forget to close the object at the end. Or you can use a with statement, that's better
It supports the with statement.
"""
if isinstance(filename, str):
self.file = codecs.open(filename, "rb", encoding = "utf-8")
else:
raise TypeError("Filename argument must be a string")
self.offset = ord(self.file.read(1)) #The first char in the file is the offset
def read(self, size = None):
"""Reads size bytes from the file"""
result = ""
buffer = self.file.read(size)
for c in buffer:
x = ord(c) - self.offset
if x <= 0:
x = x+255
result += chr(x)
return result
def readline(self):
"""Reads from the file till a line break or EOF is found"""
result = ""
readloop = True
while readloop:
c = self.read(1)
result += c
if c == "": #This is the End of the file
readloop = False
if c == "\n":
readloop = False
return result
def readlines(self):
result = []
readloop = True
while readloop:
line = self.readline()
if line == "":
readloop = False
else:
result.append(line)
return result
def close(self):
"""Close the file attached to the object."""
self.file.close()
    def __enter__(self):
        return self
def __exit__(self, exc_type, exc_value, traceback):
self.file.close()
def encrypt(origin_file, end_file, offset):
"""Write the encrypted content of origin_file to end_file"""
with open(origin_file, "r") as reader:
with CaesarWriter(end_file, offset) as writer:
for line in reader.readlines():
writer.write(line)
def decrypt(origin_file, end_file):
"""Write the decrypted content of origin_file(must have been previously encrypted by this module) to end_file"""
with CaesarReader(origin_file) as reader:
with open(end_file, "w") as writer:
for line in reader.readlines():
writer.write(line)
#EXAMPLE
if __name__ == "__main__":
print("This module will print a test file")
name = raw_input("Enter the name of the file: ")
# name = "test.txt"
with CaesarWriter(name, 0) as out:
out.write("First line\n")
out.write("This is the second line")
out.write(" This is also the second line")
out.write("\nA third line because why not? And somê sp€ciàl ch@rs§")
with CaesarReader(name) as reader:
print "Content of the file when decrypted:"
for line in reader.readlines():
print line
|
import csv
import statistics
class VrmPrinter:
def __init__(self):
self.voltage = None
self.fluorescence = list()
self.voltages = list()
self.fluorescences = list()
def add(self, voltage: float, fluorescence: float):
if self.voltage is not None and self.voltage != voltage:
self.voltages.append(self.voltage)
self.fluorescences.append(statistics.mean(self.fluorescence))
self.fluorescence.clear()
self.voltage = None
self.voltage = voltage
self.fluorescence.append(fluorescence)
def print(self):
for v, f in zip(self.voltages, self.fluorescences):
print(f'{v}\t\t{f}\\\\')
def print_normalized(self):
max_fluorescence = self.fluorescences[0]
min_fluorescence = self.fluorescences[-1]
for v, f in zip(self.voltages, self.fluorescences):
print(f'{v}\t\t{((f-min_fluorescence)/(max_fluorescence-min_fluorescence))}\\\\')
# print(f'{((f-min_fluorescence)/(max_fluorescence-min_fluorescence))}')
def read(self, path: str, delimiter: str=';', encoding: str = 'iso-8859-1'):
with open(path, encoding=encoding) as csv_file:
reader = csv.reader(csv_file, delimiter=delimiter)
rows = [row for row in reader]
line_count = 0
for row in rows:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
else:
self.add(float(row[1]), float(row[2]))
line_count += 1
print(f'Read {line_count} lines.')
class CsvAverage:
def __init__(self, encoding='iso-8859-1', delimiter=';'):
self.encoding = encoding
self.delimiter = delimiter
self.average = list()
self.sources = list()
self.line_count = None
def add_source(self, path: str):
with open(path, encoding=self.encoding) as file:
count = sum(1 for _ in csv.reader(file))
if self.line_count is None:
self.line_count = count
elif self.line_count != count:
raise Exception("Files must contain same number of lines")
self.sources.append(path)
def print(self, col1: int = 0, col2: int = 1, x_offset: float = 0, show_column_names: bool = False):
files = [open(source, encoding=self.encoding) for source in self.sources]
readers = [csv.reader(file, delimiter=self.delimiter) for file in files]
iterators = [iter(reader) for reader in readers]
row_index = 0
while row_index < self.line_count:
rows = [next(iterator) for iterator in iterators]
if row_index == 0:
if show_column_names:
print(f'Column names are {rows[0][col1]} and {rows[0][col2]}')
else:
val1 = statistics.mean([float(row[col1]) for row in rows])
val2 = statistics.mean([float(row[col2]) for row in rows])
print(f'{val1+x_offset:.2f}\t\t{val2:.4f}\\\\')
row_index += 1
def values(self, col: int = 0, offset: float=0, has_header: bool = True):
files = [open(source, encoding=self.encoding) for source in self.sources]
readers = [csv.reader(file, delimiter=self.delimiter) for file in files]
iterators = [iter(reader) for reader in readers]
row_index = 0
while row_index < self.line_count:
rows = [next(iterator) for iterator in iterators]
if row_index > 0 or not has_header:
yield statistics.mean([float(row[col]) for row in rows]) + offset
row_index += 1
class SimpleCsv:
def __init__(self):
self.rows = list()
def read(self, path: str, delimiter: str=';', encoding: str = 'iso-8859-1', show_read_line_count=False):
with open(path, encoding=encoding) as csv_file:
reader = csv.reader(csv_file, delimiter=delimiter)
self.rows = [row for row in reader]
if show_read_line_count:
print(f'Read {len(self.rows)} lines.')
def print(self, col1: int=0, col2: int=1, x_offset: float=0, row_separator: str="", print_every_nth: int=1, show_column_names: bool=False):
line_count = 0
for row in self.rows:
if line_count == 0:
if show_column_names:
print(f'Column names are {", ".join(row)}')
elif line_count % print_every_nth != 0:
line_count += 1
continue
else:
print(f'{float(row[col1])+x_offset:.2f}\t\t{row[col2]}{row_separator}')
line_count += 1
def values(self, col: int = 0, offset: float=0, has_header: bool = True):
row_index = 0
for row in self.rows:
if row_index > 0 or not has_header:
yield float(row[col]) + offset
row_index += 1
class SimpleTikZPrinter:
@staticmethod
def print(x_values, y_values, x_offset: float=0, row_separator: str= ""):
for x, y in zip(x_values, y_values):
print(f'{x+x_offset:.2f}\t\t{y:.4f}{row_separator}')
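# Typical usage (illustrative; the file name and column indices are assumptions):
#     data = SimpleCsv()
#     data.read('measurement.csv')
#     SimpleTikZPrinter.print(data.values(col=0), data.values(col=2), row_separator='\\\\')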
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import os
import csv
import getopt
import sys
import argparse
def get_case_info(file_writer, file_content, reason, status="Pass"):
out = []
tmp_out = []
info = []
ge_out = []
hgvs_out = []
final_syn = []
all_gene = []
all_omim = []
status = ''
syndrome_name = ""
syndrome = {}
has_mask = ''
if 'genomicData' in file_content:
genomic = file_content['genomicData']
ge_out = [ge_entry['Test Information']['Gene Name'] for ge_entry in genomic]
# Check selected syndrome
if 'selected_syndromes' in file_content:
syn_out = []
omim_out = []
diag_out = []
has_mask_out = []
for syn in file_content['selected_syndromes']:
if syn['diagnosis'] == 'DIFFERENTIAL_DIAGNOSIS':
continue
syndrome.update({syn['syndrome_name']: {
'omim': syn['omim_id'],
'diagnosis': syn['diagnosis'],
'has_mask': syn['has_mask']
}})
syn_out.append(syn['syndrome_name'])
omim_out.append(syn['omim_id'])
diag_out.append(syn['diagnosis'])
has_mask_out.append(syn['has_mask'])
if len(syn_out) == 1:
# only one syndrome selected
syndrome_name = syn_out[0]
has_mask = has_mask_out[0]
final_syn.append(omim_out[0])
# Store omim id into all_omim
if type(omim_out[0]) == list:
for omim in omim_out[0]:
all_omim.append(omim)
else:
all_omim.append(omim_out[0])
else:
list_syn = []
single_syn = []
for idx, syn in enumerate(omim_out):
if type(syn) == list:
list_syn = syn
syndrome_name = syn_out[idx]
else:
single_syn.append(syn)
final_syn.append(list_syn)
all_omim = [omim for omim in list_syn]
for syn in single_syn:
if syn not in list_syn:
final_syn.append(syn)
all_omim.append(syn)
# Fixed deprecated omim id
tmp_all_omim = []
for omim in all_omim:
if str(omim) in deprecated_omim:
for new_id in deprecated_omim[str(omim)]:
tmp_all_omim.append(new_id)
else:
tmp_all_omim.append(omim)
all_omim = tmp_all_omim
# Map phenotype to gene
for omim in all_omim:
if str(omim) in omim_dict:
genes = omim_dict[str(omim)]
for gene in genes:
if gene not in all_gene:
all_gene.append(gene)
# Syndrome has no omim id
if len(all_omim) == 0:
all_gene = [gene for gene in ge_out]
status = 'no omim'
# no gene can be found via mapping
if len(all_gene) == 0:
all_gene = [gene for gene in ge_out]
status = 'no gene mapped from omim'
if ge_out[0] not in all_gene:
status = 'disease gene not in omim gene list'
if syndrome_name not in tmp_syn:
syn_cases = []
cases_count = 0
else:
syn_cases = tmp_syn[syndrome_name]['cases']
cases_count = tmp_syn[syndrome_name]['count']
syn_cases.append(fileName.split('.')[0])
cases_count = cases_count + 1
tmp_syn.update({syndrome_name:{
'gene':all_gene,
'omim':all_omim,
'has_mask':syndrome[syndrome_name]['has_mask'],
'cases': syn_cases,
'count': cases_count
}})
# Output results to csv
tmp_out.append(fileName.split('.')[0])
tmp_out.append(status)
tmp_out.append(has_mask_out)
tmp_out.append(1 if 1 in has_mask_out else 0)
tmp_out.append(all_gene)
tmp_out.append(str(ge_out)[1:-1])
tmp_out.append(final_syn)
tmp_out.append(syn_out)
tmp_out.append(diag_out)
tmp_out.append(omim_out)
file_writer.writerow(tmp_out)
if status != '':
not_mapped_writer.writerow(tmp_out)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Generate summary file')
parser.add_argument('-c', '--case', help='path to convert file')
parser.add_argument('-l', '--log', help='path to log file')
parser.add_argument('-o', '--output', help='path to output file')
args = parser.parse_args()
case_path = args.case
log_file = open(args.log)
log_data = json.load(log_file)
deprecated_file = open('data/omim_deprecated_replacement.json', 'r')
deprecated_omim = json.load(deprecated_file)
# Parse omim phenotype to gene mapping file
omim_dict = {}
omim_file = open('data/morbidmap.txt', 'r')
omim_reader = csv.reader(omim_file, delimiter='\t')
count = 0
for row in omim_reader:
count = count + 1
if len(row) > 2:
omim = row[0].split(', ')[-1].split(' ')[0]
gene = row[1].split(', ')[0]
if omim not in omim_dict:
omim_dict.update({omim:[gene]})
else:
if gene not in omim_dict[omim]:
omim_dict[omim].append(gene)
omim_file.close()
output_filename = os.path.join(args.output, 'case_gene_phenotype_table.csv')
not_mapped_file = open(os.path.join(args.output,'gene_not_mapped.csv'), 'w')
not_mapped_writer = csv.writer(not_mapped_file, delimiter='\t')
not_mapped_writer.writerow(['case', 'status', 'has_mask_all_syndrome', 'has_mask', 'all genes', 'gene', 'omim', 'syndrome name', 'diagnosis', 'all omim', 'count'])
syn_mapped_file = open(os.path.join(args.output,'phenotype_to_gene.csv'), 'w')
syn_mapped_writer = csv.writer(syn_mapped_file, delimiter='\t')
tmp_syn = {}
with open(output_filename, 'w') as csvfile:
file_writer = csv.writer(csvfile, delimiter='\t')
file_writer.writerow(['case', 'status','has_mask_all_syndrome', 'has_mask', 'all genes', 'gene', 'omim', 'syndrome name', 'diagnosis', 'all omim'])
for fileName in log_data["passed"].keys():
test = fileName
file_content = json.load(open(os.path.join(case_path, fileName + '.json')))
get_case_info(file_writer, file_content, [])
final_syn = {}
for syn in tmp_syn:
if len(tmp_syn[syn]['omim']) > 1 or len(tmp_syn[syn]['omim']) == 0:
final_syn.update({syn:tmp_syn[syn]})
for syn in tmp_syn:
if len(tmp_syn[syn]['omim']) == 1:
found = False
for syn_compare in tmp_syn:
if len(tmp_syn[syn_compare]['omim']) > 1:
if tmp_syn[syn]['omim'][0] in tmp_syn[syn_compare]['omim']:
for case in tmp_syn[syn]['cases']:
final_syn[syn_compare]['cases'].append(case)
final_syn[syn_compare]['count'] = final_syn[syn_compare]['count'] + tmp_syn[syn]['count']
found = True
break
if not found:
final_syn.update({syn:tmp_syn[syn]})
all_genes = []
syn_mapped_writer.writerow(['name', 'has_mask', 'gene', 'omim', 'cases', 'count'])
for syn in final_syn:
syn_mapped_writer.writerow([syn, tmp_syn[syn]['has_mask'], tmp_syn[syn]['gene'], tmp_syn[syn]['omim'], tmp_syn[syn]['cases'], tmp_syn[syn]['count']])
for gene in tmp_syn[syn]['gene']:
if gene not in all_genes:
all_genes.append(gene)
gene_mapped_file = open(os.path.join(args.output,'gene_mapped_table.csv'), 'w')
gene_mapped_writer = csv.writer(gene_mapped_file, delimiter='\t')
for gene in all_genes:
gene_mapped_writer.writerow([gene])
gene_mapped_file.close()
not_mapped_file.close()
syn_mapped_file.close()
|
class Group(object):
def __init__(self):
self.children = []
def add(self, child):
self.children.append(child)
return child
def update(self, time):
self.children[:] = [
child for child in self.children if child.exists()]
for child in self.children:
child.update(time)
def draw(self, surface):
for child in self.children:
child.draw(surface)
def exists(self):
return True
def destroy(self):
self.children = []
def __iter__(self):
for child in self.children:
yield child
def __getitem__(self, item):
return self.children[item]
def __len__(self):
return len(self.children) |
#!/usr/bin/env python
import Adafruit_ADS1x15
import time
import rospy
import numpy
from std_msgs.msg import String,Int32MultiArray
from rospy.numpy_msg import numpy_msg
from rospy_tutorials.msg import Floats
def talker():
pub = rospy.Publisher('line_follower_topic', numpy_msg(Floats),queue_size=10)
rospy.init_node('talker_ads', anonymous=True)
rate = rospy.Rate(10) # 10hz
adc_dx = Adafruit_ADS1x15.ADS1015(address=0x48, busnum=1)
adc_sx = Adafruit_ADS1x15.ADS1015(address=0x49, busnum=1)
#msg= Int32MultiArray
while not rospy.is_shutdown():
values = numpy.array([0, 0, 0, 0, 0, 0, 0, 0], dtype=numpy.float32)
#msg = [0]*8
for i in range(4):
values[i] = adc_dx.read_adc(i, gain=1)
values[i+4] = adc_sx.read_adc(i, gain=1)
# while not rospy.is_shutdown():
# Read all the ADC channel values in a list.
#values = [0]*8
#for i in range(4):
# Read the specified ADC channel using the previously set gain value.
# values[i] = adc_dx.read_adc(i, gain=1)
# values[i+4] = adc_sx.read_adc(i, gain=1)
rospy.loginfo(values)
pub.publish(values)
rate.sleep()
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
|
from CGATReport.Tracker import *
from cpgReport import *
from CGATReport.odict import OrderedDict as odict
##########################################################################
class cgiAnnotations(cpgTracker):
"""Breakdown of overlap of predicted CGIs with genomic regions """
mPattern = "cgi_annotations"
def __call__(self, track, slice=None):
data = self.getFirstRow( """SELECT
sum(is_cds) AS cds,
sum(is_utr) AS utr,
sum(is_upstream) AS upstream,
sum(is_downstream) AS downstream,
sum(is_intronic) AS intronic,
sum(is_intergenic) AS intergenic,
sum(is_flank) AS flank,
sum(is_ambiguous) AS ambiguous
FROM cgi_annotations""" )
mColumns = ["cds",
"utr",
"upstream",
"downstream",
"intronic",
"intergenic",
"flank",
"ambiguous"]
return odict(list(zip(mColumns, data)))
##########################################################################
class cgitssoverlap(cpgTracker):
"""overlap of predicted CGIs with TSS """
mPattern = "tss_cgi_venn"
def __call__(self, track, slice=None):
data = self.getAll("SELECT track, intervals from tss_cgi_venn")
return data
##########################################################################
class CGI_CpGObsExp2(cpgTracker):
mPattern = "_comp$"
def __call__(self, track, slice=None):
data = self.getAll("SELECT CpG_ObsExp2 FROM %(track)s_comp" % locals())
return data
##########################################################################
class CGI_GCContent(cpgTracker):
mPattern = "_comp$"
def __call__(self, track, slice=None):
data = self.getAll("SELECT pGC FROM %(track)s_comp" % locals())
return data
|
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
def load_image(file_path):
img = Image.open(file_path)
img_data = np.array(img)
return img_data
def visualize(img_data):
print(img_data.shape)
plt.axis("off")
plt.imshow(img_data)
plt.show()
def visualize_dual(img_data_1, img_data_2):
fig = plt.figure(figsize=(10, 6))
rows = 1
columns = 2
fig.add_subplot(rows, columns, 1)
plt.axis('off')
plt.imshow(img_data_1)
fig.add_subplot(rows, columns, 2)
plt.axis('off')
plt.imshow(img_data_2)
plt.show() |
import pandas as pd
import requests
import io
import os
import json
import plotly.express as px
import plotly.figure_factory as ff
import numpy as np
import pathlib
import statsmodels.api as sm
import matplotlib.pyplot as plt
pd.options.plotting.backend = "plotly"
def fetch_json_map():
if not os.path.exists("./data/states.json"):
resp = requests.get("https://gist.githubusercontent.com/mshafrir/2646763/raw/8b0dbb93521f5d6889502305335104218454c2bf/states_hash.json")
res = json.load(io.BytesIO(resp.content))
f = open("./data/states.json", "w")
json.dump(res, f)
f.close()
else:
f = open("./data/states.json")
res = json.load(f)
f.close()
result = {v: k for k, v in res.items()}
return result
def fetch_and_clean_tables_from_wikipedia():
"""
Grabs the tables of interest from wikipedia
Returns:
A DF that contains macro level data for each state
"""
gini_url = "https://en.wikipedia.org/wiki/List_of_U.S._states_by_Gini_coefficient"
pov_url = "https://en.wikipedia.org/wiki/List_of_U.S._states_and_territories_by_poverty_rate"
urb_url = "https://en.wikipedia.org/wiki/Urbanization_in_the_United_States"
climate_url = "" ####
urb_state_mapping = lambda x: x[:x.find('[')]
#First we grab the dirty tables
gini = pd.read_html(gini_url)
gini = gini[2] # this gets correct table from wikipedia page
pov = pd.read_html(pov_url)
pov = pov[2]
urb = pd.read_html(urb_url)
urb = urb[-1]
    urb = urb.droplevel(level=0, axis=1)  # clean the unnecessary MultiIndex
# climate = pd.read_html(climate_url) #TODO
# data sourcing of climate not straightforward like others
#Then we clean the tables such that the output is directly usable
gini.columns = gini.columns.str.replace(' ', '_')
pov.columns = pov.columns.str.replace(' ', '_')
urb.columns = urb.columns.str.replace(' ', '_')
gini = gini.rename(columns={
'State_or_federal_district': 'state',
'Gini_Coefficient': 'gini_coef'
})
gini.drop(['Rank'], axis=1, inplace=True)
gini.set_index('state', inplace=True)
gini.columns = gini.columns.str.lower()
pov = pov.rename(columns={
'State': 'state',
'2019_Poverty_rate(percent_of_persons_in_poverty)[note_2][7]': 'pov_2019',
'2014_Poverty_Rates_(includes_unrelated_children)': 'pov_2014'
})
pov.drop(['Rank', 'Supplemental_Poverty_Measure_(2017–2019_average)_(Geographically_Adjusted)'], axis=1, inplace=True)
pov.set_index('state', inplace = True)
pov.columns = pov.columns.str.lower()
urb = urb.rename(columns={'State/Territory': 'state',
'2010': 'urb_2010',
'2000': 'urb_2000' })
urb = urb[['state', 'urb_2010', 'urb_2000']].copy()
urb['state'] = urb['state'].apply(urb_state_mapping)
urb.set_index('state', inplace=True)
urb.columns = urb.columns.str.lower()
#join them all
macro_df = gini.merge(pov, 'inner', 'state').merge(urb, 'inner', 'state')
return macro_df.dropna()
def fetch_mental_health_df():
"""
Performs a sync get request to grab the data
"""
res = requests.get('https://data.cdc.gov/api/views/yni7-er2q/rows.csv?accessType=DOWNLOAD')
mental_health = pd.read_csv(io.BytesIO(res.content))
return mental_health
def chop_and_clean_mental_health(by_group: str, subgroup_name, mental_health):
"""
    Chops the original df by `by_group` and returns a DF with a MultiIndex of [time, subgroup_name].
Returns:
chopped & cleaned df with a multi-index of [time, subgroup_name]
"""
to_drop= ['Group',
'Time Period Label',
'Time Period End Date',
'Suppression Flag',
'State',
'Time Period',
'index',
'Quartile Range',
'Confidence Interval']
indics_mapping = {
'Needed Counseling or Therapy But Did Not Get It, Last 4 Weeks':
'none',
'Received Counseling or Therapy, Last 4 Weeks':
'therapy',
'Took Prescription Medication for Mental Health And/Or Received Counseling or Therapy, Last 4 Weeks':
'either',
'Took Prescription Medication for Mental Health, Last 4 Weeks':
'medication'
}
education_mapping = {
"Some college/Associate's degree":
'associates',
"Bachelor's degree or higher":
'college',
'High school diploma or GED':
'highschool',
'Less than a high school diploma':
'none'
}
state_mapping = fetch_json_map()
indics_to_col_map = {}
df = mental_health[mental_health['Group'] == '{}'.format(by_group)].copy()
df.reset_index(inplace=True)
df.drop(to_drop, axis =1, inplace=True)
df.columns = df.columns.str.replace(' ', '_')
df.columns = df.columns.str.lower()
df = df.rename(columns = {
'time_period_start_date': 'time_period',
'subgroup': '{}'.format(subgroup_name)
})
if subgroup_name == 'education':
df['education'] = df['education'].apply(lambda x: education_mapping[x])
elif subgroup_name == 'state':
macro_df = fetch_and_clean_tables_from_wikipedia()
df = df.merge(macro_df, 'inner', 'state')
df['state'] = df['state'].apply(lambda x: state_mapping[x])
df.set_index(['time_period', '{}'.format(subgroup_name)], inplace=True)
    # Here we map the indicator variables to a summarized form.
#A set was used to get all unique values in the columns
df['indicator'] = df['indicator'].apply(lambda x: indics_mapping[x])
return df
def fetch_data():
if not (os.path.exists('./data/state.h5') and os.path.exists('./data/education.h5')):
mental_health = fetch_mental_health_df()
state_df = chop_and_clean_mental_health("By State", 'state', mental_health)
education_df = chop_and_clean_mental_health(
"By Education", 'education', mental_health)
state_df.to_hdf('./data/state.h5', key = 'df', mode = 'w')
education_df.to_hdf('./data/education.h5', key = 'df', mode = 'w')
else:
state_df = pd.read_hdf('./data/state.h5', key = 'df')
education_df = pd.read_hdf('./data/education.h5', key = 'df')
return [state_df, education_df]
def main():
[state_df, education_df] = fetch_data()
print(state_df, education_df)
#First we get begin and end slices
#we are only concerned with those who needed mental health care but got none
beg = state_df.loc['08/19/2020']
end = state_df.loc['06/23/2021']
beg = beg[beg['indicator'] == 'none']
end = end[end['indicator'] == 'none']
beg = beg.rename(columns = {'value': 'unmet_mental_health'})
end = end.rename(columns = {'value': 'unmet_mental_health'})
#Next we test to see which states had a statistically significant drop
#and ones that had a statistically significant increase
low_test_mask = end['unmet_mental_health'] < beg['lowci']
high_test_mask = end['unmet_mental_health'] > beg['highci']
lows = low_test_mask * 1
highs = high_test_mask * 1
end['improved_unmet_mental_health'] = low_test_mask
end['worsened_unmet_mental_health'] = high_test_mask
#make a figure for our findings
fig = px.choropleth(end,
locations = end.index,
color = 'improved_unmet_mental_health',
hover_name = end.index,
locationmode= 'USA-states')
fig.update_layout(
title_text = 'U.S. States',
geo_scope = 'usa'
)
fig.write_image("./figures/improved.png")
fig = px.choropleth(end,
locations = end.index,
color = 'worsened_unmet_mental_health',
hover_name = end.index,
locationmode= 'USA-states')
fig.update_layout(
title_text = 'U.S. States',
geo_scope = 'usa'
)
fig.write_image("./figures/worsened.png")
    # Then we compute sample correlations.
#note we have to strip the percentages
print(end.columns)
subj_df = pd.DataFrame([
end['unmet_mental_health'],
end['gini_coef'],
end['pov_2019'].str.rstrip('%').astype('float'),
end['urb_2010'].str.rstrip('%').astype('float'),
]
)
subj_df = subj_df.transpose()
corr_df = subj_df.corr()
#remove duplicated info
    mask = np.tril(np.ones(corr_df.shape)).astype(bool)
lt_corr_df = corr_df.where(mask).round(2)
print(lt_corr_df)
fig = ff.create_annotated_heatmap(
z = lt_corr_df.to_numpy(),
x = lt_corr_df.columns.tolist(),
y = lt_corr_df.index.tolist(),
zmax = 1,
zmin = -1,
colorscale = px.colors.diverging.RdBu
)
#We output the correlations to a figure
fig.update_layout(
yaxis_autorange='reversed',
xaxis_showgrid=False,
yaxis_showgrid=False,
uniformtext_minsize= 16
)
fig.write_image("./figures/corrs.png")
    # We do tests of multicollinearity (if we need to)
#Now we use a simple multilinear regression on the data, only to test for
# statistical significance
dep_vars = end[['gini_coef']]
dep_vars['urb_2010'] = end['urb_2010'].str.rstrip('%').astype('float')
dep_vars['pov_2019'] = end['pov_2019'].str.rstrip('%').astype('float')
y_var = end['unmet_mental_health']
dep_vars = sm.add_constant(dep_vars)
est = sm.OLS(y_var.astype(float), dep_vars.astype(float), missing='drop').fit()
plt.rc(
'figure',
figsize=(12, 7))
plt.text(
0.01,
0.05,
str(est.summary()),
{'fontsize': 10},
fontproperties = 'monospace')
plt.axis('off')
plt.tight_layout()
plt.savefig('./figures/model0.png')
dep_vars = end['urb_2010'].str.rstrip('%').astype('float')
y_var = end['unmet_mental_health']
dep_vars = sm.add_constant(dep_vars)
est = sm.OLS(y_var.astype(float), dep_vars.astype(float), missing='drop').fit()
plt.clf()
dep_vars = end[['gini_coef']]
dep_vars['pov_2019'] = end['pov_2019'].str.rstrip('%').astype('float')
y_var = end['unmet_mental_health']
dep_vars = sm.add_constant(dep_vars)
est = sm.OLS(y_var.astype(float), dep_vars.astype(float), missing='drop').fit()
plt.rc(
'figure',
figsize=(12, 7))
plt.text(
0.01,
0.05,
str(est.summary()),
{'fontsize': 10},
fontproperties = 'monospace')
plt.axis('off')
plt.tight_layout()
plt.savefig('./figures/model1.png')
dep_vars = end['urb_2010'].str.rstrip('%').astype('float')
y_var = end['unmet_mental_health']
dep_vars = sm.add_constant(dep_vars)
est = sm.OLS(y_var.astype(float), dep_vars.astype(float), missing='drop').fit()
plt.clf()
plt.rc(
'figure',
figsize=(12, 7))
plt.text(
0.01,
0.05,
str(est.summary()),
{'fontsize': 10},
fontproperties = 'monospace')
plt.axis('off')
plt.tight_layout()
plt.savefig('./figures/model2.png')
if __name__ == '__main__':
os.chdir(pathlib.Path(__file__).parent.parent.resolve())
if not os.path.exists('./data/'):
os.mkdir('./data/')
main()
|
import os
import re
import logging
from virttest import data_dir
from virttest import utils_misc
from avocado.utils import process
from avocado.core import exceptions
from autotest.client.shared import error
@error.context_aware
def run(test, params, env):
"""
'thin-provisioning' functions test using sg_utils:
1) Create image using qemu-img
2) Convert the image and check if the speed is much
faster than standard time
:param test: QEMU test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
standard_time = 0.4
qemu_img_binary = utils_misc.get_qemu_img_binary(params)
base_dir = params.get("images_base_dir", data_dir.get_data_dir())
if not qemu_img_binary:
raise exceptions.TestError("Can't find the command qemu-img.")
image_create_cmd = params["create_cmd"]
image_create_cmd = image_create_cmd % (qemu_img_binary, base_dir)
image_convert_cmd = params["convert_cmd"]
image_convert_cmd = image_convert_cmd % (
qemu_img_binary, base_dir, base_dir)
process.system(image_create_cmd, shell=True)
output = process.system_output(image_convert_cmd, shell=True)
realtime = re.search(r"real\s+\dm(.*)s", output)
if realtime is None:
raise exceptions.TestError(
"Faild to get the realtime from {}".format(output))
realtime = float(realtime.group(1))
logging.info("real time is : {:f}".format(realtime))
if realtime >= standard_time:
err = "realtime({:f}) to convert the image is a " \
"little longer than standardtime({:f})"
raise exceptions.TestFail(err.format(realtime, standard_time))
delete_image = params["disk_name"]
delete_image = os.path.join(base_dir, delete_image)
delete_convert_image = params.get("convert_disk_name")
delete_convert_image = os.path.join(base_dir, delete_convert_image)
process.system_output("rm -rf {:s} {:s}".format(
delete_image, delete_convert_image)) |
#!/usr/bin/env python
"""Process that loads the datastore"""
__author__ = 'Michael Meisinger, Thomas Lennan'
"""
Possible Features
- load objects into different datastores
- load from a directory of YML files in ion-definitions
- load from a ZIP of YMLs
- load an additional directory (not under GIT control)
- change timestamp for resources
- load a subset of objects by type, etc
"""
from pyon.public import CFG, log, ImmediateProcess, iex
from pyon.datastore import datastore_admin
from pyon.core import bootstrap
from pyon.core.bootstrap import get_sys_name
class DatastoreAdmin(ImmediateProcess):
"""
bin/pycc -x ion.process.bootstrap.datastore_loader.DatastoreLoader op=clear prefix=ion
bin/pycc -x ion.process.bootstrap.datastore_loader.DatastoreLoader op=dump path=res/preload/local/my_dump
bin/pycc -fc -x ion.process.bootstrap.datastore_loader.DatastoreLoader op=load path=res/preload/local/my_dump
bin/pycc -x ion.process.bootstrap.datastore_loader.DatastoreLoader op=dumpres
"""
def on_init(self):
pass
def on_start(self):
# print env temporarily to debug cei
import os
log.info('ENV vars: %s' % str(os.environ))
op = self.CFG.get("op", None)
datastore = self.CFG.get("datastore", None)
path = self.CFG.get("path", None)
prefix = self.CFG.get("prefix", get_sys_name()).lower()
log.info("DatastoreLoader: {op=%s, datastore=%s, path=%s, prefix=%s}" % (op, datastore, path, prefix))
self.da = datastore_admin.DatastoreAdmin()
if op:
if op == "load":
self.da.load_datastore(path, datastore, ignore_errors=False)
elif op == "dump":
self.da.dump_datastore(path, datastore)
elif op == "dumpres":
from ion.util.datastore.resources import ResourceRegistryHelper
rrh = ResourceRegistryHelper()
rrh.dump_resources_as_xlsx(path)
elif op == "blame":
# TODO make generic
self.da.get_blame_objects()
elif op == "clear":
self.da.clear_datastore(datastore, prefix)
else:
raise iex.BadRequest("Operation unknown")
else:
raise iex.BadRequest("No operation specified")
def on_quit(self):
pass
DatastoreLoader = DatastoreAdmin
|
""" Used to track events in tests. """
# Enthought library imports.
from traits.api import HasTraits, List, Str, Tuple
class EventTracker(HasTraits):
""" Used to track traits events. """
# The traits events that have fired.
#
# This is a list of tuples in the form:-
#
# (obj, trait_name, old, new)
events = List(Tuple)
# The names of the traits events that have fired.
#
# This is useful if you just care about the order of the events, not the
# contents.
event_names = List(Str)
# The trait event subscriptions used by the tracker.
#
# This is a list of tuples in the form:-
#
# (obj, trait_name)
#
# Where 'obj' is the object to listen to, and 'trait_name' is the name of
# the trait to listen to, or None to listen for all trait events.
subscriptions = List(Tuple)
###########################################################################
# Private interface.
###########################################################################
#### Trait change handlers ################################################
def _subscriptions_changed(self, old, new):
""" Static trait change handler. """
for subscription in old:
self._remove_subscription(subscription)
for subscription in new:
self._add_subscription(subscription)
return
def _subscriptions_items_changed(self, event):
""" Static trait change handler. """
for subscription in event.removed:
self._remove_subscription(subscription)
for subscription in event.added:
self._add_subscription(subscription)
return
def _listener(self, obj, trait_name, old, new):
""" Dynamic trait change listener. """
self.events.append((obj, trait_name, old, new))
self.event_names.append(trait_name)
return
#### Methods ##############################################################
def _add_subscription(self, subscription):
""" Add a subscription. """
obj, trait_name = subscription
if trait_name is not None:
obj.on_trait_change(self._listener, trait_name)
else:
obj.on_trait_change(self._listener)
return
def _remove_subscription(self, subscription):
""" Remove a subscription. """
obj, trait_name = subscription
if trait_name is not None:
obj.on_trait_change(self._listener, trait_name, remove=True)
else:
obj.on_trait_change(self._listener, remove=True)
return
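# Usage sketch (illustrative, not part of the original module): subscribe the
# tracker to a trait on some HasTraits object, change that trait, and inspect
# the recorded events. `some_object` and `some_trait` are hypothetical names.
#
#     tracker = EventTracker(subscriptions=[(some_object, 'some_trait')])
#     some_object.some_trait = 42
#     # tracker.event_names should now end with ['some_trait'] and
#     # tracker.events should hold the (obj, trait_name, old, new) tuple.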
#### EOF ######################################################################
|
#!/usr/bin/env python
'''
This script extracts info coded with intervals for any data with CHROM POS coordinates.
scores.file:
CHROM startPOS endPOS 12.4.SNPs.haplotype
scaffold_1 679 733 0.0
scaffold_1 1733 7988 0.0
scaffold_1 8339 10393 0.0
scaffold_1 10827 11638 0.0
scaffold_1 12147 14440 0.0
scaffold_1 14986 41140 0.0
scaffold_1 41583 55939 0.0
scaffold_1 56678 61321 0.602739726027
scaffold_1 61594 62590 0.0
scaffold_1 63013 63032 0.0
scaffold_1 63186 63915 0.0
scaffold_1 64331 69229 0.0
scaffold_1 69674 71548 0.0
scaffold_1 72356 73560 0.0
scaffold_1 73952 74255 1.0
scaffold_1 76025 76100 NA
scaffold_1 76499 76500 1.0
scaffold_1 77993 78223 0.0
scaffold_1 78581 78956 0.0
scaffold_1 79348 79750 NA
scaffold_1 80581 80986 NA
scaffold_1 81577 81767 0.0
scaffold_1 82218 85489 0.0
scaffold_1 85997 86249 NA
scaffold_1 87071 88130 0.0
scaffold_1 88973 89426 0.0
scaffold_1 89762 90013 0.0
scaffold_1 90564 90680 0.0
scaffold_1 91067 91347 0.0
scaffold_1 91724 100495 0.0
scaffold_1 101065 111027 0.0
scaffold_2 6186 6904 NA
scaffold_2 8480 11480 0.0
scaffold_2 11941 15733 0.0
genotype.file:
CHROM POS Value
scaffold_1 704 0.000000
scaffold_1 1704 0.000000
scaffold_1 2704 0.462166
scaffold_1 3704 0.616884
scaffold_1 4704 0.375514
scaffold_1 5704 0.190936
scaffold_1 6704 0.000000
scaffold_1 7704 0.000218
scaffold_1 8704 0.000246
scaffold_1 97041 0.076171
scaffold_2 1762 0.021667
scaffold_2 2762 0.405055
scaffold_2 3762 0.754771
scaffold_2 4762 0.429861
scaffold_2 5762 0.377096
scaffold_2 6762 1.022632
scaffold_2 7762 0.857520
scaffold_2 8762 0.021991
scaffold_2 9762 0.336997
output.file:
CHROM POS Value score
scaffold_1 704 0.000000 0.0
scaffold_1 2704 0.462166 0.0
scaffold_1 3704 0.616884 0.0
scaffold_1 4704 0.375514 0.0
scaffold_1 5704 0.190936 0.0
scaffold_1 6704 0.000000 0.0
scaffold_1 7704 0.000218 0.0
scaffold_1 8704 0.000246 0.0
scaffold_1 97041 0.076171 0.0
scaffold_2 6762 1.022632 NA
scaffold_2 8762 0.021991 0.0
scaffold_2 9762 0.336997 0.0
command:
python select_interval_info_for_genotypes.py -i genotype.file -s scores.file -o output.file
contact:
Dmytro Kryvokhyzha [email protected]
'''
############################# modules #############################
import calls # my custom module
############################ options ##############################
parser = calls.CommandLineParser()
parser.add_argument('-i', '--input', help = 'name of the file with genomic coordinates and values', type=str, required=True)
parser.add_argument('-o', '--output', help = 'name of the output file', type=str, required=True)
parser.add_argument('-s', '--scores_file', help = 'phasing scores (output of assign_HapCUT_blocks.py)', type=str, required=True)
args = parser.parse_args()
############################# program #############################
scoresFile = open(args.scores_file, "r")
scoresWords = scoresFile.readline().split()
scoresScore = scoresWords[3:]
scoresWords = scoresFile.readline().split()
scoresScaf = scoresWords[0]
scoresStart = int(scoresWords[1])
scoresEnd = int(scoresWords[2])
output = open(args.output, 'w')
counter = 0
with open(args.input) as input:
header_line = input.readline()
header_words = header_line.split()
headerP = '\t'.join(str(e) for e in header_words)
scoresScoreP = '\t'.join(str(e) for e in scoresScore)
output.write("%s\t%s\n" % (headerP, scoresScoreP))
for line in input:
inputWords = line.split()
inputScaf = inputWords[0]
inputPos = int(inputWords[1])
inputVal = str(inputWords[1:])
while ((inputScaf != scoresScaf) or (inputScaf == scoresScaf and inputPos > scoresEnd)):
scoresWords = scoresFile.readline().split()
if not scoresWords:
break
else:
scoresScaf = scoresWords[0]
scoresStart = int(scoresWords[1])
scoresEnd = int(scoresWords[2])
scoresScore = scoresWords[3:]
if (inputScaf == scoresScaf and inputPos >= scoresStart and inputPos <= scoresEnd):
inputWordsP = '\t'.join(str(e) for e in inputWords)
if len(scoresScore) > 1:
scoresScoreP = '\t'.join(str(e) for e in scoresScore)
else:
scoresScoreP = scoresScore
output.write('%s\t%s\n' % (inputWordsP, scoresScoreP))
counter += 1
if counter % 1000000 == 0:
            print(str(counter) + " lines processed")
input.close()
scoresFile.close()
output.close()
print('Done!')
|
#! /usr/bin/env python
# Copyright (c) 2016-2018, Rethink Robotics Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import argparse
from intera_motion_interface import (
MotionTrajectory,
MotionWaypoint,
MotionWaypointOptions
)
from intera_motion_msgs.msg import TrajectoryOptions
from geometry_msgs.msg import PoseStamped
import PyKDL
from tf_conversions import posemath
from intera_interface import Limb
def main():
"""
Move the robot arm to the specified configuration.
Call using:
$ rosrun intera_examples go_to_cartesian_pose.py [arguments: see below]
-p 0.4 -0.3 0.18 -o 0.0 1.0 0.0 0.0 -t right_hand
--> Go to position: x=0.4, y=-0.3, z=0.18 meters
--> with quaternion orientation (0, 1, 0, 0) and tip name right_hand
--> The current position or orientation will be used if only one is provided.
-q 0.0 -0.9 0.0 1.8 0.0 -0.9 0.0
--> Go to joint angles: 0.0 -0.9 0.0 1.8 0.0 -0.9 0.0 using default settings
--> If a Cartesian pose is not provided, Forward kinematics will be used
--> If a Cartesian pose is provided, the joint angles will be used to bias the nullspace
-R 0.01 0.02 0.03 0.1 0.2 0.3 -T
-> Jog arm with Relative Pose (in tip frame)
-> x=0.01, y=0.02, z=0.03 meters, roll=0.1, pitch=0.2, yaw=0.3 radians
    -> The fixed position and orientation parameters will be ignored if provided
"""
arg_fmt = argparse.RawDescriptionHelpFormatter
parser = argparse.ArgumentParser(formatter_class=arg_fmt,
description=main.__doc__)
parser.add_argument(
"-p", "--position", type=float,
nargs='+',
help="Desired end position: X, Y, Z")
parser.add_argument(
"-o", "--orientation", type=float,
nargs='+',
help="Orientation as a quaternion (x, y, z, w)")
parser.add_argument(
"-R", "--relative_pose", type=float,
nargs='+',
help="Jog pose by a relative amount in the base frame: X, Y, Z, roll, pitch, yaw")
parser.add_argument(
"-T", "--in_tip_frame", action='store_true',
help="For relative jogs, job in tip frame (default is base frame)")
parser.add_argument(
"-q", "--joint_angles", type=float,
nargs='+', default=[],
help="A list of joint angles, one for each of the 7 joints, J0...J6")
parser.add_argument(
"-t", "--tip_name", default='right_hand',
help="The tip name used by the Cartesian pose")
parser.add_argument(
"--linear_speed", type=float, default=0.6,
help="The max linear speed of the endpoint (m/s)")
parser.add_argument(
"--linear_accel", type=float, default=0.6,
help="The max linear acceleration of the endpoint (m/s/s)")
parser.add_argument(
"--rotational_speed", type=float, default=1.57,
help="The max rotational speed of the endpoint (rad/s)")
parser.add_argument(
"--rotational_accel", type=float, default=1.57,
help="The max rotational acceleration of the endpoint (rad/s/s)")
parser.add_argument(
"--timeout", type=float, default=None,
help="Max time in seconds to complete motion goal before returning. None is interpreted as an infinite timeout.")
args = parser.parse_args(rospy.myargv()[1:])
try:
rospy.init_node('go_to_cartesian_pose_py')
limb = Limb()
traj_options = TrajectoryOptions()
traj_options.interpolation_type = TrajectoryOptions.CARTESIAN
traj = MotionTrajectory(trajectory_options = traj_options, limb = limb)
wpt_opts = MotionWaypointOptions(max_linear_speed=args.linear_speed,
max_linear_accel=args.linear_accel,
max_rotational_speed=args.rotational_speed,
max_rotational_accel=args.rotational_accel,
max_joint_speed_ratio=1.0)
waypoint = MotionWaypoint(options = wpt_opts.to_msg(), limb = limb)
joint_names = limb.joint_names()
if args.joint_angles and len(args.joint_angles) != len(joint_names):
        rospy.logerr('len(joint_angles) does not match len(joint_names)!')
return None
if (args.position is None and args.orientation is None
and args.relative_pose is None):
if args.joint_angles:
# does Forward Kinematics
waypoint.set_joint_angles(args.joint_angles, args.tip_name, joint_names)
else:
rospy.loginfo("No Cartesian pose or joint angles given. Using default")
waypoint.set_joint_angles(joint_angles=None, active_endpoint=args.tip_name)
else:
endpoint_state = limb.tip_state(args.tip_name)
if endpoint_state is None:
rospy.logerr('Endpoint state not found with tip name %s', args.tip_name)
return None
pose = endpoint_state.pose
if args.relative_pose is not None:
if len(args.relative_pose) != 6:
rospy.logerr('Relative pose needs to have 6 elements (x,y,z,roll,pitch,yaw)')
return None
# create kdl frame from relative pose
rot = PyKDL.Rotation.RPY(args.relative_pose[3],
args.relative_pose[4],
args.relative_pose[5])
trans = PyKDL.Vector(args.relative_pose[0],
args.relative_pose[1],
args.relative_pose[2])
f2 = PyKDL.Frame(rot, trans)
# and convert the result back to a pose message
if args.in_tip_frame:
# end effector frame
pose = posemath.toMsg(posemath.fromMsg(pose) * f2)
else:
# base frame
pose = posemath.toMsg(f2 * posemath.fromMsg(pose))
else:
if args.position is not None and len(args.position) == 3:
pose.position.x = args.position[0]
pose.position.y = args.position[1]
pose.position.z = args.position[2]
if args.orientation is not None and len(args.orientation) == 4:
pose.orientation.x = args.orientation[0]
pose.orientation.y = args.orientation[1]
pose.orientation.z = args.orientation[2]
pose.orientation.w = args.orientation[3]
poseStamped = PoseStamped()
poseStamped.pose = pose
if not args.joint_angles:
                # using current joint angles for nullspace bias if not provided
joint_angles = limb.joint_ordered_angles()
waypoint.set_cartesian_pose(poseStamped, args.tip_name, joint_angles)
else:
waypoint.set_cartesian_pose(poseStamped, args.tip_name, args.joint_angles)
rospy.loginfo('Sending waypoint: \n%s', waypoint.to_string())
traj.append_waypoint(waypoint.to_msg())
result = traj.send_trajectory(timeout=args.timeout)
if result is None:
rospy.logerr('Trajectory FAILED to send')
return
if result.result:
rospy.loginfo('Motion controller successfully finished the trajectory!')
else:
rospy.logerr('Motion controller failed to complete the trajectory with error %s',
result.errorId)
except rospy.ROSInterruptException:
rospy.logerr('Keyboard interrupt detected from the user. Exiting before trajectory completion.')
if __name__ == '__main__':
main()
|
#!/usr/bin/env python3
# -*- encoding: utf8 -*-
"""
Convert ST/ASR STM transcript files into Kaldi-style data directory files
(wav.scp, utt2spk, segments, text.en, text.ta, reco2file_and_channel).
"""
import argparse
import itertools
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"--out",
"-o",
type=str,
help="Path to output directory.",
)
parser.add_argument("--data", "-d", type=str, help="Path to original corpus.")
args = parser.parse_args()
def time_to_hash(time_str):
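    # e.g. time_to_hash("12.5"): "%08.3F" yields "0012.500", and dropping the
    # decimal point gives "0012500" -- a zero-padded token for utterance ids.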
ret = "%08.3F" % float(time_str)
return "".join(str(ret).split("."))
def stm_to_kaldi(st_stm, asr_stm, dst):
data = {"F": [], "C": [], "S": [], "BT": [], "ET": [], "text_en": [], "text_ta": []}
with open(st_stm, "r", encoding="utf-8") as st_stm, open(
asr_stm, "r", encoding="utf-8"
) as asr_stm:
st_lines = st_stm.readlines()
asr_lines = asr_stm.readlines()
for i, (st_li, asr_li) in enumerate(zip(st_lines, asr_lines)):
F, C, S, BT, ET, _, text_en = st_li.strip().split("\t")
F2, _, _, _, _, _, text_ta = asr_li.strip().split("\t")
if F != F2:
sys.exit("ASR and ST STM files are not in the same order", F, F2)
data["F"].append(F)
data["C"].append(C)
data["S"].append(S)
data["BT"].append(BT)
data["ET"].append(ET)
data["text_en"].append(text_en)
data["text_ta"].append(text_ta)
with open(dst + "/wav.scp", "w", encoding="utf-8") as wav_scp, open(
dst + "/utt2spk", "w", encoding="utf-8"
) as utt2spk, open(dst + "/segments", "w", encoding="utf-8") as segments, open(
dst + "/text.en", "w", encoding="utf-8"
) as text_en, open(
dst + "/text.ta", "w", encoding="utf-8"
) as text_ta, open(
dst + "/reco2file_and_channel", "w", encoding="utf-8"
) as reco2file:
for i in range(len(data["F"])):
recid = data["F"][i].split("/")[-1].split(".")[0]
uttid = (
data["S"][i]
+ "_"
+ recid
+ "_"
+ time_to_hash(data["BT"][i])
+ "-"
+ time_to_hash(data["ET"][i])
)
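            # uttid takes the form <speaker>_<recording>_<start>-<end> with
            # zero-padded times, the usual Kaldi segment naming convention
            # (inferred from the STM fields; not stated in the original).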
sox_cmd = "sox -R -t wav - -t wav - rate 16000 dither |"
wav_scp.write(
" ".join(
[
recid,
"sph2pipe -f wav -p -c",
data["C"][i],
data["F"][i],
"|",
sox_cmd,
]
)
+ "\n"
)
utt2spk.write(" ".join([uttid, data["S"][i]]) + "\n")
segments.write(
" ".join([uttid, recid, data["BT"][i], data["ET"][i]]) + "\n"
)
text_en.write(" ".join([uttid, data["text_en"][i]]) + "\n")
text_ta.write(" ".join([uttid, data["text_ta"][i]]) + "\n")
# 2 channels are stored as separate sph, each with only 1 channel
reco2file.write(" ".join([recid, recid, "A"]) + "\n")
if __name__ == "__main__":
stm_to_kaldi(
args.data + "/stm/st-aeb2eng.norm.train.stm",
args.data + "/stm/asr-aeb.norm.train.stm",
args.out + "/train",
)
stm_to_kaldi(
args.data + "/stm/st-aeb2eng.norm.dev.stm",
args.data + "/stm/asr-aeb.norm.dev.stm",
args.out + "/dev",
)
stm_to_kaldi(
args.data + "/stm/st-aeb2eng.norm.test1.stm",
args.data + "/stm/asr-aeb.norm.test1.stm",
args.out + "/test1",
)
|
class BNode:
def __init__(self, val=None, left=None, right=None):
self.val = val
self.left = left
self.right = right
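# _unival returns a (is_unival, count) pair for the subtree rooted at `root`:
# `is_unival` is True when every node in that subtree holds the same value as
# its root, and `count` is the number of unival subtrees it contains. A leaf
# counts as one; an internal node adds one only when all of its children are
# unival subtrees that share its value.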
def _unival(root):
if root == None:
return (False, 0)
if root.left == None and root.right == None:
return (True, 1)
l_count, r_count = 0, 0
if root.left:
(l_is_unival, l_count) = _unival(root.left)
if root.right:
(r_is_unival, r_count) = _unival(root.right)
root_count = l_count + r_count
root_is_unival = False
if root.left and not root.right:
if l_is_unival and root.left.val == root.val:
root_is_unival = True
root_count += 1
elif root.right and not root.left:
if r_is_unival and root.right.val == root.val:
root_is_unival = True
root_count += 1
elif root.left and root.right:
if l_is_unival and root.left.val == root.val and r_is_unival and root.right.val == root.val:
root_is_unival = True
root_count += 1
return (root_is_unival, root_count)
def unival(root):
(is_unival, count) = _unival(root)
return count
root = BNode(5)
root.left = BNode(5)
root.right = BNode(6)
root.left.left = BNode(5)
root.left.right = BNode(5)
root.right.left = BNode(5)
root.right.right = BNode(5)
root.right.right.right = BNode(1)
print(unival(root))
root = BNode(5)
root.left = BNode(5)
root.right = BNode(5)
root.right.left = BNode(5)
root.right.right = BNode(5)
print(unival(root))
|
from python_experimenter.database_handler import (
create_database_handler_from_config,
MySqlDatabaseHandler,
)
class TestDatabaseHandlerCreation:
def test_database_handler_creation(self):
p = {
"keyfields": ["x", "y"],
"resultfields": ["addition", "multiplication"],
"db.host": ["192.168.0.1"],
"db.type": ["MYSQL"],
"db.username": ["experimenter"],
"db.password": ["1234"],
"db.database": ["experiments"],
"db.table": ["test"],
}
db_handler = create_database_handler_from_config(p)
assert db_handler is not None
assert isinstance(db_handler, MySqlDatabaseHandler)
assert db_handler.keyfields == ["x", "y"]
assert db_handler.resultfields == ["addition", "multiplication"]
assert db_handler.host == "192.168.0.1"
assert db_handler.user == "experimenter"
assert db_handler.password == "1234"
assert db_handler.database == "experiments"
assert db_handler.table == "test"
assert not db_handler.is_connected
assert db_handler.connection is None
assert db_handler.cursor is None
def test_unknown_db_type(self):
p = {
"keyfields": ["x", "y"],
"resultfields": ["addition", "multiplication"],
"db.host": ["192.168.0.1"],
"db.type": ["DB"],
"db.username": ["experimenter"],
"db.password": ["1234"],
"db.database": ["experiments"],
"db.table": ["test"],
}
db_handler = create_database_handler_from_config(p)
assert db_handler is None
def test_invalid_db_config(self):
p = {
"keyfields": ["x", "y"],
"resultfields": ["addition", "multiplication"],
"db.host": ["192.168.0.1"],
"db.type": ["DB"],
"db.username": ["experimenter"],
"db.password": ["1234"],
"db.database": ["experiments"],
}
db_handler = create_database_handler_from_config(p)
assert db_handler is None
|
#!/usr/bin/env python3.6
from Locker import Urufunguzo
from Credentials import Credential
import pyperclip
#Functions for Creating User
def hanga_user(name,fone,names,mail,ibanga):
'''
Function to create a new user
'''
user = Urufunguzo(name,fone,names,mail,ibanga)
return user
def save_users(Locker):
'''
Function to save user
'''
Locker.bika_user()
def delete_user(Locker):
'''
Function to delete user
'''
Locker.siba_user()
def search_user(string):
'''
    Function that searches for a user
'''
return Urufunguzo.ni_Izina(string)
def check_existing_user(string):
'''
    Function that checks whether a user exists
'''
return Urufunguzo.reba_user(string)
def display_user():
'''
    Function that returns all the saved users
'''
return Urufunguzo.user_bose()
def copy_user(Locker):
'''
Function to copy user
'''
Locker.terura_user()
def login(user_na,user_pa):
return Urufunguzo.login(user_na,user_pa)
#Functions for Creating Accounts
def rema_konti(konti,username,password):
'''
Function to create a new credential
'''
account = Credential(konti,username,password)
return account
def save_credential(Credentials):
'''
Function to save credential
'''
Credentials.bika_konti()
def delete_credential(Credentials):
'''
Function to delete credential
'''
Credentials.delete_konti()
def search_credential(string):
'''
    Function that searches for a credential
'''
return Credential.search_konti(string)
def check_existing_credential(string):
'''
    Function that checks whether a credential exists
'''
return Credential.genzura_neza_konti(string)
def display_credential():
'''
Function that returns all the saved credentials
'''
return Credential.konti_zose()
def copy_credential(Credentials):
'''
Function to copy credential
'''
Credentials.terura_konti()
#Main function for Creating user
def main():
print("*"*29)
print("WELCOME TO OUR APPLICATION.")
print("*"*29)
print(" What is your name?")
user_name = input()
print(f"So {user_name}. Enjoy Your Choice Using This Short Codes")
print('\n')
while True:
print("#"*19)
print("1 - Create Account")
print("2 - Display User")
print("3 - Search User")
print("4 - Copy User")
print("5 - Delete a User")
print("6 - Login")
print("7 - Exit")
print("#"*19)
Code = input()
if Code == '1':
print("New User")
print("="*10)
print ("Full Name:")
name = input()
print("Phone Number:")
fone = input()
print("UserName:")
names = input()
print("Email Address:")
mail = input()
print("Password:")
ibanga = input()
save_users(hanga_user(name,fone,names,mail,ibanga))
print ('\n')
print(f"New User {name} Created")
print ('\n')
elif Code == '2':
if display_user():
print("="*10)
print("List of Users")
print("="*10)
print('\n')
for user in display_user():
print(f"{user.name} {user.fone} {user.names} {user.mail}")
print('\n')
else:
print('\n')
print("You dont seem to have any users saved yet")
print('\n')
elif Code == '3':
print("Enter The Name You Want To Search For:")
search = input()
if check_existing_user(search):
user_search= search_user(search)
print('.' * 20)
print(f"Full Name:{user_search.name}")
print(f"UserName:{user_search.names}")
print(f"Phone Number:{user_search.fone}")
print(f"Email Address:{user_search.mail}")
print('.' * 20)
print ('\n')
else:
print("The User Doesn't Exist")
elif Code == '5':
print('Enter The Name:')
search = input()
if check_existing_user(search):
user_search= search_user(search)
print('.' * 20)
print(f"Full Name:{user_search.name}")
print(f"UserName:{user_search.names}")
print(f"Phone Number:{user_search.fone}")
print(f"Email Address:{user_search.mail}")
print('.' * 20)
print ('\n')
user_search.siba_user()
print('User Deleted')
else:
print("The User Doesn't Exist")
elif Code == '4':
print('Enter The Name:')
search = input()
if check_existing_user(search):
user_search= search_user(search)
print('.' * 20)
print(f"Full Name:{user_search.name}")
print(f"UserName:{user_search.names}")
print(f"Phone Number:{user_search.fone}")
print(f"Email Address:{user_search.mail}")
print('.' * 20)
print ('\n')
user_search.terura_user(search)
print('User Copied.')
else:
print("The User Doesn't Copied")
print ('\n')
elif Code == "7":
print("Thank you")
break
elif Code == '6':
print("Enter The UserName:")
user_na = input()
print("Enter Password:")
user_pa = input()
if check_existing_user(user_na) :
user_search= search_user(user_na)
if user_search.names == user_na:
if user_search.ibanga == user_pa:
user_search.search_user(user_na,user_pa)
print('User Logged in.')
print ('\n')
break
while True:
print("#"*19)
print("1 - Create Credential")
print("2 - Display Credential")
print("3 - Search Credential")
print("4 - Copy Credential")
print("5 - Delete Credential")
print("6 - Exit")
print("#"*19)
Tin = input()
if Tin == '1':
print("New Account")
print("="*10)
print ("Credential:")
konti = input()
print("UserName:")
username = input()
print("Password:")
password = input()
save_credential(rema_konti(konti,username,password))
print ('\n')
print(f"New User {username} Created")
print ('\n')
elif Tin == '2':
if display_credential():
print("="*10)
print("List of Users")
print("="*10)
print('\n')
for user in display_credential():
print(f"{user.konti} {user.username} {user.password} ")
print('\n')
else:
print('\n')
print("You dont seem to have any Credential saved yet")
print('\n')
elif Tin == '3':
print("Enter The UserName You Want To Search For:")
search = input()
if check_existing_credential(search):
                user_search = search_credential(search)
print('.' * 20)
print(f"Credential:{user_search.konti}")
print(f"UserName:{user_search.username}")
print(f"Password:{user_search.password}")
print('.' * 20)
print ('\n')
else:
print("The Credential Doesn't Exist")
elif Tin == '5':
print('Enter The Name:')
search = input()
if check_existing_credential(search):
                user_search = search_credential(search)
print('.' * 20)
print(f"Credential:{user_search.konti}")
print(f"UserName:{user_search.username}")
print(f"Password:{user_search.password}")
print('.' * 20)
print ('\n')
user_search.delete_konti()
print('Credential Deleted')
else:
print("Credential Doesn't Exist")
elif Tin == '4':
print('Enter The UserName:')
search = input()
if check_existing_credential(search):
                user_search = search_credential(search)
print('.' * 20)
print(f"Credential:{user_search.konti}")
print(f"UserName:{user_search.username}")
print(f"Password:{user_search.password}")
print('.' * 20)
print ('\n')
user_search.terura_konti(search)
print('User Copied.')
else:
print("The User Doesn't Copied")
print ('\n')
elif Tin == "6":
print("Thank you")
break
else:
print("PLZ!! Choose Number!")
if __name__ == '__main__':
main()
|
import numpy as np
import pylab as plt
from matplotlib.patches import Ellipse
fig=plt.figure(figsize=(22.62372, 12))
ax = fig.add_subplot(111)
fig.subplots_adjust(bottom=.2)
font1={'family':'Arial',
'color' : 'b',
'weight' : 'normal',
'size' : 30,
}
font2={'family':'Arial',
'color' : 'k',
'weight' : 'normal',
'size' : 30,
}
font3={'family':'Arial',
'color' : 'r',
'weight' : 'normal',
'size' : 40,
}
x=np.arange(.001,3,.001)
ax.plot(x,1/x,color='k',lw=3)
data=np.array([.2,2])
ax.scatter(data,1/data,s=100,color='b',zorder=100)
ax.plot(data,1/data,'b--',lw=3)
width=(12/22.62372)/(6/(3-.05))
height=1.0
circ=Ellipse((np.mean(data),np.mean(1/data)),width=width/5,height=height/5, color='b',zorder=1000)
ax.add_artist(circ)
circ=Ellipse((np.mean(data),1/np.mean(data)),width=width/5,height=height/5, color='k',zorder=1000)
ax.add_artist(circ)
ax.vlines([np.mean(data)],[1/np.mean(data)],[np.mean(1/data)],color='r',linestyle='dashed',lw=3)
ax.text(1.075,2.9,"Mean of transformations",fontdict=font1)
ax.text(.375,.6,"Transformation of mean",fontdict=font2)
ax.text(1,2,"Error",fontdict=font3,rotation=90)
ax.text(.22,1/.2,"$x_1$",fontdict=font1)
ax.text(2,.56,"$x_2$",fontdict=font1)
for item in ax.get_yticklabels():
item.set_fontsize(15)
for item in ax.get_xticklabels():
item.set_fontsize(15)
ax.tick_params(axis='x',length=0,width=0,direction='out',labelsize=0)
ax.tick_params(axis='y',length=0,width=0,direction='out',labelsize=0)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['bottom'].set_position(['outward',0])
ax.spines['left'].set_position(['outward',0])
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.set_xticks(range(14))
ax.set_ylabel('Transformed Data',fontsize=40,labelpad=20)
ax.set_xlabel('Original Data',fontsize=40,labelpad=20)
ax.set_xlim(0.05,3)
ax.set_ylim(0,6)
plt.savefig('figure_1.pdf')
plt.show()
|
from dataset.dataset import *
from torch.utils.data import Dataset, DataLoader
import getpass
import os
import socket
import numpy as np
from dataset.preprocess_data import *
import torch
from models.model import generate_model
from opts import parse_opts
from torch.autograd import Variable
import torch.nn.functional as F
import time
import sys
from utils import AverageMeter, calculate_accuracy, calculate_accuracy_video
import random
import pdb
import wandb
import json
from sklearn.metrics import confusion_matrix
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
class_map = {
"168": "jogging",
"259": "punching person (boxing)",
"260": "push up",
"57": "clapping",
}
total_predictions = {}
with open("device.json", "r") as devicefile:
device = json.load(devicefile)
device_id = device["device_id"]
device = torch.device("cuda:{}".format(device_id))
torch.cuda.set_device(device)
pred_list = []
true_list = []
def test():
print("Using GPU {}".format(device_id))
print("Device: {}".format(device))
correct = 0
opt = parse_opts()
print(opt)
opt.arch = "{}-{}".format(opt.model, opt.model_depth)
wandb.init(project="MARS", name="MARS test", notes=str(opt))
print("Preprocessing validation data ...")
data = globals()["{}_test".format(opt.dataset)](split=opt.split, train=0, opt=opt)
print("Length of validation data = ", len(data))
print("Preparing datatloaders ...")
val_dataloader = DataLoader(
data,
batch_size=1,
shuffle=False,
num_workers=opt.n_workers,
pin_memory=False,
drop_last=False,
)
print("Length of validation datatloader = ", len(val_dataloader))
result_path = "{}/{}/".format(opt.result_path, opt.dataset)
if not os.path.exists(result_path):
os.makedirs(result_path)
# define the model
print("Loading models... ", opt.model, opt.model_depth)
model1, parameters1 = generate_model(opt)
model1 = model1.to(device)
# if testing RGB+Flow streams change input channels
if not opt.only_RGB:
opt.input_channels = 2
model2, parameters2 = generate_model(opt)
model2 = model2.to(device)
if opt.resume_path1:
print("loading checkpoint {}".format(opt.resume_path1))
checkpoint = torch.load(opt.resume_path1)
assert opt.arch == checkpoint["arch"]
model1.load_state_dict(checkpoint["state_dict"])
if opt.resume_path2:
print("loading checkpoint {}".format(opt.resume_path2))
checkpoint = torch.load(opt.resume_path2)
assert opt.arch == checkpoint["arch"]
model2.load_state_dict(checkpoint["state_dict"])
model1.eval()
model2.eval()
wandb.watch(model1)
wandb.watch(model2)
accuracies = AverageMeter()
if opt.log:
if opt.only_RGB:
f = open(
"test_RGB_MARS_{}{}_{}_{}_{}.txt".format(
opt.model,
opt.model_depth,
opt.dataset,
opt.split,
opt.sample_duration,
),
"w+",
)
else:
f = open(
"test_RGB_Flow_{}{}_{}_{}_{}.txt".format(
opt.model,
opt.model_depth,
opt.dataset,
opt.split,
opt.sample_duration,
),
"w+",
)
f.write(str(opt))
f.write("\n")
f.flush()
with torch.no_grad():
for i, (clip, label) in enumerate(val_dataloader):
clip = clip.to(device)
label = label.to(device)
clip = torch.squeeze(clip)
if opt.only_RGB:
inputs = torch.Tensor(
int(clip.shape[1] / opt.sample_duration),
3,
opt.sample_duration,
opt.sample_size,
opt.sample_size,
)
for k in range(inputs.shape[0]):
inputs[k, :, :, :, :] = clip[
:, k * opt.sample_duration : (k + 1) * opt.sample_duration, :, :
]
inputs_var1 = Variable(inputs)
inputs_var2 = Variable(inputs)
else:
RGB_clip = clip[0:3, :, :, :]
Flow_clip = clip[3:, :, :, :]
inputs1 = torch.Tensor(
int(RGB_clip.shape[1] / opt.sample_duration),
3,
opt.sample_duration,
opt.sample_size,
opt.sample_size,
).to(device)
inputs2 = torch.Tensor(
int(Flow_clip.shape[1] / opt.sample_duration),
2,
opt.sample_duration,
opt.sample_size,
opt.sample_size,
).to(device)
for k in range(inputs1.shape[0]):
inputs1[k, :, :, :, :] = RGB_clip[
:, k * opt.sample_duration : (k + 1) * opt.sample_duration, :, :
]
inputs2[k, :, :, :, :] = Flow_clip[
:, k * opt.sample_duration : (k + 1) * opt.sample_duration, :, :
]
inputs_var1 = Variable(inputs1).to(device)
inputs_var2 = Variable(inputs2).to(device)
outputs_var1 = model1(inputs_var1).to(device)
outputs_var2 = model2(inputs_var2).to(device)
outputs_var = torch.mean(
torch.cat((outputs_var1, outputs_var2), dim=0), dim=0
).unsqueeze(0)
pred5 = np.array(outputs_var.topk(5, 1, True)[1].cpu().data[0])
if str(pred5[0]) in total_predictions:
total_predictions[str(pred5[0])] += 1
else:
total_predictions[str(pred5[0])] = 1
acc = float(pred5[0] == label[0])
if str(pred5[0]) in class_map:
pred_list.append(class_map[str(pred5[0])])
else:
pred_list.append("other")
true_list.append(class_map[str(int(label[0]))])
accuracies.update(acc, 1)
float_acc_log = {"Average Accuracy": accuracies.avg}
wandb.log(float_acc_log)
if int(pred5[0]) == int(label[0]):
prediction = "correct"
correct += 1
else:
prediction = "wrong"
line = (
"Video["
+ str(i)
+ "] : \t top5 "
+ str(pred5)
+ "\t top1 = "
+ str(pred5[0])
+ "\t true = "
+ str(label[0])
+ "\t video = "
+ str(accuracies.avg)
)
if prediction == "correct":
line2 = "Prediction correct!"
else:
line2 = "Prediction wrong"
line2 += " - Correct predictions: {}/{}\n".format(correct, i + 1)
print(line)
print(line2)
if opt.log:
f.write(line + "\n")
f.flush()
print(total_predictions)
cm = confusion_matrix(true_list, pred_list)
df_cm = pd.DataFrame(
cm, index=["clapping", "jogging", "other", "punching", "push up"], columns=["clapping", "jogging", "other", "punching", "push up"]
)
plt.figure(figsize = (15,10))
sn.set(font_scale=1.4) # for label size
sn.heatmap(df_cm, annot=True, annot_kws={"size": 16}) # font size
plt.savefig('cm.png')
wandb.save('cm.png')
print("Video accuracy = ", accuracies.avg)
line = "Video accuracy = " + str(accuracies.avg) + "\n"
if opt.log:
f.write(line)
if __name__ == "__main__":
test()
|
class MyHashMap:
class Entry:
def __init__(self, key, val, next):
self.key = key
self.val = val
self.next = next
def __init__(self):
self.mod = 100003
self.list = [self.Entry(-1, -1, None) for _ in range(self.mod)]
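        # Each bucket is a singly linked chain headed by a sentinel Entry(-1, -1, None),
        # so _find_pre can always return the predecessor of the node holding `key`
        # (or the tail of the chain when the key is absent).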
def _index(self, key):
return key % self.mod
def _find_pre(self, key):
index = self._index(key)
ptr = self.list[index]
while ptr.next is not None:
if ptr.next.key == key:
return ptr
ptr = ptr.next
return ptr
def put(self, key: int, value: int) -> None:
ptr = self._find_pre(key)
if ptr.next:
ptr.next.val = value
else:
ptr.next = self.Entry(key, value, None)
def get(self, key: int) -> int:
ptr = self._find_pre(key)
if ptr.next:
return ptr.next.val
return -1
def remove(self, key: int) -> None:
ptr = self._find_pre(key)
if ptr.next:
ptr.next = ptr.next.next
# Your MyHashMap object will be instantiated and called as such:
# obj = MyHashMap()
# obj.put(key,value)
# param_2 = obj.get(key)
# obj.remove(key)
if __name__ == "__main__":
h = MyHashMap()
print(h.put(1, 1))
print(h.put(2, 2))
print(h.get(1))
print(h.get(3))
print(h.remove(2))
print(h.get(2))
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "[email protected] (Liz Looney)"
import os
# PROJECT_ID is set in the environment in app engine and cloud functions.
PROJECT_ID = os.getenv('PROJECT_ID')
# ORIGIN is set in the environment in app engine, but not cloud functions.
ORIGIN = os.getenv('ORIGIN')
# REDIS_IP_ADDR may be set in the environment in app engine, but not cloud functions.
REDIS_IP_ADDR = os.getenv('REDIS_IP_ADDR')
# Expects to be 'development' or 'production'
ENVIRONMENT = os.getenv('ENVIRONMENT')
# Limits
MAX_DESCRIPTION_LENGTH = 30
MAX_VIDEOS_PER_TEAM = 50
MAX_VIDEO_SIZE_MB = 100
MAX_VIDEO_SIZE_BYTES = MAX_VIDEO_SIZE_MB * 1000 * 1000
MAX_VIDEO_LENGTH_SECONDS = 120
MAX_FRAMES_PER_VIDEO = 1000
MAX_VIDEO_RESOLUTION_WIDTH = 3840
MAX_VIDEO_RESOLUTION_HEIGHT = 2160
MAX_VIDEOS_TRACKING_PER_TEAM = 3
MAX_BOUNDING_BOX_PER_FRAME = 10
MAX_DATASETS_PER_TEAM = 20
|
#!/usr/bin/python
# Coding By IOXhop : www.ioxhop.com
# Sonthaya Nongnuch : www.fb.me/maxthai
import time
import IOXhop_MCUio as mcu
def main():
mcu.begin(0x08)
mcu.mode(9, mcu.OUTPUT)
while True:
        mcu.tone(9, 2000) # Send a 2 kHz tone to pin 9
time.sleep(1)
        mcu.Dtone(9) # Stop the tone on pin 9
time.sleep(1)
if __name__ == '__main__':
main() |
import sys
# taken from https://stackoverflow.com/a/11325249
class Logger(object):
def __init__(self, *files):
self.files = files
def write(self, obj):
for f in self.files:
f.write(obj)
f.flush()
def flush(self):
for f in self.files:
f.flush()
@staticmethod
def start(file):
f = open(file, "w")
logger = Logger(sys.stdout, f)
sys.stdout = logger
return logger
def stop(self):
sys.stdout = self.files[0]
self.files[1].close()
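# Usage sketch (illustrative): tee stdout to a file and restore it afterwards.
#
#     logger = Logger.start("run.log")
#     print("this line goes to both the console and run.log")
#     logger.stop()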
|
# Copyright 2016, Kevin Christen and the juno-addresses contributors.
from collections import defaultdict
import argparse
import csv
import sys
from juno_addresses import parser
COLUMNS = [
'Name',
'Given Name',
'Family Name',
'Nickname',
'E-mail 1 - Value',
'Phone 1 - Type',
'Phone 1 - Value',
'Phone 2 - Type',
'Phone 2 - Value',
'Address 1 - Formatted',
]
def format_name(name):
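    # e.g. "Doe, John" -> ("John Doe", "John", "Doe"); a name without a comma
    # is returned unchanged as the full name with empty first/last parts.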
full = name
first = last = ''
names = name.split(',', 1)
if len(names) == 2:
first = names[1].strip()
last = names[0].strip()
full = '{} {}'.format(first, last)
return (full, first, last)
def format_address(address):
return ' '.join(address)
def format_addresses(input, output, include_deleted=False, mappings=[]):
mapped_fields = [ mapping.split(':')[0] for mapping in mappings ]
mapped_columns = [ mapping.split(':')[1] for mapping in mappings ]
writer = csv.writer(output)
writer.writerow(COLUMNS + mapped_columns)
for entry in parser.parser(input):
if entry['Type'] == 'Entry' and (include_deleted or not entry['Deleted']):
e = defaultdict(str)
for k, v in entry.items():
e[k] = v
row = format_name(e['Name']) + \
( e['Alias'],
e['Email'],
'Home', e['Primary Phone'],
'Mobile', e['Mobile Phone'],
format_address(e['Address']),
)
for mapped_field in mapped_fields:
row = row + (e[mapped_field],)
writer.writerow(row)
def main():
arg_parser = argparse.ArgumentParser(
description='Convert a Juno address book into a Gmail compatible CSV file.')
arg_parser.add_argument(
'input',
nargs='?',
type=argparse.FileType('r'),
default=sys.stdin,
help='Juno address book file (addrbook.nv). Defaults to stdin.')
arg_parser.add_argument(
'output',
nargs='?',
type=argparse.FileType('w'),
default=sys.stdout, help='Output file. Defaults to stdout.')
arg_parser.add_argument(
'-d', '--deleted',
action='store_true',
help='Include deleted entries')
arg_parser.add_argument(
'-m', '--map',
action='append',
default=[],
help='Additional mappings of the form from:to')
args = arg_parser.parse_args()
format_addresses(args.input, args.output, include_deleted=args.deleted,
mappings=args.map)
if __name__ == '__main__':
main()
|
import numpy as np
from .Scan import Scan
from .Annotation import Annotation
import matplotlib.pyplot as plt
from skimage.measure import find_contours
from matplotlib.widgets import Slider
def consensus(anns, clevel=0.5, pad=None, ret_masks=True):
"""Return the boolean-valued consensus volume amongst the
provided annotations (`anns`) at a particular consensus level
(`clevel`).
Parameters
----------
anns: list of `pylidc.Annotation` objects
This list should be probably be one of the lists
returned by the `pylidc.Scan.cluster_annotations`
routine.
clevel: float, default=0.5
The consensus fraction. For example, if clevel=0.5, then
a voxel will have value 1 in the returned boolean volume
when >= 50% of the segmentations include that voxel, and 0
otherwise.
pad: int, list, or float, default=None
        See `Annotation.bbox` for a description of this argument.
ret_masks: bool, default=True
If True, a list of masks is also returned corresponding to
        all the annotations. Note that this is slightly different from calling
`boolean_mask` on each respective Annotation object because these
volumes will be the same shape and in a common reference frame.
Returns
-------
consensus_mask, consensus_bbox[, masks]: (ndarray, tuple[, list])
`consensus_mask` is the boolean-valued volume of the annotation
masks at `clevel` consensus. `consensus_bbox` is a 3-tuple of
slices that can be used to index into the image volume at the
corresponding location of `consensus_mask`. `masks` is a list of
boolean-valued mask volumes corresponding to each Annotation object.
Each mask in the `masks` list has the same shape and is sampled in
the common reference frame provided by `consensus_bbox`.
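    Example
    -------
    A minimal usage sketch (assumes the scan's annotations cluster into at
    least one nodule; variable names are illustrative)::
        import pylidc as pl
        from pylidc.utils import consensus
        scan = pl.query(pl.Scan).first()
        nodule_anns = scan.cluster_annotations()[0]
        cmask, cbbox, masks = consensus(nodule_anns, clevel=0.5)
        vol = scan.to_volume()
        print(vol[cbbox].shape, cmask.shape)  # the two shapes should match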
"""
bmats = np.array([a.bbox_matrix(pad=pad) for a in anns])
imin,jmin,kmin = bmats[:,:,0].min(axis=0)
imax,jmax,kmax = bmats[:,:,1].max(axis=0)
# consensus_bbox
cbbox = np.array([[imin,imax],
[jmin,jmax],
[kmin,kmax]])
masks = [a.boolean_mask(bbox=cbbox) for a in anns]
cmask = np.mean(masks, axis=0) >= clevel
cbbox = tuple(slice(cb[0], cb[1]+1, None) for cb in cbbox)
if ret_masks:
return cmask, cbbox, masks
else:
return cmask, cbbox
def volume_viewer(vol, mask=None, axis=2, aspect=None, **line_kwargs):
"""
Interactive volume viewer utility
Parameters
----------
vol: ndarray, ndim=3
An image volume.
mask: ndarray, ndim=3, dtype=bool
A boolean mask volume.
axis: int, default=2
Image slices of the volume are viewed by holding this
axis to integer values.
aspect: float or :class:`pylidc.Scan` object, default=None
If float, then the argument is passed to `pyplot.imshow`.
        If the Scan object associated with the image volume is passed,
then the image aspect ratio is computed automatically. This is useful
when the axis != 2, where the image slices are most likely
anisotropic.
line_kwargs: args
Any keyword arguments that can be passed to `matplotlib.pyplot.plot`.
Example
-------
An example::
import pylidc as pl
from pylidc.utils import volume_viewer
ann = pl.query(pl.Annotation).first()
vol = ann.scan.to_volume()
padding = 70.0
mask = ann.boolean_mask(pad=padding)
bbox = ann.bbox(pad=padding)
volume_viewer(vol[bbox], mask, axis=0, aspect=ann.scan,
ls='-', lw=2, c='r')
"""
if vol.ndim !=3:
raise TypeError("`vol` must be 3d.")
if axis not in (0,1,2):
raise ValueError("`axis` must be 0,1, or 2.")
if mask is not None:
if mask.dtype != bool:
raise TypeError("mask was not bool type.")
if vol.shape != mask.shape:
raise ValueError("Shape mismatch between image volume and mask.")
if aspect is not None and isinstance(aspect, Scan):
scan = aspect
dij = scan.pixel_spacing
dk = scan.slice_spacing
d = np.r_[dij, dij, dk]
inds = [i for i in range(3) if i != axis]
aspect = d[inds[0]] / d[inds[1]]
else:
aspect = 1.0 if aspect is None else float(aspect)
nslices = vol.shape[axis]
k = int(0.5*nslices)
# Selects the jth index along the correct axis.
slc = lambda j: tuple(np.roll([j, slice(None), slice(None)], axis))
fig = plt.figure(figsize=(6,6))
aximg = fig.add_axes([0.1,0.2,0.8,0.8])
img = aximg.imshow(vol[slc(k)], vmin=vol.min(), aspect=aspect,
vmax=vol.max(), cmap=plt.cm.gray)
aximg.axis('off')
if mask is not None:
contours = []
for i in range(nslices):
contour = []
            for c in find_contours(mask[slc(i)].astype(float), 0.5):
line = aximg.plot(c[:,1], c[:,0], **line_kwargs)[0]
line.set_visible(0)
contour.append(line)
contours.append(contour)
axslice = fig.add_axes([0.1, 0.1, 0.8, 0.05])
axslice.set_facecolor('w')
sslice = Slider(axslice, 'Slice', 0, nslices-1,
valinit=k, valstep=1)
def update(i):
img.set_data(vol[slc( int(i) )])
sslice.label.set_text('%d'%i)
if mask is not None:
for ic,contour in enumerate(contours):
for c in contours[ic]:
c.set_visible(ic == int(i))
fig.canvas.draw_idle()
sslice.on_changed(update)
update(k)
plt.show()
|
'''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | | / _ \ | ___ \_ _|
| . . | ___ __| | ___| | / /_\ \| |_/ / | |
| |\/| |/ _ \ / _` |/ _ \ | | _ || __/ | |
| | | | (_) | (_| | __/ | | | | || | _| |_
\_| |_/\___/ \__,_|\___|_| \_| |_/\_| \___/
Train models using autogbt: https://github.com/pfnet-research/autogbt-alt
This is enabled if the default_training_script = ['autogbt']
'''
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
import random, pickle, time, json, os, shutil
import numpy as np
from sklearn.preprocessing import LabelBinarizer, StandardScaler
from sklearn.model_selection import train_test_split
try:
from autogbt import AutoGBTClassifier
except:
print('initializing installation...')
os.system('pip3 install git+https://github.com/pfnet-research/autogbt-alt.git')
from autogbt import AutoGBTClassifier
def train_autogbt(X_train,X_test,y_train,y_test,mtype,common_name_model,problemtype,classes,default_featurenames,transform_model,settings,model_session):
# make initial names and lists
files=list()
model_name=common_name_model+'.pickle'
# train classifier
model = AutoGBTClassifier()
model.fit(X_train, y_train)
print('saving model...')
pmodel=open(model_name,'wb')
g=pickle.dump(model, pmodel)
pmodel.close()
files.append(model_name)
model_dir=os.getcwd()
return model_name, model_dir, files
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import functools
def ignore_exception_decorator(func):
"""どんなエラーも無視してしまうデコレータ"""
@functools.wraps(func)
def ignore_exception(*args, **kargs):
try:
return func(*args, **kargs)
except:
pass
return ignore_exception
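# Usage sketch (illustrative):
#
#     @ignore_exception_decorator
#     def risky():
#         raise ValueError("boom")
#
#     risky()  # returns None instead of raising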
|
import base64
from datetime import datetime
import logging
import re
import json
from sets import Set
from datawinners.project.views.data_sharing import DataSharing
from datawinners.alldata.helper import get_all_project_for_user
from datawinners.blue.correlated_xlxform import ParentXform
from django.http import HttpResponseBadRequest, HttpResponseNotFound, HttpResponse
from django.views.decorators.csrf import csrf_exempt
import magic
from datawinners.project.submission.submission_search import get_submissions_paginated, get_submission_count
from datawinners.accountmanagement.localized_time import get_country_time_delta
from datawinners.dataextraction.helper import convert_date_string_to_UTC
from datawinners.dcs_app.auth import basicauth_allow_cors, response_json_cors, enable_cors
from datawinners.blue.view import SurveyWebXformQuestionnaireRequest, logger
from datawinners.blue.xform_bridge import XFormTransformer, XFormSubmissionProcessor
from datawinners.blue.xform_web_submission_handler import XFormWebSubmissionHandler
from datawinners.main.database import get_database_manager
from datawinners.search.index_utils import es_questionnaire_field_name
from datawinners.search.submission_query import SubmissionQueryResponseCreator
from datawinners.utils import get_organization
from mangrove.errors.MangroveException import DataObjectNotFound
from mangrove.form_model.form_model import FormModel
from mangrove.form_model.project import Project
from mangrove.transport.player.new_players import XFormPlayerV2
from mangrove.utils.dates import convert_date_time_to_epoch, utcnow
logger = logging.getLogger("datawinners.xlfrom.client")
@csrf_exempt
@basicauth_allow_cors()
def get_questions_paginated_or_by_ids(request):
manager = get_database_manager(request.user)
start = int(request.GET.get('start', '0'))
length = int(request.GET.get('length', '10'))
ids = request.GET.getlist('ids')
if ids:
projects = [_project_details(manager, project_id, request.user) for project_id in ids]
projects = list(filter(lambda x: x != None, projects))
return response_json_cors(projects)
project_list = []
rows = get_all_project_for_user(request.user)
for row in rows:
questionnaire = FormModel.get(manager, row['value']['_id'])
if questionnaire.xform:
project_temp = dict(name=questionnaire.name, project_uuid=questionnaire.id, version=questionnaire._doc.rev)
project_list.append(project_temp)
return response_json_cors({"projects":project_list[start:start+length],
"total":len(project_list),
"start":start,
"length":length})
def _project_details(manager, project_uuid, user):
try:
project = Project.get(manager, project_uuid)
xform = project.xform
can_create_parent = project.is_parent_project and is_authorized_for_project(user, project)
can_create = can_create_parent or not project.is_parent_project
updated_xform = xform if can_create else ParentXform().make_all_fields_read_only(xform)
project_response = dict(name=project.name, project_uuid=project.id, version=project._doc.rev,
created=str(project.created),
xform=re.sub(r"\n", " ", XFormTransformer(updated_xform).transform()),
has_media_field=project.is_media_type_fields_present,
last_updated=utcnow().isoformat(),
is_assigned=is_authorized_for_project(user, project),
displayable_mobile_fields=project.mobile_main_fields)
_update_response_with_relation(project, project_response)
return project_response
except DataObjectNotFound:
#TODO raise not found exception or some mechanism to propagate this above
return
def is_authorized_for_project(request_user, project):
user_profile = request_user.get_profile()
if user_profile.reporter_id in project.data_senders:
return True
return False
def _update_response_with_relation(project, project_response):
if project.is_child_project:
project_response.update({'project_type': 'child',
'parent_info':{'action_label': project.parent_info.get('action_label'),
'parent_uuid': project.parent_info.get('parent_uuid', ""),
'parent_fields_code_label_str':
json.dumps(project.parent_info.get('parent_fields_code_label', ""))},
'child_ids':''})
elif project.is_parent_project:
project_response.update({'project_type': 'parent',
'child_ids': ','.join(project.child_ids),
'parent_info':{'action_label':'', 'parent_field_codes': ''}})
else:
project_response.update({'project_type': 'none', 'parent_info':{'action_label':'', 'parent_field_codes': ''}, 'child_ids':''})
@csrf_exempt
@basicauth_allow_cors()
def authenticate_user(request):
return response_json_cors({'auth':'success', 'hash': base64.b64encode(str(request.user)) })
@csrf_exempt
@basicauth_allow_cors()
def check_submissions_status(request, project_uuid):
req_id_version_array = json.loads(request.POST['submissions'])
outdated_ids = []
insync_ids = []
req_id_version_dict = {}
manager = get_database_manager(request.user)
for single_dict in req_id_version_array:
req_id_version_dict[single_dict['id']] = single_dict['rev']
req_ids = req_id_version_dict.keys()
rows = manager.load_all_rows_in_view("survey_response_by_survey_response_id", keys=req_ids)
id_version_dict = {r.value['_id']:r.value['_rev'] for r in rows if not r.value['void']}
req_ids_set = Set(req_ids)
ids = id_version_dict.keys()
ids_not_found = list(req_ids_set.difference(ids))
ids_found = req_ids_set.intersection(ids)
for id_found in ids_found:
if req_id_version_dict[id_found] == id_version_dict[id_found]:
insync_ids.append(id_found)
else:
outdated_ids.append(id_found)
return response_json_cors({
'both':insync_ids,
'server-deleted':ids_not_found,
'outdated':outdated_ids})
@csrf_exempt
@basicauth_allow_cors()
def paginated_submissions_or_by_id_or_create(request, project_uuid):
if request.method == 'GET':
return _paginate_submissions_or_by_ids(project_uuid, request)
elif request.method == 'POST':
return _create_submission(request)
def _get_submissions_by_ids(project_uuid, request):
survey_request = SurveyWebXformQuestionnaireRequest(request, project_uuid, XFormSubmissionProcessor())
submissions = survey_request.get_many_submissions(request.GET['ids'])
return response_json_cors(submissions)
def _paginate_submissions_or_by_ids(project_uuid, request):
if request.GET.getlist('ids'):
return _get_submissions_by_ids(project_uuid, request)
else:
is_slim_submission_requested = not (request.GET.get('view') and request.GET['view'] == 'full')
survey_request = SurveyWebXformQuestionnaireRequest(request, project_uuid, XFormSubmissionProcessor())
slim_submission_response = _get_slim_submission_paginated(request, project_uuid)
if is_slim_submission_requested:
return response_json_cors(slim_submission_response)
ID_INDEX = 0
submission_ids = [slim_submission[ID_INDEX] for slim_submission in slim_submission_response['data']]
submissions_response = survey_request.get_many_submissions(submission_ids)
submissions_response_paginated = slim_submission_response
submissions_response_paginated.update({'data': submissions_response})
return response_json_cors(submissions_response_paginated)
def _create_submission(request):
try:
form_code = _get_form_code_from_xform(request.POST['form_data']);
response = XFormWebSubmissionHandler(request=request, form_code=form_code). \
create_new_submission_response()
return enable_cors(response)
except Exception as e:
logger.exception("Exception in submission : \n%s" % e)
return HttpResponseBadRequest()
def _get_form_code_from_xform(xform):
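    # e.g. '<form_code>abc123</form_code>' embedded anywhere in the XForm
    # string yields 'abc123' ('abc123' is an illustrative value).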
return re.search('<form_code>(.+?)</form_code>', xform).group(1)
@csrf_exempt
@basicauth_allow_cors()
def update_submission_or_get_by_id(request, project_uuid, submission_uuid):
if request.method == 'GET':
survey_request = SurveyWebXformQuestionnaireRequest(request, project_uuid, XFormSubmissionProcessor())
content = survey_request.get_submission(submission_uuid)
return response_json_cors(content)
elif request.method == 'POST':
try:
form_code = _get_form_code_from_xform(request.POST['form_data']);
response = XFormWebSubmissionHandler(request=request, form_code=form_code).\
update_submission_response(submission_uuid)
return enable_cors(response)
except LookupError:
return enable_cors(HttpResponseNotFound())
except Exception as e:
logger.exception("Exception in submission : \n%s" % e)
return HttpResponseBadRequest()
@csrf_exempt
@basicauth_allow_cors()
def submit_submission(request):
try:
response = XFormWebSubmissionHandler(request=request).\
create_new_submission_response()
response['Location'] = request.build_absolute_uri(request.path)
return enable_cors(response)
except Exception as e:
logger.exception("Exception in submission : \n%s" % e)
return HttpResponseBadRequest()
@csrf_exempt
@basicauth_allow_cors()
def get_projects_status(request):
response_projects = []
manager = get_database_manager(request.user)
client_projects = json.loads(request.POST['projects'])
current_date_time = utcnow().isoformat()
unassigned_uuids = []
for client_project in client_projects:
try:
server_project = Project.get(manager, client_project['project_uuid'])
_add_to_unassigned(unassigned_uuids, request.user, server_project)
if server_project.is_void():
response_projects.append({'project_uuid': client_project['project_uuid'], 'status': 'server-deleted'})
elif server_project.revision != client_project['version'] :
response_projects.append({'project_uuid': server_project.id, 'status': 'outdated'})
except Exception:
response_projects.append({'project_uuid': client_project['project_uuid'], 'status': 'server-deleted'})
return response_json_cors({'outdated_projects': response_projects, 'last_updated': current_date_time,
'unassigned_uuids': unassigned_uuids})
def _add_to_unassigned(unassigned_uuids, user, server_project):
if not is_authorized_for_project(user, server_project):
unassigned_uuids.append(server_project.id)
@csrf_exempt
@basicauth_allow_cors()
def attachment_post(request, survey_response_id):
player = XFormPlayerV2(get_database_manager(request.user))
player.add_new_attachments(request.FILES, survey_response_id)
return HttpResponse(status=201)
@csrf_exempt
@basicauth_allow_cors()
def attachment_get(request, survey_response_id, file_name):
manager = get_database_manager(request.user)
try:
file_content = manager.get_attachments(survey_response_id, attachment_name=file_name.strip())
return HttpResponse(file_content, mimetype=magic.from_buffer(file_content, mime=True))
except LookupError:
return HttpResponseNotFound('Attachment not found')
@csrf_exempt
@basicauth_allow_cors()
def get_delta_submission(request, project_uuid):
survey_request = SurveyWebXformQuestionnaireRequest(request, project_uuid, XFormSubmissionProcessor())
to_time = convert_date_time_to_epoch(datetime.utcnow())
from_time = int(request.GET.get('last_fetch'))
if DataSharing().is_admin(request.user):
submissions = survey_request.get_submission_updated_between(from_time, to_time)
else:
ds_tag = request.user.get_profile().get_assigned_tag()
submissions = survey_request.get_submission_updated_between(from_time, to_time, ds_tag)
return response_json_cors({'submissions':submissions,
'last_fetch': convert_date_time_to_epoch(datetime.utcnow())})
def _get_slim_submission_paginated(request, project_uuid):
dbm = get_database_manager(request.user)
form_model = FormModel.get(dbm, project_uuid)
length = int(request.GET.get('length', '10'))
start = int(request.GET.get('start', '0'))
search_text = request.GET.get('search_str')
search_parameters = {}
search_parameters.update({"start_result_number": start})
search_parameters.update({"number_of_results": length})
search_parameters.update({"filter": 'all'})
search_parameters.update({"headers_for": 'all'})
search_parameters.update({'response_fields': ['ds_id', 'ds_name', 'date', 'status', 'date_updated']})
search_parameters.update({"sort_field": "date"})
search_parameters.update({"order": "-"})
search_filters = {"submissionDatePicker": "All Dates", "datasenderFilter": "", "search_text": search_text,
"dateQuestionFilters": {}, "uniqueIdFilters": {}}
DataSharing(form_model, request.user).append_data_filter(search_filters)
search_parameters.update({"search_filters": search_filters})
search_parameters.update({"search_text": search_text})
local_time_delta = get_country_time_delta('IN')
search_results, query_fields = get_submissions_paginated(dbm, form_model, search_parameters, local_time_delta)
submission_count_with_filters = get_submission_count(dbm, form_model, search_parameters, local_time_delta)
submissions = SubmissionQueryResponseCreator(form_model, local_time_delta, use_iso_create_date=True) \
.create_response(query_fields, search_results)
return {
'data': submissions,
'headers': '',
'total': submission_count_with_filters,
'start': start,
"search_count": len(submissions),
'length': length
}
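# Query-parameter sketch for the slim-submission pagination above (parameter
# names are taken from _get_slim_submission_paginated; the URL routing itself is
# defined elsewhere and not shown here):
#   ?start=0&length=10&search_str=<optional text>
# The JSON returned to the client mirrors the dict built above:
#   {'data': [...], 'headers': '', 'total': <count>, 'start': 0, 'search_count': <len(data)>, 'length': 10}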
|
colormode(RGB)
fill(0)
rect(0,0,100,100)
fill(0,0,0)
rect(100.0,0,100,100)
colormode(CMYK)
fill(0,0,0,1)
rect(200.0,0,100,100)
|
import turtle
def draw_green_triangle(ram):
ram.color("#64DD17")
ram.begin_fill()
for i in range(1,3):
ram.forward(100)
ram.left(120)
ram.forward(100)
ram.end_fill()
def draw_white_triangle(guru):
guru.left(120)
guru.forward(100)
guru.left(120)
guru.forward(50)
guru.begin_fill()
guru.color("#fff")
guru.left(60)
guru.forward(50)
for i in range(1,3):
guru.left(120)
guru.forward(50)
guru.end_fill()
def draw_full_triangle(block):
# first triangle
draw_green_triangle(block)
draw_white_triangle(block)
# second triangle
block.left(60)
block.forward(50)
block.right(120)
draw_green_triangle(block)
draw_white_triangle(block)
# third triangle
block.right(120)
block.forward(50)
block.right(60)
draw_green_triangle(block)
draw_white_triangle(block)
def draw_art():
dev = turtle.Turtle()
window = turtle.Screen()
window.bgcolor("#fff")
dev.setpos(0,0)
dev.shape("turtle")
for i in range(1,4):
draw_full_triangle(dev)
dev.left(60)
dev.forward(50)
dev.right(120)
window.exitonclick()
draw_art()
|
def bit_req(A, B):
    """
    Bits required to convert int A to int B
    """
    c = A ^ B
    return countOnes(c)
def countOnes(c):
    count = 0
    if c == 1:
        return 1
    while c >= 1:
        b = c % 2
        if b == 1:
            count += 1
        c = c // 2
    return count
print(bit_req(4, 7))
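# Equivalent one-liner sketch (Python 3; not part of the original exercise):
#   bin(4 ^ 7).count("1")   # -> 2, same result as bit_req(4, 7)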
|
"""
Simulates the actual hardware. This is the package used by the unit tests.
"""
import time
import config
heating = False
cooling = False
stirring = False
temperature = -1
timer = time.time()
def log(message):
    print('hardware.simulation - ' + str(message))
def secondSinceStart():
"""
The number of seconds since this package was started multiplied by config.hardwareSpeedup.
    This can effectively simulate time speedups for testing recipes.
:return:
The number of seconds since this package was started multiplied by config.hardwareSpeedup.
"""
elapsed = time.time() - timer
if hasattr(config,'hardwareSpeedup'):
speed = config.hardwareSpeedup
        if speed is not None:
return elapsed * speed
return elapsed
def sleep(seconds):
"""
    Sleep for a number of seconds or, if config.hardwareSpeedup is configured, for
    seconds/config.hardwareSpeedup.
The point of this method is to allow for speeding up time without modifying the recipes. This
is especially useful for testing.
:param seconds:
Number of seconds to sleep. In real life will actually sleep for seconds/config.hardwareSpeedup.
:return:
None
"""
if hasattr(config,'hardwareSpeedup'):
speed = config.hardwareSpeedup
        if speed is not None:
time.sleep(seconds/speed)
return
time.sleep(seconds)
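# Usage sketch for the speed-up mechanism above (config.hardwareSpeedup is set by
# the caller or test harness; the numbers are illustrative):
#   config.hardwareSpeedup = 60    # one simulated minute per real second
#   sleep(120)                     # returns after roughly 2 real seconds
#   secondSinceStart()             # reports simulated (sped-up) seconds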
def turnHeaterOn():
"""
Sets the heater flag for the simulation.
:return:
None
"""
global heating
log('Turning on heat')
heating = True
def turnHeaterOff():
"""
Un-sets the heater flag for the simulation.
:return:
None
"""
global heating
log('Turning off heat')
heating = False
def turnCoolerOn():
"""
Sets the cooler flag for the simulation.
:return:
None
"""
global cooling
log('Turning on cooling')
cooling = True
def turnCoolerOff():
"""
    Un-sets the cooler flag for the simulation.
:return:
None
"""
global cooling
log('Turning off cooling')
cooling = False
def getTemp():
"""
Simulates and returns temperature.
Temperature starts off at 24C and changes every time the function is called as follows:
    heater flag on: +1C
    cooler flag on: -1C
    both flags off: drifts 0.1C per call back towards 24C
    :return:
        The simulated temperature in degrees C.
"""
global temperature, heating
if temperature == -1:
temperature = 24
else:
        if heating:
temperature = temperature + 1
        elif cooling:
temperature = temperature - 1
else:
if temperature > 24:
temperature = temperature - 0.1
elif temperature < 24:
temperature = temperature + 0.1
print('Temperature read as: ' + str(temperature))
return temperature
def turnStirrerOn():
"""
Sets the stirrer flag for the simulation.
:return:
None
"""
global stirring
log('Starting to stir liquid')
stirring = True
def turnStirrerOff():
"""
Un-sets the stirrer flag for the simulation.
:return:
None
"""
global stirring
log('Stirring stopped')
stirring = False
def pumpDispense(pumpId,volume):
"""
Displays pump dispensing message.
:param pumpId:
The pump id. One of 'A' or 'B'
:param volume:
The number ml to dispense
:return:
None
"""
if pumpId == 'A':
log('Dispensing ' + str(volume) + 'ml from pump A')
elif pumpId == 'B':
log('Dispensing ' + str(volume) + 'ml from pump B')
|
import connexion
from flask_cors import CORS
if __name__ == '__main__':
capp = connexion.FlaskApp(__name__, specification_dir='specs/')
capp.add_api('blog.yaml', arguments={'title': 'Hello World Example'})
CORS(capp.app)
capp.run(host='0.0.0.0', debug=True, port=9090) |
#python
import ast
import jwt
#web app
import tornado
from tornado.ioloop import IOLoop
import jinja2
from flask import Flask, redirect, url_for, session, request, jsonify, render_template
#pdf context
import fitz
#internal
from handlers.apiBaseHandler import BaseHandler
from models import User, Document, Link, signRecord, signerUser
from handlers.emailHandler import Mailer
from handlers.WSHandler import *
from handlers.manageDocuments import ManageDocuments
from utils import *
latex_jinja_env = jinja2.Environment(
block_start_string = '\BLOCK{',
block_end_string = '}',
variable_start_string = '${{',
variable_end_string = '}}$',
comment_start_string = '\#{',
comment_end_string = '}',
line_statement_prefix = '%%line',
line_comment_prefix = '%#line',
trim_blocks = True,
autoescape = False,
loader = jinja2.FileSystemLoader(os.path.abspath('/'))
)
# Load Logging definition
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('tornado-info')
SECRET = conf.SECRET
RENDER_EMAIL = "render_and_send_by_email"
RENDER_HASH = "render_sethash_and_download"
RENDER_NOHASH = "render_and_download"
RENDER_URL= "render_by_url_parameters"
BASE_PATH = "/docs/"
#HTML EMAIL TEMPLATES
DEFAULT_HTML_TEXT = \
"<h3>Hello,</h3>\
<p>You will find the documentation you requested attached, thank you very much for your interest.</p>\
<p>Best regards,</p>"
NOTIFICATION_HTML = \
"<h3>Hi!</h3>\
<p> {} has just downloaded the following document {}!</p>\
<p>You can view detailed analytics here: <a href='{}'>{}</a></p>\
<p>Keep crushing it!</p>\
<p>WPCI Admin</p>"
ATTACH_CONTENT_TYPE = 'octet-stream'
# S3 PATHS
FOLDER = f"{conf.FOLDER_NAME}/"
BUCKET = "wpci-signed-docs"
S3_BASE_URL = "https://s3-us-west-2.amazonaws.com/"+BUCKET+"/"+FOLDER+"{}"
#SMTP VARIABLES
SMTP_PASS = conf.SMTP_PASS
SMTP_USER = conf.SMTP_USER
SMTP_EMAIL = conf.SMTP_EMAIL
SMTP_ADDRESS = conf.SMTP_ADDRESS
SMTP_PORT = conf.SMTP_PORT
SENDER_NAME = "Andrea WPCI"
#specific app variables
DEFAULT_LOGO_PATH = "static/images/default_logo.base64"
TIMEZONE = conf.TIMEZONE
LANGUAGE = "en"
AUTH_ERROR = {"error":"incorrect authentication"}
# Axis for the pdf header
AXIS_X = 15
AXIS_Y = 500
AXIS_Y_GOOGLE = 200
AXIS_X_LOWER = 28
AXIS_Y_LOWER = AXIS_Y + 11
PRESENTATION_OFFSET = 130
WATERMARK_ROTATION = 90
WATERMARK_FONT = "Times-Roman"
WATERMARK_SIZE = 10
FLIP_MATRIX = fitz.Matrix(1.0, -1.0) # this generates [a=1,b=0,c=0,d=-1,e=0,f= 0]
# The default message to be sent in the body of the email
DEFAULT_HTML_TEXT = "<h3>Hello,</h3>\
<p>You will find the documentation you requested attached, thank you very much for your interest.</p>\
<p>Best regards,</p>"
def encode_auth_token(user):
"""
Generates the Auth Token
:return: string
"""
try:
payload = {
"exp": datetime.datetime.utcnow() + datetime.timedelta(days=1, seconds=ONE_HOUR),
"iat": datetime.datetime.utcnow(),
"username": user.username,
"password": user.password
}
return jwt.encode(
payload,
SECRET,
algorithm="HS256"
)
except Exception as e:
return e
def decode_auth_token(auth_token):
"""
Decodes the auth token
:param auth_token:
:return: json user
"""
try:
payload = jwt.decode(auth_token, SECRET)
return payload
except jwt.ExpiredSignatureError:
return 'Signature expired. Please log in again.'
except jwt.InvalidTokenError:
return 'Invalid token. Please log in again.'
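# Round-trip sketch (hypothetical `user` object; note that jwt.decode() above is
# called without an explicit algorithms list, which assumes PyJWT < 2.0):
#   token = encode_auth_token(user)        # bytes under PyJWT < 2.0
#   payload = decode_auth_token(token)     # -> {"username": ..., "password": ..., "exp": ..., "iat": ...}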
def authenticate(password, username):
'''this verifies a user and returns a token'''
if not (username and password):
return False
user = User.User(username, password)
if user.check():
token_auth = encode_auth_token(user)
return token_auth
else:
return False
def validate_token(access_token):
"""Verifies that an access-token is valid and
meant for this app."""
try:
method, token = access_token.split(" ")
user_id = decode_auth_token(token.strip('"'))
except Exception as e:
logger.info(e)
return False
return user_id
def jwtauth(handler_class):
""" Handle Tornado JWT Auth """
userid = None
def wrap_execute(handler_execute):
def require_auth(handler, kwargs):
auth = handler.request.headers.get('Authorization')
if auth:
parts = auth.split()
if parts[0].lower() != 'bearer':
handler._transforms = []
handler.set_status(401)
handler.write("invalid header authorization")
handler.finish()
elif len(parts) == 1:
handler._transforms = []
handler.set_status(401)
handler.write("invalid header authorization")
handler.finish()
elif len(parts) > 2:
handler._transforms = []
handler.set_status(401)
handler.write("invalid header authorization")
handler.finish()
try:
userid = validate_token(auth)
if userid is False:
handler._transforms = []
handler.set_status(403)
handler.write("Forbidden")
handler.finish()
if 'username' not in userid:
handler._transforms = []
handler.set_status(401)
handler.write("Forbidden")
handler.finish()
kwargs["userid"] = str(userid)
except Exception as e:
handler._transforms = []
handler.set_status(401)
                    handler.write(str(e))
handler.finish()
else:
handler._transforms = []
handler.write("Missing authorization")
handler.finish()
return True
def _execute(self, transforms, *args, **kwargs):
try:
require_auth(self, kwargs)
except Exception as e:
logger.info(e)
return False
return handler_execute(self, transforms, *args, **kwargs)
return _execute
handler_class._execute = wrap_execute(handler_class._execute)
return handler_class
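# Usage sketch: decorating a tornado handler forces every request to carry an
# "Authorization: Bearer <token>" header; the validated payload is passed to the
# handler methods as the `userid` argument (see PostDocument below):
#   @jwtauth
#   class ProtectedHandler(BaseHandler):   # hypothetical handler
#       def get(self, userid):
#           ...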
def authenticate_json(json_data):
'''Gets the information from the payload and verifies if it is registered'''
try:
username = json_data.get("username")
password = json_data.get("password")
except:
return False
if not (username and password):
return False
user = User.User(username,password)
if user.check():
token_auth = encode_auth_token(user)
return token_auth.decode("utf-8")
else:
return False
def create_link(doc_id):
'''Create a new link for the document'''
result = False
try:
mylink = Link.Link(doc_id)
result = mylink.create_link()
return result
except Exception as e:
logger.info("error creating the link" + str(e))
return False
def delete_link(doc_id):
'''Delete a previously created link'''
result = False
try:
mylink = Link.Link(doc_id)
result = mylink.delete_link()
return True
except Exception as e:
logger.info("error deleting the link" + str(e))
return False
def get_link_details(link_id):
''' Retrieves the status of a Document link (signed or unsigned)'''
result = False
try:
mylink = Link.Link()
result = mylink.find_by_link(link_id)
return result
except Exception as e:
logger.info("error deleting the link" + str(e))
return False
def get_document_details(doc_id):
    '''Retrieves the details of a document given its doc_id'''
result = False
try:
doc = Document.Document()
doc = doc.find_by_doc_id(doc_id)
result = doc.__dict__
result.pop("_id")
result.pop("type")
return result
except Exception as e:
logger.info("error getting document details " + str(e))
return False
def get_b64_pdf(doc_id, userjson):
    '''Call the render function and retrieve a base64 pdf'''
result = False
try:
user = User.User()
user = user.find_by_attr("username", userjson.get("username"))
new_document = ManageDocuments()
new_document.get_document_by_doc_id(doc_id)
if new_document.is_valid_document() and new_document.user_has_permission(user):
# render and send the documents by email
pdffile, complete_hash, file_tittle = new_document.render_document(main_tex="main.tex")
pdf_b64 = new_document.convert_bytes_to_b64(pdffile)
return pdf_b64
except Exception as e:
logger.info("error rendering the document link " + str(e))
return result
def create_dynamic_endpoint(document, userjson):
    '''Creates the document for the given user and returns its doc_id, which forms the dynamic endpoint URL'''
    base_url = conf.BASE_URL
PDF_VIEW_URL = '/api/v1/pdf/'
try:
user = User.User()
user = user.find_by_attr("username", userjson.get("username"))
if user is not False:
document.org_id = user.org_id
doc_id = document.create_document()
if doc_id is not False:
return doc_id
except Exception as e:
logger.info("error creating doc" + str(e))
return False
logger.info("Information not valid creating doc")
return False
@jwtauth
class APINotFoundHandler(BaseHandler):
    '''If the endpoint doesn't exist then it will respond with this code'''
def options(self, *args, **kwargs):
self.set_status(200)
self.finish()
class AuthLoginHandler(BaseHandler):
    '''Receives the username and password to retrieve a token'''
def post(self):
json_data = json.loads(self.request.body.decode('utf-8'))
token_auth = authenticate_json(json_data)
if token_auth is False:
            status_code = 401
response = {'status': '401', 'message': 'Incorrect username or password'}
else:
status_code = 200
response = {"token": token_auth}
self.write_json(response, status_code)
def set_current_user(self, user):
if user:
self.set_secure_cookie("user", tornado.escape.json_encode(user))
else:
self.clear_cookie("user")
class RegisterUserByEmail(BaseHandler):
"""Receives a payload with the user data and stores it on the bd"""
def post(self):
VERIFICATION_HTML = "<h3>Hello,</h3>\
<p>Click <a href='{}'>HERE</a> to verify your email.</p>\
<p>Best regards,</p>"
try:
ADMIN_URL = conf.BASE_URL + BASE_PATH+"validate_email?code="
email = self.get_argument('email', "")
if is_valid_email(email):
user = User.User(email)
if user.find() is False:
code = user.get_validation_code()
if code is False:
self.write(json.dumps({"error": "user"}))
try:
html_text = VERIFICATION_HTML.format(ADMIN_URL + code)
mymail = Mailer(username=SMTP_USER, password=SMTP_PASS, host=SMTP_ADDRESS, port=SMTP_PORT)
mymail.send(subject="Documentation", email_from=SMTP_EMAIL, emails_to=[email],
html_message=html_text)
self.write(json.dumps({"response": "email sent"}))
except Exception as e:
logger.info("sending email: "+str(e))
self.write(json.dumps({"error": "email"}))
else:
self.write(json.dumps({"error": "user"}))
else:
self.write(json.dumps({"error": "email"}))
        except Exception as e:
logger.info("registering user: " + str(e))
self.write(json.dumps({"error": "email"}))
class RegisterUser(BaseHandler):
    '''Receives a payload with the user data and stores it in the db'''
def post(self):
try:
json_data = json.loads(self.request.body.decode('utf-8'))
username = json_data.get("username")
if is_valid_email(username):
user = User.User(username, json_data.get("password"))
if not user.find():
user.create()
self.write(json.dumps({"response": "user created successfully"}))
else:
self.write(json.dumps({"response": "user already exists"}))
except:
self.write(json.dumps({"response": "error registering user"}))
class WebhookConfirm(BaseHandler):
    '''Receives a payment webhook payload and updates the user's payment status'''
def post(self):
try:
user = User.User()
json_data = json.loads(self.request.body.decode('utf-8'))
username = json_data.get("user_email")
if json_data.get("token") is not None and username is not None:
if is_valid_email(username):
user = user.find_by_attr("username", username)
if json_data.get("token") == conf.PAY_TOKEN and json_data.get("payment_status") is not None:
user.set_attributes({"has_paid": json_data.get("payment_status")})
user.update()
self.write_json({"response": "ok"}, 200)
else:
error = "error on token"
logger.info(error)
self.write_json({"error": error}, 401)
except:
error= "error getting response"
logger.error(error)
self.write_json({"error": error}, 500)
@jwtauth
class PostDocument(BaseHandler):
    '''Receives a post with the document url and responds with a document id'''
def post(self, userid):
try:
json_data = json.loads(self.request.body.decode('utf-8'))
# TODO convert this to a validator function
if not json_data.get("doc_url"):
self.write(json.dumps({"response": "Error, White paper url not found"}))
if not json_data.get("doc_name"):
self.write(json.dumps({"response": "Error, White paper name not found"}))
if not json_data.get("doc_main_tex"):
json_data["main_tex"] = "main.tex"
if not json_data.get("contract_url"):
json_data["contract_url"] = ""
if not json_data.get("email_body_html"):
json_data["email_body_html"] = ""
if not json_data.get("email_body_txt"):
json_data["email_body_txt"] = ""
if not json_data.get("render"):
json_data["render"] = "google"
if not json_data.get("type"):
json_data["type"] = conf.CONTRACT
if not json_data.get("doc_description"):
json_data["doc_description"] = " It is required to sign this before you can continue. Please\
read carefully and sign to continue."
if not json_data.get("doc_getit_btn"):
json_data["doc_getit_btn"] = "Sign to receive the document!"
if json_data.get("type") == conf.CONTRACT and \
json_data.get("contract_url") == "" and \
json_data.get("doc_url") != "":
json_data["contract_url"] = json_data.get("doc_url")
if json_data.get("type") == conf.NDA and (
json_data.get("contract_url") == "" or
json_data.get("doc_url") == ""):
self.write(json.dumps({
"response": "Error, Couldn't create the document, "
"no valid urls provided"
}))
if json_data.get("type") == conf.DOCUMENT and \
json_data.get("doc_url") == "":
self.write(json.dumps({
"response": "Error, Couldn't create "
"the document, no valid urls provided"
}))
doc = Document.Document()
doc.__dict__ = json_data
userjson = ast.literal_eval(userid)
result = create_dynamic_endpoint(doc, userjson)
if result is not False:
self.write(json.dumps({"doc_id": result}))
else:
self.write(json.dumps({"response": "Error, Couldn't create the document"}))
except Exception as e:
logger.info("error creating endpoint" + str(e))
self.write(json.dumps({"response": "Error the parameters are incorrect please send a valid json"}))
def get(self, userid):
result = None
response = dict()
contract_file_name = doc_file_name = "unknown.pdf"
try:
link_id = self.get_argument('link_id', "")
email = self.get_argument('email', "")
name = self.get_argument('name', "")
email_body_html = self.get_argument('email_body_html', DEFAULT_HTML_TEXT)
email_body_text = self.get_argument('email_body_text', "")
send_by_email = ast.literal_eval(
self.get_argument('send_by_email', "True")
)
options = json.loads(self.get_argument('options', "{}"))
if is_valid_email(email):
timestamp_now = str(time.time())
try:
# TODO refactor on a dryer method
thislink = Link.Link()
thislink = thislink.find_by_link(link_id)
temp_signed_count = thislink.signed_count
thislink.signed_count = int(temp_signed_count) + 1
new_document = ManageDocuments()
new_document.get_document_by_link_id(link_id)
if new_document.is_valid_document():
# render and send the documents by email
new_document.link_id = link_id
new_document.send_by_email = send_by_email
# The file name is composed by the email of the user,
# the link id and the timestamp of the creation
doc_file_name = F"doc_{email}_{new_document.link_id}_{timestamp_now}.pdf"
response.update(
{"doc_keywords": F"doc_{email}_{new_document.link_id}_{timestamp_now}"}
)
contract_file_name = F"contract_{email}_{new_document.link_id}_{timestamp_now}.pdf"
response.update(
{"contract_keywords": F"contract_{email}_{new_document.link_id}_{timestamp_now}"}
)
IOLoop.instance().add_callback(
callback=lambda:
new_document.render_and_send_all_documents(
email, name, email_body_html, timestamp_now,
contract_file_name, doc_file_name,
contract_b64_file=None, main_tex="main.tex",
email_body_text=email_body_text
)
)
thislink.status = "signed"
thislink.update()
self.write(json.dumps(response))
else:
self.write(json.dumps({"response": "Error, Couldn't find the document"}))
except Exception as e:
logger.error(F"[ERROR PostDocument GET] {str(e)}")
except Exception as e:
logger.info("error on clone" + str(e))
self.write(json.dumps({"response": "Error"}))
class Links(BaseHandler):
'''Get, create and delete a document link'''
def get(self, link_id):
if not validate_token(self.request.headers.get('Authorization')):
self.write_json(AUTH_ERROR, 403)
if link_id:
result = get_link_details(link_id)
if result is not False:
result = result.__dict__
result.pop("_id")
#Replace the Link id for the full link url
result["link"] = conf.BASE_URL +BASE_PATH+"pdf/" + result.pop("link")
self.write_json(result, 200)
else:
self.write(json.dumps({"doc_status": "failed"}))
else:
self.write(json.dumps({"error": "not enough information to perform the action"}))
def post(self, doc_id):
if not validate_token(self.request.headers.get('Authorization')):
self.write_json(AUTH_ERROR, 403)
if doc_id:
result = create_link(doc_id)
if result is not False:
result = result.__dict__
result.pop("_id")
# Replace the Link id for the full link url
result["link"] = conf.BASE_URL + BASE_PATH + "pdf/" + result.pop("link")
self.write_json(result, 200)
else:
self.write(json.dumps({"response": "failed link creation"}))
else:
self.write(json.dumps({"error": "not enough information to perform the action"}))
def delete(self, link_id):
if not validate_token(self.request.headers.get('Authorization')):
self.write_json(AUTH_ERROR, 403)
if link_id:
result = delete_link(link_id)
if result:
self.write(json.dumps({"response": "link deleted"}))
else:
self.write(json.dumps({"response": "failed link creation"}))
else:
self.write(json.dumps({"error": "not enough information to perform the action"}))
@jwtauth
class RenderDocToPDF(BaseHandler):
'''Receives a get with the id of the document and renders it to PDF with clone_repo'''
def get(self, doc_id):
'''Receives a document id and retrieves a json with a b64 pdf'''
userjson = validate_token(self.request.headers.get('Authorization'))
if not userjson:
self.write_json(AUTH_ERROR, 403)
if doc_id is not None and doc_id != "":
result = get_b64_pdf(doc_id, userjson)
if result is not False:
self.write(json.dumps({"document": result}))
else:
self.write(json.dumps({"error": "failed"}))
else:
self.write(json.dumps({"error": "not enough information to perform the action"}))
class SignLink(BaseHandler):
def post(self, link_id):
""" Receives a post with a signer user data
and signs the document with it"""
result = None
response = dict()
contract_file_name = doc_file_name = "unknown.pdf"
try:
userjson = validate_token(self.request.headers.get('Authorization'))
if not userjson:
self.write_json(AUTH_ERROR, 403)
if not link_id:
self.write(
json.dumps(
{
"response": "Error, link id not found"
}
)
)
json_data = json.loads(self.request.body.decode('utf-8'))
if not json_data.get("email") or not json_data.get("name"):
self.write(
json.dumps(
{
"response": "Error, not enough information to sign the pdf"
}
)
)
email = json_data.get("email")
name = json_data.get("name")
email_body_html = json_data.get("email_body_html", DEFAULT_HTML_TEXT)
email_body_text = json_data.get("email_body_text", "")
send_by_email = ast.literal_eval(
json_data.get("send_by_email", "True")
)
if is_valid_email(email):
timestamp_now = str(time.time())
try:
# TODO refactor on a dryer method
thislink = Link.Link()
thislink = thislink.find_by_link(link_id)
temp_signed_count = thislink.signed_count
thislink.signed_count = int(temp_signed_count) + 1
new_document = ManageDocuments()
new_document.get_document_by_link_id(link_id)
if new_document.is_valid_document():
# render and send the documents by email
new_document.link_id = link_id
new_document.send_by_email = send_by_email
# The file name is composed by the email of the user,
# the link id and the timestamp of the creation
doc_file_name = F"doc_{email}_{new_document.link_id}_{timestamp_now}.pdf"
response.update(
{
"doc_keywords": F"doc_{email}_{new_document.link_id}_{timestamp_now}"}
)
contract_file_name = F"contract_{email}_{new_document.link_id}_{timestamp_now}.pdf"
response.update(
{
"contract_keywords": F"contract_{email}_{new_document.link_id}_{timestamp_now}"}
)
IOLoop.instance().add_callback(
callback=lambda:
new_document.render_and_send_all_documents(
email, name, email_body_html, timestamp_now,
contract_file_name, doc_file_name,
contract_b64_file=None, main_tex="main.tex",
email_body_text=email_body_text
)
)
thislink.status = "signed"
thislink.update()
self.write(json.dumps(response))
else:
self.write(json.dumps(
{"response": "Error, Couldn't find the document"}))
except Exception as e:
logger.error(F"[ERROR SignLink POST] {str(e)}")
self.write(json.dumps(
{"response": "Error, Couldn't find the link_id"}
))
except Exception as e:
logger.info(F"error on payload data: {e}")
self.write(json.dumps({"response": "Error on Payload data"}))
class SignedToRexchain(BaseHandler):
"""Receives a post with the link id and sends it to the rexchain and
renders back a signed document with the signers attached sheet"""
def post(self, link_id):
        '''Receives a link id plus signer metadata and anchors the signed document on the rexchain'''
response = dict()
userjson = validate_token(self.request.headers.get('Authorization'))
if not userjson:
self.write_json(AUTH_ERROR, 403)
try:
new_document = ManageDocuments()
timestamp_now = str(int(time.time()))
            # Do we have a record of this link id in the database?
new_document.get_document_by_link_id(link_id)
if new_document.is_valid_document():
# render and send the documents by email
new_document.link_id = link_id
new_document.send_by_email = ast.literal_eval(
self.get_argument('send_by_email', "True")
)
json_data = json.loads(self.request.body.decode('utf-8'))
if not json_data.get("signer_metadata"):
self.write(json.dumps({
"response": "Error, no signer metadata found"}))
else:
signer_metadata = json_data.get("signer_metadata", {})
if (
not signer_metadata.get("email")
or not signer_metadata.get("name")
or not signer_metadata.get("public_key")
or not json_data.get("doc_id")
or not json_data.get("doc_hash")
):
self.write(json.dumps({
"response":
"Error, missing document or signer metadata"
}))
else:
new_document.signer_user = signerUser.SignerUser(
signer_metadata.get("email"),
signer_metadata.get("name"))
# create the signer user so it can generate their keys
new_document.signer_user.create()
crypto_tool = CryptoTools()
signer_public_key = signer_metadata.get("public_key")
doc_signature = base64.b64encode(crypto_tool.hex2bin(
json_data.get("doc_id")))
# The file name is composed by the email of the user,
# the link id and the timestamp of the creation
contract_file_name = F"contract_" \
F"{signer_metadata.get('email')}_" \
F"{new_document.link_id}_{timestamp_now}.pdf"
response.update(
{
"contract_keywords":
F"{signer_metadata.get('email')}_"
F"{new_document.link_id}_"
F"{timestamp_now}"
}
)
IOLoop.instance().add_callback(
callback=lambda:
new_document.b2chize_signed_doc(
signer_public_key_hex=signer_public_key,
doc_signature_b64=doc_signature,
doc_hash=json_data.get("doc_hash"),
timestamp_now=timestamp_now,
contract_file_name=contract_file_name
)
)
thislink = Link.Link()
thislink = thislink.find_by_link(link_id)
temp_signed_count = thislink.signed_count
thislink.signed_count = int(temp_signed_count) + 1
thislink.status = "signed"
thislink.update()
self.write(json.dumps(response))
else:
self.write(json.dumps({"error": "not enough information to perform the action"}))
except Exception as e:
logger.error(F"[ERROR] SignedToRexchain: {e} ")
self.write({"error": "Incorrect data received for the the signed document"})
class Documents(BaseHandler):
'''Documents endpoint'''
def get(self, doc_id):
'''Receives a document id and retrieves all its parameters'''
userjson = validate_token(self.request.headers.get('Authorization'))
if not userjson:
self.write_json(AUTH_ERROR, 403)
if doc_id is not None and doc_id != "":
result = get_document_details(doc_id)
if result:
self.write_json(result, 200)
else:
self.write(json.dumps({"error": "failed"}))
else:
self.write(json.dumps({"error": "not enough information to perform the action"}))
|
import websockets, json, traceback, os, asyncio, inspect, logging
import websockets.client
import websockets.server
from websockets.exceptions import ConnectionClosedOK, ConnectionClosedError
from .client_management.client import Client
from .inventory_management.skin_manager import Skin_Manager
from .randomizers.skin_randomizer import Skin_Randomizer
from .session_management.client_state import Client_State
from .sys_utilities.system import System
from .file_utilities.filepath import Filepath
from .sys_utilities.logging import Logger
from .user_configuartion.config import Config
from .client_config import FORCE_ONBOARDING, SERVER_VERSION
from . import shared
logger_errors = logging.getLogger('VIM_errors')
logger = logging.getLogger('VIM_main')
class Server:
shared.client = Client()
shared.client.connect()
request_lookups = {
"handshake": lambda: True,
"get_onboarding_state": lambda: shared.config["app"]["settings"]["onboarding_completed"]["value"] if not FORCE_ONBOARDING else False,
"complete_onboarding": Config.complete_onboarding,
"get_server_version": lambda: SERVER_VERSION,
# system stuff
"start_game": System.start_game,
"get_running_state": System.are_processes_running,
"autodetect_account": shared.client.autodetect_account,
# config stuff
"fetch_config": lambda: shared.config,
"update_config": Config.update_config,
# client stuff
"fetch_loadout": shared.client.fetch_loadout,
"refresh_inventory": Skin_Manager.update_skin_database,
"randomize_skins": Skin_Randomizer.randomize,
"fetch_inventory": Skin_Manager.fetch_inventory,
"put_weapon": shared.client.put_weapon,
"update_inventory": Skin_Manager.update_inventory,
}
@staticmethod
def start():
if not os.path.exists(Filepath.get_appdata_folder()):
os.mkdir(Filepath.get_appdata_folder())
Logger.create_logger()
if not shared.client.ready:
Server.reset_valclient()
shared.loop = asyncio.get_event_loop()
Config.init_config()
        # initialize any submodules
client_state = Client_State()
#start websocket server
start_server = websockets.serve(Server.ws_entrypoint, "", 8765)
if shared.client.ready:
logger.info("refreshing inventory")
Server.request_lookups["refresh_inventory"]()
print("server running\nopen https://colinhartigan.github.io/valorant-inventory-manager in your browser to use")
shared.loop.run_until_complete(start_server)
# initialize any asynchronous submodules
shared.loop.run_until_complete(client_state.loop())
shared.loop.run_forever()
def reset_valclient():
shared.client = Client()
try:
shared.client.connect()
except:
logger.warning("valclient couldnt connect")
@staticmethod
async def ws_entrypoint(websocket, path):
logger.debug("a client connected")
logger.debug(shared.sockets)
shared.sockets.append(websocket)
try:
while websocket in shared.sockets:
data = await websocket.recv()
data = json.loads(data)
request = data.get("request")
args = data.get("args")
                has_kwargs = args is not None
logger.debug(f"request: {request}")
payload = {}
if request in Server.request_lookups.keys():
payload = {
"success": True,
"event": request,
"data": None,
}
if inspect.iscoroutinefunction(Server.request_lookups[request]):
if has_kwargs:
payload["data"] = await Server.request_lookups[request](**args)
else:
payload["data"] = await Server.request_lookups[request]()
else:
if has_kwargs:
payload["data"] = Server.request_lookups[request](**args)
else:
payload["data"] = Server.request_lookups[request]()
else:
payload = {
"success": False,
"data": "could not find the specified request"
}
await websocket.send(json.dumps(payload))
logger.debug(f"response:\n{json.dumps(payload)} ")
except ConnectionClosedOK:
logger.info("disconnected")
shared.sockets.pop(shared.sockets.index(websocket))
except ConnectionClosedError:
logger.info("disconnected w/ error")
shared.sockets.pop(shared.sockets.index(websocket))
except Exception:
logger_errors.error("----- EXCEPTION -----")
logger_errors.error(traceback.format_exc())
except:
logger.error("idk what even happened to get here")
|
# -*- coding: utf-8 -*-
"""
GroundwaterDupuitPercolator Component
@author: G Tucker
"""
import numpy as np
from landlab import Component
from landlab.utils import return_array_at_node, return_array_at_link
from landlab.grid.mappers import map_mean_of_link_nodes_to_link
ACTIVE_LINK = 0
class GroundwaterDupuitPercolator(Component):
"""
Simulate groundwater flow in a shallow unconfined aquifer.
The GroundwaterDupuitPercolator uses the Dupuit approximation that the
hydraulic gradient is equal to the slope of the water table.
Parameters
----------
grid: ModelGrid
Landlab ModelGrid object
hydraulic_conductivity: float, field name, or array of float
saturated hydraulic conductivity, m/s
Default = 0.01 m/s
recharge_rate: float, field name, or array of float
Rate of recharge, m/s
Default = 1.0e-8 m/s
Examples
--------
>>> from landlab import RasterModelGrid
>>> mg = RasterModelGrid((3, 3))
>>> z = mg.add_zeros('node', 'topographic__elevation')
>>> gdp = GroundwaterDupuitPercolator(mg)
Notes
-----
Groundwater discharge per unit length, q, is calculated as:
q = - K H dw/dx,
where K is hydraulic conductivity, H is aquifer thickness, w is water table
height, and x is horizontal distance.
"""
_name = "GroundwaterDupuitPercolator"
_input_var_names = set(("topographic__elevation",
"aquifer_base__elevation"))
_output_var_names = set(
("aquifer__thickness", "water_table__elevation", "hydraulic__gradient",
"groundwater__specific_discharge", "groundwater__velocity")
)
_var_units = {
"topographic__elevation": "m",
"aquifer_base__elevation": "m",
"aquifer__thickness": "m",
"water_table__elevation": "m",
"hydraulic__gradient": "m/m",
"groundwater__specific_discharge": "m2/s",
"groundwater__velocity": "m/s"
}
_var_mapping = {
"topographic__elevation": "node",
"aquifer_base__elevation": "node",
"aquifer__thickness": "node",
"water_table__elevation": "node",
"hydraulic__gradient": "link",
"groundwater__specific_discharge": "link",
"groundwater__velocity": "link",
}
_var_doc = {
"topographic__elevation": "elevation of land surface",
"aquifer_base__elevation": "elevation of impervious layer",
"aquifer__thickness": "thickness of saturated zone",
"water_table__elevation": "elevation of water table",
"hydraulic__gradient": "gradient of water table in link direction",
"groundwater__specific_discharge": "discharge per width in link dir",
"groundwater__velocity": "velocity of groundwater in link direction",
}
def __init__(self, grid, hydraulic_conductivity=0.01,
recharge_rate=1.0e-8):
"""Initialize the GroundwaterDupuitPercolator.
Parameters
----------
grid: ModelGrid
Landlab ModelGrid object
hydraulic_conductivity: float, field name, or array of float
saturated hydraulic conductivity, m/s
Default = 0.01 m/s
recharge_rate: float, field name, or array of float
Rate of recharge, m/s
Default = 1.0e-8 m/s
"""
# Store grid
self._grid = grid
# Shorthand
self.cores = grid.core_nodes
self.inactive_links = np.where(grid.status_at_link != ACTIVE_LINK)[0]
# Convert parameters to fields if needed, and store a reference
self.K = return_array_at_link(grid, hydraulic_conductivity)
self.recharge = return_array_at_node(grid, recharge_rate)
# Create fields:
if "topographic__elevation" in self.grid.at_node:
self.elev = self.grid.at_node["topographic__elevation"]
else:
self.elev = self.grid.add_ones("node", "topographic__elevation")
if "aquifer_base__elevation" in self.grid.at_node:
self.base = self.grid.at_node["aquifer_base__elevation"]
else:
self.base = self.grid.add_zeros("node", "aquifer_base__elevation")
if "water_table__elevation" in self.grid.at_node:
self.wtable = self.grid.at_node["water_table__elevation"]
else:
self.wtable = self.grid.add_zeros("node", "water_table__elevation")
if "aquifer__thickness" in self.grid.at_node:
self.thickness = self.grid.at_node["aquifer__thickness"]
else:
self.thickness = self.grid.add_zeros("node", "aquifer__thickness")
self.thickness[:] = self.wtable - self.base
if "hydraulic__gradient" in self.grid.at_link:
self.hydr_grad = self.grid.at_link["hydraulic__gradient"]
else:
self.hydr_grad = self.grid.add_zeros("link", "hydraulic__gradient")
if "groundwater__specific_discharge" in self.grid.at_link:
self.q = self.grid.at_link["groundwater__specific_discharge"]
else:
self.q = self.grid.add_zeros("link",
"groundwater__specific_discharge")
if "groundwater__velocity" in self.grid.at_link:
self.vel = self.grid.at_link["groundwater__velocity"]
else:
self.vel = self.grid.add_zeros("link", "groundwater__velocity")
def run_one_step(self, dt, **kwds):
"""
Advance component by one time step of size dt.
Parameters
----------
dt: float (time in seconds)
The imposed timestep.
"""
# Calculate hydraulic gradient
self.hydr_grad[:] = self._grid.calc_grad_at_link(self.wtable)
self.hydr_grad[self.inactive_links] = 0.0
# Calculate groundwater velocity
self.vel[:] = -self.K * self.hydr_grad
        self.vel[self._grid.status_at_link != ACTIVE_LINK] = 0.0
# Aquifer thickness at links
hlink = map_mean_of_link_nodes_to_link(self._grid,
'aquifer__thickness')
# Calculate specific discharge
self.q[:] = hlink * self.vel
# Mass balance
dqdx = self._grid.calc_flux_div_at_node(self.q)
dhdt = self.recharge - dqdx
# Update
self.thickness[self._grid.core_nodes] += dhdt[self.cores] * dt
# Recalculate water surface height
self.wtable[self._grid.core_nodes] = (self.base[self.cores]
+ self.thickness[self.cores])
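# Minimal driver sketch (mirrors the class docstring example; dt is in seconds):
#   from landlab import RasterModelGrid
#   mg = RasterModelGrid((3, 3))
#   mg.add_zeros('node', 'topographic__elevation')
#   gdp = GroundwaterDupuitPercolator(mg)
#   for _ in range(24):
#       gdp.run_one_step(dt=3600.0)   # advance one hour per step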
|
# Copyright (C) 2019-2020, TomTom (http://tomtom.com).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for parsing Objective C types."""
import pytest
import xml.etree.ElementTree as ET
from unittest.mock import MagicMock
from asciidoxy.doxygenparser.language_traits import TokenCategory
from asciidoxy.doxygenparser.objc import ObjectiveCTypeParser
from asciidoxy.doxygenparser.type_parser import Token
from .shared import assert_equal_or_none_if_empty
from .test_doxygenparser__type_parser import (qualifier, whitespace, name, operator, arg_name,
args_start, args_end, sep, args_sep)
@pytest.fixture(params=[
"",
"nullable ",
"const ",
"__weak ",
"__strong ",
"nullable __weak ",
"nullable __strong ",
"_Nonnull ",
"_Nullable ",
"__nonnull ",
])
def objc_type_prefix(request):
return request.param
@pytest.fixture(params=["", " *", " **", " * *", " * _Nonnull", " * _Nullable"])
def objc_type_suffix(request):
return request.param
def test_parse_objc_type_from_text_simple(objc_type_prefix, objc_type_suffix):
type_element = ET.Element("type")
type_element.text = f"{objc_type_prefix}NSInteger{objc_type_suffix}"
driver_mock = MagicMock()
type_ref = ObjectiveCTypeParser.parse_xml(type_element, driver=driver_mock)
driver_mock.unresolved_ref.assert_not_called() # built-in type
assert type_ref is not None
assert type_ref.id is None
assert type_ref.kind is None
assert type_ref.language == "objc"
assert type_ref.name == "NSInteger"
assert_equal_or_none_if_empty(type_ref.prefix, objc_type_prefix)
assert_equal_or_none_if_empty(type_ref.suffix, objc_type_suffix)
assert not type_ref.nested
@pytest.mark.parametrize("type_with_space", [
"short int",
"signed short",
"signed short int",
"unsigned short",
"unsigned short int",
"signed int",
"unsigned int",
"long int",
"signed long",
"signed long int",
"unsigned long",
"unsigned long int",
"long long",
"long long int",
"signed long long",
"signed long long int",
"unsigned long long",
"unsigned long long int",
"signed char",
"long double",
"unsigned char",
"signed char",
])
def test_parse_objc_type_with_space(type_with_space):
type_element = ET.Element("type")
type_element.text = type_with_space
driver_mock = MagicMock()
type_ref = ObjectiveCTypeParser.parse_xml(type_element, driver=driver_mock)
driver_mock.unresolved_ref.assert_not_called() # built-in type
assert type_ref is not None
assert not type_ref.id
assert not type_ref.kind
assert type_ref.language == "objc"
assert type_ref.name == type_with_space
assert not type_ref.prefix
assert not type_ref.suffix
def block(text: str = "^") -> Token:
return Token(text, TokenCategory.BLOCK)
@pytest.mark.parametrize("tokens, expected", [
([], []),
([
qualifier("nullable"),
whitespace(),
name("Type"),
whitespace(),
operator("*"),
], [
qualifier("nullable"),
whitespace(),
name("Type"),
whitespace(),
operator("*"),
]),
([
name("Type"),
whitespace(),
name("value"),
], [
name("Type"),
whitespace(),
name("value"),
]),
([
name("Type"),
args_start(),
name("OtherType"),
whitespace(),
name("value"),
args_end(),
], [
name("Type"),
args_start(),
name("OtherType"),
whitespace(),
arg_name("value"),
args_end(),
]),
([
name("Type"),
args_start(),
name("OtherType"),
whitespace(),
operator("*"),
name("value"),
args_end(),
], [
name("Type"),
args_start(),
name("OtherType"),
whitespace(),
operator("*"),
arg_name("value"),
args_end(),
]),
([
name("Type"),
args_start(),
name("OtherType"),
whitespace(),
operator("*"),
name("value"),
sep(),
name("CoolType"),
whitespace(),
qualifier("nullable"),
whitespace(),
name("null_value"),
args_end(),
], [
name("Type"),
args_start(),
name("OtherType"),
whitespace(),
operator("*"),
arg_name("value"),
args_sep(),
name("CoolType"),
whitespace(),
qualifier("nullable"),
whitespace(),
arg_name("null_value"),
args_end(),
]),
([
name("void"),
args_start(),
block(),
args_end(),
args_start(),
name("NSString"),
whitespace(),
operator("*"),
name("text"),
args_end(),
], [
name("void"),
args_start(),
name("NSString"),
whitespace(),
operator("*"),
arg_name("text"),
args_end(),
]),
],
ids=lambda ts: "".join(t.text for t in ts))
def test_objc_type_parser__adapt_tokens(tokens, expected):
assert ObjectiveCTypeParser.adapt_tokens(tokens) == expected
|
import logging
from django import forms
from django.forms import ModelForm
from django.template.defaultfilters import slugify
from crispy_forms.bootstrap import FieldWithButtons, StrictButton
from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, Div, Field, Layout, Submit
from .models import AuthAPIKey, Org, OrgMembership
from .roles import MEMBERSHIP_FORM_BEGIN, MEMBERSHIP_FORM_END, ORG_ROLE_CHOICES
logger = logging.getLogger(__name__)
class OrgCreateForm(ModelForm):
class Meta:
model = Org
fields = ['name']
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.layout = Layout(
'name',
HTML('<br>'),
)
self.helper.add_input(Submit('submit', 'Create New Company', css_class='btn btn-success submit'))
super(OrgCreateForm, self).__init__(*args, **kwargs)
def clean_name(self):
# Check that username is not used already
name = self.cleaned_data.get('name')
slug = slugify(name)
if Org.objects.filter(slug=slug).exists():
raise forms.ValidationError('Organization with this Company Name already exists: {}'.format(name))
return name
class OrgEditForm(ModelForm):
about = forms.CharField(label='Description', max_length=400, required=False,
widget=forms.Textarea(attrs={'rows': 5}))
class Meta:
model = Org
fields = ['name', 'about']
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.layout = Layout(
'name',
'about',
HTML('<br>'),
)
self.helper.add_input(Submit('submit', 'Save', css_class='btn btn-block btn-success submit'))
super(OrgEditForm, self).__init__(*args, **kwargs)
def clean_name(self):
name = self.cleaned_data.get('name')
# Check that username is not used already
if self.instance:
slug = slugify(name)
qs = Org.objects.filter(slug=slug)
if qs.count() == 1 and self.instance.id != qs.first().id:
raise forms.ValidationError('Organization with this Company Name already exists: {}'.format(name))
return name
class OrgMembershipForm(ModelForm):
    # Filter Staff and Development Roles
role = forms.ChoiceField(label="Select Permission Role",
choices=ORG_ROLE_CHOICES[MEMBERSHIP_FORM_BEGIN:MEMBERSHIP_FORM_END],
required=True)
class Meta:
model = OrgMembership
fields = ['is_active', 'role']
def __init__(self, user, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.layout = Layout(
'role',
HTML('<hr>'),
Div(
Field('is_active',
data_toggle="toggle",
data_size="small",
data_on="Active",
data_off="Disabled",
data_onstyle='success',
data_offstyle='danger',
data_style='ios'),
css_class='checkbox-label'
),
HTML('<br>'),
)
self.helper.add_input(Submit('submit', 'Change', css_class='btn btn-success btn-block submit'))
super(OrgMembershipForm, self).__init__(*args, **kwargs)
if user.is_staff:
self.fields['role'].choices = ORG_ROLE_CHOICES
else:
# User should only be able to change to a role below itself
if self.instance:
user_membership = self.instance.org.get_membership_obj(user)
role_index = 0
while ORG_ROLE_CHOICES[role_index][0] != user_membership.role:
role_index += 1
self.fields['role'].choices = ORG_ROLE_CHOICES[role_index:MEMBERSHIP_FORM_END]
def clean(self):
role = self.cleaned_data.get('role')
is_active = self.cleaned_data.get('is_active')
if role == 'a0' and is_active is False:
raise forms.ValidationError("Owner cannot be disabled. Please downgrade this user first.")
if role != 'a0' and self.instance.org.is_owner(self.instance.user) \
and self.instance.org.owner_count() < 2:
raise forms.ValidationError("Cannot remove owner: organization must have an owner.")
return self.cleaned_data
class OrgMembershipMessageForm(ModelForm):
    # Filter Staff and Development Roles
role = forms.ChoiceField(label="Send message to members with role:",
choices=ORG_ROLE_CHOICES[MEMBERSHIP_FORM_BEGIN:MEMBERSHIP_FORM_END],
required=True)
message = forms.CharField(label='Message', required=False,
widget=forms.Textarea(attrs={'rows': 10}))
class Meta:
model = Org
fields = []
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.layout = Layout(
'role',
HTML('<hr>'),
'message',
HTML('<hr>'),
HTML('<br>'),
)
self.helper.add_input(Submit('submit', 'Send', css_class='btn btn-success btn-block submit'))
super(OrgMembershipMessageForm, self).__init__(*args, **kwargs)
self.fields['role'].choices = self.fields['role'].choices + [('-', 'All Members')]
class OrgDomainAdminForm(ModelForm):
default_role = forms.ChoiceField(label="Default Permission Role", choices=ORG_ROLE_CHOICES, required=True)
def __init__(self, *args, **kwargs):
super(OrgDomainAdminForm, self).__init__(*args, **kwargs)
if self.instance.id:
self.fields['default_role'] = forms.ChoiceField(choices=ORG_ROLE_CHOICES)
class OrgMembershipAdminForm(ModelForm):
role = forms.ChoiceField(label="Permission Role", choices=ORG_ROLE_CHOICES, required=True)
def __init__(self, *args, **kwargs):
super(OrgMembershipAdminForm, self).__init__(*args, **kwargs)
if self.instance.id:
self.fields['role'].initial = self.instance.role
class DataBlockSearchForm(forms.Form):
q = forms.CharField(required=False, label=(''),
widget=forms.TextInput(attrs={'type': 'search', 'autocomplete': 'off'}))
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.layout = Layout(
FieldWithButtons('q', StrictButton('Search', css_class='btn btn-success btn-block',))
)
super(DataBlockSearchForm, self).__init__(*args, **kwargs)
class OrgAPIKeyCreateForm(ModelForm):
class Meta:
model = AuthAPIKey
# fields = ['name', 'expiry_date', 'revoked']
exclude = ['id', 'org']
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.form_method = 'post'
self.helper.add_input(Submit('submit', 'Submit', css_class='btn btn-success btn-block submit'))
super(OrgAPIKeyCreateForm, self).__init__(*args, **kwargs)
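# View-side usage sketch for OrgMembershipForm (names illustrative; the form's
# first positional argument is the requesting user, as defined in __init__ above):
#   form = OrgMembershipForm(request.user, request.POST or None, instance=membership)
#   if form.is_valid():
#       form.save()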
|
from src.binary_lane_filter import apply_road_lane_binary_filter
import numpy as np
import cv2
from src.warp import ImageWarp
from src.line import TrafficLine
from src.visualisation import window_corners, color_window_pixels
from src.curvature import estimate_lane_curve_radius_m
from src.camera_callibration import CameraCalibrator
def create_image_processor():
calibrator = CameraCalibrator('camera_cal/calibration*.jpg')
calibrator.calibrate()
def process_image(img):
# calibrate image
img = calibrator.undistort(img)
# apply binary filter
output_binary = apply_road_lane_binary_filter(img)
# warp image
src = np.float32([[500, 515], [760, 500], [200, 720], [1100, 720]])
dst = np.float32([[200, 500], [1100, 500], [200, 700], [1110, 700]])
warp = ImageWarp(src, dst)
warped = warp.top_view(output_binary)
# find lanes with sliding windows method
margin = 100
height, width, layers = img.shape
h = height / 10 # windows
out_img = cv2.merge([warped * 255, warped * 255, warped * 255])
left_line = TrafficLine("left", margin=margin)
left_line.init_position(warped)
left_line.find_indexes(warped)
right_line = TrafficLine("right", margin=margin)
right_line.init_position(warped)
right_line.find_indexes(warped)
for x, y in left_line.line_indexes:
corners = window_corners(x, y, h, margin * 2)
color_window_pixels(out_img, warped, corners, [(0, 255, 0)])
cv2.rectangle(
out_img,
corners[0],
corners[1],
color=(0, 0, 255),
thickness=2
)
for x, y in right_line.line_indexes:
corners = window_corners(x, y, h, margin * 2)
color_window_pixels(out_img, warped, corners, [(255, 0, 0)])
cv2.rectangle(
out_img,
corners[0],
corners[1],
color=(0, 0, 255),
thickness=2
)
left_line_curvature = estimate_lane_curve_radius_m(left_line.line_indexes, height)
right_line_curvature = estimate_lane_curve_radius_m(right_line.line_indexes, height)
# Write curvature and car position text
font = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 1
fontColor = (255, 255, 255)
lineType = 2
cv2.putText(
img,
"Radius of Curvature: {:.0f} meters".format(left_line_curvature // 2),
(100, 50),
font,
fontScale,
fontColor,
lineType
)
# it looks like camera position on the car needs calibration
        # looking at test images, it seems that the middle of the car is closer to 680px
# as opposed to 640 expected
mid_x = 680 # width // 2
xm_per_pix: float = 3.7 / 1000
car_position = ((left_line.line_indexes[0][0] + right_line.line_indexes[0][0]) / 2 - mid_x) * xm_per_pix
if car_position > 0:
position_side = "right"
else:
position_side = "left"
cv2.putText(
img,
"Car position: {position_side} {car_position:.2f} meters".format(
position_side=position_side,
car_position=abs(car_position)
),
(100, 100),
font,
fontScale,
fontColor,
lineType
)
# Restore the car view
visual_output = warp.car_view(out_img)
# Clean overlay visualisation from white dots
lower_white = np.array([50, 50, 50], dtype="uint16")
upper_white = np.array([255, 255, 255], dtype="uint16")
white_mask = cv2.inRange(visual_output, lower_white, upper_white)
visual_output[np.where((white_mask != [0]))] = [0, 0, 0]
overlay_out = np.copy(img)
visual_output_gray = cv2.cvtColor(visual_output, cv2.COLOR_BGR2GRAY)
overlay_out[(visual_output_gray > 10)] = [0, 0, 0]
overlay_out = cv2.addWeighted(overlay_out, 1, visual_output, 1, 0)
return overlay_out
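    # Usage sketch (illustrative paths; any BGR frame read with cv2.imread works):
    #   process = create_image_processor()
    #   annotated = process(cv2.imread('test_images/test1.jpg'))
    #   cv2.imwrite('output_images/test1_out.jpg', annotated)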
return process_image |
import unittest
from src.morphothec import Morphothec
from src.generator import word_for_keys
import src.composer as composer
class FormTests(unittest.TestCase):
def setUp(self):
self.morphothec = Morphothec(["data/morphs-latin.json", "data/morphs-greek.json"])
# Tests that prefix sound assimilation resolves correctly.
def testPrefixAssimilation(self):
# 'ab' ----------
self.assertForm(["ab", "ambulare", "ion"], "abambulation") #*
self.assertForm(["ab", "ducere", "ion"], "abduction")
self.assertForm(["ab", "errare", "ion"], "aberration")
self.assertForm(["ab", "grex", "ate"], "abgregate")
self.assertForm(["ab", "horrere"], "abhor")
self.assertForm(["ab", "ire", "nt"], "abient")
self.assertForm(["ab", "jacere", "ion"], "abjection")
self.assertForm(["ab", "lact", "ate"], "ablactate")
self.assertForm(["ab", "negare", "ion"], "abnegation")
self.assertForm(["ab", "oriri", "ion"], "abortion")
self.assertForm(["ab", "rogare", "ion"], "abrogation")
self.assertForm(["ab", "solvere", "ion"], "absolution")
self.assertForm(["ab", "uti"], "abuse")
# ab -> a
self.assertForm(["ab", "movere"], "amove")
self.assertForm(["ab", "pellere", "ion"], "apulsion") #*
self.assertForm(["ab", "vertere", "ion"], "aversion")
# ab -> abs
self.assertForm(["ab", "condere"], "abscond")
self.assertForm(["ab", "quaerere", "ion"], "absquisition") #*
self.assertForm(["ab", "trahere", "ion"], "abstraction")
# ab -> au
self.assertForm(["ab", "ferre", "nt"], "auferent") #*
# 'ad' ----------
self.assertForm(["ad", "arguere", "or"], "adargutor") #*
self.assertForm(["ad", "ducere", "ion"], "adduction")
self.assertForm(["ad", "educare", "ion"], "adeducation") #*
self.assertForm(["ad", "haerere", "ion"], "adhesion")
self.assertForm(["ad", "jurare"], "adjure")
self.assertForm(["ad", "mirari", "ion"], "admiration")
self.assertForm(["ad", "optare", "ion"], "adoption")
self.assertForm(["ad", "venire", "ure"], "adventure")
# ad -> a
self.assertForm(["ad", "scandere", "nt"], "ascendant")
self.assertForm(["ad", "specere"], "aspect")
self.assertForm(["ad", "stringere"], "astrict")
# ad doubling
self.assertForm(["ad", "cedere", "ion"], "accession")
self.assertForm(["ad", "figere", "ion"], "affixation")
self.assertForm(["ad", "gradi", "ive"], "aggressive")
self.assertForm(["ad", "ludere", "ion"], "allusion")
self.assertForm(["ad", "nuntiare", "ion"], "annunciation")
self.assertForm(["ad", "parere", "nt"], "apparent")
self.assertForm(["ad", "rogare", "nt"], "arrogant")
self.assertForm(["ad", "serere", "ion"], "assertion")
# ad -> ac
self.assertForm(["ad", "quaerere", "ion"], "acquisition")
# ad -> ab
self.assertForm(["ad", "bibere"], "abbibe")
# 'com' ----------
self.assertForm(["com", "battuere"], "combate") #*
self.assertForm(["com", "mandare"], "command")
self.assertForm(["com", "pungere", "ion"], "compunction")
# com -> con
self.assertForm(["com", "cedere", "ion"], "concession")
self.assertForm(["com", "ducere", "or"], "conductor")
self.assertForm(["com", "fundere", "ion"], "confusion")
self.assertForm(["com", "gerere", "ion"], "congestion")
self.assertForm(["com", "nectare", "ive"], "connective")
self.assertForm(["com", "quaerere", "or"], "conquisitor") #*
self.assertForm(["com", "sequi", "nt"], "consequent")
self.assertForm(["com", "tenere", "nt"], "continent")
self.assertForm(["com", "venire", "ion"], "convention")
# com -> co
self.assertForm(["com", "arguere", "or"], "coargutor") #*
self.assertForm(["com", "esse", "nt"], "coessent") #*
self.assertForm(["com", "haerere", "nt"], "coherent")
self.assertForm(["com", "ire", "ion"], "coition")
self.assertForm(["com", "ordinare", "ion"], "coordination")
#com -> co+
self.assertForm(["com", "laborare", "or"], "collaborator")
self.assertForm(["com", "regere", "ive"], "corrective")
# 'dis' ----------
self.assertForm(["dis", "apparere", "nt"], "disapparent") #*
self.assertForm(["dis", "battuere", "nt"], "disbatant") #*
self.assertForm(["dis", "cernere", "ion"], "discretion")
self.assertForm(["dis", "aequa", "ity"], "disequity") #*
self.assertForm(["dis", "gradi", "ion"], "digression")
self.assertForm(["dis", "hortari"], "dishort")
self.assertForm(["dis", "identa", "ity"], "disidentity") #*
self.assertForm(["dis", "jacere"], "disject")
self.assertForm(["dis", "mittere", "ion"], "dismission") #*
self.assertForm(["dis", "numerare", "ion"], "disnumeration") #*
self.assertForm(["dis", "pandere", "ion"], "dispansion") #*
self.assertForm(["dis", "quaerere", "ion"], "disquisition")
self.assertForm(["dis", "secare", "ion"], "dissection")
self.assertForm(["dis", "trahere", "ion"], "distraction")
self.assertForm(["dis", "unus", "ity"], "disunity")
# dis -> di
self.assertForm(["dis", "ducere", "ion"], "diduction") #*
self.assertForm(["dis", "luere", "ion"], "dilution")
self.assertForm(["dis", "regere", "ion"], "direction")
self.assertForm(["dis", "vagari", "ion"], "divagation")
# dis -> di+
self.assertForm(["dis", "ferre", "nt"], "different")
# 'ex' ----------
self.assertForm(["ex", "agere"], "exact")
self.assertForm(["ex", "emere", "ion"], "exemption")
self.assertForm(["ex", "haurire", "ion"], "exhaustion")
self.assertForm(["ex", "ire"], "exit")
self.assertForm(["ex", "onus", "ate"], "exonerate")
self.assertForm(["ex", "pellere", "ion"], "expulsion")
self.assertForm(["ex", "quaerere", "ion"], "exquisition")
self.assertForm(["ex", "tendere", "ion"], "extension")
# ex -> e
# ebullient
self.assertForm(["ex", "pellere", "ion"], "expulsion")
self.assertForm(["ex", "donare", "ion"], "edonation") #*
self.assertForm(["ex", "jacere", "ion"], "ejection")
# self.assertForm(["ex", "ferre", "ion"], "elation") # BROKEN - this form doesn't work. Need estimate form when assimilating.
self.assertForm(["ex", "mergere", "nt"], "emergent")
self.assertForm(["ex", "numerare", "or"], "enumerator")
self.assertForm(["ex", "regere", "ion"], "erection")
self.assertForm(["ex", "vadere", "ive"], "evasive")
self.assertForm(["ex", "numerare", "or"], "enumerator")
# ex -> ex/
self.assertForm(["ex", "sequi", "or"], "executor")
# ex -> e+
self.assertForm(["ex", "fluere", "nt"], "effluent")
# 'in' ----------
# in + a-, as in 'inactive'
self.assertForm(["in", "ducere", "ion"], "induction")
self.assertForm(["in", "clinare", "ion"], "inclination")
self.assertForm(["in", "ducere", "ive"], "inductive")
self.assertForm(["in", "educare", "ble"], "ineducable") #*
self.assertForm(["in", "facere", "ion"], "infection")
self.assertForm(["in", "gerere", "ion"], "ingestion")
self.assertForm(["in", "habitare", "nt"], "inhabitant")
self.assertForm(["in", "imitari", "ble"], "inimitable") #*
self.assertForm(["in", "numerare", "ble"], "innumerable") #*
self.assertForm(["in", "optare", "ive"], "inoptive") #*
self.assertForm(["in", "quaerere", "ive"], "inquisitive")
self.assertForm(["in", "scribere", "ion"], "inscription")
self.assertForm(["in", "trudere", "ive"], "intrusive")
# in -> im
self.assertForm(["in", "bibere"], "imbibe")
self.assertForm(["in", "mergere", "ion"], "immersion")
self.assertForm(["in", "pellere", "ive"], "impulsive")
self.assertForm(["in", "urbs", "ify"], "inurbify") #*
self.assertForm(["in", "vadere", "ion"], "invasion")
# in -> i+
self.assertForm(["in", "ludere", "ion"], "illusion")
self.assertForm(["in", "rogare", "nt"], "irrogant") #*
# 'ob' ----------
self.assertForm(["ob", "ambulare"], "obambulate")
self.assertForm(["ob", "battuere", "nt"], "obbatant") #*
self.assertForm(["ob", "durare", "ion"], "obduration")
self.assertForm(["ob", "errare", "ion"], "oberration")
self.assertForm(["ob", "gerere", "ion"], "obgestion") #*
self.assertForm(["ob", "haurire", "ion"], "obhaustion") #*
self.assertForm(["ob", "ignorare", "nt"], "obignorant") #*
self.assertForm(["ob", "jacere", "ion"], "objection")
self.assertForm(["ob", "ligare", "ion"], "obligation")
self.assertForm(["ob", "nectare", "ion"], "obnection")
self.assertForm(["ob", "ordinare", "ure"], "obordinature") #*
self.assertForm(["ob", "quaerere", "or"], "obquisitor") #*
self.assertForm(["ob", "rumpere", "nt"], "obrumpent")
self.assertForm(["ob", "sequi", "nt"], "obsequent")
self.assertForm(["ob", "temperare"], "obtemper")
self.assertForm(["ob", "volare"], "obvolate")
# ob -> o
# Before 'st', as in ostentatious
self.assertForm(["ob", "mittere", "ion"], "omission")
# ob -> o+
self.assertForm(["ob", "cadere", "ion"], "occasion")
self.assertForm(["ob", "ferre"], "offer")
self.assertForm(["ob", "premere", "ion"], "oppression")
# 'se' ----------
self.assertForm(["se", "cedere", "ion"], "secession")
self.assertForm(["se", "ducere", "ion"], "seduction")
self.assertForm(["se", "ferre", "nt"], "seferent") #*
self.assertForm(["se", "grex", "ate"], "segregate")
self.assertForm(["se", "haerere", "ion"], "sehesion") #*
self.assertForm(["se", "jungere", "ion"], "sejunction") #*
self.assertForm(["se", "ligere", "ion"], "selection")
self.assertForm(["se", "haerere", "ion"], "sehesion") #*
self.assertForm(["se", "movere", "ion"], "semotion") #*
self.assertForm(["se", "narrare", "or"], "senarrator") #*
self.assertForm(["se", "parare", "ion"], "separation")
self.assertForm(["se", "quaerere", "ion"], "sequisition") #*
self.assertForm(["se", "quaerere", "ion"], "sequisition") #*
self.assertForm(["se", "radere", "ive"], "serasive") #*
self.assertForm(["se", "salire", "nt"], "sesilient") #*
self.assertForm(["se", "trahere", "ion"], "setraction") #*
self.assertForm(["se", "vocare", "ion"], "sevocation")
# se -> sed
self.assertForm(["se", "agere", "ion"], "sedaction") #*
self.assertForm(["se", "errare", "nt"], "sederrant") #*
self.assertForm(["se", "ire", "ion"], "sedition")
self.assertForm(["se", "uti"], "seduse") #*
# 'sub' ----------
self.assertForm(["sub", "alternare"], "subalternate")
self.assertForm(["sub", "bracchium", "al"], "subbrachial")
self.assertForm(["sub", "ducere", "ion"], "subduction")
self.assertForm(["sub", "errare", "ion"], "suberration")
self.assertForm(["sub", "haurire", "ion"], "subhaustion") #*
self.assertForm(["sub", "ire"], "subit") #*
self.assertForm(["sub", "jacere", "nt"], "subjacent")
self.assertForm(["sub", "linere", "ion"], "sublition")
self.assertForm(["sub", "nasci", "nt"], "subnascent")
self.assertForm(["sub", "oculus", "al"], "subocular")
self.assertForm(["sub", "quaerere", "ive"], "subquisitive") #*
self.assertForm(["sub", "secare"], "subsect")
self.assertForm(["sub", "tendere"], "subtend")
self.assertForm(["sub", "urbs", "al"], "suburbal") #*
self.assertForm(["sub", "venire", "ion"], "subvention")
# sub -> su+
self.assertForm(["sub", "cedere", "ion"], "succession")
self.assertForm(["sub", "ferre"], "suffer")
self.assertForm(["sub", "gerere", "ion"], "suggestion")
self.assertForm(["sub", "mandare"], "summand")
self.assertForm(["sub", "ponere", "ion"], "supposition")
self.assertForm(["sub", "rogare"], "surrogate")
# sub -> sus
def testLRDissimilation(self):
score = 0
# Has an L, ends in L
score += self.countForm(["familia", "al"], "familial")
score += self.countForm(["labia", "al"], "labial")
score += self.countForm(["lingua", "al"], "lingual")
score += self.countForm(["littera", "al"], "literal")
score += self.countForm(["locus", "al"], "local")
score += self.countForm(["helix", "al"], "helical")
score += self.countForm(["latus", "al"], "lateral")
score += self.countForm(["lex", "al"], "legal")
score += self.countForm(["limen", "al"], "liminal")
score += self.countForm(["glacies", "al"], "glacial")
# Has an L, ends in R
score += self.countForm(["ala", "al"], "alar")
score += self.countForm(["columna", "al"], "columnar")
score += self.countForm(["familia", "al"], "familiar")
score += self.countForm(["insula", "al"], "insular")
score += self.countForm(["linea", "al"], "linear")
score += self.countForm(["luna", "al"], "lunar")
score += self.countForm(["stella", "al"], "stellar")
score += self.countForm(["angulus", "al"], "angular")
score += self.countForm(["anulus", "al"], "annular")
score += self.countForm(["oculus", "al"], "ocular")
score += self.countForm(["populus", "al"], "popular")
score += self.countForm(["sol", "al"], "solar")
self.assertTrue(score >= 15)
def testStemChange(self):
self.assertForm(["mugire", "nt"], "mugient")
self.assertForm(["nutrire", "nt"], "nutrient")
self.assertForm(["oriri", "nt"], "orient")
self.assertForm(["sentire", "nt"], "sentient")
self.assertForm(["com", "venire", "nt"], "convenient")
self.assertForm(["re", "salire", "nt"], "resilient")
self.assertFormIn(["experiri", "nce"], ["experience", "experiency"])
self.assertFormIn(["scire", "nce"], ["science", "sciency"])
def testStemRaise(self):
self.assertForm(["credere", "ble"], "credible")
self.assertForm(["fallere", "ble"], "fallible")
self.assertForm(["fungi", "ble"], "fungible")
self.assertForm(["legere", "ble"], "legible")
self.assertForm(["neglegere", "ble"], "negligible")
self.assertForm(["tangere", "ble"], "tangible")
self.assertForm(["re", "vertere", "ble"], "reversible")
self.assertForm(["in", "vincere", "ble"], "invincible")
# Miscellaneous tests confirming that real words have the correct forms.
def testActualForms(self):
# Latin nouns, 1st declension
self.assertForm(["anima", "al"], "animal")
self.assertForm(["bestia", "al"], "bestial")
self.assertForm(["littera", "al"], "literal")
self.assertForm(["materia", "al"], "material")
self.assertForm(["persona", "al"], "personal")
self.assertForm(["ancilla", "ary"], "ancillary")
self.assertForm(["culina", "ary"], "culinary")
self.assertForm(["epistula", "ary"], "epistulary")
self.assertForm(["littera", "ary"], "literary")
self.assertForm(["pecunia", "ary"], "pecuniary")
self.assertFormIn(["columba", "arium"], ["columbary", "columbarium"])
self.assertFormIn(["planeta", "arium"], ["planetary", "planetarium"])
self.assertFormIn(["terra", "arium"], ["terrary", "terrarium"])
self.assertForm(["branchia", "ate-bodypart"], "branchiate")
self.assertForm(["labia", "ate-bodypart"], "labiate")
self.assertForm(["lingua", "ate-bodypart"], "linguate")
self.assertForm(["mamma", "ate-bodypart"], "mammate")
self.assertForm(["idea", "ate-secretion"], "ideate")
self.assertForm(["urina", "ate-secretion"], "urinate")
self.assertForm(["aquila", "ine"], "aquiline")
self.assertForm(["columba", "ine"], "columbine")
self.assertForm(["femina", "ine"], "feminine")
self.assertForm(["rana", "ine"], "ranine")
self.assertForm(["palma", "etum"], "palmetum")
self.assertForm(["copia", "ous"], "copious")
self.assertForm(["fabula", "ous"], "fabulous")
self.assertForm(["fama", "ous"], "famous")
self.assertForm(["gloria", "ous"], "glorious")
self.assertForm(["pecunia", "ous"], "pecunious")
self.assertForm(["aqua", "fer"], "aquifer")
self.assertForm(["mamma", "fer"], "mammifer")
self.assertForm(["arma", "ger"], "armiger")
self.assertForm(["campana", "form"], "campaniform")
self.assertForm(["columna", "form"], "columniform")
self.assertForm(["luna", "form"], "luniform")
self.assertForm(["palma", "form"], "palmiform")
self.assertForm(["rana", "form"], "raniform")
self.assertForm(["femina", "cide"], "feminicide")
self.assertForm(["filia", "cide"], "filicide")
self.assertForm(["gallina", "cide"], "gallinicide")
self.assertForm(["herba", "cide"], "herbicide")
# Latin nouns, 2nd declension
self.assertForm(["astrum", "al"], "astral")
self.assertForm(["bracchium", "al"], "brachial")
self.assertForm(["carcer", "al"], "carceral")
# self.assertForm(["fluvius", "al"], "fluvial")
self.assertForm(["frater", "al"], "fraternal")
self.assertForm(["populus", "al"], "popular")
self.assertForm(["auxilium", "ary"], "auxiliary")
self.assertForm(["capillus", "ary"], "capillary")
self.assertForm(["exemplum", "ary"], "exemplary")
self.assertForm(["numerus", "ary"], "numerary")
self.assertForm(["auxilium", "ary"], "auxiliary")
self.assertFormIn(["granus", "arium"], ["granarium", "granary"])
self.assertFormIn(["liber", "arium"], ["librarium", "library"])
self.assertForm(["auxilium", "ary"], "auxiliary")
self.assertForm(["bracchium", "ate-bodypart"], "brachiate")
self.assertForm(["capillus", "ate-bodypart"], "capillate")
self.assertForm(["folium", "ate-bodypart"], "foliate")
self.assertForm(["granus", "ate-bodypart"], "granate")
self.assertForm(["oculus", "ate-bodypart"], "oculate")
self.assertForm(["insectum", "ile"], "insectile")
self.assertForm(["puer", "ile"], "puerile")
self.assertForm(["servus", "ile"], "servile")
self.assertForm(["vir", "ile"], "virile")
self.assertForm(["asinus", "ine"], "asinine")
self.assertForm(["equus", "ine"], "equine")
self.assertForm(["labyrinthus", "ine"], "labyrinthine")
self.assertForm(["serpentus", "ine"], "serpentine")
self.assertForm(["corvus", "id-descendant"], "corvid")
self.assertForm(["pinus", "etum"], "pinetum")
self.assertForm(["servus", "tude"], "servitude")
self.assertForm(["cancer", "ous"], "cancrous")
self.assertForm(["ferrum", "ous-material"], "ferrous")
self.assertForm(["numerus", "ous"], "numerous")
self.assertForm(["officium", "ous"], "officious")
self.assertForm(["rapax", "ous"], "rapacious")
self.assertForm(["deus", "ify"], "deify")
self.assertForm(["exemplum", "ify"], "exemplify")
self.assertForm(["modus", "ify"], "modify")
self.assertForm(["nullus", "ify"], "nullify")
self.assertForm(["signum", "ify"], "signify")
self.assertForm(["conus", "fer"], "conifer")
self.assertForm(["scutum", "fer"], "scutifer")
self.assertForm(["bellum", "ger", "nt"], "belligerent")
self.assertForm(["cancer", "form"], "cancriform")
self.assertForm(["cerebrum", "form"], "cerebriform")
self.assertForm(["nasus", "form"], "nasiform")
self.assertForm(["ovum", "form"], "oviform")
self.assertForm(["insectum", "cide"], "insecticide")
self.assertForm(["insectum", "cide"], "insecticide")
self.assertForm(["vir", "cide"], "viricide")
self.assertForm(["virus", "cide"], "viricide")
        # Latin nouns, 3rd declension
self.assertForm(["caput", "al"], "capital")
self.assertForm(["caro", "al"], "carnal")
self.assertForm(["cordis", "al"], "cordial")
self.assertForm(["hospes", "al"], "hospital")
self.assertForm(["rex", "al"], "regal")
self.assertForm(["imago", "ary"], "imaginary")
self.assertForm(["lapis", "ary"], "lapidary")
self.assertForm(["miles", "ary"], "military")
self.assertForm(["pulmo", "ary"], "pulmonary")
self.assertForm(["tempus", "ary"], "temporary")
self.assertFormIn(["avis", "arium"], ["aviarium", "aviary"])
self.assertFormIn(["apes", "arium"], ["apiarium", "apiary"])
self.assertForm(["caput", "ate-bodypart"], "capitate")
self.assertForm(["corpus", "ate-bodypart"], "corporate")
self.assertForm(["dens", "ate-bodypart"], "dentate")
self.assertForm(["pulmo", "ate-bodypart"], "pulmonate")
self.assertForm(["radix", "ate-bodypart"], "radicate")
self.assertForm(["lact", "ate-secretion"], "lactate")
self.assertForm(["hostis", "ile"], "hostile")
self.assertForm(["juvenis", "ile"], "juvenile")
self.assertForm(["senex", "ile"], "senile")
self.assertForm(["bos", "ine"], "bovine")
self.assertForm(["felis", "ine"], "feline")
self.assertForm(["leo", "ine"], "leonine")
self.assertForm(["ovis", "ine"], "ovine")
self.assertForm(["sphinx", "ine"], "sphingine")
self.assertForm(["avis", "ian-animal"], "avian")
self.assertForm(["canis", "id-descendant"], "canid")
self.assertForm(["felis", "id-descendant"], "felid")
self.assertForm(["homo", "id-descendant"], "hominid")
self.assertForm(["arbor", "esce-plant", "nt"], "arborescent")
self.assertForm(["ex", "flos", "esce-plant"], "effloresce")
self.assertForm(["frons", "esce-plant"], "frondesce")
self.assertForm(["apes", "culture"], "apiculture")
self.assertForm(["avis", "culture"], "aviculture")
self.assertForm(["bos", "culture"], "boviculture")
self.assertForm(["mare", "culture"], "mariculture")
self.assertForm(["vermis", "culture"], "vermiculture")
self.assertForm(["arbor", "ous"], "arborous")
self.assertForm(["caro", "ous"], "carnous")
self.assertForm(["carbo", "ous"], "carbonous")
self.assertForm(["febris", "ous"], "febrous")
self.assertForm(["gramen", "ous"], "graminous")
self.assertForm(["carbo", "ify"], "carbonify")
self.assertForm(["crux", "ify"], "crucify")
self.assertForm(["lapis", "ify"], "lapidify")
self.assertForm(["mors", "ify"], "mortify")
self.assertForm(["pax", "ify"], "pacify")
self.assertForm(["crux", "fer"], "crucifer")
self.assertForm(["lux", "fer"], "lucifer")
self.assertForm(["proles", "fer", "ate"], "proliferate")
self.assertForm(["thuris", "fer"], "thurifer")
self.assertForm(["clavis", "ger"], "claviger")
self.assertForm(["calix", "form"], "caliciform")
self.assertForm(["caseus", "form"], "caseiform")
self.assertForm(["falx", "form"], "falciform")
self.assertForm(["funis", "form"], "funiform")
self.assertForm(["homo", "form"], "hominiform")
self.assertForm(["grex", "cide"], "gregicide")
self.assertForm(["pater", "cide"], "patricide")
self.assertForm(["senex", "cide"], "senicide")
self.assertForm(["soror", "cide"], "sororicide")
self.assertForm(["vitis", "cide"], "viticide")
# Latin nouns, 4th declension
self.assertForm(["casus", "al"], "casual")
self.assertForm(["manus", "al"], "manual")
self.assertFormIn(["os-remains", "arium"], ["ossuarium", "ossuary"])
self.assertForm(["cornus", "ate-bodypart"], "cornuate")
self.assertForm(["lacus", "ine"], "lacustrine")
self.assertForm(["quercus", "ine"], "quercine")
self.assertForm(["fructus", "ous"], "fructuous")
self.assertForm(["lacus", "ine"], "lacustrine")
self.assertForm(["manus", "form"], "maniform")
# Latin nouns, 5th declension
self.assertForm(["facies", "al"], "facial")
self.assertForm(["dies", "ary"], "diary")
self.assertForm(["facies", "al"], "facial")
# Latin nouns, indeclinable
self.assertForm(["unus", "ity"], "unity")
self.assertForm(["duo", "al"], "dual")
self.assertForm(["ad", "nihil", "ate"], "annihilate")
# Latin adjectives, 1st/2nd declension
self.assertForm(["acerba", "ity"], "acerbity")
self.assertForm(["digna", "ity"], "dignity")
self.assertForm(["aequa", "ity"], "equity")
self.assertForm(["obscura", "ity"], "obscurity")
self.assertForm(["tranquilla", "ity"], "tranquility")
self.assertForm(["alta", "tude"], "altitude")
self.assertForm(["certa", "tude"], "certitude")
self.assertForm(["crassa", "tude"], "crassitude")
self.assertForm(["pulchra", "tude"], "pulchritude")
self.assertForm(["sola", "tude"], "solitude")
self.assertForm(["digna", "ify"], "dignify")
self.assertForm(["falsa", "ify"], "falsify")
self.assertForm(["magna", "ify"], "magnify")
self.assertForm(["puter", "ify"], "putrify")
self.assertForm(["aequa", "ate"], "equate")
self.assertForm(["integra", "ate"], "integrate")
self.assertForm(["libera", "ate"], "liberate")
self.assertForm(["valida", "ate"], "validate")
self.assertForm(["stulta", "ify"], "stultify")
self.assertForm(["maxima", "ize"], "maximize")
self.assertForm(["minima", "ize"], "minimize")
self.assertForm(["pessima", "ize"], "pessimize")
self.assertForm(["privata", "ize"], "privatize")
self.assertForm(["tranquilla", "ize"], "tranquilize")
self.assertForm(["re", "cruda", "esce"], "recrudesce")
self.assertForm(["matura", "esce"], "maturesce")
self.assertForm(["puter", "esce", "nt"], "putrescent")
self.assertFormIn(["antiqua", "arium"], ["antiquarium", "antiquary"])
# Latin adjectives, 3rd declension
self.assertForm(["communis", "ity"], "community")
self.assertForm(["levis", "ity"], "levity")
self.assertForm(["maior", "ity"], "majority")
self.assertForm(["real", "ity"], "reality")
self.assertForm(["stabilis", "ity"], "stability")
self.assertForm(["humilis", "tude"], "humilitude")
self.assertForm(["lenis", "tude"], "lenitude")
self.assertForm(["similis", "tude"], "similitude")
self.assertForm(["turpis", "tude"], "turpitude")
self.assertForm(["mollis", "ify"], "mollify")
self.assertForm(["debilis", "ate"], "debilitate")
self.assertForm(["facilis", "ate"], "facilitate")
self.assertForm(["levis", "ate"], "levitate")
self.assertForm(["facilis", "ate"], "facilitate")
# Latin verbs, 1st conjugation, suffixes
self.assertForm(["curare", "ble"], "curable")
self.assertForm(["damnare", "ble"], "damnable")
self.assertForm(["delectare", "ble"], "delectable")
self.assertForm(["laudare", "ble"], "laudable")
self.assertForm(["tolerare", "ble"], "tolerable")
self.assertForm(["fricare", "ile-verb"], "frictile")
self.assertForm(["natare", "ile-verb"], "natatile")
self.assertForm(["plicare", "ile-verb"], "plicatile")
self.assertForm(["secare", "ile-verb"], "sectile")
self.assertForm(["vibrare", "ile-verb"], "vibratile")
self.assertForm(["fricare", "ion"], "friction")
self.assertForm(["jubilare", "ion"], "jubilation")
self.assertForm(["optare", "ion"], "option")
self.assertForm(["secare", "ion"], "section")
self.assertForm(["vibrare", "ion"], "vibration")
self.assertForm(["generare", "ive"], "generative")
self.assertForm(["laxare", "ive"], "laxative")
self.assertForm(["laudare", "ive"], "laudative")
self.assertForm(["narrare", "ive"], "narrative")
self.assertForm(["stare", "ive"], "stative")
self.assertForm(["curare", "or"], "curator")
self.assertForm(["educare", "or"], "educator")
self.assertForm(["liberare", "or"], "liberator")
self.assertForm(["narrare", "or"], "narrator")
self.assertForm(["praedari", "or"], "predator")
self.assertForm(["abundare", "nt"], "abundant")
self.assertForm(["errare", "nt"], "errant")
self.assertForm(["fragrare", "nt"], "fragrant")
self.assertForm(["migrare", "nt"], "migrant")
self.assertForm(["militare", "nt"], "militant")
# Latin verbs, 2nd conjugation, suffixes
self.assertForm(["ardere", "nt"], "ardent")
self.assertForm(["lucere", "nt"], "lucent")
self.assertForm(["paenitere", "nt"], "penitent")
self.assertForm(["torrere", "nt"], "torrent")
self.assertForm(["valere", "nt"], "valent")
self.assertForm(["delere", "ble"], "delible")
self.assertForm(["horrere", "ble"], "horrible")
self.assertForm(["delere", "ion"], "deletion")
self.assertForm(["manere", "ion"], "mansion")
self.assertForm(["movere", "ion"], "motion")
self.assertForm(["sedere", "ion"], "session")
self.assertForm(["tueri", "ion"], "tuition")
# Latin verbs, 3rd conjugation, suffixes
self.assertForm(["crescere", "nt"], "crescent")
self.assertForm(["currere", "nt"], "current")
self.assertForm(["docere", "nt"], "docent")
self.assertForm(["gradi", "nt"], "gradient")
self.assertForm(["nasci", "nt"], "nascent")
self.assertForm(["credere", "ble"], "credible")
self.assertForm(["fallere", "ble"], "fallible")
self.assertForm(["fungi", "ble"], "fungible")
self.assertForm(["tangere", "ble"], "tangible")
self.assertForm(["vincere", "ble"], "vincible")
self.assertForm(["figere", "ion"], "fixation")
self.assertForm(["fungi", "ion"], "function")
self.assertForm(["mittere", "ion"], "mission")
self.assertForm(["petere", "ion"], "petition")
self.assertForm(["scandere", "ion"], "scansion")
# Latin verbs, 4th conjugation, suffixes
self.assertForm(["nutrire", "nt"], "nutrient")
self.assertForm(["mugire", "nt"], "mugient")
self.assertForm(["oriri", "nt"], "orient")
self.assertForm(["salire", "nt"], "salient")
self.assertForm(["sentire", "nt"], "sentient")
self.assertForm(["audire", "ble"], "audible")
self.assertForm(["audire", "ion"], "audition")
self.assertForm(["ex", "haurire", "ion"], "exhaustion")
self.assertForm(["partiri", "ion"], "partition")
self.assertFormIn(["salire", "ion"], ["saltion", "salition"])
self.assertForm(["aperire", "ure"], "aperture")
self.assertForm(["venire", "ure"], "venture")
self.assertForm(["in", "vestire", "ure"], "investiture")
# preposition + verb
self.assertForm(["com", "coquere"], "concoct")
self.assertForm(["com", "fateri"], "confess")
self.assertForm(["com", "finire"], "confine")
self.assertForm(["com", "venire"], "convene")
self.assertForm(["ab", "battuere"], "abate")
self.assertForm(["ad", "facere"], "affect")
self.assertForm(["dis", "apparere"], "disappear")
self.assertForm(["dis", "cernere"], "discern")
self.assertForm(["ex", "facere"], "effect")
self.assertForm(["in", "bibere"], "imbibe")
self.assertForm(["re", "agere"], "react")
self.assertForm(["re", "apparere"], "reappear")
self.assertForm(["per", "facere"], "perfect")
# preposition + verb + suffix
self.assertForm(["ad", "figere", "ion"], "affixation")
self.assertForm(["com", "battuere", "nt"], "combatant")
self.assertForm(["com", "fateri", "ion"], "confession")
self.assertForm(["com", "venire", "ion"], "convention")
self.assertForm(["de", "caedere", "ion"], "decision")
self.assertForm(["ex", "citare", "ion"], "excitation")
self.assertForm(["ex", "facere", "nt"], "efficient")
self.assertForm(["ex", "facere", "ive"], "effective")
self.assertForm(["in", "cantare", "ion"], "incantation")
self.assertForm(["in", "capere", "nt"], "incipient")
self.assertForm(["ob", "cadere", "ion"], "occasion")
self.assertForm(["re", "agere", "ion"], "reaction")
self.assertForm(["re", "capere", "ive"], "receptive")
self.assertForm(["re", "currere", "nt"], "recurrent")
self.assertForm(["per", "emere", "ory"], "peremptory")
# prefix + verb
self.assertForm(["re-again", "cruda", "esce"], "recrudesce")
# two suffixes
self.assertForm(["com", "fidere", "nce", "al"], "confidential")
self.assertForm(["diversa", "ify", "ion"], "diversification")
self.assertForm(["duo", "al", "ity"], "duality")
self.assertForm(["esse", "nce", "al"], "essential")
self.assertForm(["funis", "pote-power", "nt"], "funipotent")
# relative constructions
self.assertForm(["ad", "glomus", "ate"], "agglomerate")
self.assertForm(["ad", "grex", "ate"], "aggregate")
self.assertForm(["ad", "nihil", "ate"], "annihilate")
self.assertForm(["com", "grex", "ate"], "congregate")
self.assertForm(["com", "mensa", "al"], "commensal")
self.assertForm(["com", "mensus", "ate"], "commensurate")
self.assertForm(["de", "fenestra", "ate"], "defenestrate")
self.assertForm(["ex", "onus", "ate"], "exonerate")
self.assertForm(["ex", "pectus", "ate"], "expectorate")
self.assertForm(["ex", "radix", "ate"], "eradicate")
self.assertForm(["in", "carcer", "ate"], "incarcerate")
self.assertForm(["in", "corpus", "ate"], "incorporate")
self.assertForm(["in", "persona", "ate"], "impersonate")
self.assertForm(["in", "semen", "ate"], "inseminate")
self.assertForm(["re-again", "in", "caro", "ate", "ion"], "reincarnation")
self.assertForm(["inter", "columna", "al"], "intercolumnar")
self.assertForm(["inter", "crus", "al"], "intercrural")
self.assertForm(["inter", "dens", "al"], "interdental")
self.assertForm(["inter", "planeta", "ary"], "interplanetary")
self.assertForm(["inter", "stella", "al"], "interstellar")
self.assertForm(["praeter", "natura", "al"], "preternatural")
self.assertForm(["pre", "industria", "al"], "preindustrial")
self.assertForm(["sub", "apex", "al"], "subapical")
self.assertForm(["sub", "bracchium", "al"], "subbrachial")
self.assertForm(["sub", "limen", "al"], "subliminal")
self.assertForm(["super", "natura", "al"], "supernatural")
self.assertForm(["trans", "dermis", "al"], "transdermal")
self.assertForm(["trans", "luna", "al"], "translunar")
# numerical constructions
self.assertForm(["two-join", "camera", "al"], "bicameral")
self.assertForm(["two-join", "latus", "al"], "bilateral")
self.assertForm(["three-join", "geminus", "al"], "trigeminal")
self.assertForm(["three-join", "angulus", "al"], "triangular")
self.assertForm(["three-join", "latus", "al"], "trilateral")
self.assertForm(["four-join", "latus", "al"], "quadrilateral")
# Tests confirming that exception cases work as expected.
def testFormException(self):
# Latin nouns
# aerial
self.assertForm(["aqua", "ous"], "aqueous")
self.assertForm(["arbor", "al"], "arboreal")
self.assertForm(["homo", "cide"], "homicide")
self.assertForm(["fructus", "esce-plant", "nt"], "fructescent")
self.assertForm(["homo", "cide"], "homicide")
self.assertForm(["lapis", "ous"], "lapideous")
self.assertForm(["lignum", "ous"], "ligneous")
self.assertForm(["manus", "form"], "maniform")
self.assertForm(["nux", "ous"], "nuceous")
self.assertForm(["vitis", "etum"], "viticetum")
# Latin verbs
self.assertForm(["debere", "or"], "debtor")
self.assertForm(["jurare", "or"], "juror")
        # Verbs with alternate prefixed forms
self.assertFormIn(["cadere", "nce"], ["cadence", "cadency"])
self.assertForm(["in", "cadere", "nt"], "incident")
self.assertForm(["capere", "ive"], "captive")
self.assertForm(["re", "capere", "ive"], "receptive")
self.assertForm(["re", "capere", "nt"], "recipient")
self.assertForm(["damnare", "ion"], "damnation")
self.assertForm(["com", "damnare", "ion"], "condemnation")
self.assertForm(["facere", "ion"], "faction")
self.assertForm(["in", "facere", "ion"], "infection")
self.assertForm(["ex", "facere", "nt"], "efficient")
self.assertForm(["in", "habere", "ion"], "inhibition")
self.assertForm(["ad", "jacere", "nt"], "adjacent")
self.assertForm(["com", "jacere", "ure"], "conjecture")
self.assertForm(["re", "jacere"], "reject")
self.assertForm(["salire", "nt"], "salient")
self.assertForm(["re", "salire", "nt"], "resilient")
self.assertForm(["scandere", "ion"], "scansion")
self.assertForm(["ad", "scandere", "ion"], "ascension")
self.assertForm(["ad", "scandere", "nt"], "ascendant")
self.assertForm(["tenere", "ion"], "tension")
self.assertForm(["ad", "tenere", "ion"], "attention")
self.assertForm(["tenere", "nt"], "tenent")
self.assertForm(["com", "tenere", "nt"], "continent")
self.assertForm(["violare", "nt"], "violent")
# Verbs with exceptions for "ble"
self.assertForm(["pre", "dicere", "ble"], "predictable")
self.assertForm(["trans", "ferre", "ble"], "transferable")
self.assertForm(["flectere", "ble"], "flexible")
#self.assertForm(["sub", "mergere", "ble"], "submersible")
self.assertForm(["ad", "mittere", "ble"], "admissible")
self.assertForm(["movere", "ble"], "movable")
self.assertForm(["com", "plere", "ble"], "completable")
self.assertForm(["com", "prehendere", "ble"], "comprehensible")
self.assertForm(["ad", "quaerere", "ble"], "acquirable")
self.assertForm(["re", "vertere", "ble"], "reversible")
self.assertForm(["videre", "ble"], "visible")
# Tests that forms that were made impossible in order to make other forms possible still don't work.
# If these fail, it may not be a problem, but I should confirm that no other desired forms were lost.
def testUnrealizedForms(self):
self.assertFormNot(["humilis", "ate"], "humiliate")
self.assertFormNot(["de", "cadere", "nt"], "decadent")
self.assertFormNot(["ex", "sanguis", "ate"], "exsanguinate")
# Helpers ==========
def assertForm(self, keys, form):
word = word_for_keys(keys, self.morphothec)
composed = composer.get_form(word)
self.assertEqual(composed, form)
def assertFormIn(self, keys, forms):
word = word_for_keys(keys, self.morphothec)
form = composer.get_form(word)
self.assertIn(form, forms)
def assertFormNot(self, keys, form):
word = word_for_keys(keys, self.morphothec)
composed = composer.get_form(word)
self.assertNotEqual(composed, form)
def countForm(self, keys, form):
word = word_for_keys(keys, self.morphothec)
composed = composer.get_form(word)
if composed == form:
return 1
else:
return 0
if __name__ == '__main__':
unittest.main() |
import json
from unittest.mock import Mock
import graphene
import pytest
from django.shortcuts import reverse
from tests.utils import get_graphql_content
from saleor.product.models import (
Category, ProductAttribute, AttributeChoiceValue)
from saleor.graphql.product.utils import attributes_to_hstore
from saleor.graphql.product.types import resolve_attribute_value_type, ProductAttributeValueType
def test_attributes_to_hstore(product, color_attribute):
color_value = color_attribute.values.first()
# test transforming slugs of existing attributes to IDs
input_data = [{
'slug': color_attribute.slug, 'value': color_value.slug}]
attrs_qs = product.product_type.product_attributes.all()
ids = attributes_to_hstore(input_data, attrs_qs)
assert str(color_attribute.pk) in ids
assert ids[str(color_attribute.pk)] == str(color_value.pk)
# test creating a new attribute value
input_data = [{
'slug': color_attribute.slug, 'value': 'Space Grey'}]
ids = attributes_to_hstore(input_data, attrs_qs)
new_value = AttributeChoiceValue.objects.get(slug='space-grey')
assert str(color_attribute.pk) in ids
assert ids[str(color_attribute.pk)] == str(new_value.pk)
# test passing an attribute that doesn't belong to this product raises
# an error
input_data = [{'slug': 'not-an-attribute', 'value': 'not-a-value'}]
with pytest.raises(ValueError):
attributes_to_hstore(input_data, attrs_qs)
def test_attributes_query(user_api_client, product):
attributes = ProductAttribute.objects.prefetch_related('values')
query = '''
query {
attributes {
edges {
node {
id
name
slug
values {
id
name
slug
}
}
}
}
}
'''
response = user_api_client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
attributes_data = content['data']['attributes']['edges']
assert len(attributes_data) == attributes.count()
def test_attributes_in_category_query(user_api_client, product):
category = Category.objects.first()
query = '''
query {
attributes(inCategory: "%(category_id)s") {
edges {
node {
id
name
slug
values {
id
name
slug
}
}
}
}
}
''' % {'category_id': graphene.Node.to_global_id('Category', category.id)}
response = user_api_client.post(reverse('api'), {'query': query})
content = get_graphql_content(response)
assert 'errors' not in content
attributes_data = content['data']['attributes']['edges']
assert len(attributes_data) == ProductAttribute.objects.count()
def test_create_product_attribute(admin_api_client):
query = """
mutation createAttribute($name: String!, $slug: String!) {
productAttributeCreate(input: {name: $name, slug: $slug}) {
productAttribute {
name
slug
values {
name
slug
}
}
}
}
"""
name = 'test name'
slug = 'test-slug'
variables = json.dumps({'name': name, 'slug': slug})
response = admin_api_client.post(
reverse('api'), {'query': query, 'variables': variables})
content = get_graphql_content(response)
assert 'errors' not in content
data = content['data']['productAttributeCreate']['productAttribute']
assert data['name'] == name
assert data['slug'] == slug
assert not data['values']
def test_update_product_attribute(admin_api_client, color_attribute):
attribute = color_attribute
query = """
mutation updateAttribute($id: ID!, $name: String!, $slug: String!) {
productAttributeUpdate(id: $id, input: {name: $name, slug: $slug}) {
productAttribute {
name
}
}
}
"""
name = 'Wings name'
slug = attribute.slug
id = graphene.Node.to_global_id('ProductAttribute', attribute.id)
variables = json.dumps({'name': name, 'id': id, 'slug': slug})
response = admin_api_client.post(
reverse('api'), {'query': query, 'variables': variables})
content = get_graphql_content(response)
attribute.refresh_from_db()
assert 'errors' not in content
data = content['data']['productAttributeUpdate']['productAttribute']
assert data['name'] == name == attribute.name
def test_delete_product_attribute(admin_api_client, color_attribute):
attribute = color_attribute
query = """
mutation deleteAttribute($id: ID!) {
productAttributeDelete(id: $id) {
productAttribute {
id
}
}
}
"""
id = graphene.Node.to_global_id('ProductAttribute', attribute.id)
variables = json.dumps({'id': id})
response = admin_api_client.post(
reverse('api'), {'query': query, 'variables': variables})
content = get_graphql_content(response)
assert 'errors' not in content
with pytest.raises(attribute._meta.model.DoesNotExist):
attribute.refresh_from_db()
def test_create_attribute_choice_value(admin_api_client, color_attribute):
attribute = color_attribute
query = """
mutation createChoice($attribute: ID!, $name: String!, $slug: String!, $value: String!) {
attributeChoiceValueCreate(
input: {attribute: $attribute, name: $name, slug: $slug, value: $value}) {
attributeChoiceValue {
name
slug
type
value
}
}
}
"""
attribute_id = graphene.Node.to_global_id('ProductAttribute', attribute.id)
name = 'test name'
slug = 'test-slug'
value = 'test-string'
variables = json.dumps(
{'name': name, 'slug': slug, 'value': value, 'attribute': attribute_id})
response = admin_api_client.post(
reverse('api'), {'query': query, 'variables': variables})
content = get_graphql_content(response)
assert 'errors' not in content
data = content[
'data']['attributeChoiceValueCreate']['attributeChoiceValue']
assert data['name'] == name
assert data['slug'] == slug
assert data['value'] == value
assert data['type'] == 'STRING'
def test_update_attribute_choice_value(admin_api_client, pink_choice_value):
value = pink_choice_value
query = """
mutation updateChoice($id: ID!, $name: String!, $slug: String!) {
attributeChoiceValueUpdate(
id: $id, input: {name: $name, slug: $slug}) {
attributeChoiceValue {
name
slug
}
}
}
"""
id = graphene.Node.to_global_id('ProductAttributeValue', value.id)
name = 'Crimson'
slug = value.slug
variables = json.dumps(
{'name': name, 'slug': slug, 'id': id})
response = admin_api_client.post(
reverse('api'), {'query': query, 'variables': variables})
content = get_graphql_content(response)
assert 'errors' not in content
value.refresh_from_db()
data = content[
'data']['attributeChoiceValueUpdate']['attributeChoiceValue']
assert data['name'] == name == value.name
def test_delete_attribute_choice_value(admin_api_client, color_attribute, pink_choice_value):
value = pink_choice_value
value = color_attribute.values.get(name='Red')
query = """
mutation updateChoice($id: ID!) {
attributeChoiceValueDelete(id: $id) {
attributeChoiceValue {
name
slug
}
}
}
"""
id = graphene.Node.to_global_id('ProductAttributeValue', value.id)
variables = json.dumps({'id': id})
response = admin_api_client.post(
reverse('api'), {'query': query, 'variables': variables})
content = get_graphql_content(response)
assert 'errors' not in content
with pytest.raises(value._meta.model.DoesNotExist):
value.refresh_from_db()
@pytest.mark.parametrize('raw_value, expected_type', [
('#0000', ProductAttributeValueType.COLOR),
('#FF69B4', ProductAttributeValueType.COLOR),
('rgb(255, 0, 0)', ProductAttributeValueType.COLOR),
('hsl(0, 100%, 50%)', ProductAttributeValueType.COLOR),
('hsla(120, 60%, 70%, 0.3)', ProductAttributeValueType.COLOR),
('rgba(100%, 255, 0, 0)', ProductAttributeValueType.COLOR),
('http://example.com', ProductAttributeValueType.URL),
('https://example.com', ProductAttributeValueType.URL),
('ftp://example.com', ProductAttributeValueType.URL),
('example.com', ProductAttributeValueType.STRING),
('Foo', ProductAttributeValueType.STRING),
('linear-gradient(red, yellow)', ProductAttributeValueType.GRADIENT),
('radial-gradient(#0000, yellow)', ProductAttributeValueType.GRADIENT),
])
def test_resolve_attribute_value_type(raw_value, expected_type):
assert resolve_attribute_value_type(raw_value) == expected_type
def test_query_attribute_values(
color_attribute, pink_choice_value, user_api_client):
attribute_id = graphene.Node.to_global_id(
'ProductAttribute', color_attribute.id)
query = """
query getAttribute($id: ID!) {
attributes(id: $id) {
edges {
node {
id
name
values {
name
type
value
}
}
}
}
}
"""
variables = json.dumps({'id': attribute_id})
response = user_api_client.post(
reverse('api'), {'query': query, 'variables': variables})
content = get_graphql_content(response)
assert 'errors' not in content
data = content['data']['attributes']['edges'][0]['node']
values = data['values']
pink = [v for v in values if v['name'] == pink_choice_value.name]
assert len(pink) == 1
pink = pink[0]
assert pink['value'] == '#FF69B4'
assert pink['type'] == 'COLOR'
|
class A1:
foo = <error descr="Unresolved reference 'B1'">B1</error>()
class B1:
pass
class A21:
class A22:
bar = <error descr="Unresolved reference 'B2'">B2</error>()
class B2:
pass
class A31:
def baz(self):
class A32:
egg = B3()
class B3:
pass |
import numpy as np
import matplotlib.pyplot as plt
"""
Extensions of DictLearner that keep track of how well
they have recovered a known sparse model. The data passed in should
be a StimSet.ToySparseSet object.
"""
def make_fit_learner_class(Learner):
"""Given a particular DictLearner class, returns a version of it that
keeps track of how well it has recovered a known sparse model."""
class FitLearner(Learner):
def initialize_stats(self):
self.modfits = np.array([])
Learner.initialize_stats(self)
def store_statistics(self, *args, **kwargs):
self.modfits = np.append(self.modfits, self.stims.test_fit(self.Q))
return Learner.store_statistics(self, *args, **kwargs)
def get_histories(self):
histories = Learner.get_histories(self)
histories['modfits'] = self.modfits
return histories
def set_histories(self, histories):
try:
self.modfits = histories['modfits']
except KeyError:
print('Model fit history not available.')
Learner.set_histories(self, histories)
def fit_progress_plot(self, window_size=100, norm=1, start=0, end=-1,
ax=None):
"""Plots a moving average of the error and activity history
with the given averaging window."""
if window_size == 1:
def conv(x):
return x[start:end]
else:
window = np.ones(int(window_size))/float(window_size)
                def conv(history):
                    # convolve the passed-in history, not the outer errorhist
                    return np.convolve(history[start:end], window, 'valid')
try:
errorhist = self.errorhist
            except AttributeError:
errorhist = self.mse_history
smoothederror = conv(errorhist)
if norm == 2:
acthist = self.L2hist
elif norm == 0:
acthist = self.L0hist
else:
try:
acthist = self.L1hist
                except AttributeError:
acthist = self.L1_history
smoothedactivity = conv(acthist)
smoothedmodfits = conv(self.modfits)
            if ax is None:
                fig = plt.figure()
                ax = fig.add_subplot(111)
            lines = []
lines = lines + ax.plot(smoothederror, 'b')
lines = lines + ax.plot(smoothedactivity, 'g')
lines = lines + ax.plot(smoothedmodfits, 'r')
labels = ['MSE', 'L1 activity', 'Model recovery']
try:
lam = self.lam
loss = 0.5*smoothederror + lam*smoothedactivity
lines = lines + ax.plot(loss, 'm')
labels.append('Sparse coding loss')
            except AttributeError:
pass
ax.legend(lines, labels)
return ax
return FitLearner
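# Usage sketch (hypothetical learner class, stimulus set, and constructor
# arguments; the actual DictLearner subclasses and their signatures live
# elsewhere in the package):
#   FitMP = make_fit_learner_class(MatchingPursuit)
#   learner = FitMP(toy_sparse_stims, nunits=200)
#   learner.fit_progress_plot(window_size=50)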
|
from flask import jsonify
from lin import route_meta, group_required, login_required
from lin.exception import Success
from lin.redprint import Redprint
from app.validators.forms import ThirdClientForm,WxClientForm
from app.models.third_client.third_bind import ThirdBind
member_api = Redprint('member')
@member_api.route('/check-reg', methods=['POST'])
def checkReg():
form = ThirdClientForm().validate_for_api()
resp = ThirdBind.wx_check_reg(form)
return jsonify(resp)
@member_api.route('/login', methods=['POST'])
def login():
form = WxClientForm().validate_for_api()
resp = ThirdBind.wx_register(form)
return jsonify(resp)
|
"""
Copyright (c) 2013, rhambach.
This file is part of the TEMareels package and released
under the MIT-Licence. See LICENCE file for details.
"""
import Release
__all__ = ["aperture","ecal","qcal","gui","tools"]
__version__ = str(Release.version);
__author__ = ", ".join(Release.authors);
__license__ = str(Release.license);
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File : scriptsytSequence.py
# Author : yang <[email protected]>
# Date : 10.03.2019
# Last Modified Date: 14.03.2019
# Last Modified By : yang <[email protected]>
# from ytLoggingSettings import yl
import re
import os
import shutil
import string
from ytLoggingSettings import yl
class sequence():
'''
path : /home/test/abc.(###|%03d).exr.
pattern : abc.\d{3,}.exr <= a regex format path
prefix name : abc
ext : exr
seqMark : ### or %03d
markLenth : len(###)
'''
def __init__(self, path='', start=0, last=0, frames=[], pattern='', regex=r'^(.*?\.?)((#+)|%(\d*)d)(\.?\w+)$'):
self.__path = path
self.__start = start
self.__last = last
self.__frames = frames[:]
self.__pattern = pattern
self.__regex = regex
self.__seqMark = ''
def __str__(self):
lists = self.optimizeList()
if lists:
l = len(lists)
if l == 1:
return 'sequence: %s\nall: %d lost: 0\nlostFrames: [] existentFrames: [%d-%d]' % (self.__path, len(lists[0]), lists[0][0], lists[0][-1])
existentFrames = ''
lostFrames = ''
i = 0
while i < l - 1:
existentFrames += '[%d-%d], ' % (lists[i][0], lists[i][-1])
lostFrames += '[%d-%d], ' % (lists[i][-1]+1, lists[i + 1][0]-1)
i += 1
existentFrames += '[%d-%d], ' % (lists[i][0], lists[i][-1])
return 'sequence: %s\nall: %d lost: %d\nlostFrames: %s\nexistentFrames: %s' % (self.__path, self.__last - self.__start + 1, self.__last - self.__start + 1 - len(self.__frames), lostFrames[:-2], existentFrames[:-2])
else:
return 'sequence: %s\nall: %d lost: 0\nlostFrames: [] existentFrames: [%d-%d]' % (self.__path, 0, 0, 0)
def setPath(self, path):
'''
set sequence path
'''
self.__path = path
def setFrames(self, frames):
'''
set sequence frames
'''
        # list.sort() returns None, so sort via sorted() to keep the frames
        self.__frames = sorted(frames)
self.__start = min(frames)
self.__last = max(frames)
def setPattern(self, pattern):
'''
set pattern
'''
self.__pattern = pattern
def actualFrames(self):
'''
get actual frames
'''
return self.__frames
def start(self):
return self.__start
def last(self):
return self.__last
def seqMark(self):
return self.__seqMark
def frames(self):
return self.__frames
def lostFrames(self):
'''
return lost frames list, or None
'''
lost = []
for i in range(self.__start, self.__last + 1):
if i not in self.__frames:
lost.append(i)
return lost
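    # Example (sketch): with start=1, last=5 and frames=[1, 2, 5],
    # lostFrames() returns [3, 4] and optimizeList() below returns [[1, 2], [5]].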
def optimizeList(self):
if len(self.__frames) > 0:
frameLen = len(self.__frames)
if frameLen < 2:
return [self.__frames]
splittedFrame = []
splittedList = [self.__frames[0]]
i = 1
while i < frameLen:
if abs(self.__frames[i] - self.__frames[i - 1]) == 1:
splittedList.append(self.__frames[i])
else:
splittedFrame.append(splittedList)
splittedList = [self.__frames[i]]
i += 1
splittedFrame.append(splittedList)
return splittedFrame
else:
            yl.error('there is nothing in frames; please ensure there are files in this path, then call the scan method to get frames')
def path(self):
'''
return path
'''
return self.__path
def pattern(self):
'''
return regex pattern that matched files
'''
return self.__pattern
def scan(self, files, resetStartAndLast=True):
'''
use pattern to scan files in frame range
'''
if len(files) > 0:
yl.debug('start scan sequence: %s' % self.__path)
analizePathPattern = re.compile(self.__regex)
result = analizePathPattern.match(os.path.basename(self.__path))
self.__seqMark = result.group(2)
yl.debug('path pattern: %s' % str(result.groups()))
numLenth = (result.groups()[2] and len(result.groups()[2])) or (result.groups()[1] and ((result.groups()[3] and int(result.groups()[3])) or 1))
            preName = re.sub(r'([%s])' % string.punctuation, r'\\\1', result.group(1))
self.__pattern = preName + r'\.?(\d{%d,})' % numLenth + r'\.?' + result.groups()[4] + '$'
yl.debug('file pattern: %s' % self.__pattern)
self.pathPattern = re.compile(self.__pattern)
i = 0
while i<len(files):
result = self.pathPattern.match(files[i])
if result and (len(result.groups()[0]) == numLenth or len(result.groups()[0]) > numLenth and not result.groups()[0].startswith('0')):
files.pop(i)
self.__frames.append(int(result.groups()[0]))
continue
else:
i+=1
self.__frames.sort()
if resetStartAndLast and len(self.__frames) > 0:
self.__start = min(self.__frames)
self.__last = max(self.__frames)
return files
else:
yl.debug('scan files is empty')
def move(self, newName=None, newDirname=None, replace=False):
'''
        move the files of the sequence
        if newName is None, keep the source name
        if newDirname is None, this acts like a rename
        if replace is True and the destination path exists, remove it, then move.
'''
        # newName analysis
dirname = os.path.dirname(self.__path)
if newDirname is None:
newDirname = dirname
if newName is None:
newName = os.path.basename(self.__path)
analizePathPattern = re.compile(self.__regex)
newNameResult = analizePathPattern.match(newName)
if newNameResult:
result = analizePathPattern.match(os.path.basename(self.__path))
for num in self.__frames:
fileName = ''.join((result.group(1), str(seq2num(num, result.group(2))), result.group(5)))
newName = ''.join((newNameResult.group(1), str(seq2num(num, newNameResult.group(2))), newNameResult.group(5)))
if newName != fileName or newDirname != dirname:
if os.path.exists(os.path.join(newDirname, newName)):
if replace:
                        try:
                            os.remove(os.path.join(newDirname, newName))
                            yl.warning('destination exists, removing it')
                        except Exception as e:
                            yl.error(str(e))
                    else:
                        yl.warning('move failed, destination exists, skipping')
                        continue
                try:
                    os.rename(os.path.join(dirname, fileName), os.path.join(newDirname, newName))
                except Exception as e:
                    yl.error(str(e))
yl.debug('move file: {} => {}'.format(fileName, newName))
else:
yl.warning('move failed, destination name is the same as source name')
else:
yl.error('newName format error, for example: abc.###.dpx, abc.%05d.dpx')
def copy(self, newName=None, newDirname=None, replace=False):
'''
        copy the files of the sequence
        if newName is None, keep the source name
        if newDirname is None, copy into the source directory, so newName must differ from the source name.
        if replace is True and the destination path exists, remove it, then copy.
'''
        # newName analysis
        dirname = os.path.dirname(self.__path)
        if newDirname is None:
            newDirname = dirname
        elif not os.path.isdir(newDirname):
            # create the destination directory if needed (os.mkdirs does not exist)
            os.makedirs(newDirname)
if newName is None:
newName = os.path.basename(self.__path)
analizePathPattern = re.compile(self.__regex)
newNameResult = analizePathPattern.match(newName)
if newNameResult:
result = analizePathPattern.match(os.path.basename(self.__path))
for num in self.__frames:
fileName = ''.join((result.group(1), str(seq2num(num, result.group(2))), result.group(5)))
newName = ''.join((newNameResult.group(1), str(seq2num(num, newNameResult.group(2))), newNameResult.group(5)))
if newName != fileName or newDirname != dirname:
if os.path.exists(os.path.join(newDirname, newName)):
if replace:
                        try:
                            os.remove(os.path.join(newDirname, newName))
                            yl.warning('destination exists, removing it')
                        except Exception as e:
                            yl.error(str(e))
                    else:
                        yl.warning('copy failed, destination exists, skipping')
                        continue
                try:
                    shutil.copyfile(os.path.join(dirname, fileName), os.path.join(newDirname, newName))
                except Exception as e:
                    yl.error(str(e))
yl.debug('copy file: {} => {}'.format(fileName, newName))
else:
yl.warning('copy failed, destination name is the same as source name')
else:
yl.error('newName format error, for example: abc.###.dpx, abc.%05d.dpx')
def scanPath(path):
files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
    regex = r'^(.*?\.?)(\d*)(\.?\w+)$'  # used to analyze the path of a sequence
pattern = re.compile(regex)
filePatterns = []
seqs = []
# filePattern = []
for f in files:
result = pattern.match(f)
if result:
seqMarks = num2seq(result.group(2))
for sm in seqMarks:
compare = ''.join([result.group(1), sm, result.group(3)])
if compare not in filePatterns:
filePatterns.append(compare)
else:
break
for fp in filePatterns:
seq = sequence(path=os.path.join(path, fp).replace('\\', '/'))
files = seq.scan(files, True)
seqs.append(seq)
return seqs
def num2seq(num):
if num.isdigit():
seqNum = []
if len(num) == 1:
seqNum.append('%d')
elif (len(num) > 1 and not num.startswith('0')):
seqNum.append('%d')
seqNum.append('%0{}d'.format(len(num)))
else:
seqNum.append('%0{}d'.format(len(num)))
return seqNum
else:
return ''
def seq2num(num, seqMark):
if seqMark:
if '#' in seqMark:
if len(seqMark) == 1:
return num
else:
return '{{:>0{}d}}'.format(len(seqMark)).format(num)
else:
return seqMark % num
else:
        return num
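# Examples (sketch): num2seq('0010') -> ['%04d'], num2seq('12') -> ['%d', '%02d'];
# seq2num(10, '####') -> '0010', seq2num(10, '%03d') -> '010'.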
if __name__ == '__main__':
# seqs = scanPath(r'e:\CG1001')
# for seq in seqs:
# seq.rename('test')
path = 'E:/CG1001/abc.#.dpx'
test = sequence(path=path)
test.scan(os.listdir(os.path.dirname(path)), True)
test.move(newDirname='E:/CG1001/abc', replace=True)
|
from axelrod.action import Action
from axelrod.player import Player
C, D = Action.C, Action.D
class VeryBad(Player):
"""
It cooperates in the first three rounds, and uses probability
(it implements a memory, which stores the opponent’s moves) to decide for
cooperating or defecting.
Due to a lack of information as to what that probability refers to in this
context, probability(P(X)) refers to (Count(X)/Total_Moves) in this
implementation
P(C) = Cooperations / Total_Moves
P(D) = Defections / Total_Moves = 1 - P(C)
Names:
- VeryBad: [Andre2013]_
"""
name = "VeryBad"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
@staticmethod
def strategy(opponent: Player) -> Action:
total_moves = len(opponent.history)
if total_moves < 3:
return C
cooperations = opponent.cooperations
cooperation_probability = cooperations / total_moves
if cooperation_probability > 0.5:
return C
elif cooperation_probability < 0.5:
return D
else:
return opponent.history[-1]
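# Behaviour sketch: with opponent history [C, C, D], P(C) = 2/3 > 0.5, so the
# strategy returns C; with [D, D, C] it returns D; at exactly 0.5 it mirrors
# the opponent's last move.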
|
# coding: utf-8
# flake8: noqa
"""
Home Connect API
This API provides access to home appliances enabled by Home Connect (https://home-connect.com). Through the API programs can be started and stopped, or home appliances configured and monitored. For instance, you can start a cotton program on a washer and get a notification when the cycle is complete. To get started with this web client, visit https://developer.home-connect.com and register an account. An application with a client ID for this API client will be automatically generated for you. In order to use this API in your own client, you need an OAuth 2 client implementing the authorization code grant flow (https://developer.home-connect.com/docs/authorization/flow). More details can be found here: https://www.rfc-editor.org/rfc/rfc6749.txt Authorization URL: https://api.home-connect.com/security/oauth/authorize Token URL: https://api.home-connect.com/security/oauth/token # noqa: E501
OpenAPI spec version: 1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from home_connect_api.api.commands_api import CommandsApi
from home_connect_api.api.default_api import DefaultApi
from home_connect_api.api.images_api import ImagesApi
from home_connect_api.api.programs_api import ProgramsApi
from home_connect_api.api.settings_api import SettingsApi
from home_connect_api.api.status_events_api import StatusEventsApi
# import ApiClient
from home_connect_api.api_client import ApiClient
from home_connect_api.configuration import Configuration
# import models into sdk package
from home_connect_api.models.active_program_not_set_error import ActiveProgramNotSetError
from home_connect_api.models.array_of_available_programs import ArrayOfAvailablePrograms
from home_connect_api.models.array_of_events import ArrayOfEvents
from home_connect_api.models.array_of_home_appliances import ArrayOfHomeAppliances
from home_connect_api.models.array_of_images import ArrayOfImages
from home_connect_api.models.array_of_options import ArrayOfOptions
from home_connect_api.models.array_of_programs import ArrayOfPrograms
from home_connect_api.models.array_of_settings import ArrayOfSettings
from home_connect_api.models.array_of_status import ArrayOfStatus
from home_connect_api.models.command import Command
from home_connect_api.models.command_definition import CommandDefinition
from home_connect_api.models.conflict import Conflict
from home_connect_api.models.conflict_error import ConflictError
from home_connect_api.models.forbidden_error import ForbiddenError
from home_connect_api.models.get_setting import GetSetting
from home_connect_api.models.home_appliance import HomeAppliance
from home_connect_api.models.interal_server_error import InteralServerError
from home_connect_api.models.no_program_active_error import NoProgramActiveError
from home_connect_api.models.no_program_selected_error import NoProgramSelectedError
from home_connect_api.models.not_acceptable_error import NotAcceptableError
from home_connect_api.models.not_found_error import NotFoundError
from home_connect_api.models.option import Option
from home_connect_api.models.program import Program
from home_connect_api.models.program_definition import ProgramDefinition
from home_connect_api.models.program_not_available_error import ProgramNotAvailableError
from home_connect_api.models.put_setting import PutSetting
from home_connect_api.models.request_timeout_error import RequestTimeoutError
from home_connect_api.models.selected_program_not_set_error import SelectedProgramNotSetError
from home_connect_api.models.status import Status
from home_connect_api.models.too_many_requests_error import TooManyRequestsError
from home_connect_api.models.unauthorized_error import UnauthorizedError
from home_connect_api.models.unsupported_media_type_error import UnsupportedMediaTypeError
from home_connect_api.models.wrong_operation_state_error import WrongOperationStateError
|
# -*- coding: utf8 -*-
# ============LICENSE_START====================================================
# org.onap.vvp/validation-scripts
# ===================================================================
# Copyright © 2019 AT&T Intellectual Property. All rights reserved.
# ===================================================================
#
# Unless otherwise specified, all software contained herein is licensed
# under the Apache License, Version 2.0 (the "License");
# you may not use this software except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# Unless otherwise specified, all documentation contained herein is licensed
# under the Creative Commons License, Attribution 4.0 Intl. (the "License");
# you may not use this documentation except in compliance with the License.
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by/4.0/
#
# Unless required by applicable law or agreed to in writing, documentation
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============LICENSE_END============================================
#
#
import os
from tests.helpers import get_base_template_from_yaml_files
from tests.utils.nested_files import get_nested_files
def is_incremental_module(yaml_file, yaml_files):
"""
Returns true if the file is not a base module, volume module, or nested module.
"""
base_template = get_base_template_from_yaml_files(yaml_files)
nested_templates = get_nested_files(yaml_files)
is_volume_module = os.path.splitext(yaml_file)[0].endswith("_volume")
return (
yaml_file != base_template
and yaml_file not in nested_templates
and not is_volume_module
)
def get_incremental_modules(yaml_files):
"""
    Returns a list of file paths for the incremental modules in yaml_files.
"""
return [f for f in yaml_files if is_incremental_module(f, yaml_files)]
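# Example (sketch): for yaml_files = ["base.yaml", "base_volume.yaml",
# "module_a.yaml", "nested.yaml"], where "base.yaml" is the base template and
# "nested.yaml" is referenced as a nested file, get_incremental_modules() would
# return only ["module_a.yaml"]: the base template, the "_volume" module, and the
# nested template are all filtered out.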
|
"""
This file demonstrates the usage of a dataclass to abstract away data operations. Using dataclasses can be useful
for working with data where conversion from DataFrames or JSON is required.
"""
from dataclasses import dataclass
from pandas import Series
@dataclass
class OtherSampleDataItem:
state: str
total_exports: float
beef_exports: float
pork_exports: float
poultry_exports: float
dairy_exports: float
@classmethod
def from_df_row(cls, row: Series):
return cls(
state=row["state"],
total_exports=row["total exports"],
beef_exports=row["beef"],
pork_exports=row["pork"],
poultry_exports=row["poultry"],
dairy_exports=row["dairy"]
)
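if __name__ == "__main__":
    # Minimal usage sketch: the column names below ("state", "total exports",
    # "beef", "pork", "poultry", "dairy") mirror the keys read in from_df_row and
    # are assumed sample values for demonstration only.
    import pandas as pd
    df = pd.DataFrame([
        {"state": "Iowa", "total exports": 100.0, "beef": 20.0,
         "pork": 30.0, "poultry": 25.0, "dairy": 25.0},
    ])
    items = [OtherSampleDataItem.from_df_row(row) for _, row in df.iterrows()]
    print(items[0])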
|
from tabulate import tabulate
from collections import OrderedDict
# We create a JobContext class that holds Spark accumulators and broadcast variables.
class JobContext(object):
    def __init__(self, sc):
        self.counters = OrderedDict()
        self.constants = OrderedDict()
        self._init_accumulators(sc)  # accumulators are here!
        self._init_shared_data(sc)  # broadcast values are here!
    def _init_accumulators(self, sc):
        pass
    def _init_shared_data(self, sc):
        pass
    def initialize_counter(self, sc, name):
        self.counters[name] = sc.accumulator(0)
    def set_constant(self, sc, name, value):
        self.constants[name] = sc.broadcast(value)
def inc_counter(self, name, value=1):
if name not in self.counters:
raise ValueError(
'{!s} counter was not initialized. ({!s})'.format(
name, self.counters.keys()
)
)
self.counters[name] += value
def print_accumulators(self):
print(tabulate(
self.counters.items(),
self.counters.keys(), tablefmt="simple")
)
def print_broadcasts(self):
print(tabulate(
self.constants.items(),
self.constants.keys(), tablefmt='simple')
)
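if __name__ == "__main__":
    # Minimal usage sketch, assuming a local pyspark installation; the counter and
    # constant names used here are illustrative only.
    from pyspark import SparkContext
    sc = SparkContext(master="local[1]", appName="job-context-demo")
    context = JobContext(sc)
    context.initialize_counter(sc, "records_seen")
    context.set_constant(sc, "threshold", 0.5)
    context.inc_counter("records_seen", 3)
    context.print_accumulators()
    sc.stop()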
|
AUTOLBOX_HEURISTIC_LINE = True
AUTOLBOX_HEURISTIC_HIST = True
AUTOLBOX_HEURISTIC_STACK = True
|
from django.contrib.admin import register, ModelAdmin
from django_admin_inline_paginator.admin import TabularInlinePaginated
from .models import Country, State
class StateAdminInline(TabularInlinePaginated):
fields = ('name', 'active')
per_page = 1
model = State
@register(State)
class StateAdmin(ModelAdmin):
fields = ('country', 'name', 'active')
model = State
@register(Country)
class CountryAdmin(ModelAdmin):
fields = ('name', 'active')
inlines = (StateAdminInline, )
model = Country
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CryptoKey',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('flags', models.PositiveIntegerField(help_text=b'Key flags.', verbose_name='flags')),
('active', models.BooleanField(help_text=b'Check to activate key.', verbose_name='active')),
('content', models.TextField(help_text=b'Enter the key data.', null=True, verbose_name='content', blank=True)),
('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
],
options={
'ordering': ['domain'],
'db_table': 'cryptokeys',
'verbose_name': 'crypto key',
'verbose_name_plural': 'crypto keys',
'get_latest_by': 'date_modified',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Domain',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text=b'This field is the actual domainname. This is the field that powerDNS matches to when it gets a request. The domainname should be in the format of: domainname.TLD (no trailing dot)', unique=True, max_length=255, verbose_name='name', db_index=True)),
('master', models.CharField(help_text=b'Enter a comma delimited list of nameservers that are master for this domain. This setting applies only to slave zones.', max_length=128, null=True, verbose_name='master', blank=True)),
('last_check', models.PositiveIntegerField(help_text=b'Last time this domain was checked for freshness.', max_length=11, null=True, verbose_name='last check')),
('type', models.CharField(default=b'NATIVE', help_text=b'Select the zone type. Native refers to native SQL replication. Master/Slave refers to DNS server based zone transfers.', max_length=6, verbose_name='type', choices=[(b'NATIVE', b'Native'), (b'MASTER', b'Master'), (b'SLAVE', b'Slave')])),
('notified_serial', models.PositiveIntegerField(help_text=b'The last notified serial of a master domain. This is updated from the SOA record of the domain.', max_length=11, null=True, verbose_name='notified serial')),
('account', models.CharField(help_text=b'Determine if a certain host is a supermaster for a certain domain name. (???)', max_length=40, null=True, verbose_name='account', blank=True)),
('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
('created_by', models.ForeignKey(related_name='powerdns_manager_domain_created_by', verbose_name='owner username', to=settings.AUTH_USER_MODEL, help_text=b'The Django user this zone belongs to.', null=True)),
],
options={
'ordering': ['name'],
'db_table': 'domains',
'verbose_name': 'zone',
'verbose_name_plural': 'zones',
'get_latest_by': 'date_modified',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DomainMetadata',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('kind', models.CharField(help_text=b'Select a setting.', max_length=16, verbose_name='setting', choices=[(b'ALLOW-AXFR-FROM', b'ALLOW-AXFR-FROM'), (b'AXFR-MASTER-TSIG', b'AXFR-MASTER-TSIG'), (b'LUA-AXFR-SCRIPT', b'LUA-AXFR-SCRIPT'), (b'NSEC3NARROW', b'NSEC3NARROW'), (b'NSEC3PARAM', b'NSEC3PARAM'), (b'PRESIGNED', b'PRESIGNED'), (b'SOA-EDIT', b'SOA-EDIT'), (b'TSIG-ALLOW-AXFR', b'TSIG-ALLOW-AXFR')])),
('content', models.TextField(help_text=b'Enter the metadata.', null=True, verbose_name='content', blank=True)),
('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
('domain', models.ForeignKey(related_name='powerdns_manager_domainmetadata_domain', verbose_name='domain', to='powerdns_manager.Domain', help_text='Select the domain this record belongs to.')),
],
options={
'ordering': ['kind'],
'db_table': 'domainmetadata',
'verbose_name': 'domain metadata',
'verbose_name_plural': 'domain metadata',
'get_latest_by': 'date_modified',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='DynamicZone',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_dynamic', models.BooleanField(help_text=b'Check to mark this zone as dynamic. An API key will be generated for you so as to be able to update the A and AAAA records IP addresses over HTTP.', verbose_name='Dynamic zone')),
('api_key', models.CharField(help_text=b'The API key is generated automatically. To reset it, use the relevant action in the changelist view.', max_length=64, null=True, verbose_name='API Key')),
('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
('domain', models.ForeignKey(related_name='powerdns_manager_dynamiczone_domain', verbose_name='domain', to='powerdns_manager.Domain', help_text='Select the domain, the A and AAAA records of which might be updated dynamically over HTTP.', unique=True)),
],
options={
'ordering': ['-domain'],
'db_table': 'dynamiczones',
'verbose_name': 'dynamic zone',
'verbose_name_plural': 'dynamic zones',
'get_latest_by': 'date_modified',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Record',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text=b"Actual name of a record. Must not end in a '.' and be fully qualified - it is not relative to the name of the domain! For example: www.test.com (no trailing dot)", max_length=255, null=True, verbose_name='name', db_index=True)),
('type', models.CharField(choices=[(b'SOA', b'SOA'), (b'NS', b'NS'), (b'MX', b'MX'), (b'A', b'A'), (b'AAAA', b'AAAA'), (b'CNAME', b'CNAME'), (b'PTR', b'PTR'), (b'TXT', b'TXT'), (b'SPF', b'SPF'), (b'SRV', b'SRV')], max_length=10, help_text=b'Select the type of the resource record.', null=True, verbose_name='type', db_index=True)),
('content', models.CharField(help_text=b"This is the 'right hand side' of a DNS record. For an A record, this is the IP address for example.", max_length=64000, null=True, verbose_name='content')),
('ttl', models.PositiveIntegerField(help_text=b'How long the DNS-client are allowed to remember this record. Also known as Time To Live(TTL) This value is in seconds.', max_length=11, null=True, verbose_name='TTL', blank=True)),
('prio', models.PositiveIntegerField(help_text=b'For MX records, this should be the priority of the mail exchanger specified.', max_length=11, null=True, verbose_name='priority')),
('auth', models.NullBooleanField(help_text=b"The 'auth' field should be set to '1' for data for which the zone itself is authoritative, which includes the SOA record and its own NS records. The 'auth' field should be 0 however for NS records which are used for delegation, and also for any glue (A, AAAA) records present for this purpose. Do note that the DS record for a secure delegation should be authoritative!", verbose_name='authoritative')),
('ordername', models.CharField(help_text=b'http://doc.powerdns.com/dnssec-modes.html#dnssec-direct-database', max_length=255, null=True, verbose_name='ordername', db_index=True)),
('change_date', models.PositiveIntegerField(help_text=b'Timestamp for the last update. This is used by PowerDNS internally.', max_length=11, null=True, verbose_name='change date')),
('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
('domain', models.ForeignKey(related_name='powerdns_manager_record_domain', verbose_name='domain', to='powerdns_manager.Domain', help_text='Select the domain this record belongs to.')),
],
options={
'get_latest_by': 'date_modified',
'ordering': ['name', 'type'],
'verbose_name_plural': 'records',
'db_table': 'records',
'verbose_name': 'record',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SuperMaster',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ip', models.GenericIPAddressField(help_text=b'IP address for supermaster (IPv4 or IPv6).', unique=True, verbose_name='IP address')),
('nameserver', models.CharField(help_text=b'Hostname of the supermaster.', unique=True, max_length=255, verbose_name='nameserver')),
('account', models.CharField(help_text=b'Account name (???)', max_length=40, null=True, verbose_name='account', blank=True)),
('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
],
options={
'ordering': ['nameserver'],
'db_table': 'supermasters',
'verbose_name': 'supermaster',
'verbose_name_plural': 'supermasters',
'get_latest_by': 'date_modified',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='TsigKey',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text=b'Enter a name for the key.', max_length=255, verbose_name='name')),
('algorithm', models.CharField(help_text=b'Select hashing algorithm.', max_length=50, verbose_name='algorithm', choices=[(b'hmac-md5', b'hmac-md5')])),
('secret', models.CharField(help_text=b'Enter the shared secret.', max_length=255, verbose_name='secret')),
('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
('created_by', models.ForeignKey(related_name='powerdns_manager_tsigkey_created_by', verbose_name='created by', to=settings.AUTH_USER_MODEL, help_text=b'The Django user this TSIG key belongs to.', null=True)),
],
options={
'get_latest_by': 'date_modified',
'ordering': ['name'],
'verbose_name_plural': 'TSIG Keys',
'db_table': 'tsigkeys',
'verbose_name': 'TSIG Key',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ZoneTemplate',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text=b'Enter a name for the template.', max_length=100, verbose_name='name')),
('content', models.TextField(help_text=b'Enter the template content. The placeholder #origin# is expanded to the origin of the zone to which the template is applied.', null=True, verbose_name='content', blank=True)),
('notes', models.TextField(help_text=b'Space for notes about the template.', null=True, verbose_name='notes', blank=True)),
('date_modified', models.DateTimeField(auto_now=True, verbose_name='Last Modified')),
('created_by', models.ForeignKey(related_name='powerdns_manager_zonetemplate_created_by', verbose_name='template creator', to=settings.AUTH_USER_MODEL, help_text=b'The Django user this template belongs to.')),
],
options={
'get_latest_by': 'date_modified',
'ordering': ['name', 'date_modified'],
'verbose_name_plural': 'templates',
'db_table': 'zonetemplates',
'verbose_name': 'template',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='zonetemplate',
unique_together=set([('name', 'created_by')]),
),
migrations.AlterUniqueTogether(
name='tsigkey',
unique_together=set([('name', 'algorithm')]),
),
migrations.AlterIndexTogether(
name='record',
index_together=set([('domain', 'ordername'), ('name', 'type')]),
),
migrations.AddField(
model_name='cryptokey',
name='domain',
field=models.ForeignKey(related_name='powerdns_manager_cryptokey_domain', verbose_name='domain', to='powerdns_manager.Domain', help_text='Select the domain this record belongs to.'),
preserve_default=True,
),
]
|
import argparse
import base64
import binascii
import copy
import hashlib
import inspect
import io
import os
import shlex
import struct
import sys
import time
import zlib
import string
__version__ = "0.1 dev"
PYTHON2 = sys.version_info[0] < 3 # True if on pre-Python 3
def add_crc(args):
fp = open(args.filename, "r+b")
fp.seek(0, 0)
file_content = fp.read()
crc32_result = zlib.crc32(file_content)
fp.seek(0, 2)
fp.write(struct.pack('>I', crc32_result))
print("Firmware CRC32: " + hex(crc32_result))
fp.close()
def combine(args):
    # Combine uart_boot with the application firmware: take uart_boot as-is,
    # overlay the source firmware from offset 0x4000 onward, then copy the first
    # 0x4000 bytes of the source firmware to offset 0x2c000.
    ub = open(args.uart_boot, "rb")
    sf = open(args.src_firmware, "rb")
    df = open(args.dst_firmware, "w+b")
    df.write(ub.read())
    ub.close()
    df.seek(0x4000, 0)
    sf.seek(0x4000, 0)
    df.write(sf.read())
    df.seek(0x2c000, 0)
    sf.seek(0x00000, 0)
    df.write(sf.read(0x4000))
    sf.close()
df.seek(0, 0)
file_content = df.read()
crc32_result = zlib.crc32(file_content) & 0xffffffff
df.seek(0, 2)
df.write(struct.pack('>I', crc32_result))
print("Firmware CRC32: " + hex(crc32_result))
df.close()
def main(custom_commandline=None):
parser = argparse.ArgumentParser(description='tl_fireware_tools.py v%s - Telink BLE Firmware Utility' % __version__)
subparsers = parser.add_subparsers(dest='operation', help='Run tl_fireware_tools.py -h for additional help')
add_crc = subparsers.add_parser('add_crc', help='Add CRC32 check to the file tail')
add_crc.add_argument('filename', help='Firmware image')
combine = subparsers.add_parser('combine', help='Combine Firmware with uart_boot')
combine.add_argument('uart_boot', help='uart_boot firmware image')
combine.add_argument('src_firmware', help='source firmware image')
combine.add_argument('dst_firmware', help='target firmware image')
args = parser.parse_args(custom_commandline)
print('tl_fireware_tools.py v%s' % __version__)
if args.operation is None:
parser.print_help()
sys.exit(1)
operation_func = globals()[args.operation]
operation_func(args)
def _main():
#try:
main()
# except FatalError as e:
# print('\nA fatal error occurred: %s' % e)
# sys.exit(2)
if __name__ == '__main__':
_main() |
import stylecloud
ip_files = ('top_authors_2019.csv', 'top_authors_2021.csv')
op_files = ('top_authors_2019.png', 'top_authors_2021.png')
for ip_file, op_file in zip(ip_files, op_files):
stylecloud.gen_stylecloud(file_path=ip_file,
icon_name='fas fa-book-open',
background_color='black',
gradient='horizontal',
output_name=op_file)
|
class KitsuError(Exception):
pass
|
class Load:
def __init__(self) -> None:
        pass
def __str__(self) -> str:
return str(self.__dict__)
|
PACKAGE_NAME = "pyreess"
__version__ = "1.0"
DESCRIPTION = "CLI application for deterministic password generation and recall"
|
from vampire.modules.encoder import *
from vampire.modules.pretrained_vae import PretrainedVAE
from vampire.modules.token_embedders.vampire_token_embedder import VampireTokenEmbedder
from vampire.modules.vae import LogisticNormal
from vampire.modules.vae import VAE
|
from paraview.simple import *
import paraview.benchmark as pvb
pvb.logbase.maximize_logs()
w = Wavelet()
w.UpdatePipeline()
pvb.logbase.get_logs()
pvb.logbase.print_logs()
pvb.logbase.dump_logs("benchmark.log")
pvb.logbase.import_logs("benchmark.log")
print('='*40)
print('Raw logs:')
print('='*40)
pvb.logbase.print_logs()
print('='*40)
print('Parsed logs:')
print('='*40)
comp_rank_frame_logs = pvb.logparser.process_logs()
for c, rank_frame_logs in comp_rank_frame_logs.items():
print('-'*40)
print('Component: %s' % c)
print('-'*40)
for r in range(0,len(rank_frame_logs)):
print('.'*40)
print(' Rank: %s' % r)
print('.'*40)
for f in rank_frame_logs[r]:
print(f)
print('SUCCESS')
|
#!/usr/bin/env python
# encoding: utf-8
from collections import namedtuple
def _create_named_tuple(name, values):
'''Helper function for creating a named tuple with the first
letter of each of the values as the names of its fields.
:param name: the name of the class that is generated
:param values: the values to instantiate the named tuple with
:returns: a named tuple
'''
return namedtuple(name, (v[0] for v in values))(*values)
# Using a named tuple guarantees the order of the colors
Color = _create_named_tuple('Color',
('red', 'green', 'yellow', 'blue', 'white', 'magenta', 'cyan', 'orange', 'purple'))
# The one-letter representations of the colors
colors_short = Color._fields
colors_short_set = set(colors_short)
# The full names of the colors
colors_long = Color._asdict().values()
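if __name__ == '__main__':
    # Quick demonstration (sketch): each field name is the first letter of a color,
    # so e.g. Color.r == 'red', and the short and long forms line up positionally.
    print(colors_short)   # ('r', 'g', 'y', 'b', 'w', 'm', 'c', 'o', 'p')
    print(list(colors_long))
    print(Color.r, Color.b)  # red blue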
|
import configparser
config = configparser.ConfigParser()
config.read('example.cfg')
print(config.sections())
for key in config['SectionOne']:
    print(key)
print(config['SectionOne']['status'])
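# A matching example.cfg might look like this (assumed contents, for illustration only):
#
#   [SectionOne]
#   status = enabled
#   name = demo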
|
# This sample tests the "pseudo-generic class" functionality,
# where a class is made into a generic class in cases where
# it has no annotated constructor parameters.
# We use "strict" here because we want to ensure that there are
# no "unknown" types remaining in this file.
# pyright: strict
class Foo:
def __init__(self, a, b = 'hello'):
self._foo_a = a
self._foo_b = b
@property
def value_a(self):
return self._foo_a
@property
def value_b(self):
return self._foo_b
def test_function(a: int, b: str):
return
foo1 = Foo(27)
int_value_1 = foo1.value_a
str_value_1 = foo1.value_b
test_function(int_value_1, str_value_1)
foo2 = Foo("hello", 27)
str_value_2 = foo2.value_a
int_value_2 = foo2.value_b
test_function(int_value_2, str_value_2)
|
# coding: utf-8
import socketserver
import sys
import os
# Copyright 2013 Abram Hindle, Eddie Antonio Santos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
# Copyright 2020 Yun Tai Liu, Tian Qi Wang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2013 Python Software
# Foundation; All Rights Reserved
class MyWebServer(socketserver.BaseRequestHandler):
source_dir = os.getcwd() + '/www/' # suggested by twang2(Tian Qi Wang)
def handle(self):
self.data = self.request.recv(1024).strip()
        request, header = self.data.decode('utf-8').split('\r\n', 1)
        method, req_dir, http_header = request.split()
        if method != 'GET':
            self.MethodForbiddenError()
        else:
            # Only reached when the method is GET
            self.get_file(req_dir)
    def get_file(self, req_dir):
        # Reject path traversal attempts before deciding on a mimetype.
        if "../../" in req_dir:
            self.finalize(
                (self.res_header(404, 'Not Found', "text/html")))
            return
        if ".html" in req_dir:
            mimetype = 'text/html'
        elif ".css" in req_dir:
            mimetype = 'text/css'
        elif req_dir[-1] == "/":
            mimetype = 'text/html'
            req_dir += "index.html"  # serves the index file in that folder
        else:
            mimetype = 'text/html'
        # reference: https://pythonexamples.org/python-check-if-path-is-file-or-directory/
try: # the normal case
f_name = MyWebServer.source_dir+(req_dir)
file = open(f_name, 'r')
content = file.read()
self.finalize((self.res_header(200, 'OK', mimetype)+content))
file.close()
except FileNotFoundError:
content = "<html><body><h1 style = 'text-align:center'>404 NOT FOUND ERROR</h1></body></html>"
self.finalize(
(self.res_header(404, 'Not Found', mimetype)+content))
except IsADirectoryError:
if req_dir[-1] != '/':
f_name = MyWebServer.source_dir+req_dir+'/index.html'
file = open(f_name)
content = file.read()
file.close()
self.finalize(
(self.res_header(301, 'Moved Permanently', mimetype)+content))
# helper method
def res_header(self, status_code, status_desc, mimetype):
res_header_str = (
'HTTP/1.1 '+str(status_code) + ' ' + status_desc + '\r\n' +
'Content-Type: '+mimetype + '\r\n\r\n'
)
# print(res_header_str)
return res_header_str
# helper method
def finalize(self, b_array):
self.request.sendall(bytearray(
b_array, 'utf-8'
))
def MethodForbiddenError(self):
details = self.res_header(
405, 'Method Not Allowed', 'text/html')
content = "<html><body><h1 style = 'text-align:center'>405 Method Not Allowed --- Only 'GET' is Allowed !!!</h1></body></html>"
feedback = details + content
print(feedback)
self.finalize(feedback)
def Error301(self, directory):
details = self.res_header(
301, 'Moved Permanently\r\nRedireted to : '+directory+'\r\n', 'text/html')
self.finalize(details)
# DO NOT MODIFY
if __name__ == "__main__":
HOST, PORT = "localhost", 8080
socketserver.TCPServer.allow_reuse_address = True
# Create the server, binding to localhost on port 8080
server = socketserver.TCPServer((HOST, PORT), MyWebServer)
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
server.serve_forever()
|
#!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: routeros_facts
version_added: "2.8"
author: "Egor Zaitsev (@heuels)"
short_description: Collect facts from remote devices running MikroTik RouterOS
description:
- Collects a base set of device facts from a remote device that
    is running RouterOS. This module prepends all of the
base network fact keys with C(ansible_net_<fact>). The facts
module will always collect a base set of facts from the device
and can enable or disable collection of additional facts.
options:
gather_subset:
description:
- When supplied, this argument will restrict the facts collected
to a given subset. Possible values for this argument include
C(all), C(hardware), C(config), and C(interfaces). Can specify a list of
values to include a larger subset. Values can also be used
with an initial C(!) to specify that a specific subset should
not be collected.
required: false
default: '!config'
"""
EXAMPLES = """
# Collect all facts from the device
- routeros_facts:
gather_subset: all
# Collect only the config and default facts
- routeros_facts:
gather_subset:
- config
# Do not collect hardware facts
- routeros_facts:
gather_subset:
- "!hardware"
"""
RETURN = """
ansible_net_gather_subset:
description: The list of fact subsets collected from the device
returned: always
type: list
# default
ansible_net_model:
description: The model name returned from the device
returned: always
type: str
ansible_net_serialnum:
description: The serial number of the remote device
returned: always
type: str
ansible_net_version:
description: The operating system version running on the remote device
returned: always
type: str
ansible_net_hostname:
description: The configured hostname of the device
returned: always
type: str
# hardware
ansible_net_spacefree_mb:
description: The available disk space on the remote device in MiB
returned: when hardware is configured
type: dict
ansible_net_spacetotal_mb:
description: The total disk space on the remote device in MiB
returned: when hardware is configured
type: dict
ansible_net_memfree_mb:
description: The available free memory on the remote device in MiB
returned: when hardware is configured
type: int
ansible_net_memtotal_mb:
description: The total memory on the remote device in MiB
returned: when hardware is configured
type: int
# config
ansible_net_config:
description: The current active config from the device
returned: when config is configured
type: str
# interfaces
ansible_net_all_ipv4_addresses:
description: All IPv4 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_all_ipv6_addresses:
description: All IPv6 addresses configured on the device
returned: when interfaces is configured
type: list
ansible_net_interfaces:
description: A hash of all interfaces running on the system
returned: when interfaces is configured
type: dict
ansible_net_neighbors:
description: The list of neighbors from the remote device
returned: when interfaces is configured
type: dict
"""
import re
from ansible.module_utils.network.routeros.routeros import run_commands
from ansible.module_utils.network.routeros.routeros import routeros_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
class FactsBase(object):
COMMANDS = list()
def __init__(self, module):
self.module = module
self.facts = dict()
self.responses = None
def populate(self):
self.responses = run_commands(self.module, commands=self.COMMANDS, check_rc=False)
def run(self, cmd):
return run_commands(self.module, commands=cmd, check_rc=False)
class Default(FactsBase):
COMMANDS = [
'/system identity print without-paging',
'/system resource print without-paging',
'/system routerboard print without-paging'
]
def populate(self):
super(Default, self).populate()
data = self.responses[0]
if data:
self.facts['hostname'] = self.parse_hostname(data)
data = self.responses[1]
if data:
self.facts['version'] = self.parse_version(data)
data = self.responses[2]
if data:
self.facts['model'] = self.parse_model(data)
self.facts['serialnum'] = self.parse_serialnum(data)
def parse_hostname(self, data):
match = re.search(r'name:\s(.*)\s*$', data, re.M)
if match:
return match.group(1)
def parse_version(self, data):
match = re.search(r'version:\s(.*)\s*$', data, re.M)
if match:
return match.group(1)
def parse_model(self, data):
match = re.search(r'model:\s(.*)\s*$', data, re.M)
if match:
return match.group(1)
def parse_serialnum(self, data):
match = re.search(r'serial-number:\s(.*)\s*$', data, re.M)
if match:
return match.group(1)
class Hardware(FactsBase):
COMMANDS = [
'/system resource print without-paging'
]
def populate(self):
super(Hardware, self).populate()
data = self.responses[0]
if data:
self.parse_filesystem_info(data)
self.parse_memory_info(data)
def parse_filesystem_info(self, data):
match = re.search(r'free-hdd-space:\s(.*)([KMG]iB)', data, re.M)
if match:
self.facts['spacefree_mb'] = self.to_megabytes(match)
match = re.search(r'total-hdd-space:\s(.*)([KMG]iB)', data, re.M)
if match:
self.facts['spacetotal_mb'] = self.to_megabytes(match)
def parse_memory_info(self, data):
match = re.search(r'free-memory:\s(\d+\.?\d*)([KMG]iB)', data, re.M)
if match:
self.facts['memfree_mb'] = self.to_megabytes(match)
match = re.search(r'total-memory:\s(\d+\.?\d*)([KMG]iB)', data, re.M)
if match:
self.facts['memtotal_mb'] = self.to_megabytes(match)
def to_megabytes(self, data):
if data.group(2) == 'KiB':
return float(data.group(1)) / 1024
elif data.group(2) == 'MiB':
return float(data.group(1))
elif data.group(2) == 'GiB':
return float(data.group(1)) * 1024
else:
return None
class Config(FactsBase):
COMMANDS = ['/export']
def populate(self):
super(Config, self).populate()
data = self.responses[0]
if data:
self.facts['config'] = data
class Interfaces(FactsBase):
COMMANDS = [
'/interface print detail without-paging',
'/ip address print detail without-paging',
'/ipv6 address print detail without-paging',
'/ip neighbor print detail without-paging'
]
DETAIL_RE = re.compile(r'([\w\d\-]+)=\"?(\w{3}/\d{2}/\d{4}\s\d{2}:\d{2}:\d{2}|[\w\d\-\.:/]+)')
def populate(self):
super(Interfaces, self).populate()
self.facts['interfaces'] = dict()
self.facts['all_ipv4_addresses'] = list()
self.facts['all_ipv6_addresses'] = list()
self.facts['neighbors'] = dict()
data = self.responses[0]
if data:
interfaces = self.parse_interfaces(data)
self.populate_interfaces(interfaces)
data = self.responses[1]
if data:
data = self.parse_addresses(data)
self.populate_ipv4_interfaces(data)
data = self.responses[2]
if data:
data = self.parse_addresses(data)
self.populate_ipv6_interfaces(data)
data = self.responses[3]
if data:
self.facts['neighbors'] = self.parse_neighbors(data)
def populate_interfaces(self, data):
for key, value in iteritems(data):
self.facts['interfaces'][key] = value
def populate_ipv4_interfaces(self, data):
for key, value in iteritems(data):
if 'ipv4' not in self.facts['interfaces'][key]:
self.facts['interfaces'][key]['ipv4'] = list()
addr, subnet = value['address'].split("/")
ipv4 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv4')
self.facts['interfaces'][key]['ipv4'].append(ipv4)
def populate_ipv6_interfaces(self, data):
for key, value in iteritems(data):
if 'ipv6' not in self.facts['interfaces'][key]:
self.facts['interfaces'][key]['ipv6'] = list()
addr, subnet = value['address'].split("/")
ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
self.add_ip_address(addr.strip(), 'ipv6')
self.facts['interfaces'][key]['ipv6'].append(ipv6)
def add_ip_address(self, address, family):
if family == 'ipv4':
self.facts['all_ipv4_addresses'].append(address)
else:
self.facts['all_ipv6_addresses'].append(address)
def preprocess(self, data):
preprocessed = list()
for line in data.split('\n'):
if len(line) == 0 or line[:5] == 'Flags':
continue
elif re.match(r'\s\d', line[:2]):
preprocessed.append(line[2:])
else:
preprocessed[-1] += line
return preprocessed
def parse_interfaces(self, data):
facts = dict()
data = self.preprocess(data)
for line in data:
name = self.parse_name(line)
facts[name] = dict()
for (key, value) in re.findall(self.DETAIL_RE, line):
facts[name][key] = value
return facts
def parse_addresses(self, data):
facts = dict()
data = self.preprocess(data)
for line in data:
name = self.parse_interface(line)
facts[name] = dict()
for (key, value) in re.findall(self.DETAIL_RE, line):
facts[name][key] = value
return facts
def parse_neighbors(self, data):
facts = dict()
data = self.preprocess(data)
for line in data:
name = self.parse_interface(line)
facts[name] = dict()
for (key, value) in re.findall(self.DETAIL_RE, line):
facts[name][key] = value
return facts
def parse_name(self, data):
match = re.search(r'name=\"([\w\d\-]+)\"', data, re.M)
if match:
return match.group(1)
def parse_interface(self, data):
match = re.search(r'interface=([\w\d\-]+)', data, re.M)
if match:
return match.group(1)
FACT_SUBSETS = dict(
default=Default,
hardware=Hardware,
interfaces=Interfaces,
config=Config,
)
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
warnings = list()
def main():
"""main entry point for module execution
"""
argument_spec = dict(
gather_subset=dict(default=['!config'], type='list')
)
argument_spec.update(routeros_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
gather_subset = module.params['gather_subset']
runable_subsets = set()
exclude_subsets = set()
for subset in gather_subset:
if subset == 'all':
runable_subsets.update(VALID_SUBSETS)
continue
if subset.startswith('!'):
subset = subset[1:]
if subset == 'all':
exclude_subsets.update(VALID_SUBSETS)
continue
exclude = True
else:
exclude = False
if subset not in VALID_SUBSETS:
module.fail_json(msg='Bad subset: %s' % subset)
if exclude:
exclude_subsets.add(subset)
else:
runable_subsets.add(subset)
if not runable_subsets:
runable_subsets.update(VALID_SUBSETS)
runable_subsets.difference_update(exclude_subsets)
runable_subsets.add('default')
facts = dict()
facts['gather_subset'] = list(runable_subsets)
instances = list()
for key in runable_subsets:
instances.append(FACT_SUBSETS[key](module))
for inst in instances:
inst.populate()
facts.update(inst.facts)
ansible_facts = dict()
for key, value in iteritems(facts):
key = 'ansible_net_%s' % key
ansible_facts[key] = value
module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
if __name__ == '__main__':
main()
|
from PIL import Image
import pandas as pd
import numpy as np
import torch.utils.data
from collections import OrderedDict
import torch
def rgb_loader(path):
return Image.open(path).convert('RGB')
class VideoDataset(torch.utils.data.Dataset):
def __init__(self, datalist_config, transforms):
        '''
        Dataset that builds image sequences grouped by ``group_column``.
        :param datalist_config: datalist configuration (e.g. datalist path, data/target
            columns, group_column, sequence_transforms, video_len, random_start)
        :param transforms: transforms applied to each assembled image sequence
        '''
self.datalist_config = datalist_config
self.group_column = self.datalist_config.group_column
self.transforms = transforms
self.seq_transforms = self.datalist_config.sequence_transforms
self.df = self._read_list()
self._init_groups()
def _init_groups(self):
group_ids = self.df[self.group_column].unique()
for batch_idx, group_id in enumerate(group_ids):
subdf = self.df[self.df[self.group_column] == group_id]
self.df.loc[subdf.index.values, 'batch_idx'] = batch_idx
def __getitem__(self, index):
item_dict = OrderedDict()
video_subdf = self.df[self.df.batch_idx == index]
for column, column_name in self.data_columns:
all_images = []
for img_path in video_subdf[column].values:
all_images.append(img_path)
item_dict[column_name] = all_images
for column, column_name in self.target_columns:
item_dict[column_name] = video_subdf[column].iloc[0]
if self.seq_transforms is not None:
item_dict = self.seq_transforms(item_dict)
for _, column_name in self.data_columns:
item_dict[column_name] = [rgb_loader(x) for x in item_dict[column_name]]
for _, column_name in self.target_columns:
item_dict[column_name] = torch.Tensor([item_dict[column_name]])
if self.transforms is not None:
item_dict = self.transforms(item_dict)
item_dict['video_id'] = str(video_subdf[self.group_column].iloc[0])
return item_dict
def __len__(self):
return len(self.df[self.group_column].unique())
def _read_list(self):
data_df = pd.read_csv(self.datalist_config.datalist_path)
data_df = data_df[data_df[self.datalist_config.protocol_name]]
if isinstance(self.datalist_config.data_columns, list):
self.data_columns = self.datalist_config.data_columns
elif isinstance(self.datalist_config.data_columns, tuple):
self.data_columns = [self.datalist_config.data_columns]
elif isinstance(self.datalist_config.data_columns, str):
self.data_columns = [(self.datalist_config.data_columns,
self.datalist_config.data_columns)]
else:
raise Exception('Unknown columns types in dataset')
if isinstance(self.datalist_config.target_columns, list):
self.target_columns = self.datalist_config.target_columns
elif isinstance(self.datalist_config.target_columns, tuple):
self.target_columns = [self.datalist_config.target_columns]
elif isinstance(self.datalist_config.target_columns, str):
self.target_columns = [(self.datalist_config.target_columns,
self.datalist_config.target_columns)]
else:
raise Exception('Unknown columns types in dataset')
needed_columns = [x[0] for x in self.data_columns]
needed_columns = needed_columns + [x[0] for x in self.target_columns]
needed_columns = list(set(needed_columns))
needed_columns.append(self.group_column)
data_df = data_df[needed_columns]
return data_df
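if __name__ == '__main__':
    # Minimal sketch with an assumed datalist layout: the column names, protocol
    # flag and config shape below are illustrative, not part of any original
    # training configuration. Indexing into the dataset would additionally require
    # the referenced image files to exist on disk; here we only check grouping.
    import os
    import tempfile
    from types import SimpleNamespace
    tmp_csv = os.path.join(tempfile.mkdtemp(), 'datalist.csv')
    pd.DataFrame({
        'path': ['vid_a_0.png', 'vid_a_1.png', 'vid_b_0.png'],
        'label': [1, 1, 0],
        'video_id': ['a', 'a', 'b'],
        'train': [True, True, True],
    }).to_csv(tmp_csv, index=False)
    config = SimpleNamespace(
        datalist_path=tmp_csv,
        protocol_name='train',
        data_columns=[('path', 'images')],
        target_columns=[('label', 'target')],
        group_column='video_id',
        sequence_transforms=None,
    )
    dataset = VideoDataset(config, transforms=None)
    print(len(dataset))  # 2 groups -> 2 sequences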
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from cinder.tests.functional.api_sample_tests import test_volumes
class VolumeSnapshotsSampleJsonTest(test_volumes.VolumesSampleBase):
sample_dir = "snapshots"
def setUp(self):
super(VolumeSnapshotsSampleJsonTest, self).setUp()
res = self._create_volume()
res = jsonutils.loads(res.content)['volume']
self._poll_volume_while(res['id'], ['creating'])
self.subs = {
"volume_id": res['id']
}
self.response = self._create_snapshot(self.subs)
def _create_snapshot(self, subs=None):
response = self._do_post('snapshots',
'snapshot-create-request',
subs)
return response
def test_snapshot_list_detail(self):
response = self._do_get('snapshots/detail')
self._verify_response('snapshots-list-detailed-response',
{}, response, 200)
def test_snapshot_create(self):
self._verify_response('snapshot-create-response',
{}, self.response, 202)
def test_snapshot_list(self):
response = self._do_get('snapshots')
self._verify_response('snapshots-list-response',
{}, response, 200)
def test_snapshot_metadata_show(self):
res = jsonutils.loads(self.response.content)['snapshot']
response = self._do_get('snapshots/%s/metadata' % res['id'])
self._verify_response('snapshot-metadata-show-response',
{}, response, 200)
def test_snapshot_metadata_create(self):
res = jsonutils.loads(self.response.content)['snapshot']
response = self._do_post('snapshots/%s/metadata' % res['id'],
'snapshot-metadata-create-request')
self._verify_response('snapshot-metadata-create-response',
{}, response, 200)
def test_snapshot_metadata_update(self):
res = jsonutils.loads(self.response.content)['snapshot']
response = self._do_put('snapshots/%s/metadata' % res['id'],
'snapshot-metadata-update-request')
self._verify_response('snapshot-metadata-update-response',
{}, response, 200)
def test_snapshot_show(self):
res = jsonutils.loads(self.response.content)['snapshot']
response = self._do_get('snapshots/%s' % res['id'])
self._verify_response('snapshot-show-response',
{}, response, 200)
def test_snapshot_update(self):
res = jsonutils.loads(self.response.content)['snapshot']
response = self._do_put('snapshots/%s' % res['id'],
'snapshot-update-request')
self._verify_response('snapshot-update-response',
{}, response, 200)
def test_snapshot_metadata_show_specific_key(self):
res = jsonutils.loads(self.response.content)['snapshot']
response = self._do_get('snapshots/%s/metadata/key' % res['id'])
self._verify_response('snapshot-metadata-show-key-response',
{}, response, 200)
def test_snapshot_metadata_update_specific_key(self):
res = jsonutils.loads(self.response.content)['snapshot']
response = self._do_put('snapshots/%s/metadata/key' % res['id'],
'snapshot-metadata-update-key-request')
self._verify_response('snapshot-metadata-update-key-response',
{}, response, 200)
|
import os, sys
import argparse
import numpy as np
def convert_to_conll(npz_file, bpe_file, conll_file):
output=np.load(npz_file)
labels=output['labels']
deps =output['deps']
f_conll=open(conll_file, 'w')
idx=0
for idx, bpe in enumerate( open(bpe_file, 'r') ):
#print(idx, bpe.strip())
words = bpe.replace('@@', '').split()
bpe_words = [ s.replace('@@', ' ') for s in bpe.split() ]
bpe_word_len = np.array( [ len(s.split()) for s in bpe_words ] )
#bpe_word_len[0] = 0 # Fix up first entry idx=0
bpe_word_idx = np.cumsum(bpe_word_len)[:-2] # Take off ending
print(idx, bpe_words)
#print(idx, bpe_word_idx, bpe_words)
#print(idx, bpe_word_len, bpe_word_idx, bpe_words)
labels_idx = labels[idx,:]
deps_idx = deps[idx,:]
# Ok, so now let's go through the indices and look at the values
line_i=0
for i in bpe_word_idx:
line_i+=1
label = labels_idx[i]
dep = deps_idx[i] # Ahh - but this is in bpe units ... translate to words...
dep_word_idx = np.searchsorted(bpe_word_idx, dep, side='right')
if dep!=dep_word_idx:
print(i, label, dep, dep_word_idx)
parent_id_str = str(dep_word_idx)
rel, prop ='_', '_'
if label==0:
parent_id_str = '_'
elif label==1:
rel='same'
elif label==2:
parent_id_str, prop = '0', 'OBJ'
elif label==3:
rel, prop = 'OBJT', 'OBJ'
elif label==4:
rel, prop = 'ATTR', 'ATTR'
elif label==5:
rel, prop = 'PRED', 'PRED'
# node_id, node_word, parent_id_str, rel, prop = each
conll_line = "%d\t%s\t%s\t%s\t%s" % (line_i, words[line_i], parent_id_str, rel, prop,)
print(conll_line)
if dep_word_idx>len( bpe_word_idx ):
exit(0)
f_conll.write(conll_line+'\n')
f_conll.write('\n')
#if idx>15: break
#if idx>9645: break
#if idx>9820: break
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default='./bist-parser/preprocess/output/')
parser.add_argument('--npz', type=str, default='coco_dev.conll_v32.hdf5_v32.npz')
parser.add_argument('--bpe', type=str, default='coco_dev.conll_v32.bpe')
parser.add_argument('--conll', type=str, default='coco_dev.conll_v32')
args = parser.parse_args()
print(args)
convert_to_conll( args.path+args.npz, args.path+args.bpe, args.path+args.conll)
|
redirect_domain = 'brewgrindwater.com'
|
from courier.models import UserSendTime
from fuauth.models import User
from functional_tests.utils import SeleniumTestCase
class TestAdmin(SeleniumTestCase):
"""
Tests for the FiveUp admin dashboard.
"""
def setUp(self):
self.admin_user = User.objects.create_superuser(
name="Noofie Admin",
phone_number="777-777-7777",
carrier="ATT",
password="password",
user_timezone="HAWAII",
email="[email protected]",
)
self.plain_jane = User.objects.create_user(
name="Jane",
phone_number="777-777-7771",
carrier="ATT",
password="password1",
user_timezone="HAWAII",
email="[email protected]",
)
self.plain_jane_send_time = UserSendTime.objects.create(user=self.plain_jane)
def test_non_admin_cannot_log_in(self):
"""
Non-admin users should not be able to login to the admin page.
"""
with self.wait_for_page_load():
self.browser.get(self.live_server_url + "/admin/")
self.browser.find_element_by_name("username").send_keys(self.plain_jane.email)
self.browser.find_element_by_name("password").send_keys("password")
with self.wait_for_page_load():
self.browser.find_element_by_css_selector("*[type=submit]").click()
text = self.browser.find_element_by_tag_name("body").text
warning = "Please enter the correct your email address and password for a staff account. Note that both fields may be case-sensitive."
assert warning in text
def test_admin_page_renders_for_admin_user(self):
"""
Test that the admin interface renders.
"""
with self.wait_for_page_load():
self.browser.get(self.live_server_url + "/admin/")
self.browser.find_element_by_name("username").send_keys(self.admin_user.email)
self.browser.find_element_by_name("password").send_keys("password")
with self.wait_for_page_load():
self.browser.find_element_by_css_selector("*[type=submit]").click()
text = self.browser.find_element_by_tag_name("body").text
page_items = [
"Django administration",
"WELCOME, NOOFIE ADMIN.",
"CHANGE PASSWORD / LOG OUT",
"Site administration",
"Groups",
"COURIER",
"EMAIL LOG",
"FUAUTH",
"Messages",
"Curated messages",
]
for item in page_items:
assert item in text
def test_custom_user_admin(self):
"""
        Test the custom user admin
"""
with self.wait_for_page_load():
self.browser.get(self.live_server_url + "/admin/")
self.browser.find_element_by_name("username").send_keys(self.admin_user.email)
self.browser.find_element_by_name("password").send_keys("password")
with self.wait_for_page_load():
self.browser.find_element_by_css_selector("*[type=submit]").click()
with self.wait_for_page_load():
self.browser.find_element_by_link_text("Users").click()
text = self.browser.find_element_by_tag_name("body").text
assert "[email protected]" in text
assert "Noofie Admin" in text
assert "[email protected]" in text
assert "Jane" in text
assert "2 users" in text
def test_send_time_admin(self):
"""
Test the send time admin
"""
with self.wait_for_page_load():
self.browser.get(self.live_server_url + "/admin/")
self.browser.find_element_by_name("username").send_keys(self.admin_user.email)
self.browser.find_element_by_name("password").send_keys("password")
with self.wait_for_page_load():
self.browser.find_element_by_css_selector("*[type=submit]").click()
with self.wait_for_page_load():
self.browser.find_element_by_link_text("User send times").click()
text = self.browser.find_element_by_tag_name("body").text
assert "[email protected]" in text
assert "Jane" in text
|
from typing import Sequence
from ..base import BaseAPIClass
from ..libraries import Color
from .vector import Vector
from ..utils import to_dict, to_dict_seq
class Polygon(BaseAPIClass):
"""Represents the berkerdemoglu.engine.graphics.math.geometry.Polygon3D class."""
def __init__(self, color: Color, *vertices: Sequence[Vector]):
self.color = color
self._check_args(vertices)
self.vertices = vertices
def _check_args(self, vertices):
"""Check that nothing weird was passed as an argument."""
if not hasattr(vertices, '__iter__') and type(vertices) != str:
raise TypeError('A sequence must be provided as input')
for v in vertices:
if type(v) != Vector:
raise TypeError('Vectors must be provided as input')
if len(vertices) <= 2:
raise ValueError('2 or more vertices must be provided to make a polygon')
def as_dict(self):
d = {
'color': to_dict(self.color),
'vertices': to_dict_seq(self.vertices)
}
return d
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def sumOfLeftLeaves(self, root):
"""
Using DFS
        O(N) time and O(N) space: each node is pushed and popped once
:type root: TreeNode
:rtype: int
"""
if not root:
return 0
stack = []
stack.append((None, root))
ans = 0
while stack:
p, node = stack.pop()
if node.left:
stack.append((node, node.left))
if node.right:
stack.append((node, node.right))
            if node.left is None and node.right is None:
                if p is not None and p.left is node:
ans += node.val
return ans
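if __name__ == "__main__":
    # Small self-check (sketch): a local TreeNode mirroring the commented-out
    # definition above, built into the classic example tree.
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    root = TreeNode(3)
    root.left = TreeNode(9)
    root.right = TreeNode(20)
    root.right.left = TreeNode(15)
    root.right.right = TreeNode(7)
    print(Solution().sumOfLeftLeaves(root))  # 24 (left leaves 9 and 15)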
|
import chainer
from chainer.functions.array import broadcast
from chainer.functions.array import reshape
def scale(x, y, axis=1):
"""Elementwise product with broadcasting.
    Computes an elementwise product of two input variables, with the shape of
the latter variable broadcasted to match the shape of the former. ``axis``
is the first axis of the first variable along which the second variable is
applied.
The term "broadcasting" here comes from Caffe's scale layer so the
"broadcasting" with the following arguments::
x : 100 x 3 x 40 x 60
y : 3 x 40
axis : 1
is equivalent to the following numpy broadcasting::
x : 100 x 3 x 40 x 60
y : 1 x 3 x 40 x 1
    Note how the ``axis`` argument indicates to which axis of ``x`` we apply ``y``.
Args:
x (~chainer.Variable): Input variable to be scaled.
y (~chainer.Variable): Input variable to scale, broadcasted.
axis (int): The first axis of ``x`` along which ``y`` is applied.
Returns:
~chainer.Variable: Output variable.
"""
x_shape = x.shape
y_shape = y.shape
if chainer.is_debug():
assert x_shape[axis:axis + len(y_shape)] == y_shape
y1_shape = tuple([1] * axis + list(y_shape) +
[1] * (len(x_shape) - axis - len(y_shape)))
y1 = reshape.reshape(y, y1_shape)
y2 = broadcast.broadcast_to(y1, x_shape)
return x * y2
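if __name__ == '__main__':
    # Small usage sketch: plain NumPy arrays are accepted by Chainer functions,
    # and the result is a chainer.Variable with the same shape as ``x``.
    import numpy as np
    x = np.arange(2 * 3 * 4, dtype=np.float32).reshape(2, 3, 4)
    y = np.array([1.0, 10.0, 100.0], dtype=np.float32)  # broadcast along axis 1
    z = scale(x, y, axis=1)
    print(z.shape)  # (2, 3, 4)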
|
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This example shows how to use dlib's face recognition tool. This tool maps
# an image of a human face to a 128 dimensional vector space where images of
# the same person are near to each other and images from different people are
# far apart. Therefore, you can perform face recognition by mapping faces to
# the 128D space and then checking if their Euclidean distance is small
# enough.
#
# When using a distance threshold of 0.6, the dlib model obtains an accuracy
# of 99.38% on the standard LFW face recognition benchmark, which is
# comparable to other state-of-the-art methods for face recognition as of
# February 2017. This accuracy means that, when presented with a pair of face
# images, the tool will correctly identify if the pair belongs to the same
# person or is from different people 99.38% of the time.
#
# Finally, for an in-depth discussion of how dlib's tool works you should
# refer to the C++ example program dnn_face_recognition_ex.cpp and the
# attendant documentation referenced therein.
#
#
#
#
# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
# You can install dlib using the command:
# pip install dlib
#
# Alternatively, if you want to compile dlib yourself then go into the dlib
# root folder and run:
# python setup.py install
#
# Compiling dlib should work on any operating system so long as you have
# CMake installed. On Ubuntu, this can be done easily by running the
# command:
# sudo apt-get install cmake
#
# Also note that this example requires Numpy which can be installed
# via the command:
# pip install numpy
import sys
import os
import dlib
import glob
if len(sys.argv) != 4:
print(
"Call this program like this:\n"
" ./face_recognition.py shape_predictor_5_face_landmarks.dat dlib_face_recognition_resnet_model_v1.dat ../examples/faces\n"
"You can download a trained facial shape predictor and recognition model from:\n"
" http://dlib.net/files/shape_predictor_5_face_landmarks.dat.bz2\n"
" http://dlib.net/files/dlib_face_recognition_resnet_model_v1.dat.bz2")
exit()
predictor_path = sys.argv[1]
face_rec_model_path = sys.argv[2]
faces_folder_path = sys.argv[3]
# Load all the models we need: a detector to find the faces, a shape predictor
# to find face landmarks so we can precisely localize the face, and finally the
# face recognition model.
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)
win = dlib.image_window()
# Now process all the images
for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")):
print("Processing file: {}".format(f))
img = dlib.load_rgb_image(f)
win.clear_overlay()
win.set_image(img)
# Ask the detector to find the bounding boxes of each face. The 1 in the
# second argument indicates that we should upsample the image 1 time. This
# will make everything bigger and allow us to detect more faces.
dets = detector(img, 1)
print("Number of faces detected: {}".format(len(dets)))
# Now process each face we found.
for k, d in enumerate(dets):
print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
k, d.left(), d.top(), d.right(), d.bottom()))
# Get the landmarks/parts for the face in box d.
shape = sp(img, d)
# Draw the face landmarks on the screen so we can see what face is currently being processed.
win.clear_overlay()
win.add_overlay(d)
win.add_overlay(shape)
# Compute the 128D vector that describes the face in img identified by
# shape. In general, if two face descriptor vectors have a Euclidean
# distance between them less than 0.6 then they are from the same
# person, otherwise they are from different people. Here we just print
# the vector to the screen.
face_descriptor = facerec.compute_face_descriptor(img, shape)
print(face_descriptor)
# It should also be noted that you can also call this function like this:
# face_descriptor = facerec.compute_face_descriptor(img, shape, 100)
# The version of the call without the 100 gets 99.13% accuracy on LFW
# while the version with 100 gets 99.38%. However, the 100 makes the
# call 100x slower to execute, so choose whatever version you like. To
# explain a little, the 3rd argument tells the code how many times to
# jitter/resample the image. When you set it to 100 it executes the
# face descriptor extraction 100 times on slightly modified versions of
# the face and returns the average result. You could also pick a more
# middle value, such as 10, which is only 10x slower but still gets an
# LFW accuracy of 99.3%.
dlib.hit_enter_to_continue()
|
#!/usr/bin/env python
from assignment8 import StressStrainConverter
import numpy as np
import scipy.integrate
from PyTrilinos import Epetra
# +
class EpetraParallelToughness(StressStrainConverter):
def __init__(self, filename, comm):
super().__init__(filename)
self.comm = comm
self.rank = comm.MyPID()
self.size = comm.NumProc()
if self.rank == 0:
self.convert_to_true_stress_and_strain()
else:
self.true_stress = np.array([], dtype=np.double)
self.true_strain = np.array([], dtype=np.double)
# two maps:
# unbalanced map where all the data is on processor 0
# balanced map with no data
# import the unblanced map to the balanced map
# two vectors:
# unbalanced that's filled with data using unbalanced map
# balanced filled with no data using balanced map
# use import again for unbalanced vector to balanced vector
# then integrate
ub_map = Epetra.Map(-1, len(self.true_stress), 0, self.comm)
ub_strain = Epetra.Vector(Epetra.Copy, ub_map, self.true_strain)
ub_stress = Epetra.Vector(Epetra.Copy, ub_map, self.true_stress)
my_map = Epetra.Map(ub_map.NumGlobalElements(), 0, self.comm)
my_global_elements = my_map.MyGlobalElements()
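# Every rank except the last also claims the first global index owned by the
# next rank; without this one-element overlap the trapezoid segments that
# straddle processor boundaries would be dropped from the sum.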
if self.rank < (self.size-1):
my_global_elements = np.append(my_global_elements, [my_map.MaxMyGID()+1])
bal_map = Epetra.Map(-1, list(my_global_elements), 0, self.comm)
data = Epetra.Import(bal_map, ub_map)
self.true_stress = Epetra.Vector(bal_map)
self.true_strain = Epetra.Vector(bal_map)
self.true_stress.Import(ub_stress, data, Epetra.Insert)
self.true_strain.Import(ub_strain, data, Epetra.Insert)
def compute_toughness(self):
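# Each rank applies the trapezoid rule to its own (overlapping) slice and SumAll
# reduces the partial integrals so every rank ends up with the global toughness.
# Note: newer SciPy releases expose trapz as scipy.integrate.trapezoid.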
my_toughness = scipy.integrate.trapz(self.true_stress, self.true_strain)
return self.comm.SumAll(my_toughness)
# -
if __name__ == "__main__":
from PyTrilinos import Epetra
comm = Epetra.PyComm()
T = EpetraParallelToughness('data.dat', comm)
if comm.MyPID() == 0:
print(T.compute_toughness())
|
from pandas_profiling.report.presentation.core.overview import Overview
from pandas_profiling.report.presentation.flavours.html import templates
class HTMLOverview(Overview):
def render(self):
return templates.template("info.html").render(**self.content)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals
import argparse
import sys
import torch
from onmt.utils.misc import get_logger
from onmt.translate.translator import build_translator
from tqdm import tqdm
from modifications.util import file_cached_function
import json
import io
import os
class FakeOpt(object):
def __init__(self,
beam_size=5,
min_length=10,
max_length=100,
stepwise_penalty=False,
block_ngram_repeat=0,
ignore_when_blocking=[],
replace_unk=True,
model=None,
verbose=False,
report_bleu=False,
batch_size=30,
n_best=1,
dump_beam='',
dump_layers='',
gpu=0,
alpha=0,
beta=0,
length_penalty='none',
coverage_penalty='none',
data_type='text'):
self.alpha = alpha
self.beta = beta
self.length_penalty = length_penalty
self.coverage_penalty = coverage_penalty
self.beam_size = beam_size
self.n_best = n_best
self.max_length = max_length
self.min_length = min_length
self.stepwise_penalty = stepwise_penalty
self.block_ngram_repeat = block_ngram_repeat
self.ignore_when_blocking = ignore_when_blocking
self.dump_beam = dump_beam
self.dump_layers = dump_layers
self.report_bleu = report_bleu
self.data_type = data_type
self.replace_unk = replace_unk
self.batch_size = batch_size
self.gpu = gpu
self.verbose = verbose
self.model = model
def _modify(
corpus=None,
neurons=None,
values=None,
model=None):
opt = FakeOpt(model=model)
translator = build_translator(opt, report_score=False, logger=get_logger(), use_output=False)
sources, toggles = zip(*corpus)
print('Toggles before unpacking:')
print(toggles[:10])
_, toggles = zip(*toggles)
print(sources[:10], toggles[:10])
def intervene(layer_data, sentence_index, index):
for (layer, neuron), value in zip(neurons, values):
if index == layer:
for i in toggles[sentence_index]:
tqdm.write('Successfully modifying %d %d %d %f' % (i, layer, neuron, value))
layer_data[i][0][neuron] = value
return layer_data
modified = []
for i, source in enumerate(tqdm(sources)):
stream = io.StringIO()
# Logging:
tqdm.write('Source: %s' % ' '.join(source))
tqdm.write('Target: %s' % ' '.join(source[j] for j in toggles[i]))
translator.translate(src_data_iter=[' '.join(source)],
src_dir='',
batch_size=1,
attn_debug=False,
intervention=lambda l, j: intervene(l, i, j),
out_file=stream)
translation = stream.getvalue()
# Logging:
tqdm.write('Result: %s' % translation)
modified.append(translation.strip().split(' '))
return modified
modify = file_cached_function(_modify, 1)
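# Illustrative (hypothetical) usage of modify(); the model path and the
# neuron/value pairs below are made up for this sketch and are not defined
# anywhere in this module. Each corpus item is a (source_tokens, (_, token_indices))
# pair, matching how _modify unpacks it above:
#
#   outputs = modify(corpus=[(src_tokens, (sent_id, token_indices)), ...],
#                    neurons=[(2, 150)],   # (layer index, neuron index)
#                    values=[5.0],
#                    model='path/to/model.pt')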
|
"""labpals package initializer."""
import flask
app = flask.Flask(__name__) # pylint: disable=invalid-name
app.config.from_object("labpals.config")
app.config.from_envvar("LABPALS_SETTINGS", silent=True)
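# For example, a deployment might point LABPALS_SETTINGS at an instance-specific
# settings file before starting the app (path is hypothetical):
#   export LABPALS_SETTINGS=/srv/labpals/production_settings.py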
import labpals.model # noqa: E402 pylint: disable=wrong-import-position
import labpals.utils # noqa: E402 pylint: disable=wrong-import-position
import labpals.views # noqa: E402 pylint: disable=wrong-import-position
import labpals.config # noqa: E402 pylint: disable=wrong-import-position
import labpals.api # noqa: E402 pylint: disable=wrong-import-position
|
from django.http import FileResponse
# Create your views here.
def send_image(request, name="icon.jpg"):
img = open(f"./templates/{name}", 'rb')
response = FileResponse(img)
return response
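# Hedged alternative sketch (not part of the original app): send_image() above
# interpolates `name` directly into a filesystem path, so a hardened variant could
# strip directory components and return 404 for missing files. TEMPLATE_IMAGE_DIR
# is a hypothetical constant introduced only for this sketch.
import os
from django.http import Http404
TEMPLATE_IMAGE_DIR = "./templates"
def send_image_safe(request, name="icon.jpg"):
    safe_name = os.path.basename(name)  # drop any "../" path components
    path = os.path.join(TEMPLATE_IMAGE_DIR, safe_name)
    if not os.path.exists(path):
        raise Http404(f"Image {safe_name} not found")
    return FileResponse(open(path, "rb"))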
|