import os
from typing import Dict, Optional
from fluid.http import HttpClient
from .repo import GithubRepo
class Github(HttpClient):
    def __init__(self, token: Optional[str] = None) -> None:
self.token = token or get_token()
@property
def api_url(self):
return "https://api.github.com"
@property
def uploads_url(self):
return "https://uploads.github.com"
def __repr__(self):
return self.api_url
__str__ = __repr__
def repo(self, repo_name: str) -> GithubRepo:
return GithubRepo(self, repo_name)
def default_headers(self) -> Dict[str, str]:
headers = super().default_headers()
if self.token:
headers["authorization"] = f"token {self.token}"
return headers
def get_token() -> Optional[str]:
    return os.getenv("GITHUB_SECRET_TOKEN") or os.getenv("GITHUB_TOKEN")
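# Minimal usage sketch, assuming GITHUB_TOKEN (or GITHUB_SECRET_TOKEN) is exported and
# fluid.http's HttpClient supplies the request machinery; "owner/name" is a placeholder.
# gh = Github()
# repo = gh.repo("owner/name")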
|
import os
from pm4py.objects.log.importer.xes import importer as xes_importer
from pm4py.algo.discovery.inductive import algorithm as inductive_miner
from pm4py.objects.petri.exporter import exporter as pnml_exporter
from pm4py.visualization.petrinet import visualizer as pn_visualizer
log = xes_importer.apply(os.path.join("logs", "log_augur_preprocessed.xes"))
net, initial_marking, final_marking = inductive_miner.apply(log)
pnml_exporter.apply(
net,
initial_marking,
os.path.join("petri", "augur_preprocessed_discovered_petri.pnml"),
final_marking=final_marking
)
parameters = {pn_visualizer.Variants.WO_DECORATION.value.Parameters.FORMAT: "svg"}
gviz = pn_visualizer.apply(net, initial_marking, final_marking, parameters=parameters)
pn_visualizer.save(gviz, os.path.join("img", "augur_preprocessed_discovered_petri.svg"))
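# To inspect the discovered net interactively rather than only saving the SVG, pm4py's
# visualizer also exposes a view() helper (sketch):
# pn_visualizer.view(gviz)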
|
from typing import List
class Board:
CONTINUE = -1
STALEMATE = 0
P_ONE_WIN = 1
P_TWO_WIN = 2
def __init__(self):
self.board = self.col_count = None # Stop the inspections from complaining
self.reset()
@property
def available_columns(self):
return [i for i in range(7) if self.col_count[i] < 6]
@property
def game_state(self):
"""Returns state representing current game state"""
# For now, brute force check all the rows, columns, and diagonals
# Further optimization is needed
# Check all rows
for r in range(6):
for c in range(4):
section = self.board[r][c:c+4]
four_in_a_row = self._four_in_a_row(section)
if four_in_a_row:
return four_in_a_row
# Check all columns
for c in range(7):
for r in range(3):
section = [self.board[i][c] for i in range(r, r+4)]
four_in_a_row = self._four_in_a_row(section)
if four_in_a_row:
return four_in_a_row
# Check all diagonals (\) direction
for r in range(3):
for c in range(4):
section = [self.board[r+i][c+i] for i in range(4)]
four_in_a_row = self._four_in_a_row(section)
if four_in_a_row:
return four_in_a_row
# Check all diagonals (/) direction
for r in range(3):
for c in range(3, 7):
section = [self.board[r+i][c-i] for i in range(4)]
four_in_a_row = self._four_in_a_row(section)
if four_in_a_row:
return four_in_a_row
# If board is full then stalemate
if len(self.available_columns) == 0:
return Board.STALEMATE
return Board.CONTINUE
def p_one_play(self, column) -> int:
return self._play(column, 1)
def p_two_play(self, column) -> int:
return self._play(column, 2)
def reset(self):
self.board = [[0 for j in range(7)] for i in range(6)]
self.col_count = [0 for i in range(7)] # Stores the number of pieces in each column
def _play(self, column, piece) -> int:
piece_count = self.col_count[column]
# If column is filled, raise exception
if piece_count >= 6:
raise ValueError("Column " + str(column) + " is already full")
row = 5 - piece_count # Find row to place piece
self.board[row][column] = piece
self.col_count[column] += 1 # Update number of pieces in column
return row
def _four_in_a_row(self, piece_sequence: List[int]) -> int:
"""Returns if and which player has a 4-in-a-row given the piece sequence"""
piece_set = set(piece_sequence) # Convert sequence to set to filter out duplicates
if len(piece_set) == 1: # If all pieces are of one type
return piece_set.pop() # Return singular piece
return 0 # No 4-in-a-row
def __repr__(self):
return '\n'.join([' '.join([str(piece) for piece in row]) for row in self.board])
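# Quick self-check sketch: player one stacks a horizontal four on the bottom row while
# player two keeps dropping into column 0, so the loop should end in P_ONE_WIN.
if __name__ == "__main__":
    board = Board()
    for col in (3, 4, 5, 6):
        board.p_one_play(col)
        if board.game_state == Board.P_ONE_WIN:
            break
        board.p_two_play(0)
    print(board)
    print("game state:", board.game_state)  # expected: 1 (Board.P_ONE_WIN)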
|
import os
import shutil
import winbrew.util
import zipfile
import tarfile
import urllib.request
import subprocess
class Archive:
"""
Archive describes the type of package to download. Typically, some kind of
compressed file (tar, zip) or a git repository.
"""
@staticmethod
def create(url, work_dir, package_name):
ext = os.path.splitext(url)[1]
name = os.path.split(url)[1]
if ext == '.zip':
return ZipArchive(url, work_dir, name)
elif ext == '.gz':
return TarArchive(url, work_dir, name, compression='gz')
elif ext == '.tgz':
return TarArchive(url, work_dir, name, compression='gz')
elif ext == '.bz2':
return TarArchive(url, work_dir, name, compression='bz2')
elif ext == '.xz':
return TarArchive(url, work_dir, name, compression='xz')
elif ext == '.msi':
return MsiArchive(url, work_dir, name)
elif ext == '.git':
return GitArchive(url, work_dir, package_name)
else:
raise Exception('unknown archive file type')
def __init__(self, url, work_dir, name):
self.url = url
# Parent working directory in cache
self.work_dir = work_dir
# File name
self.name = name
# Full downloaded file path
self.path = os.path.join(work_dir, name)
def download(self):
winbrew.util.rm_rf(self.work_dir)
winbrew.util.mkdir_p(self.work_dir)
with open(self.path, 'wb') as fd, self.urlopen() as stream:
shutil.copyfileobj(stream, fd)
def urlopen(self):
headers = {
'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11'
}
request = urllib.request.Request(self.url, headers=headers)
return urllib.request.urlopen(request)
def clean(self):
for fn in os.listdir(self.work_dir):
if fn != self.name:
winbrew.util.rm_rf(os.path.join(self.work_dir, fn))
@property
def unpack_dir(self):
return os.path.join(self.work_dir, self.unpack_name)
class ZipArchive(Archive):
def __init__(self, url, work_dir, name):
super(ZipArchive, self).__init__(url, work_dir, name)
@property
def unpack_name(self):
with self.zipfile() as zf:
return os.path.commonprefix(zf.namelist())
def zipfile(self):
return zipfile.ZipFile(self.path)
def unpack(self):
with self.zipfile() as zf: zf.extractall(self.work_dir)
class TarArchive(Archive):
def __init__(self, url, work_dir, name, compression='gz'):
super(TarArchive, self).__init__(url, work_dir, name)
self.compression = compression
@property
def unpack_name(self):
with self.tarfile() as tf:
return os.path.commonprefix(tf.getnames())
def tarfile(self):
return tarfile.open(self.path, mode='r:%s' % self.compression)
def unpack(self):
with self.tarfile() as tf: tf.extractall(self.work_dir)
class MsiArchive(Archive):
    def __init__(self, url, work_dir, name):
super(MsiArchive, self).__init__(url, work_dir, name)
@property
def unpack_name(self):
return '.'
def unpack(self):
        # Archive defines no `system` helper; invoke msiexec through subprocess instead.
        subprocess.check_call(('msiexec', '/quiet', '/i', self.path))
class GitArchive(Archive):
def __init__(self, url, work_dir, name):
super(GitArchive, self).__init__(url, work_dir, name)
try:
self.tag = self.url.split('#')[1]
except IndexError:
self.tag = None
@property
def unpack_name(self):
return self.name + '-build'
def unpack(self):
subprocess.check_call(('git', 'clone', self.path, self.unpack_dir))
if self.tag:
subprocess.check_call(('git', '-C', self.unpack_dir, 'fetch', self.tag))
subprocess.check_call(('git', '-C', self.unpack_dir, 'tag', self.tag, 'FETCH_HEAD'))
subprocess.check_call(('git', '-C', self.unpack_dir, 'checkout', self.tag, '--quiet'))
def download(self):
winbrew.util.rm_rf(self.work_dir)
winbrew.util.mkdir_p(self.work_dir)
subprocess.check_call(('git', 'clone', self.url, self.path))
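# Minimal usage sketch (URL and cache directory below are hypothetical):
# archive = Archive.create('https://example.com/foo-1.0.tar.gz', r'C:\winbrew\cache\foo', 'foo')
# archive.download()  # fetch the file into the work directory
# archive.unpack()    # extract it; archive.unpack_dir then points at the extracted tree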
|
import uuid
from django.utils.functional import cached_property
from django.conf import settings
from .tasks import send_report_task
GOOGLE_ID = settings.GOOGLE_ANALYTICS_ID
class Tracker(object):
valid_anal_types = ('pageview', 'event', 'social', 'screenview', 'transaction', 'item', 'exception', 'timing')
def __init__(self, request=None, client_id=None, user_id=None):
self.request = request
self.client_id = client_id
self.user_id = user_id
@cached_property
def get_client_id(self):
if self.client_id:
return self.client_id
        if not self.request:
            self.client_id = str(uuid.uuid4())
        else:
            _ga = self.request.COOKIES.get('_ga')
            if _ga:
                ga_split = _ga.split('.')
                self.client_id = '.'.join((ga_split[2], ga_split[3]))
            else:
                # No _ga cookie on the request; fall back to a fresh id so 'cid' is never empty.
                self.client_id = str(uuid.uuid4())
return self.client_id
@cached_property
def get_user_id(self):
if self.user_id:
return self.user_id
if self.request and self.request.user.id:
self.user_id = self.request.user.id
return self.user_id
@property
def default_params(self):
client_id = self.get_client_id
user_id = self.get_user_id
ret = {
'v': 1,
'tid': GOOGLE_ID,
'cid': client_id,
}
if user_id:
ret['uid'] = user_id
return ret
def get_payload(self, *args, **kwargs):
"""Receive all passed in args, kwargs, and combine them together with any required params"""
if not kwargs:
kwargs = self.default_params
else:
kwargs.update(self.default_params)
for item in args:
if isinstance(item, dict):
kwargs.update(item)
if hasattr(self, 'type_params'):
kwargs.update(self.type_params(*args, **kwargs))
return kwargs
def debug(self, *args, **kwargs):
return self.get_payload(*args, **kwargs)
def sync_send(self, *args, **kwargs):
# For use when you don't want to send as an async task through Celery
payload = self.get_payload(*args, **kwargs)
return send_report_task(payload)
def send(self, *args, **kwargs):
payload = self.get_payload(*args, **kwargs)
return send_report_task.delay(payload)
class PageView(Tracker):
anal_type = 'pageview'
def type_params(self, *args, **kwargs):
domain = kwargs.get('domain') or kwargs.get('dh')
page = kwargs.get('page') or kwargs.get('dp')
title = kwargs.get('title') or kwargs.get('dt')
label = kwargs.get('label') or kwargs.get('el')
value = kwargs.get('value') or kwargs.get('ev')
params = {
't': self.anal_type,
            'dh': domain or settings.DEFAULT_TRACKING_DOMAIN,  # mydomain.com
            'dp': page,  # /home
            'dt': title,  # homepage
}
if label:
params['el'] = label
if value:
params['ev'] = value
return params
class Event(Tracker):
anal_type = 'event'
def type_params(self, *args, **kwargs):
category = kwargs.get('category') or kwargs.get('ec')
action = kwargs.get('action') or kwargs.get('ea')
document_path = kwargs.get('document_path') or kwargs.get('dp')
document_title = kwargs.get('document_title') or kwargs.get('dt')
campaign_id = kwargs.get('campaign_id') or kwargs.get('ci')
campaign_name = kwargs.get('campaign_name') or kwargs.get('cn')
campaign_source = kwargs.get('campaign_source') or kwargs.get('cs')
campaign_medium = kwargs.get('campaign_medium') or kwargs.get('cm')
campaign_content = kwargs.get('campaign_content') or kwargs.get('cc')
label = kwargs.get('label') or kwargs.get('el')
value = kwargs.get('value') or kwargs.get('ev')
params = {
't': self.anal_type,
'ec': category, #video
'ea': action, #play
}
if document_path:
params['dp'] = document_path
if document_title:
params['dt'] = document_title
if campaign_id:
params['ci'] = campaign_id
if campaign_name:
params['cn'] = campaign_name
if campaign_source:
params['cs'] = campaign_source
if campaign_medium:
params['cm'] = campaign_medium
if campaign_content:
params['cc'] = campaign_content
if label:
params['el'] = label
if value:
params['ev'] = value
return params
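# Minimal usage sketch (domain, page, and category values below are made up; send() needs a
# configured Celery broker, while debug() and sync_send() stay in-process):
# PageView(request=request).send(domain='example.com', page='/home', title='Homepage')
# Event(client_id='555.12345').debug(category='video', action='play', label='intro')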
|
from typing import List
from mathy_core import ExpressionParser, MathExpression, Token, VariableExpression
problem = "4 + 2x"
parser = ExpressionParser()
tokens: List[Token] = parser.tokenize(problem)
expression: MathExpression = parser.parse(problem)
assert len(expression.find_type(VariableExpression)) == 1
|
# -*- encoding: utf-8 -*-
TRAIN_PERIOD = ['2010', '2011', '2012', '2013', '2014', '2015']
VALIDATION_PERIOD = ['2016', '2017']
BACKTEST_PERIOD = ['2018', '2019']
M = 60
T = 28
UP_THRESHOLD = 6
DOWN_THRESHOLD = -6 |
import subprocess
from voxel_globe.tools.subprocessbg import Popen
def findProcess(imageName, filterString):
pid = Popen(['wmic', 'path', 'win32_process', 'where', "Name='%s'" % imageName, 'get', 'CommandLine,ProcessId', '/VALUE'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Decode wmic's byte output, then split on its CR-CR-LF record and field separators
    out = pid.communicate()[0].decode('utf-8', errors='replace')
    records = out.split('\r\r\n\r\r\n')
    records = [r.strip().split('\r\r\n') for r in records if 'CommandLine' in r]
    if records:
        # wmic does not guarantee field order, so detect which field is which
        if records[0][0].startswith('CommandLine'):
            commandLineIndex = 0
            processIndex = 1
        else:
            commandLineIndex = 1
            processIndex = 0
        # Strip the 'ProcessId=' and 'CommandLine=' prefixes
        pairs = [(int(r[processIndex][10:]), r[commandLineIndex][12:]) for r in records]
        return [proc_id for proc_id, cmdline in pairs if filterString in cmdline]
    return []
if __name__ == '__main__':
    import sys
    import os
    myPid = os.getpid()
    try:
        imageName = sys.argv[1]
        filterString = sys.argv[2]
    except IndexError:
        sys.exit('Usage: %s [imageName] [filterString]' % sys.argv[0])
    pids = findProcess(imageName, filterString)
    pids = [p for p in pids if p != myPid]
    print('\n'.join(map(str, pids)))
|
from django.conf.urls import include, url
from rest_framework import routers
from .views import DynamicSerializerViewSet
router = routers.DefaultRouter()
router.register(r'users-dynamic', DynamicSerializerViewSet)
# router.register(r'users-dynamic-fields', DynamicFieldsSerializerViewSet)
# router.register(r'users-dynamic-output', DynamicOutputViewSet)
urlpatterns = (
url(r'^', include(router.urls)),
)
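# For reference, DefaultRouter expands the registration above into the standard viewset routes:
# list/create at ^users-dynamic/$, retrieve/update/destroy at ^users-dynamic/{pk}/$,
# plus a browsable API root at ^$.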
|
"""AWS CodeArtifact Poetry CLI Definition."""
from typing import Optional
import click
from click import Context
from aws_codeartifact_poetry.commands.login import login
from aws_codeartifact_poetry.helpers.catch_exceptions import catch_exceptions
from aws_codeartifact_poetry.helpers.logging import setup_logging
@click.group(help='AWS CodeArtifact Poetry CLI.')
@click.option(
'--loglevel',
help='Log level.',
required=False,
default='WARNING',
show_default=True,
type=click.Choice(['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], case_sensitive=False)
)
@click.option(
'--log-file',
help='Log file name',
required=False,
type=str
)
@catch_exceptions
@click.pass_context
def cli(ctx: Context, loglevel: str, log_file: Optional[str]):
"""
CLI group root function (aws-codeartifact-poetry --help).
Args:
ctx (`Context`): click context
loglevel (`str`): loglevel
log_file (`str`, optional): output log file
"""
ctx.ensure_object(dict)
ctx.obj['loglevel'] = loglevel
setup_logging(__package__, loglevel, log_file)
cli.add_command(login)
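# Example invocation (a sketch; the `login` subcommand's own options live in
# aws_codeartifact_poetry.commands.login and are not shown here):
#   aws-codeartifact-poetry --loglevel INFO --log-file aws-ca.log login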
|
class ZoneStats:
def __init__(self, zones_data:dict):
self.zones_secured = zones_data.get('secured', 0)
self.zones_captured = zones_data.get('captured', 0)
self.occupation_time = zones_data.get('occupation', {}).get('duration', {}).get('seconds', 0)
zone_kills = zones_data.get('kills', {})
self.defensive_kills = zone_kills.get('defensive', 0)
self.offensive_kills = zone_kills.get('offensive', 0)
class FlagStats:
    def __init__(self, flags_data: dict):
        self.flag_grabs = flags_data.get('grabs', 0)
        # Assumed payload keys for the remaining flag actions
        self.flag_steals = flags_data.get('steals', 0)
        self.flag_secures = flags_data.get('secures', 0)
        self.flag_returns = flags_data.get('returns', 0)
self.flag_possession = flags_data.get('possession', {}).get('duration', {}).get('seconds', 0)
capture_data = flags_data.get('captures', {})
self.flag_captures = capture_data.get('total', 0)
self.flag_assists = capture_data.get('assists', 0)
flag_kills = flags_data.get('kills', {})
self.flag_carrier_kills = flag_kills.get('carriers', 0)
self.flag_returner_kills = flag_kills.get('returners', 0)
self.kills_as_flag_carrier = flag_kills.get('as', {}).get('carrier', 0)
self.kills_as_flag_returner = flag_kills.get('as', {}).get('returner', 0)
class OddballStats:
def __init__(self, oddballs_stats:dict):
self.ball_grabs = oddballs_stats.get('grabs', 0)
self.ball_controls = oddballs_stats.get('controls', 0)
possession_data = oddballs_stats.get('possession', {})
self.ball_hold_ticks, \
self.longest_ball_time, \
self.total_ball_time = self._parse_possession_data(possession_data)
self.ball_carriers_killed = oddballs_stats.get('kills', {}).get('carriers', 0)
self.kills_as_ball_carrier = oddballs_stats.get('kills', {}).get('as', {}).get('carrier', 0)
@staticmethod
def _parse_possession_data(possession_data:dict):
ticks = possession_data.get('ticks', 0)
durations = possession_data.get('durations', {})
longest_hold = durations.get('longest', {}).get('seconds', 0)
total_hold_time = durations.get('total', {}).get('seconds', 0)
return ticks, longest_hold, total_hold_time |
from __future__ import absolute_import, unicode_literals
from celery import Celery
from celery.schedules import crontab
from jeta.archive.ingest import force_sort_by_time, calculate_delta_times
##CELERYBEAT_SCHEDULER = 'redbeat.RedBeatScheduler'
app = Celery(
'jeta.archive',
broker='redis://localhost',
backend='redis://localhost',
    include=[
        'jeta.archive.ingest',
    ]
)
app.conf.update(
result_expires=3600,
)
if __name__ == '__main__':
app.start()
# app.conf.broker_url = 'redis://'
# app.conf.result_backend = 'redis://localhost'
# redbeat_redis_url = "redis://localhost"
# app.conf.beat_schedule = {
# 'execute_telemetry_ingest': {
# 'task': 'jeta.ingest.controller._execute_automated_ingest',
# 'schedule': crontab(minute='*/15'),
# }
# }
# app.autodiscover_tasks()
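# To start a worker against this app (a sketch; replace <module> with the dotted path of
# the module that holds this file):
#   celery -A <module> worker --loglevel=INFO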
|
"""
Permissions for the instructor dashboard and associated actions
"""
from bridgekeeper import perms
from bridgekeeper.rules import is_staff
from lms.djangoapps.courseware.rules import HasAccessRule, HasRolesRule
ALLOW_STUDENT_TO_BYPASS_ENTRANCE_EXAM = 'instructor.allow_student_to_bypass_entrance_exam'
ASSIGN_TO_COHORTS = 'instructor.assign_to_cohorts'
EDIT_COURSE_ACCESS = 'instructor.edit_course_access'
EDIT_FORUM_ROLES = 'instructor.edit_forum_roles'
EDIT_INVOICE_VALIDATION = 'instructor.edit_invoice_validation'
ENABLE_CERTIFICATE_GENERATION = 'instructor.enable_certificate_generation'
GENERATE_CERTIFICATE_EXCEPTIONS = 'instructor.generate_certificate_exceptions'
GENERATE_BULK_CERTIFICATE_EXCEPTIONS = 'instructor.generate_bulk_certificate_exceptions'
GIVE_STUDENT_EXTENSION = 'instructor.give_student_extension'
VIEW_ISSUED_CERTIFICATES = 'instructor.view_issued_certificates'
CAN_RESEARCH = 'instructor.research'
CAN_ENROLL = 'instructor.enroll'
CAN_BETATEST = 'instructor.enroll_beta'
ENROLLMENT_REPORT = 'instructor.enrollment_report'
EXAM_RESULTS = 'instructor.view_exam_results'
OVERRIDE_GRADES = 'instructor.override_grades'
SHOW_TASKS = 'instructor.show_tasks'
VIEW_COUPONS = 'instructor.view_coupons'
EMAIL = 'instructor.email'
RESCORE_EXAMS = 'instructor.rescore_exams'
VIEW_REGISTRATION = 'instructor.view_registration'
VIEW_DASHBOARD = 'instructor.dashboard'
perms[ALLOW_STUDENT_TO_BYPASS_ENTRANCE_EXAM] = HasAccessRule('staff')
perms[ASSIGN_TO_COHORTS] = HasAccessRule('staff')
perms[EDIT_COURSE_ACCESS] = HasAccessRule('instructor')
perms[EDIT_FORUM_ROLES] = HasAccessRule('staff')
perms[EDIT_INVOICE_VALIDATION] = HasAccessRule('staff')
perms[ENABLE_CERTIFICATE_GENERATION] = is_staff
perms[GENERATE_CERTIFICATE_EXCEPTIONS] = is_staff
perms[GENERATE_BULK_CERTIFICATE_EXCEPTIONS] = is_staff
perms[GIVE_STUDENT_EXTENSION] = HasAccessRule('staff')
perms[VIEW_ISSUED_CERTIFICATES] = HasAccessRule('staff') | HasRolesRule('data_researcher')
# only global staff or those with the data_researcher role can access the data download tab
# HasAccessRule('staff') also includes course staff
perms[CAN_RESEARCH] = is_staff | HasRolesRule('data_researcher')
perms[CAN_ENROLL] = HasAccessRule('staff')
perms[CAN_BETATEST] = HasAccessRule('instructor')
perms[ENROLLMENT_REPORT] = HasAccessRule('staff') | HasRolesRule('data_researcher')
perms[VIEW_COUPONS] = HasAccessRule('staff') | HasRolesRule('data_researcher')
perms[EXAM_RESULTS] = HasAccessRule('staff')
perms[OVERRIDE_GRADES] = HasAccessRule('staff')
perms[SHOW_TASKS] = HasAccessRule('staff') | HasRolesRule('data_researcher')
perms[EMAIL] = HasAccessRule('staff')
perms[RESCORE_EXAMS] = HasAccessRule('instructor')
perms[VIEW_REGISTRATION] = HasAccessRule('staff')
perms[VIEW_DASHBOARD] = \
HasRolesRule(
'staff',
'instructor',
'data_researcher'
) | HasAccessRule('staff') | HasAccessRule('instructor')
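# Bridgekeeper registers these rules with Django's auth machinery, so callers check them with
# the standard permission API (sketch; `course_key` is whatever object the rule inspects):
# if request.user.has_perm(CAN_RESEARCH, course_key):
#     ...  # show the data download tab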
|
import serial
import time
import numpy as np
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui
# Change the configuration file name
configFileName = 'mmw_pplcount_demo_default.cfg'
CLIport = {}
Dataport = {}
byteBuffer = np.zeros(2**15,dtype = 'uint8')
byteBufferLength = 0
# ------------------------------------------------------------------
# Function to configure the serial ports and send the data from
# the configuration file to the radar
def serialConfig(configFileName):
global CLIport
global Dataport
# Open the serial ports for the configuration and the data ports
# Raspberry pi
CLIport = serial.Serial('/dev/ttyACM0', 115200)
Dataport = serial.Serial('/dev/ttyACM1', 921600)
# Windows
#CLIport = serial.Serial('COM3', 115200)
#Dataport = serial.Serial('COM4', 921600)
# Read the configuration file and send it to the board
config = [line.rstrip('\r\n') for line in open(configFileName)]
for i in config:
CLIport.write((i+'\n').encode())
print(i)
time.sleep(0.01)
return CLIport, Dataport
# ------------------------------------------------------------------
# Function to parse the data inside the configuration file
def parseConfigFile(configFileName):
configParameters = {} # Initialize an empty dictionary to store the configuration parameters
# Read the configuration file and send it to the board
config = [line.rstrip('\r\n') for line in open(configFileName)]
for i in config:
# Split the line
splitWords = i.split(" ")
# Hard code the number of antennas, change if other configuration is used
numRxAnt = 4
numTxAnt = 2
# Get the information about the profile configuration
if "profileCfg" in splitWords[0]:
startFreq = int(float(splitWords[2]))
idleTime = int(splitWords[3])
rampEndTime = float(splitWords[5])
freqSlopeConst = float(splitWords[8])
numAdcSamples = int(splitWords[10])
digOutSampleRate = int(splitWords[11])
            # Round the number of ADC samples up to the next power of two
            numAdcSamplesRoundTo2 = 1
            while numAdcSamples > numAdcSamplesRoundTo2:
                numAdcSamplesRoundTo2 = numAdcSamplesRoundTo2 * 2
# Get the information about the frame configuration
elif "frameCfg" in splitWords[0]:
            chirpStartIdx = int(splitWords[1])
            chirpEndIdx = int(splitWords[2])
            numLoops = int(splitWords[3])
            numFrames = int(splitWords[4])
            framePeriodicity = int(splitWords[5])
# Combine the read data to obtain the configuration parameters
numChirpsPerFrame = (chirpEndIdx - chirpStartIdx + 1) * numLoops
configParameters["numDopplerBins"] = numChirpsPerFrame / numTxAnt
configParameters["numRangeBins"] = numAdcSamplesRoundTo2
configParameters["rangeResolutionMeters"] = (3e8 * digOutSampleRate * 1e3) / (2 * freqSlopeConst * 1e12 * numAdcSamples)
configParameters["rangeIdxToMeters"] = (3e8 * digOutSampleRate * 1e3) / (2 * freqSlopeConst * 1e12 * configParameters["numRangeBins"])
configParameters["dopplerResolutionMps"] = 3e8 / (2 * startFreq * 1e9 * (idleTime + rampEndTime) * 1e-6 * configParameters["numDopplerBins"] * numTxAnt)
configParameters["maxRange"] = (300 * 0.9 * digOutSampleRate)/(2 * freqSlopeConst * 1e3)
configParameters["maxVelocity"] = 3e8 / (4 * startFreq * 1e9 * (idleTime + rampEndTime) * 1e-6 * numTxAnt)
return configParameters
# ------------------------------------------------------------------
# Function to read and parse the incoming data
def readAndParseData16xx(Dataport, configParameters):
global byteBuffer, byteBufferLength
# Constants
    OBJ_STRUCT_SIZE_BYTES = 12
    BYTE_VEC_ACC_MAX_SIZE = 2**15
    MMWDEMO_UART_MSG_POINT_CLOUD_2D = 6
    MMWDEMO_UART_MSG_TARGET_LIST_2D = 7
    MMWDEMO_UART_MSG_TARGET_INDEX_2D = 8
    maxBufferSize = 2**15
    tlvHeaderLengthInBytes = 8
    pointLengthInBytes = 16
    targetLengthInBytes = 68
magicWord = [2, 1, 4, 3, 6, 5, 8, 7]
# Initialize variables
magicOK = 0 # Checks if magic number has been read
dataOK = 0 # Checks if the data has been read correctly
targetDetected = 0 # Checks if a person has been detected
frameNumber = 0
targetObj = {}
pointObj = {}
readBuffer = Dataport.read(Dataport.in_waiting)
byteVec = np.frombuffer(readBuffer, dtype = 'uint8')
byteCount = len(byteVec)
# Check that the buffer is not full, and then add the data to the buffer
if (byteBufferLength + byteCount) < maxBufferSize:
byteBuffer[byteBufferLength:byteBufferLength + byteCount] = byteVec[:byteCount]
byteBufferLength = byteBufferLength + byteCount
# Check that the buffer has some data
if byteBufferLength > 16:
# Check for all possible locations of the magic word
possibleLocs = np.where(byteBuffer == magicWord[0])[0]
# Confirm that is the beginning of the magic word and store the index in startIdx
startIdx = []
for loc in possibleLocs:
check = byteBuffer[loc:loc + 8]
if np.all(check == magicWord):
startIdx.append(loc)
# Check that startIdx is not empty
if startIdx:
# Remove the data before the first start index
if startIdx[0] > 0 and startIdx[0] < byteBufferLength:
byteBuffer[:byteBufferLength - startIdx[0]] = byteBuffer[startIdx[0]:byteBufferLength]
byteBuffer[byteBufferLength-startIdx[0]:] = np.zeros(len(byteBuffer[byteBufferLength-startIdx[0]:]),dtype = 'uint8')
byteBufferLength = byteBufferLength - startIdx[0]
            # Check that there are no errors with the byte buffer length
if byteBufferLength < 0:
byteBufferLength = 0
# word array to convert 4 bytes to a 32 bit number
word = [1, 2 ** 8, 2 ** 16, 2 ** 24]
# Read the total packet length
totalPacketLen = np.matmul(byteBuffer[20:20 + 4], word)
# Check that all the packet has been read
if (byteBufferLength >= totalPacketLen) and (byteBufferLength != 0):
magicOK = 1
# If magicOK is equal to 1 then process the message
if magicOK:
# word array to convert 4 bytes to a 32 bit number
word = [1, 2 ** 8, 2 ** 16, 2 ** 24]
# Initialize the pointer index
idX = 0
        # Read the header
magicNumber = byteBuffer[idX:idX + 8]
idX += 8
version = format(np.matmul(byteBuffer[idX:idX + 4], word), 'x')
idX += 4
platform = format(np.matmul(byteBuffer[idX:idX + 4], word), 'x')
idX += 4
timeStamp = np.matmul(byteBuffer[idX:idX + 4], word)
idX += 4
totalPacketLen = np.matmul(byteBuffer[idX:idX + 4], word)
idX += 4
frameNumber = np.matmul(byteBuffer[idX:idX + 4], word)
idX += 4
subFrameNumber = np.matmul(byteBuffer[idX:idX + 4], word)
idX += 4
chirpMargin = np.matmul(byteBuffer[idX:idX + 4], word)
idX += 4
frameMargin = np.matmul(byteBuffer[idX:idX + 4], word)
idX += 4
uartSentTime = np.matmul(byteBuffer[idX:idX + 4], word)
idX += 4
trackProcessTime = np.matmul(byteBuffer[idX:idX + 4], word)
idX += 4
word = [1, 2 ** 8]
numTLVs = np.matmul(byteBuffer[idX:idX + 2], word)
idX += 2
checksum = np.matmul(byteBuffer[idX:idX + 2], word)
idX += 2
# Read the TLV messages
for tlvIdx in range(numTLVs):
# word array to convert 4 bytes to a 32 bit number
word = [1, 2 ** 8, 2 ** 16, 2 ** 24]
# Initialize the tlv type
tlv_type = 0
try:
# Check the header of the TLV message
tlv_type = np.matmul(byteBuffer[idX:idX + 4], word)
idX += 4
tlv_length = np.matmul(byteBuffer[idX:idX + 4], word)
idX += 4
except:
pass
# Read the data depending on the TLV message
if tlv_type == MMWDEMO_UART_MSG_POINT_CLOUD_2D:
                # word array to convert 4 bytes to a 32 bit number
word = [1, 2 ** 8, 2 ** 16, 2 ** 24]
# Calculate the number of detected points
numInputPoints = (tlv_length - tlvHeaderLengthInBytes) // pointLengthInBytes
# Initialize the arrays
rangeVal = np.zeros(numInputPoints, dtype=np.float32)
azimuth = np.zeros(numInputPoints, dtype=np.float32)
dopplerVal = np.zeros(numInputPoints, dtype=np.float32)
snr = np.zeros(numInputPoints, dtype=np.float32)
for objectNum in range(numInputPoints):
# Read the data for each object
rangeVal[objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
azimuth[objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
dopplerVal[objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
snr[objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
# Store the data in the detObj dictionary
pointObj = {"numObj": numInputPoints, "range": rangeVal, "azimuth": azimuth,\
"doppler": dopplerVal, "snr": snr}
dataOK = 1
elif tlv_type == MMWDEMO_UART_MSG_TARGET_LIST_2D:
                # word array to convert 4 bytes to a 32 bit number
word = [1, 2 ** 8, 2 ** 16, 2 ** 24]
# Calculate the number of target points
numTargetPoints = (tlv_length - tlvHeaderLengthInBytes) // targetLengthInBytes
# Initialize the arrays
targetId = np.zeros(numTargetPoints, dtype=np.uint32)
posX = np.zeros(numTargetPoints, dtype=np.float32)
posY = np.zeros(numTargetPoints, dtype=np.float32)
velX = np.zeros(numTargetPoints, dtype=np.float32)
velY = np.zeros(numTargetPoints, dtype=np.float32)
accX = np.zeros(numTargetPoints, dtype=np.float32)
accY = np.zeros(numTargetPoints, dtype=np.float32)
EC = np.zeros((3, 3, numTargetPoints), dtype=np.float32) # Error covariance matrix
G = np.zeros(numTargetPoints, dtype=np.float32) # Gain
for objectNum in range(numTargetPoints):
# Read the data for each object
targetId[objectNum] = np.matmul(byteBuffer[idX:idX + 4], word)
idX += 4
posX[objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
posY[objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
velX[objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
velY[objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
accX[objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
accY[objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
EC[0, 0, objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
EC[0, 1, objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
EC[0, 2, objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
EC[1, 0, objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
EC[1, 1, objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
EC[1, 2, objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
EC[2, 0, objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
EC[2, 1, objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
EC[2, 2, objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
G[objectNum] = byteBuffer[idX:idX + 4].view(dtype=np.float32)
idX += 4
# Store the data in the detObj dictionary
targetObj = {"targetId": targetId, "posX": posX, "posY": posY, \
"velX": velX, "velY": velY, "accX": accX, "accY": accY, \
"EC": EC, "G": G, "numTargets":numTargetPoints}
targetDetected = 1
elif tlv_type == MMWDEMO_UART_MSG_TARGET_INDEX_2D:
# Calculate the length of the index message
numIndices = tlv_length - tlvHeaderLengthInBytes
indices = byteBuffer[idX:idX + numIndices]
idX += numIndices
# Remove already processed data
if idX > 0:
shiftSize = totalPacketLen
byteBuffer[:byteBufferLength - shiftSize] = byteBuffer[shiftSize:byteBufferLength]
byteBuffer[byteBufferLength - shiftSize:] = np.zeros(len(byteBuffer[byteBufferLength - shiftSize:]),dtype = 'uint8')
byteBufferLength = byteBufferLength - shiftSize
# Check that there are no errors with the buffer length
if byteBufferLength < 0:
byteBufferLength = 0
return dataOK, targetDetected, frameNumber, targetObj, pointObj
# ------------------------------------------------------------------
# Function to update the data and display it in the plot
def update():
dataOk = 0
targetDetected = 0
global targetObj
global pointObj
x = []
y = []
# Read and parse the received data
dataOk, targetDetected, frameNumber, targetObj, pointObj = readAndParseData16xx(Dataport, configParameters)
if targetDetected:
print(targetObj)
print(targetObj["numTargets"])
x = -targetObj["posX"]
y = targetObj["posY"]
s2.setData(x,y)
QtGui.QApplication.processEvents()
if dataOk:
x = -pointObj["range"]*np.sin(pointObj["azimuth"])
y = pointObj["range"]*np.cos(pointObj["azimuth"])
s1.setData(x,y)
QtGui.QApplication.processEvents()
return dataOk
# ------------------------- MAIN -----------------------------------------
# Configure the serial port
CLIport, Dataport = serialConfig(configFileName)
# Get the configuration parameters from the configuration file
configParameters = parseConfigFile(configFileName)
# Start the Qt app for the plot
app = QtGui.QApplication([])
# Set the plot
pg.setConfigOption('background','w')
win = pg.GraphicsWindow(title="2D scatter plot")
p = win.addPlot()
p.setXRange(-0.5,0.5)
p.setYRange(0,6)
p.setLabel('left',text = 'Y position (m)')
p.setLabel('bottom', text= 'X position (m)')
s1 = p.plot([],[],pen=None,symbol='o')
s2 = p.plot([],[],pen=(0,0,255),symbol='star')
# Main loop
targetObj = {}
pointObj = {}
frameData = {}
currentIndex = 0
while True:
try:
# Update the data and check if the data is okay
dataOk = update()
if dataOk:
# Store the current frame into frameData
frameData[currentIndex] = targetObj
currentIndex += 1
time.sleep(0.033) # Sampling frequency of 30 Hz
# Stop the program and close everything if Ctrl + c is pressed
except KeyboardInterrupt:
CLIport.write(('sensorStop\n').encode())
CLIport.close()
Dataport.close()
win.close()
break
|
from .legofy import legofy
|
"""
Test cases for the HTTP endpoints of the profile image api.
"""
from contextlib import closing
from unittest import mock
from unittest.mock import patch
import pytest
import datetime # lint-amnesty, pylint: disable=wrong-import-order
from pytz import UTC
from django.urls import reverse
from django.http import HttpResponse
import ddt
from PIL import Image
from rest_framework.test import APITestCase, APIClient
from common.djangoapps.student.tests.factories import UserFactory
from common.djangoapps.student.tests.tests import UserSettingsEventTestMixin
from openedx.core.djangoapps.user_api.accounts.image_helpers import (
set_has_profile_image,
get_profile_image_names,
get_profile_image_storage,
)
from openedx.core.djangolib.testing.utils import skip_unless_lms
from ..images import create_profile_images, ImageValidationError
from ..views import LOG_MESSAGE_CREATE, LOG_MESSAGE_DELETE
from .helpers import make_image_file
TEST_PASSWORD = "test"
TEST_UPLOAD_DT = datetime.datetime(2002, 1, 9, 15, 43, 1, tzinfo=UTC)
TEST_UPLOAD_DT2 = datetime.datetime(2003, 1, 9, 15, 43, 1, tzinfo=UTC)
class ProfileImageEndpointMixin(UserSettingsEventTestMixin):
"""
Base class / shared infrastructure for tests of profile_image "upload" and
"remove" endpoints.
"""
# subclasses should override this with the name of the view under test, as
# per the urls.py configuration.
_view_name = None
def setUp(self):
super().setUp()
self.user = UserFactory.create(password=TEST_PASSWORD)
# Ensure that parental controls don't apply to this user
self.user.profile.year_of_birth = 1980
self.user.profile.save()
self.url = reverse(self._view_name, kwargs={'username': self.user.username})
self.client.login(username=self.user.username, password=TEST_PASSWORD)
self.storage = get_profile_image_storage()
self.table = 'auth_userprofile'
# this assertion is made here as a sanity check because all tests
# assume user.profile.has_profile_image is False by default
assert not self.user.profile.has_profile_image
# Reset the mock event tracker so that we're not considering the
# initial profile creation events.
self.reset_tracker()
def tearDown(self):
super().tearDown()
for name in get_profile_image_names(self.user.username).values():
self.storage.delete(name)
def check_images(self, exist=True):
"""
If exist is True, make sure the images physically exist in storage
with correct sizes and formats.
If exist is False, make sure none of the images exist.
"""
for size, name in get_profile_image_names(self.user.username).items():
if exist:
assert self.storage.exists(name)
with closing(Image.open(self.storage.path(name))) as img:
assert img.size == (size, size)
assert img.format == 'JPEG'
else:
assert not self.storage.exists(name)
def check_response(self, response, expected_code, expected_developer_message=None, expected_user_message=None):
"""
Make sure the response has the expected code, and if that isn't 204,
optionally check the correctness of a developer-facing message.
"""
assert expected_code == response.status_code
if expected_code == 204:
assert response.data is None
else:
if expected_developer_message is not None:
assert response.data.get('developer_message') == expected_developer_message
if expected_user_message is not None:
assert response.data.get('user_message') == expected_user_message
def check_has_profile_image(self, has_profile_image=True):
"""
Make sure the value of self.user.profile.has_profile_image is what we
expect.
"""
# it's necessary to reload this model from the database since save()
# would have been called on another instance.
profile = self.user.profile.__class__.objects.get(user=self.user)
assert profile.has_profile_image == has_profile_image
def check_anonymous_request_rejected(self, method):
"""
Make sure that the specified method rejects access by unauthorized users.
"""
anonymous_client = APIClient()
request_method = getattr(anonymous_client, method)
response = request_method(self.url)
self.check_response(response, 401)
self.assert_no_events_were_emitted()
@skip_unless_lms
@mock.patch('openedx.core.djangoapps.profile_images.views.log')
class ProfileImageViewGeneralTestCase(ProfileImageEndpointMixin, APITestCase):
"""
Tests for the profile image endpoint
"""
_view_name = "accounts_profile_image_api"
def test_unsupported_methods(self, mock_log):
"""
Test that GET, PUT, and PATCH are not supported.
"""
assert 405 == self.client.get(self.url).status_code
assert 405 == self.client.put(self.url).status_code
assert 405 == self.client.patch(self.url).status_code
assert not mock_log.info.called
self.assert_no_events_were_emitted()
@ddt.ddt
@skip_unless_lms
@mock.patch('openedx.core.djangoapps.profile_images.views.log')
class ProfileImageViewPostTestCase(ProfileImageEndpointMixin, APITestCase):
"""
Tests for the POST method of the profile_image api endpoint.
"""
_view_name = "accounts_profile_image_api"
# Use the patched version of the API client to workaround a unicode issue
# with DRF 3.1 and Django 1.4. Remove this after we upgrade Django past 1.4!
def check_upload_event_emitted(self, old=None, new=TEST_UPLOAD_DT):
"""
Make sure we emit a UserProfile event corresponding to the
profile_image_uploaded_at field changing.
"""
self.assert_user_setting_event_emitted(
setting='profile_image_uploaded_at', old=old, new=new
)
def test_anonymous_access(self, mock_log):
"""
Test that an anonymous client (not logged in) cannot call POST.
"""
self.check_anonymous_request_rejected('post')
assert not mock_log.info.called
@ddt.data('.jpg', '.jpeg', '.jpg', '.jpeg', '.png', '.gif', '.GIF')
@patch(
'openedx.core.djangoapps.profile_images.views._make_upload_dt',
side_effect=[TEST_UPLOAD_DT, TEST_UPLOAD_DT2],
)
def test_upload_self(self, extension, _mock_make_image_version, mock_log):
"""
Test that an authenticated user can POST to their own upload endpoint.
"""
with make_image_file(extension=extension) as image_file:
response = self.client.post(self.url, {'file': image_file}, format='multipart')
self.check_response(response, 204)
self.check_images()
self.check_has_profile_image()
mock_log.info.assert_called_once_with(
LOG_MESSAGE_CREATE,
{'image_names': list(get_profile_image_names(self.user.username).values()), 'user_id': self.user.id}
)
self.check_upload_event_emitted()
# Try another upload and make sure that a second event is emitted.
with make_image_file() as image_file:
response = self.client.post(self.url, {'file': image_file}, format='multipart')
self.check_response(response, 204)
self.check_upload_event_emitted(old=TEST_UPLOAD_DT, new=TEST_UPLOAD_DT2)
@ddt.data(
('image/jpeg', '.jpg'),
('image/jpeg', '.jpeg'),
('image/pjpeg', '.jpg'),
('image/pjpeg', '.jpeg'),
('image/png', '.png'),
('image/gif', '.gif'),
('image/gif', '.GIF'),
)
@ddt.unpack
@patch('openedx.core.djangoapps.profile_images.views._make_upload_dt', return_value=TEST_UPLOAD_DT)
def test_upload_by_mimetype(self, content_type, extension, _mock_make_image_version, mock_log):
"""
Test that a user can upload raw content with the appropriate mimetype
"""
with make_image_file(extension=extension) as image_file:
data = image_file.read()
response = self.client.post(
self.url,
data,
content_type=content_type,
HTTP_CONTENT_DISPOSITION=f'attachment;filename=filename{extension}',
)
self.check_response(response, 204)
self.check_images()
self.check_has_profile_image()
mock_log.info.assert_called_once_with(
LOG_MESSAGE_CREATE,
{'image_names': list(get_profile_image_names(self.user.username).values()), 'user_id': self.user.id}
)
self.check_upload_event_emitted()
def test_upload_unsupported_mimetype(self, mock_log):
"""
Test that uploading an unsupported image as raw content fails with an
HTTP 415 Error.
"""
with make_image_file() as image_file:
data = image_file.read()
response = self.client.post(
self.url,
data,
content_type='image/tiff',
HTTP_CONTENT_DISPOSITION='attachment;filename=filename.tiff',
)
self.check_response(response, 415)
self.check_images(False)
self.check_has_profile_image(False)
assert not mock_log.info.called
self.assert_no_events_were_emitted()
def test_upload_nonexistent_user(self, mock_log):
"""
Test that an authenticated user who POSTs to a non-existent user's upload
endpoint gets an indistinguishable 403.
"""
nonexistent_user_url = reverse(self._view_name, kwargs={'username': 'nonexistent'})
with make_image_file() as image_file:
response = self.client.post(nonexistent_user_url, {'file': image_file}, format='multipart')
self.check_response(response, 403)
assert not mock_log.info.called
def test_upload_other(self, mock_log):
"""
Test that an authenticated user cannot POST to another user's upload
endpoint.
"""
different_user = UserFactory.create(password=TEST_PASSWORD)
# Ignore UserProfileFactory creation events.
self.reset_tracker()
different_client = APIClient()
different_client.login(username=different_user.username, password=TEST_PASSWORD)
with make_image_file() as image_file:
response = different_client.post(self.url, {'file': image_file}, format='multipart')
self.check_response(response, 403)
self.check_images(False)
self.check_has_profile_image(False)
assert not mock_log.info.called
self.assert_no_events_were_emitted()
def test_upload_staff(self, mock_log):
"""
Test that an authenticated staff cannot POST to another user's upload
endpoint.
"""
staff_user = UserFactory(is_staff=True, password=TEST_PASSWORD)
# Ignore UserProfileFactory creation events.
self.reset_tracker()
staff_client = APIClient()
staff_client.login(username=staff_user.username, password=TEST_PASSWORD)
with make_image_file() as image_file:
response = staff_client.post(self.url, {'file': image_file}, format='multipart')
self.check_response(response, 403)
self.check_images(False)
self.check_has_profile_image(False)
assert not mock_log.info.called
self.assert_no_events_were_emitted()
def test_upload_missing_file(self, mock_log):
"""
Test that omitting the file entirely from the POST results in HTTP 400.
"""
response = self.client.post(self.url, {}, format='multipart')
self.check_response(
response, 400,
expected_developer_message="No file provided for profile image",
expected_user_message="No file provided for profile image",
)
self.check_images(False)
self.check_has_profile_image(False)
assert not mock_log.info.called
self.assert_no_events_were_emitted()
def test_upload_not_a_file(self, mock_log):
"""
Test that sending unexpected data that isn't a file results in HTTP
400.
"""
response = self.client.post(self.url, {'file': 'not a file'}, format='multipart')
self.check_response(
response, 400,
expected_developer_message="No file provided for profile image",
expected_user_message="No file provided for profile image",
)
self.check_images(False)
self.check_has_profile_image(False)
assert not mock_log.info.called
self.assert_no_events_were_emitted()
def test_upload_validation(self, mock_log):
"""
Test that when upload validation fails, the proper HTTP response and
messages are returned.
"""
with make_image_file() as image_file:
with mock.patch(
'openedx.core.djangoapps.profile_images.views.validate_uploaded_image',
side_effect=ImageValidationError("test error message")
):
response = self.client.post(self.url, {'file': image_file}, format='multipart')
self.check_response(
response, 400,
expected_developer_message="test error message",
expected_user_message="test error message",
)
self.check_images(False)
self.check_has_profile_image(False)
assert not mock_log.info.called
self.assert_no_events_were_emitted()
@patch('PIL.Image.open')
def test_upload_failure(self, image_open, mock_log):
"""
Test that when upload validation fails, the proper HTTP response and
messages are returned.
"""
image_open.side_effect = [Exception("whoops"), None]
with make_image_file() as image_file:
with pytest.raises(Exception):
self.client.post(self.url, {'file': image_file}, format='multipart')
self.check_images(False)
self.check_has_profile_image(False)
assert not mock_log.info.called
self.assert_no_events_were_emitted()
@skip_unless_lms
@mock.patch('openedx.core.djangoapps.profile_images.views.log')
class ProfileImageViewDeleteTestCase(ProfileImageEndpointMixin, APITestCase):
"""
Tests for the DELETE method of the profile_image endpoint.
"""
_view_name = "accounts_profile_image_api"
def setUp(self):
super().setUp()
with make_image_file() as image_file:
create_profile_images(image_file, get_profile_image_names(self.user.username))
self.check_images()
set_has_profile_image(self.user.username, True, TEST_UPLOAD_DT)
# Ignore previous event
self.reset_tracker()
def check_remove_event_emitted(self):
"""
Make sure we emit a UserProfile event corresponding to the
profile_image_uploaded_at field changing.
"""
self.assert_user_setting_event_emitted(
setting='profile_image_uploaded_at', old=TEST_UPLOAD_DT, new=None
)
def test_anonymous_access(self, mock_log):
"""
Test that an anonymous client (not logged in) cannot call DELETE.
"""
self.check_anonymous_request_rejected('delete')
assert not mock_log.info.called
def test_remove_self(self, mock_log):
"""
Test that an authenticated user can DELETE to remove their own profile
images.
"""
response = self.client.delete(self.url)
self.check_response(response, 204)
self.check_images(False)
self.check_has_profile_image(False)
mock_log.info.assert_called_once_with(
LOG_MESSAGE_DELETE,
{'image_names': list(get_profile_image_names(self.user.username).values()), 'user_id': self.user.id}
)
self.check_remove_event_emitted()
def test_remove_other(self, mock_log):
"""
Test that an authenticated user cannot DELETE to remove another user's
profile images.
"""
different_user = UserFactory.create(password=TEST_PASSWORD)
# Ignore UserProfileFactory creation events.
self.reset_tracker()
different_client = APIClient()
different_client.login(username=different_user.username, password=TEST_PASSWORD)
response = different_client.delete(self.url)
self.check_response(response, 403)
self.check_images(True) # thumbnails should remain intact.
self.check_has_profile_image(True)
assert not mock_log.info.called
self.assert_no_events_were_emitted()
def test_remove_staff(self, mock_log):
"""
Test that an authenticated staff user can DELETE to remove another user's
profile images.
"""
staff_user = UserFactory(is_staff=True, password=TEST_PASSWORD)
staff_client = APIClient()
staff_client.login(username=staff_user.username, password=TEST_PASSWORD)
        response = staff_client.delete(self.url)
self.check_response(response, 204)
self.check_images(False)
self.check_has_profile_image(False)
mock_log.info.assert_called_once_with(
LOG_MESSAGE_DELETE,
{'image_names': list(get_profile_image_names(self.user.username).values()), 'user_id': self.user.id}
)
self.check_remove_event_emitted()
@patch('common.djangoapps.student.models.UserProfile.save')
def test_remove_failure(self, user_profile_save, mock_log):
"""
Test that when remove validation fails, the proper HTTP response and
messages are returned.
"""
user_profile_save.side_effect = [Exception("whoops"), None]
with pytest.raises(Exception):
self.client.delete(self.url)
self.check_images(True) # thumbnails should remain intact.
self.check_has_profile_image(True)
assert not mock_log.info.called
self.assert_no_events_were_emitted()
class DeprecatedProfileImageTestMixin(ProfileImageEndpointMixin):
"""
Actual tests for DeprecatedProfileImage.*TestCase classes defined here.
Requires:
self._view_name
self._replacement_method
"""
def test_unsupported_methods(self, mock_log):
"""
Test that GET, PUT, PATCH, and DELETE are not supported.
"""
assert 405 == self.client.get(self.url).status_code
assert 405 == self.client.put(self.url).status_code
assert 405 == self.client.patch(self.url).status_code
assert 405 == self.client.delete(self.url).status_code
assert not mock_log.info.called
self.assert_no_events_were_emitted()
def test_post_calls_replacement_view_method(self, mock_log):
"""
        Test that calls to this view pass through to the new view.
"""
with patch(self._replacement_method) as mock_method:
mock_method.return_value = HttpResponse()
self.client.post(self.url)
assert mock_method.called
assert not mock_log.info.called
self.assert_no_events_were_emitted()
@skip_unless_lms
@mock.patch('openedx.core.djangoapps.profile_images.views.log')
class DeprecatedProfileImageUploadTestCase(DeprecatedProfileImageTestMixin, APITestCase):
"""
Tests for the deprecated profile_image upload endpoint.
Actual tests defined on DeprecatedProfileImageTestMixin
"""
_view_name = 'profile_image_upload'
_replacement_method = 'openedx.core.djangoapps.profile_images.views.ProfileImageView.post'
@skip_unless_lms
@mock.patch('openedx.core.djangoapps.profile_images.views.log')
class DeprecatedProfileImageRemoveTestCase(DeprecatedProfileImageTestMixin, APITestCase):
"""
Tests for the deprecated profile_image remove endpoint.
Actual tests defined on DeprecatedProfileImageTestMixin
"""
_view_name = "profile_image_remove"
_replacement_method = 'openedx.core.djangoapps.profile_images.views.ProfileImageView.delete'
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class JobSpecification(Model):
"""Specifies details of the jobs to be created on a schedule.
:param priority: The priority of jobs created under this schedule.
Priority values can range from -1000 to 1000, with -1000 being the lowest
priority and 1000 being the highest priority. The default value is 0. This
priority is used as the default for all jobs under the job schedule. You
    can update a job's priority after it has been created by using the
update job API.
:type priority: int
:param display_name: The display name for jobs created under this
schedule. The name need not be unique and can contain any Unicode
characters up to a maximum length of 1024.
:type display_name: str
:param uses_task_dependencies: Whether tasks in the job can define
dependencies on each other. The default is false.
:type uses_task_dependencies: bool
:param on_all_tasks_complete: The action the Batch service should take
when all tasks in a job created under this schedule are in the completed
state. Note that if a job contains no tasks, then all tasks are considered
complete. This option is therefore most commonly used with a Job Manager
task; if you want to use automatic job termination without a Job Manager,
you should initially set onAllTasksComplete to noAction and update the job
properties to set onAllTasksComplete to terminateJob once you have
finished adding tasks. The default is noAction. Possible values include:
'noAction', 'terminateJob'
:type on_all_tasks_complete: str or :class:`OnAllTasksComplete
<azure.batch.models.OnAllTasksComplete>`
:param on_task_failure: The action the Batch service should take when any
task fails in a job created under this schedule. A task is considered to
    have failed if it has a failureInfo. A failureInfo is set
if the task completes with a non-zero exit code after exhausting its retry
count, or if there was an error starting the task, for example due to a
resource file download error. The default is noAction. Possible values
include: 'noAction', 'performExitOptionsJobAction'
:type on_task_failure: str or :class:`OnTaskFailure
<azure.batch.models.OnTaskFailure>`
:param constraints: The execution constraints for jobs created under this
schedule.
:type constraints: :class:`JobConstraints
<azure.batch.models.JobConstraints>`
:param job_manager_task: The details of a Job Manager task to be launched
when a job is started under this schedule. If the job does not specify a
Job Manager task, the user must explicitly add tasks to the job using the
Task API. If the job does specify a Job Manager task, the Batch service
creates the Job Manager task when the job is created, and will try to
schedule the Job Manager task before scheduling other tasks in the job.
:type job_manager_task: :class:`JobManagerTask
<azure.batch.models.JobManagerTask>`
:param job_preparation_task: The Job Preparation task for jobs created
under this schedule. If a job has a Job Preparation task, the Batch
service will run the Job Preparation task on a compute node before
starting any tasks of that job on that compute node.
:type job_preparation_task: :class:`JobPreparationTask
<azure.batch.models.JobPreparationTask>`
:param job_release_task: The Job Release task for jobs created under this
schedule. The primary purpose of the Job Release task is to undo changes
to compute nodes made by the Job Preparation task. Example activities
include deleting local files, or shutting down services that were started
as part of job preparation. A Job Release task cannot be specified without
also specifying a Job Preparation task for the job. The Batch service runs
the Job Release task on the compute nodes that have run the Job
Preparation task.
:type job_release_task: :class:`JobReleaseTask
<azure.batch.models.JobReleaseTask>`
:param common_environment_settings: A list of common environment variable
settings. These environment variables are set for all tasks in jobs
created under this schedule (including the Job Manager, Job Preparation
and Job Release tasks). Individual tasks can override an environment
setting specified here by specifying the same setting name with a
different value.
:type common_environment_settings: list of :class:`EnvironmentSetting
<azure.batch.models.EnvironmentSetting>`
:param pool_info: The pool on which the Batch service runs the tasks of
jobs created under this schedule.
:type pool_info: :class:`PoolInformation
<azure.batch.models.PoolInformation>`
:param metadata: A list of name-value pairs associated with each job
created under this schedule as metadata. The Batch service does not assign
any meaning to metadata; it is solely for the use of user code.
:type metadata: list of :class:`MetadataItem
<azure.batch.models.MetadataItem>`
"""
_validation = {
'pool_info': {'required': True},
}
_attribute_map = {
'priority': {'key': 'priority', 'type': 'int'},
'display_name': {'key': 'displayName', 'type': 'str'},
'uses_task_dependencies': {'key': 'usesTaskDependencies', 'type': 'bool'},
'on_all_tasks_complete': {'key': 'onAllTasksComplete', 'type': 'OnAllTasksComplete'},
'on_task_failure': {'key': 'onTaskFailure', 'type': 'OnTaskFailure'},
'constraints': {'key': 'constraints', 'type': 'JobConstraints'},
'job_manager_task': {'key': 'jobManagerTask', 'type': 'JobManagerTask'},
'job_preparation_task': {'key': 'jobPreparationTask', 'type': 'JobPreparationTask'},
'job_release_task': {'key': 'jobReleaseTask', 'type': 'JobReleaseTask'},
'common_environment_settings': {'key': 'commonEnvironmentSettings', 'type': '[EnvironmentSetting]'},
'pool_info': {'key': 'poolInfo', 'type': 'PoolInformation'},
'metadata': {'key': 'metadata', 'type': '[MetadataItem]'},
}
def __init__(self, pool_info, priority=None, display_name=None, uses_task_dependencies=None, on_all_tasks_complete=None, on_task_failure=None, constraints=None, job_manager_task=None, job_preparation_task=None, job_release_task=None, common_environment_settings=None, metadata=None):
self.priority = priority
self.display_name = display_name
self.uses_task_dependencies = uses_task_dependencies
self.on_all_tasks_complete = on_all_tasks_complete
self.on_task_failure = on_task_failure
self.constraints = constraints
self.job_manager_task = job_manager_task
self.job_preparation_task = job_preparation_task
self.job_release_task = job_release_task
self.common_environment_settings = common_environment_settings
self.pool_info = pool_info
self.metadata = metadata
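# Minimal construction sketch (pool id and other values below are hypothetical):
# from azure.batch.models import PoolInformation
# job_spec = JobSpecification(
#     pool_info=PoolInformation(pool_id='my-pool'),
#     priority=100,
#     display_name='nightly-run',
# )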
|
"""
Created on Sep 23, 2016
@author: andrew
"""
import asyncio
import logging
from math import floor
import discord
from discord.errors import NotFound
from discord.ext import commands
import api.avrae.utils.redisIO as redis
from api.avrae.cogs5e.funcs.lookupFuncs import compendium
from api.avrae.utils import checks, config
log = logging.getLogger(__name__)
COMMAND_PUBSUB_CHANNEL = f"admin-commands:{config.ENVIRONMENT}" # >:c
class AdminUtils(commands.Cog):
"""
Administrative Utilities.
"""
def __init__(self, bot):
self.bot = bot
bot.loop.create_task(self.load_admin())
bot.loop.create_task(self.admin_pubsub())
self.blacklisted_serv_ids = set()
self.whitelisted_serv_ids = set()
# pubsub stuff
self._ps_cmd_map = {} # set up in admin_pubsub()
self._ps_requests_pending = {}
# ==== setup tasks ====
async def load_admin(self):
self.bot.muted = set(await self.bot.rdb.jget('muted', []))
self.blacklisted_serv_ids = set(await self.bot.rdb.jget('blacklist', []))
self.whitelisted_serv_ids = set(await self.bot.rdb.jget('server-whitelist', []))
loglevels = await self.bot.rdb.jget('loglevels', {})
for logger, level in loglevels.items():
try:
logging.getLogger(logger).setLevel(level)
except Exception:
log.warning(f"Failed to reset loglevel of {logger}")
async def admin_pubsub(self):
self._ps_cmd_map = {
"leave": self._leave,
"loglevel": self._loglevel,
"changepresence": self._changepresence,
"reload_static": self._reload_static,
"reload_lists": self._reload_lists,
"serv_info": self._serv_info,
"whois": self._whois,
"ping": self._ping
}
channel = (await self.bot.rdb.subscribe(COMMAND_PUBSUB_CHANNEL))[0]
async for msg in channel.iter(encoding="utf-8"):
try:
await self._ps_recv(msg)
except Exception as e:
log.error(str(e))
# ==== commands ====
@commands.command(hidden=True)
@checks.is_owner()
async def blacklist(self, ctx, _id: int):
self.blacklisted_serv_ids.add(_id)
await self.bot.rdb.jset('blacklist', list(self.blacklisted_serv_ids))
resp = await self.pscall("reload_lists")
await self._send_replies(ctx, resp)
@commands.command(hidden=True)
@checks.is_owner()
async def whitelist(self, ctx, _id: int):
self.whitelisted_serv_ids.add(_id)
await self.bot.rdb.jset('server-whitelist', list(self.whitelisted_serv_ids))
resp = await self.pscall("reload_lists")
await self._send_replies(ctx, resp)
@commands.command(hidden=True)
@checks.is_owner()
async def chanSay(self, ctx, channel: int, *, message: str):
"""Low-level calls `bot.http.send_message()`."""
await self.bot.http.send_message(channel, message)
await ctx.send(f"Sent message.")
@commands.command(hidden=True)
@checks.is_owner()
async def servInfo(self, ctx, guild_id: int):
resp = await self.pscall("serv_info", kwargs={"guild_id": guild_id}, expected_replies=1)
await self._send_replies(ctx, resp)
@commands.command(hidden=True)
@checks.is_owner()
async def whois(self, ctx, user_id: int):
user = await self.bot.fetch_user(user_id)
resp = await self.pscall("whois", kwargs={"user_id": user_id})
await self._send_replies(ctx, resp, base=f"{user_id} is {user}:")
@commands.command(hidden=True)
@checks.is_owner()
async def pingall(self, ctx):
resp = await self.pscall("ping")
embed = discord.Embed(title="Cluster Pings")
for cluster, pings in sorted(resp.items(), key=lambda i: i[0]):
pingstr = "\n".join(f"Shard {shard}: {floor(ping * 1000)}ms" for shard, ping in pings.items())
avgping = floor((sum(pings.values()) / len(pings)) * 1000)
embed.add_field(name=f"Cluster {cluster}: {avgping}ms", value=pingstr)
await ctx.send(embed=embed)
@commands.command(hidden=True, name='leave')
@checks.is_owner()
async def leave_server(self, ctx, guild_id: int):
resp = await self.pscall("leave", kwargs={"guild_id": guild_id}, expected_replies=1)
await self._send_replies(ctx, resp)
@commands.command(hidden=True)
@checks.is_owner()
async def mute(self, ctx, target: int):
"""Mutes a person by ID."""
try:
target_user = await self.bot.fetch_user(target)
except NotFound:
target_user = "Not Found"
if target in self.bot.muted:
self.bot.muted.remove(target)
await ctx.send("{} ({}) unmuted.".format(target, target_user))
else:
self.bot.muted.add(target)
await ctx.send("{} ({}) muted.".format(target, target_user))
await self.bot.rdb.jset('muted', list(self.bot.muted))
resp = await self.pscall("reload_lists")
await self._send_replies(ctx, resp)
@commands.command(hidden=True)
@checks.is_owner()
async def loglevel(self, ctx, level: int, logger=None):
"""Changes the loglevel. Do not pass logger for global. Default: 20"""
loglevels = await self.bot.rdb.jget('loglevels', {})
loglevels[logger] = level
await self.bot.rdb.jset('loglevels', loglevels)
resp = await self.pscall("loglevel", args=[level], kwargs={"logger": logger})
await self._send_replies(ctx, resp)
@commands.command(hidden=True)
@checks.is_owner()
async def changepresence(self, ctx, status=None, *, msg=None):
"""Changes Avrae's presence. Status: online, idle, dnd"""
resp = await self.pscall("changepresence", kwargs={"status": status, "msg": msg})
await self._send_replies(ctx, resp)
@commands.command(hidden=True)
@checks.is_owner()
async def reload_static(self, ctx):
resp = await self.pscall("reload_static")
await self._send_replies(ctx, resp)
# ==== listener ====
@commands.Cog.listener()
async def on_guild_join(self, server):
if server.id in self.blacklisted_serv_ids:
return await server.leave()
elif server.id in self.whitelisted_serv_ids:
return
bots = sum(1 for m in server.members if m.bot)
members = len(server.members)
ratio = bots / members
if ratio >= 0.6 and members >= 20:
log.info("Detected bot collection server ({}), ratio {}. Leaving.".format(server.id, ratio))
try:
await server.owner.send("Please do not add me to bot collection servers. "
"Your server was flagged for having over 60% bots. "
"If you believe this is an error, please PM the bot author.")
except Exception:
pass
await asyncio.sleep(members / 200)
await server.leave()
# ==== helper ====
@staticmethod
async def _send_replies(ctx, resp, base=None):
sorted_replies = sorted(resp.items(), key=lambda i: i[0])
out = '\n'.join(f"{cid}: {rep}" for cid, rep in sorted_replies)
if base:
out = f"{base}\n{out}"
await ctx.send(out)
# ==== methods (called by pubsub) ====
async def _leave(self, guild_id):
guild = self.bot.get_guild(guild_id)
if not guild:
return False
await guild.leave()
return f"Left {guild.name}."
@staticmethod
async def _loglevel(level, logger=None):
logging.getLogger(logger).setLevel(level)
return f"Set level of {logger} to {level}."
async def _changepresence(self, status=None, msg=None):
statuslevel = {'online': discord.Status.online, 'idle': discord.Status.idle, 'dnd': discord.Status.dnd}
status = statuslevel.get(status)
await self.bot.change_presence(status=status, activity=discord.Game(msg or "D&D 5e | !help"))
return "Changed presence."
async def _reload_static(self):
await compendium.reload(self.bot.mdb)
return "OK"
async def _reload_lists(self):
self.blacklisted_serv_ids = set(await self.bot.rdb.jget('blacklist', []))
self.whitelisted_serv_ids = set(await self.bot.rdb.jget('server-whitelist', []))
self.bot.muted = set(await self.bot.rdb.jget('muted', []))
return "OK"
async def _serv_info(self, guild_id):
guild = self.bot.get_guild(guild_id)
if not guild:
channel = self.bot.get_channel(guild_id)
if not channel:
return False
else:
guild = channel.guild
try:
invite = (
await next(c for c in guild.channels if isinstance(c, discord.TextChannel)).create_invite()).url
except Exception:
invite = None
if invite:
out = f"{guild.name} ({guild.id}, <{invite}>)"
else:
out = f"{guild.name} ({guild.id})"
out += f"\n{len(guild.members)} members, {sum(m.bot for m in guild.members)} bot"
return out
async def _whois(self, user_id):
return [guild.id for guild in self.bot.guilds if user_id in {user.id for user in guild.members}]
async def _ping(self):
return dict(self.bot.latencies)
# ==== pubsub ====
async def pscall(self, command, args=None, kwargs=None, *, expected_replies=config.NUM_CLUSTERS or 1, timeout=30):
"""Makes an IPC call to all clusters. Returns a dict of {cluster_id: reply_data}."""
request = redis.PubSubCommand.new(self.bot, command, args, kwargs)
self._ps_requests_pending[request.id] = {}
await self.bot.rdb.publish(COMMAND_PUBSUB_CHANNEL, request.to_json())
for _ in range(timeout * 10):  # poll every 0.1s, giving up after `timeout` seconds
if len(self._ps_requests_pending[request.id]) >= expected_replies:
break
else:
await asyncio.sleep(0.1)
return self._ps_requests_pending.pop(request.id)
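# A hedged usage sketch of pscall (values are illustrative): a command is fanned out over redis
# pubsub and replies come back keyed by cluster id, so
#
#     resp = await self.pscall("ping")
#     # -> {0: {0: 0.051, 1: 0.048}, 1: {2: 0.055, 3: 0.049}}  (cluster -> {shard: latency})
#     resp = await self.pscall("serv_info", kwargs={"guild_id": 1234}, expected_replies=1)
#
# Clusters that do not answer within `timeout` seconds are simply missing from the returned dict.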
async def _ps_recv(self, message):
redis.pslogger.debug(message)
msg = redis.deserialize_ps_msg(message)
if msg.type == 'reply':
await self._ps_reply(msg)
elif msg.type == 'cmd':
await self._ps_cmd(msg)
async def _ps_reply(self, message: redis.PubSubReply):
if message.reply_to not in self._ps_requests_pending:
return
self._ps_requests_pending[message.reply_to][message.sender] = message.data
async def _ps_cmd(self, message: redis.PubSubCommand):
if message.command not in self._ps_cmd_map:
return
command = self._ps_cmd_map[message.command]
result = await command(*message.args, **message.kwargs)
if result is not False:
response = redis.PubSubReply.new(self.bot, reply_to=message.id, data=result)
await self.bot.rdb.publish(COMMAND_PUBSUB_CHANNEL, response.to_json())
# ==== setup ====
def setup(bot):
bot.add_cog(AdminUtils(bot))
|
from __future__ import absolute_import
from __future__ import unicode_literals
import json
import logging
import os
import re
import sys
import six
from docker.utils.ports import split_port
from jsonschema import Draft4Validator
from jsonschema import FormatChecker
from jsonschema import RefResolver
from jsonschema import ValidationError
from .errors import ConfigurationError
from .errors import VERSION_EXPLANATION
from .sort_services import get_service_name_from_network_mode
log = logging.getLogger(__name__)
DOCKER_CONFIG_HINTS = {
'cpu_share': 'cpu_shares',
'add_host': 'extra_hosts',
'hosts': 'extra_hosts',
'extra_host': 'extra_hosts',
'device': 'devices',
'link': 'links',
'memory_swap': 'memswap_limit',
'port': 'ports',
'privilege': 'privileged',
'priviliged': 'privileged',
'privilige': 'privileged',
'volume': 'volumes',
'workdir': 'working_dir',
}
VALID_NAME_CHARS = r'[a-zA-Z0-9\._\-]'
VALID_EXPOSE_FORMAT = r'^\d+(\-\d+)?(\/[a-zA-Z]+)?$'
@FormatChecker.cls_checks(format="ports", raises=ValidationError)
def format_ports(instance):
try:
split_port(instance)
except ValueError as e:
raise ValidationError(six.text_type(e))
return True
@FormatChecker.cls_checks(format="expose", raises=ValidationError)
def format_expose(instance):
if isinstance(instance, six.string_types):
if not re.match(VALID_EXPOSE_FORMAT, instance):
raise ValidationError(
"should be of the format 'PORT[/PROTOCOL]'")
return True
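# Illustrative values for the "expose" format above (a sketch, derived from VALID_EXPOSE_FORMAT):
# "8000", "8000-8010", and "8000/udp" are accepted, while "8000:8001" or "tcp/8000" are rejected.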
@FormatChecker.cls_checks(format="bool-value-in-mapping")
def format_boolean_in_environment(instance):
"""
Check if there is a boolean in the environment and display a warning.
Always return True here so the validation won't raise an error.
"""
if isinstance(instance, bool):
log.warning(
"There is a boolean value in the 'environment' key.\n"
"Environment variables can only be strings.\n"
"Please add quotes to any boolean values to make them strings "
"(e.g. 'True', 'yes', 'N').\n"
"This warning will become an error in a future release.\n"
)
return True
def match_named_volumes(service_dict, project_volumes):
service_volumes = service_dict.get('volumes', [])
for volume_spec in service_volumes:
if volume_spec.is_named_volume and volume_spec.external not in project_volumes:
raise ConfigurationError(
'Named volume "{0}" is used in service "{1}" but no'
' declaration was found in the volumes section.'.format(
volume_spec.repr(), service_dict.get('name')
)
)
def validate_top_level_service_objects(filename, service_dicts):
"""Perform some high level validation of the service name and value.
This validation must happen before interpolation, which must happen
before the rest of validation, which is why it's separate from the
rest of the service validation.
"""
for service_name, service_dict in service_dicts.items():
if not isinstance(service_name, six.string_types):
raise ConfigurationError(
"In file '{}' service name: {} needs to be a string, eg '{}'".format(
filename,
service_name,
service_name))
if not isinstance(service_dict, dict):
raise ConfigurationError(
"In file '{}' service '{}' doesn\'t have any configuration options. "
"All top level keys in your docker-compose.yml must map "
"to a dictionary of configuration options.".format(
filename, service_name
)
)
def validate_top_level_object(config_file):
if not isinstance(config_file.config, dict):
raise ConfigurationError(
"Top level object in '{}' needs to be an object not '{}'.".format(
config_file.filename,
type(config_file.config)))
def validate_ulimits(service_config):
ulimit_config = service_config.config.get('ulimits', {})
for limit_name, soft_hard_values in six.iteritems(ulimit_config):
if isinstance(soft_hard_values, dict):
if not soft_hard_values['soft'] <= soft_hard_values['hard']:
raise ConfigurationError(
"Service '{s.name}' has invalid ulimit '{ulimit}'. "
"'soft' value can not be greater than 'hard' value ".format(
s=service_config,
ulimit=ulimit_config))
def validate_extends_file_path(service_name, extends_options, filename):
"""
The service to be extended must either be defined in the config key 'file',
or within 'filename'.
"""
error_prefix = "Invalid 'extends' configuration for %s:" % service_name
if 'file' not in extends_options and filename is None:
raise ConfigurationError(
"%s you need to specify a 'file', e.g. 'file: something.yml'" % error_prefix
)
def validate_network_mode(service_config, service_names):
network_mode = service_config.config.get('network_mode')
if not network_mode:
return
if 'networks' in service_config.config:
raise ConfigurationError("'network_mode' and 'networks' cannot be combined")
dependency = get_service_name_from_network_mode(network_mode)
if not dependency:
return
if dependency not in service_names:
raise ConfigurationError(
"Service '{s.name}' uses the network stack of service '{dep}' which "
"is undefined.".format(s=service_config, dep=dependency))
def validate_depends_on(service_config, service_names):
for dependency in service_config.config.get('depends_on', []):
if dependency not in service_names:
raise ConfigurationError(
"Service '{s.name}' depends on service '{dep}' which is "
"undefined.".format(s=service_config, dep=dependency))
def get_unsupported_config_msg(path, error_key):
msg = "Unsupported config option for {}: '{}'".format(path_string(path), error_key)
if error_key in DOCKER_CONFIG_HINTS:
msg += " (did you mean '{}'?)".format(DOCKER_CONFIG_HINTS[error_key])
return msg
def anglicize_validator(validator):
if validator in ["array", "object"]:
return 'an ' + validator
return 'a ' + validator
def is_service_dict_schema(schema_id):
return schema_id == 'fields_schema_v1.json' or schema_id == '#/properties/services'
def handle_error_for_schema_with_id(error, path):
schema_id = error.schema['id']
if is_service_dict_schema(schema_id) and error.validator == 'additionalProperties':
return "Invalid service name '{}' - only {} characters are allowed".format(
# The service_name is the key to the json object
list(error.instance)[0],
VALID_NAME_CHARS)
if schema_id == '#/definitions/constraints':
# Build context could be in 'build' or 'build.context' and dockerfile could be
# in 'dockerfile' or 'build.dockerfile'
context = False
dockerfile = 'dockerfile' in error.instance
if 'build' in error.instance:
if isinstance(error.instance['build'], six.string_types):
context = True
else:
context = 'context' in error.instance['build']
dockerfile = dockerfile or 'dockerfile' in error.instance['build']
# TODO: only applies to v1
if 'image' in error.instance and context:
return (
"{} has both an image and build path specified. "
"A service can either be built to image or use an existing "
"image, not both.".format(path_string(path)))
if 'image' not in error.instance and not context:
return (
"{} has neither an image nor a build path specified. "
"At least one must be provided.".format(path_string(path)))
# TODO: only applies to v1
if 'image' in error.instance and dockerfile:
return (
"{} has both an image and alternate Dockerfile. "
"A service can either be built to image or use an existing "
"image, not both.".format(path_string(path)))
if error.validator == 'additionalProperties':
if schema_id == '#/definitions/service':
invalid_config_key = parse_key_from_error_msg(error)
return get_unsupported_config_msg(path, invalid_config_key)
if not error.path:
return '{}\n{}'.format(error.message, VERSION_EXPLANATION)
def handle_generic_service_error(error, path):
msg_format = None
error_msg = error.message
if error.validator == 'oneOf':
msg_format = "{path} {msg}"
config_key, error_msg = _parse_oneof_validator(error)
if config_key:
path.append(config_key)
elif error.validator == 'type':
msg_format = "{path} contains an invalid type, it should be {msg}"
error_msg = _parse_valid_types_from_validator(error.validator_value)
# TODO: no test case for this branch, there are no config options
# which exercise this branch
elif error.validator == 'required':
msg_format = "{path} is invalid, {msg}"
elif error.validator == 'dependencies':
config_key = list(error.validator_value.keys())[0]
required_keys = ",".join(error.validator_value[config_key])
msg_format = "{path} is invalid: {msg}"
path.append(config_key)
error_msg = "when defining '{}' you must set '{}' as well".format(
config_key,
required_keys)
elif error.cause:
error_msg = six.text_type(error.cause)
msg_format = "{path} is invalid: {msg}"
elif error.path:
msg_format = "{path} value {msg}"
if msg_format:
return msg_format.format(path=path_string(path), msg=error_msg)
return error.message
def parse_key_from_error_msg(error):
return error.message.split("'")[1]
def path_string(path):
return ".".join(c for c in path if isinstance(c, six.string_types))
def _parse_valid_types_from_validator(validator):
"""A validator value can be either an array of valid types or a string of
a valid type. Parse the valid types and prefix with the correct article.
"""
if not isinstance(validator, list):
return anglicize_validator(validator)
if len(validator) == 1:
return anglicize_validator(validator[0])
return "{}, or {}".format(
", ".join([anglicize_validator(validator[0])] + validator[1:-1]),
anglicize_validator(validator[-1]))
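# A few illustrative inputs/outputs for the helper above (a sketch, not an exhaustive spec):
#
#     _parse_valid_types_from_validator("string")                        -> "a string"
#     _parse_valid_types_from_validator(["string", "array"])             -> "a string, or an array"
#     _parse_valid_types_from_validator(["string", "number", "object"])  -> "a string, number, or an object"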
def _parse_oneof_validator(error):
"""oneOf has multiple schemas, so we need to reason about which schema, sub
schema or constraint the validation is failing on.
Inspecting the context value of a ValidationError gives us information about
which sub schema failed and which kind of error it is.
"""
types = []
for context in error.context:
if context.validator == 'required':
return (None, context.message)
if context.validator == 'additionalProperties':
invalid_config_key = parse_key_from_error_msg(context)
return (None, "contains unsupported option: '{}'".format(invalid_config_key))
if context.path:
return (
path_string(context.path),
"contains {}, which is an invalid type, it should be {}".format(
json.dumps(context.instance),
_parse_valid_types_from_validator(context.validator_value)),
)
if context.validator == 'uniqueItems':
return (
None,
"contains non unique items, please remove duplicates from {}".format(
context.instance),
)
if context.validator == 'type':
types.append(context.validator_value)
valid_types = _parse_valid_types_from_validator(types)
return (None, "contains an invalid type, it should be {}".format(valid_types))
def process_errors(errors, path_prefix=None):
"""jsonschema gives us an error tree full of information to explain what has
gone wrong. Process each error, pull out the relevant information, and rewrite
it as a helpful, targeted error message.
"""
path_prefix = path_prefix or []
def format_error_message(error):
path = path_prefix + list(error.path)
if 'id' in error.schema:
error_msg = handle_error_for_schema_with_id(error, path)
if error_msg:
return error_msg
return handle_generic_service_error(error, path)
return '\n'.join(format_error_message(error) for error in errors)
def validate_against_fields_schema(config_file):
schema_filename = "fields_schema_v{0}.json".format(config_file.version)
_validate_against_schema(
config_file.config,
schema_filename,
format_checker=["ports", "expose", "bool-value-in-mapping"],
filename=config_file.filename)
def validate_against_service_schema(config, service_name, version):
_validate_against_schema(
config,
"service_schema_v{0}.json".format(version),
format_checker=["ports"],
path_prefix=[service_name])
def _validate_against_schema(
config,
schema_filename,
format_checker=(),
path_prefix=None,
filename=None):
config_source_dir = os.path.dirname(os.path.abspath(__file__))
if sys.platform == "win32":
file_pre_fix = "///"
config_source_dir = config_source_dir.replace('\\', '/')
else:
file_pre_fix = "//"
resolver_full_path = "file:{}{}/".format(file_pre_fix, config_source_dir)
schema_file = os.path.join(config_source_dir, schema_filename)
with open(schema_file, "r") as schema_fh:
schema = json.load(schema_fh)
resolver = RefResolver(resolver_full_path, schema)
validation_output = Draft4Validator(
schema,
resolver=resolver,
format_checker=FormatChecker(format_checker))
errors = [error for error in sorted(validation_output.iter_errors(config), key=str)]
if not errors:
return
error_msg = process_errors(errors, path_prefix=path_prefix)
file_msg = " in file '{}'".format(filename) if filename else ''
raise ConfigurationError("Validation failed{}, reason(s):\n{}".format(
file_msg,
error_msg))
|
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
alpha, beta = 0.4, 0.5
def f(x1, x2):
return alpha * np.log(x1) + beta * np.log(x2)
p1, p2, m = 1.0, 1.2, 4
def budget_line(x1):
return (m - p1 * x1) / p2
x1star = (alpha / (alpha + beta)) * (m / p1)
x2star = budget_line(x1star)
maxval = f(x1star, x2star)
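# Sanity check (a small sketch): for Cobb-Douglas utility alpha*log(x1) + beta*log(x2) on the
# budget p1*x1 + p2*x2 = m, the maximizer is x1* = alpha/(alpha+beta) * m/p1 and
# x2* = beta/(alpha+beta) * m/p2, so x2star computed via the budget line should match the closed form.
assert abs(x2star - (beta / (alpha + beta)) * (m / p2)) < 1e-9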
xgrid = np.linspace(1e-2, 4, 50)
ygrid = xgrid
# === plot value function === #
fig, ax = plt.subplots(figsize=(8,6))
x, y = np.meshgrid(xgrid, ygrid)
if 1:
ax.plot(xgrid, budget_line(xgrid), 'k-', lw=2, alpha=0.8)
#ax.fill_between(xgrid, xgrid * 0.0, budget_line(xgrid), facecolor='blue', alpha=0.3)
if 0: # Annotate with text
ax.text(1, 1, r'$p_1 x_1 + p_2 x_2 < m$', fontsize=16)
ax.annotate(r'$p_1 x_1 + p_2 x_2 = m$',
xy=(2, budget_line(2)),
xycoords='data',
xytext=(40, 40),
textcoords='offset points',
fontsize=16,
arrowprops=dict(arrowstyle="->"))
if 1: # Add maximizer
ax.annotate(r'$(x_1^*, x_2^*)$',
xy=(x1star, x2star),
xycoords='data',
xytext=(30, 30),
textcoords='offset points',
fontsize=16,
arrowprops=dict(arrowstyle="->"))
ax.plot([x1star], [x2star], 'ro', alpha=0.6)
if 1: # Plot with contours
#points = [-10, -2, -1, 0, 0.4, 0.6, 0.8, 1.0, 1.2, 4]
points = [-10, -2, -1, 0, 0.6, 1.0, 1.2, 4]
ax.contourf(x, y, f(x, y), points, cmap=cm.jet, alpha=0.5)
cs = ax.contour(x, y, f(x, y), points, colors='k', linewidths=2, alpha=0.7,
antialiased=True)
plt.clabel(cs, inline=1, fontsize=12)
ax.set_xlim(0, 4)
ax.set_ylim(0, 4)
ax.set_xticks((0.0, 1.0, 2.0, 3.0))
ax.set_yticks((1.0, 2.0, 3.0))
ax.set_xlabel(r'$x_1$', fontsize=16)
ax.set_ylabel(r'$x_2$', fontsize=16)
plt.show()
|
import os
import subprocess
import glob
import hashlib
import shutil
from common.basedir import BASEDIR
from selfdrive.swaglog import cloudlog
# OPKR
#android_packages = ("com.mixplorer", "com.opkr.maphack", "com.gmd.hidesoftkeys", "com.google.android.inputmethod.korean", "com.skt.tmap.ku",)
android_packages = ("com.mixplorer","com.gmd.hidesoftkeys", "com.opkr.maphack", "com.mnsoft.mappyobn","com.phillit.akeyboard","com.goodappsoftware.laserlevel","net.androgames.level","com.moon.android.level" ) # "com.skt.tmap.ku",
def get_installed_apks():
dat = subprocess.check_output(["pm", "list", "packages", "-f"], encoding='utf8').strip().split("\n")
ret = {}
for x in dat:
if x.startswith("package:"):
v, k = x.split("package:")[1].split("=")
ret[k] = v
return ret
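# Hedged sketch of the mapping built above: `pm list packages -f` emits lines such as
# "package:/data/app/com.mixplorer-1/base.apk=com.mixplorer" (path is illustrative), which are
# parsed into {package_name: apk_path}, e.g. {"com.mixplorer": "/data/app/com.mixplorer-1/base.apk"}.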
def install_apk(path):
# can only install from world readable path
install_path = "/sdcard/%s" % os.path.basename(path)
shutil.copyfile(path, install_path)
ret = subprocess.call(["pm", "install", "-r", install_path])
os.remove(install_path)
return ret == 0
def appops_set(package, op, mode):
system(f"LD_LIBRARY_PATH= appops set {package} {op} {mode}")
def pm_grant(package, permission):
system(f"pm grant {package} {permission}")
def system(cmd):
try:
cloudlog.info("running %s" % cmd)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
cloudlog.event("running failed",
cmd=e.cmd,
output=e.output[-1024:],
returncode=e.returncode)
# *** external functions ***
def update_apks():
# install apks
installed = get_installed_apks()
install_apks = glob.glob(os.path.join(BASEDIR, "selfdrive/assets/addon/apk/*.apk"))
for apk in install_apks:
app = os.path.basename(apk)[:-4]
if app not in installed:
installed[app] = None
cloudlog.info("installed apks %s" % (str(installed), ))
for app in installed.keys():
apk_path = os.path.join(BASEDIR, "selfdrive/assets/addon/apk/"+app+".apk")
if not os.path.exists(apk_path):
continue
h1 = hashlib.sha1(open(apk_path, 'rb').read()).hexdigest()
h2 = None
if installed[app] is not None:
h2 = hashlib.sha1(open(installed[app], 'rb').read()).hexdigest()
cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))
if h2 is None or h1 != h2:
cloudlog.info("installing %s" % app)
success = install_apk(apk_path)
if not success:
cloudlog.info("needing to uninstall %s" % app)
system("pm uninstall %s" % app)
success = install_apk(apk_path)
assert success
def pm_apply_packages(cmd):
for p in android_packages:
system("pm %s %s" % (cmd, p))
if __name__ == "__main__":
update_apks()
|
################################################################################
# Author:Justin Vankirk
# Username:vankirkj
#
# Assignment:A03
# https://docs.google.com/document/d/1upvTP9Z7BehzY-HqRqI9Uh2F3cD19rZi_MtArv0Tn_M/edit?usp=sharing
#################################################################################
# Acknowledgements: Azah and I made the stars
#
#
#################################################################################
import turtle
def makesquare(): #makes the square outline
square = turtle.Turtle()
square.penup()
square.setpos(-241, 241)
square.pendown()
square.pensize(20)
square.color("white")
for i in range(4):
square.forward(500)
square.right(90)
"""
Docstring for function_1
"""
# ...
def boustraphedon():
fanciness = turtle.Turtle() #stars!!!
fanciness.pensize(20)
fanciness.penup()
fanciness.setpos(-221, 221)
fanciness.pendown()
fanciness.speed(100)
for i in range(5): ### This is the algorithm for our stars. ###
for i in range(11):
fanciness.forward(20)
fanciness.right(90)
fanciness.forward(20)
fanciness.right(90)
fanciness.forward(20)
fanciness.left(90)
fanciness.forward(20)
fanciness.left(90)
fanciness.forward(20)
fanciness.right(90)
fanciness.forward(20)
fanciness.right(90)
fanciness.forward(20)
fanciness.right(180)
fanciness.forward(60)
fanciness.left(180)
for i in range(11):
fanciness.forward(20)
fanciness.right(90)
fanciness.forward(20)
fanciness.right(90)
fanciness.forward(20)
fanciness.left(90)
fanciness.forward(20)
fanciness.left(90)
fanciness.forward(20)
fanciness.right(90)
fanciness.forward(20)
fanciness.right(90)
fanciness.forward(20)
fanciness.right(180)
fanciness.backward(20)
fanciness.left(180)
for i in range(11): #if we continued this last loop unchanged, it would be impossible to keep the snake inside the box.
fanciness.forward(20) #We removed the final line of code from this pass in order to fix that.
fanciness.right(90)
fanciness.forward(20)
fanciness.right(90)
fanciness.forward(20)
fanciness.left(90)
fanciness.forward(20)
fanciness.left(90)
fanciness.forward(20)
fanciness.right(90)
fanciness.forward(20)
fanciness.right(90)
fanciness.forward(20)
fanciness.right(180)
fanciness.forward(60)
fanciness.left(180)
for i in range(11):
fanciness.forward(20)
fanciness.right(90)
fanciness.forward(20)
fanciness.right(90)
fanciness.forward(20)
fanciness.left(90)
fanciness.forward(20)
fanciness.left(90)
fanciness.forward(20)
fanciness.right(90)
fanciness.forward(20)
fanciness.right(90)
fanciness.forward(20)
fanciness.right(180)
fanciness.left(180)
"""
Create the background
"""
# ...
def makeskyscraper():
skyscraper = turtle.Turtle() #makes the skyscrapers base
skyscraper.color(115,115,200)
skyscraper.width(10)
skyscraper.penup()
skyscraper.setpos(-100,-245)
skyscraper.pendown()
skyscraper.begin_fill()
skyscraper.forward(100)
skyscraper.left(90)
skyscraper.forward(400)
skyscraper.left(90)
skyscraper.forward(100)
skyscraper.left(90)
skyscraper.forward(400)
skyscraper.end_fill()
def door():
door = turtle.Turtle() #skyscraper door
door.penup()
door.setpos(-75, -245)
door.pendown()
door.begin_fill()
for i in range(2):
door.forward(30)
door.left(90)
door.forward(50)
door.left(90)
door.end_fill()
door.hideturtle()
def main():
"""
This actually runs the code.
"""
makesquare() # draw the white square border
boustraphedon() # draw the star pattern inside the square
makeskyscraper()
door()
window = turtle.Screen()
window.bgcolor("yellow")
window.colormode(255)
main()
window.exitonclick() |
# Copyright: (c) 2021, Ryan Gordon
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import (absolute_import, division, print_function)
# Additional import of common code
# from cp4s_common import create_authenticated_client
__metaclass__ = type
DOCUMENTATION = r'''
---
module: cp4s_create_artifact
short_description: A Module used to create an Artifact in CP4S or Resilient
# If this is part of a collection, you need to use semantic versioning,
# i.e. the version is of the form "2.5.0" and not "2.4".
version_added: "1.0.0"
description: "Note: This module depends on the global artifacts feature. This module is an example of how you can choose to use a module or a role to achieve a similar outcome. An almost identical piece of functionality exists in the CP4S role, but this gives a programmatic way to do it."
options:
type:
description: The type of the artifact to be created, e.g. 'DNS Name'.
required: true
type: str
value:
description: The value of the artifact to be created, e.g. 'www.google.com'.
required: true
type: str
other:
description:
- Control to pass any extra information that might be needed to create an artifact.
required: false
type: dict
author:
- Ryan Gordon (@Ryan-Gordon)
'''
EXAMPLES = r'''
# Pass in a message
- name: Test creation of a DNS Name artifact
ryan_gordon1.cloud_pak_for_security.cp4s_create_artifact:
type: 'DNS Name'
value: 'google.com'
# pass in a message and have changed true
- name: Test creation of a IP Address artifact
ryan_gordon1.cloud_pak_for_security.cp4s_create_artifact:
type: 'IP Address'
value: '9.9.9.9'
'''
RETURN = r'''
# The values documented here reflect what run_module() below actually returns.
changed:
    description: Whether the module changed anything (i.e. an artifact was created).
    type: bool
    returned: always
note_creation_result:
    description: The raw API response from the artifact creation call.
    type: dict
    returned: success
'''
import resilient
from ansible.module_utils.basic import AnsibleModule
from resilient_lib import close_incident
def run_module():
# define available arguments/parameters a user can pass to the module
module_args = dict(
type=dict(type='str', required=True),
value=dict(type='str', required=True),
other=dict(type='dict', required=False, default={})
)
# seed the result dict in the object
# we primarily care about changed and state
# changed is if this module effectively modified the target
# state will include any data that you want your module to pass back
# for consumption, for example, in a subsequent task
result = dict(
changed=False,
original_message='',
message=''
)
# the AnsibleModule object will be our abstraction working with Ansible
# this includes instantiation, a couple of common attr would be the
# args/params passed to the execution, as well as if the module
# supports check mode
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
# if the user is working with this module in only check mode we do not
# want to make any changes to the environment, just return the current
# state with no modifications
if module.check_mode:
module.exit_json(**result)
client = create_authenticated_client()
# TODO: Review if we can make the exception less bare, or if we can use a conditional for the changed property instead
try: # Try to make the API call
# Make an API call to the global artifacts endpoint for the org
# Pass the provided name and value
# the 'other' module param is dict and is expanded to provide a way to add any other properties to the call
response = client.post('/artifacts', {
'type': {
'name': module.params['type']
},
'value': module.params['value'],
**module.params['other']
})
# Add the response to the result to return
result.update({"note_creation_result": response})
except Exception as e: # catch here so the else branch below only runs on success; failures are reported via fail_json
# raise # raises the exact error that would have otherwise been raised.
module.fail_json(msg=u'An exception occurred when creating an artifact: {}'.format(e), **result)
else: # if no exceptions are raised we can assume the API call succeeded and changed state
result['changed'] = True
# in the event of a successful module execution, you will want to
# simply call AnsibleModule.exit_json(), passing in the key/value results
module.exit_json(**result)
def create_authenticated_client():
"""create_authenticated_client uses the resilient package
to gather values from a standard app.config file; the configuration file
used for an Integration Server or App Host App.
This means all credentials needed to run this module can be kept
separate and we can also avoid var prompts.
Note: If you're running this module on a host other than localhost,
that host needs to have an app.config file or you need to copy one over.
:return: An authenticated rest client to CP4S or Resilient
:rtype: SimpleClient
"""
# Create Resilient API Client
resilient_parser = resilient.ArgumentParser(
config_file=resilient.get_config_file())
resilient_opts = resilient_parser.parse_known_args()
# Instantiate a client using the gathered opts
return resilient.get_client(resilient_opts[0])
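# A hedged usage sketch: the SimpleClient returned above is what run_module() uses for its REST
# call, e.g. (endpoint and payload mirror run_module; the artifact values are illustrative)
#
#     client = create_authenticated_client()
#     client.post('/artifacts', {'type': {'name': 'DNS Name'}, 'value': 'example.com'})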
def main():
run_module()
if __name__ == '__main__':
main()
|
# Reusable components like navbar/header, footer etc.
# ------------------------------------------------------------
import dash_html_components as html
import dash_bootstrap_components as dbc
header = dbc.Row(children=[
dbc.Col(html.Img(src='/assets/logo.png',
height='30px'),
width='auto',
align="center"),
dbc.Col('MyPortfolio',
className="h1 text-uppercase font-weight-bold",
width=True,
align="center"),
],
className='p-3 bg-dark text-white',
style={'fontFamily': 'Inconsolata'})
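# A minimal usage sketch (assumes a standard Dash app; the app name and theme are illustrative):
#
#     import dash
#     app = dash.Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
#     app.layout = html.Div([header])  # reuse the shared header component defined above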
|
from datetime import datetime
from typing import Optional, List
from pydantic import BaseModel, root_validator
from application.models.schema.utils import SuccessResponse
from application.utils.api_response import CustomException
class BasePatient(BaseModel):
first_name: str
last_name: str
email: Optional[str]
phone: str
class PatientCreate(BasePatient):
password: str
@root_validator(pre=True)
def check(cls, values):
errors = []
for k, v in values.items():
if v == 'string' and k in ['first_name', 'last_name', 'phone', 'password']:
errors.append({k: f'{k.replace("_", " ")} is required'})
if errors:
raise CustomException(error=errors)
return values
class _Patient(BasePatient):
id: Optional[int]
full_name: str
created_at: datetime
updated_at: datetime
class Config:
orm_mode = True
class Patient(SuccessResponse):
data: Optional[_Patient]
class PatientList(SuccessResponse):
data: Optional[List[_Patient]]
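# A hedged sketch of the PatientCreate root_validator above: payloads still carrying the Swagger UI
# placeholder value 'string' for a required field are rejected, e.g.
#
#     PatientCreate(first_name='string', last_name='Doe', phone='555-0100', password='pw')
#     # -> raises CustomException(error=[{'first_name': 'first name is required'}])
#
# while payloads with real values pass through the validator unchanged.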
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import IECore
import Gaffer
import GafferTest
import GafferImage
import GafferImageTest
class ImagePlugTest( GafferImageTest.ImageTestCase ) :
def testTileOrigin( self ) :
ts = GafferImage.ImagePlug.tileSize()
testCases = [
( IECore.V2i( ts-1, ts-1 ), IECore.V2i( 0, 0 ) ),
( IECore.V2i( ts, ts-1 ), IECore.V2i( ts, 0 ) ),
( IECore.V2i( ts, ts ), IECore.V2i( ts, ts ) ),
( IECore.V2i( ts*3-1, ts+5 ), IECore.V2i( ts*2, ts ) ),
( IECore.V2i( ts*3, ts-5 ), IECore.V2i( ts*3, 0 ) ),
( IECore.V2i( -ts+ts/2, 0 ), IECore.V2i( -ts, 0 ) ),
( IECore.V2i( ts*5+ts/3, -ts*4 ), IECore.V2i( ts*5, -ts*4 ) ),
( IECore.V2i( -ts+1, -ts-1 ), IECore.V2i( -ts, -ts*2 ) )
]
for input, expectedResult in testCases :
self.assertEqual(
GafferImage.ImagePlug.tileOrigin( input ),
expectedResult
)
def testTileIndex( self ) :
ts = GafferImage.ImagePlug.tileSize()
for position, tileIndex in [
( IECore.V2i( -ts ), IECore.V2i( -1 ) ),
( IECore.V2i( -ts -1 ), IECore.V2i( -2 ) ),
( IECore.V2i( 0 ), IECore.V2i( 0 ) ),
( IECore.V2i( ts ), IECore.V2i( 1 ) ),
( IECore.V2i( ts - 1 ), IECore.V2i( 0 ) ),
] :
self.assertEqual(
GafferImage.ImagePlug.tileIndex( position ),
tileIndex
)
def testDefaultChannelNamesMethod( self ) :
channelNames = GafferImage.ImagePlug()['channelNames'].defaultValue()
self.assertTrue( 'R' in channelNames )
self.assertTrue( 'G' in channelNames )
self.assertTrue( 'B' in channelNames )
def testCreateCounterpart( self ) :
p = GafferImage.ImagePlug()
p2 = p.createCounterpart( "a", Gaffer.Plug.Direction.Out )
self.assertEqual( p2.getName(), "a" )
self.assertEqual( p2.direction(), Gaffer.Plug.Direction.Out )
self.assertEqual( p2.getFlags(), p.getFlags() )
def testDynamicSerialisation( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["p"] = GafferImage.ImagePlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
ss = s.serialise()
s = Gaffer.ScriptNode()
s.execute( ss )
self.assertTrue( isinstance( s["n"]["p"], GafferImage.ImagePlug ) )
self.assertEqual( s["n"]["p"].getFlags(), Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
def testBoxPromotion( self ) :
b = Gaffer.Box()
b["n"] = GafferImage.Grade()
self.assertTrue( Gaffer.PlugAlgo.canPromote( b["n"]["in"] ) )
self.assertTrue( Gaffer.PlugAlgo.canPromote( b["n"]["out"] ) )
i = Gaffer.PlugAlgo.promote( b["n"]["in"] )
o = Gaffer.PlugAlgo.promote( b["n"]["out"] )
self.assertEqual( b["n"]["in"].getInput(), i )
self.assertEqual( o.getInput(), b["n"]["out"] )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( b["n"]["in"] ) )
self.assertTrue( Gaffer.PlugAlgo.isPromoted( b["n"]["out"] ) )
def testTypeNamePrefixes( self ) :
self.assertTypeNamesArePrefixed( GafferImage )
self.assertTypeNamesArePrefixed( GafferImageTest )
def testDefaultNames( self ) :
self.assertDefaultNamesAreCorrect( GafferImage )
self.assertDefaultNamesAreCorrect( GafferImageTest )
def testImageHash( self ) :
r = GafferImage.ImageReader()
r['fileName'].setValue( os.path.expandvars( "$GAFFER_ROOT/python/GafferImageTest/images/checker.exr" ) )
h = r['out'].imageHash()
for i in range( 20 ) :
self.assertEqual( h, r['out'].imageHash() )
r['refreshCount'].setValue( 2 )
self.assertNotEqual( h, r['out'].imageHash() )
def testDefaultFormatForImage( self ) :
constant = GafferImage.Constant()
with Gaffer.Context() as c :
GafferImage.FormatPlug.setDefaultFormat( c, GafferImage.Format( 100, 200 ) )
self.assertEqual( constant["out"].image().displayWindow, IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 99, 199 ) ) )
GafferImage.FormatPlug.setDefaultFormat( c, GafferImage.Format( 200, 300 ) )
self.assertEqual( constant["out"].image().displayWindow, IECore.Box2i( IECore.V2i( 0 ), IECore.V2i( 199, 299 ) ) )
if __name__ == "__main__":
unittest.main()
|
import numpy
import numpy as np
from sklearn.svm import SVR
#--------------------------------------------------------------------
# Hyperparameters
#--------------------------------------------------------------------
lr = 0.001 # learning rate
#--------------------------------------------------------------------
# Multilayer network
#--------------------------------------------------------------------
class Sequential:
def __init__(self,modules): self.modules = modules
def forward(self,X):
for m in self.modules: X = m.forward(X)
return X
def backward(self,DY):
for m in self.modules[::-1]: DY = m.backward(DY)
return DY
def update(self):
for m in self.modules: m.update()
#--------------------------------------------------------------------
# Linear layer
#--------------------------------------------------------------------
class Linear:
def __init__(self,m,n,last=False):
self.m = m
self.n = n
self.W = numpy.random.uniform(-1/self.m**.5,1/self.m**.5,[m,n]).astype('float32')
self.B = numpy.zeros([n]).astype('float32')
if last: self.W *= 0
def forward(self,X):
self.X = X
return numpy.dot(X,self.W)+self.B
def backward(self,DY):
DX = numpy.dot(DY,self.W.T)
self.DW = (numpy.dot(self.X.T,DY))/ self.m**.5
self.DB = (DY.sum(axis=0)) / self.m**.25
return DX*(self.m**.5/self.n**.5)
def update(self):
self.W -= lr*self.DW
self.B -= lr*self.DB
#--------------------------------------------------------------------
# Hyperbolic tangent layer
#--------------------------------------------------------------------
class Tanh:
def __init__(self): pass
def forward(self,X): self.Y = numpy.tanh(X); return self.Y
def backward(self,DY): return DY*(1-self.Y**2)
def update(self): pass
#====================================================================
# Test
#====================================================================
# Prepare data
nbsamples=200
nbinputdims=100
nboutputdims=1
# Random regression task
X = numpy.random.normal(0,1,[nbsamples,nbinputdims])
T = numpy.random.normal(0,1,[nbsamples]) # 1-D targets for the SVR baseline; reshaped to a column below for the network
# Initialize network
nn = Sequential([
Linear(nbinputdims,200),
Tanh(),
Linear(200,20),
Tanh(),
Linear(20,nboutputdims)
])
clf = SVR(C=1000.0, epsilon=0.0002)
clf.fit(X, T)
ypred = clf.predict(X)
print(((ypred-T)**2).sum()) # squared error of the SVR baseline
T = T[:,np.newaxis]
# Training
for t in range(1000):
Y = nn.forward(X)
nn.backward(Y-T)
nn.update()
if t % 100 == 0: print(t,((Y-T)**2).sum())
|
import os
import signal
import sys
import time
from abc import ABCMeta
from abc import abstractmethod
from contextlib import contextmanager
from functools import partial
from shutil import rmtree
from types import FrameType
from typing import Iterator
from typing import List
from typing import Optional
from typing import Tuple
import colorlog
import staticconf
from backuppy.blob import apply_diff
from backuppy.blob import compute_diff
from backuppy.crypto import compress_and_encrypt
from backuppy.crypto import decrypt_and_unpack
from backuppy.crypto import generate_key_pair
from backuppy.exceptions import DiffTooLargeException
from backuppy.exceptions import ManifestLockedException
from backuppy.io import compute_sha
from backuppy.io import IOIter
from backuppy.manifest import lock_manifest
from backuppy.manifest import Manifest
from backuppy.manifest import MANIFEST_FILE
from backuppy.manifest import MANIFEST_KEY_FILE
from backuppy.manifest import MANIFEST_PREFIX
from backuppy.manifest import ManifestEntry
from backuppy.manifest import unlock_manifest
from backuppy.options import DEFAULT_OPTIONS
from backuppy.options import OptionsDict
from backuppy.util import get_scratch_dir
from backuppy.util import path_join
from backuppy.util import regex_search_list
from backuppy.util import sha_to_path
logger = colorlog.getLogger(__name__)
_UNLOCKED_STORE = None
_SIGNALS_TO_HANDLE = (signal.SIGINT, signal.SIGTERM)
class BackupStore(metaclass=ABCMeta):
backup_name: str
_manifest: Optional[Manifest]
def __init__(self, backup_name: str) -> None:
""" A BackupStore object controls all the reading and writing of data from a particular
backup location (local, S3, ssh, etc)
This is an abstract class that needs to be subclassed with _save, _load, _query, and _delete
methods that determine how to actually read and write data from/to the store. The remaining
methods for this class are common across different types of stores, and are what establish
many of the "safety" guarantees of backuppy.
:param backup_name: the name of the backup this store corresponds to in the
configuration file
"""
self.backup_name = backup_name
self.config = staticconf.NamespaceReaders(backup_name)
self._manifest = None
@contextmanager
def unlock(self, *, dry_run=False, preserve_scratch=False) -> Iterator:
"""
Unlock the backup store and prep for work
The backup store is responsible for the manifest in the store; unfortunately, since
sqlite3 doesn't accept an open file descriptor when opening a DB connection, we have to
circumvent some of the IOIter functionality and do it ourselves. We wrap this in a
context manager so this can be abstracted away and still ensure that proper cleanup happens.
:param dry_run: whether to actually save any data or not
:param preserve_scratch: whether to clean up the scratch directory before we exit; mainly
used for debugging purposes
"""
# we have to create the scratch dir regardless of whether --dry-run is enabled
# because we still need to be able to figure out what's changed and what we should do
rmtree(get_scratch_dir(), ignore_errors=True)
os.makedirs(get_scratch_dir(), exist_ok=True)
try:
manifests = sorted(self._query(MANIFEST_PREFIX))
if not manifests:
logger.warning(
'''
********************************************************************
This looks like a new backup location; if you are not expecting this
message, someone may be tampering with your backup!
********************************************************************
'''
)
self._manifest = Manifest(os.path.join(
get_scratch_dir(),
MANIFEST_FILE.format(ts=time.time()),
))
else:
self._manifest = unlock_manifest(
manifests[-1],
self.config.read('private_key_filename', default=''),
self._load,
self.options,
)
_register_unlocked_store(self, dry_run, preserve_scratch)
yield
finally:
self.do_cleanup(dry_run, preserve_scratch)
_unregister_store()
def save_if_new(
self,
abs_file_name: str,
*,
dry_run: bool = False,
force_copy: bool = False,
) -> None:
""" The main workhorse function; determine if a file has changed, and if so, back it up!
:param abs_file_name: the name of the file under consideration
:param dry_run: whether to actually save any data or not
"""
curr_entry, new_entry = self.manifest.get_entry(abs_file_name), None
with IOIter(abs_file_name) as new_file:
new_sha = compute_sha(new_file)
# If the file hasn't been backed up before, or if it's been deleted previously, save a
# new copy; we make a copy here to ensure that the contents don't change while backing
# the file up, and that we have the correct sha
if force_copy or not curr_entry or not curr_entry.sha:
new_entry = self._write_copy(abs_file_name, new_sha, new_file, force_copy, dry_run)
# If the file has been backed up, check to see if it's changed by comparing shas
elif new_sha != curr_entry.sha:
if regex_search_list(abs_file_name, self.options['skip_diff_patterns']):
new_entry = self._write_copy(abs_file_name, new_sha, new_file, False, dry_run)
else:
new_entry = self._write_diff(
abs_file_name,
new_sha,
curr_entry,
new_file,
dry_run,
)
# If the sha is the same but metadata on the file has changed, we just store the updated
# metadata
elif (
new_file.uid != curr_entry.uid or
new_file.gid != curr_entry.gid or
new_file.mode != curr_entry.mode
):
logger.info(f'Saving changed metadata for {abs_file_name}')
new_entry = ManifestEntry(
abs_file_name,
curr_entry.sha,
curr_entry.base_sha,
new_file.uid,
new_file.gid,
new_file.mode,
curr_entry.key_pair, # NOTE: this is safe because the data has not changed!
curr_entry.base_key_pair,
)
else:
# we don't want to flood the log with all the files that haven't changed
logger.debug(f'{abs_file_name} is up to date!')
if new_entry and not dry_run:
self.manifest.insert_or_update(new_entry)
return # test_m2_crash_after_file_save
def restore_entry(
self,
entry: ManifestEntry,
orig_file: IOIter,
diff_file: IOIter,
restore_file: IOIter,
) -> None:
if entry.base_sha:
assert entry.base_key_pair # make mypy happy; this cannot be None here
self.load(entry.base_sha, orig_file, entry.base_key_pair)
self.load(entry.sha, diff_file, entry.key_pair)
apply_diff(orig_file, diff_file, restore_file)
else:
self.load(entry.sha, restore_file, entry.key_pair)
def save(self, src: IOIter, dest: str, key_pair: bytes) -> bytes:
""" Wrapper around the _save function that converts the SHA to a path and does encryption
:param src: the file to save
:param dest: the name of the file to write to in the store
:param key_pair: an AES key + nonce to use to encrypt the file
:returns: the HMAC of the saved file
"""
dest = sha_to_path(dest)
# We compress and encrypt the file on the local file system, and then pass the encrypted
# file to the backup store to handle atomically
filename = path_join(get_scratch_dir(), dest)
with IOIter(filename) as encrypted_save_file:
signature = compress_and_encrypt(src, encrypted_save_file, key_pair, self.options)
self._save(encrypted_save_file, dest) # test_f1_crash_file_save
os.remove(filename)
return signature
def load(
self,
src: str,
dest: IOIter,
key_pair: bytes,
) -> IOIter:
""" Wrapper around the _load function that converts the SHA to a path """
src = sha_to_path(src)
with IOIter() as encrypted_load_file:
self._load(src, encrypted_load_file)
decrypt_and_unpack(encrypted_load_file, dest, key_pair, self.options)
dest.fd.seek(0)
return dest
def rotate_manifests(self) -> None:
max_versions = self.options['max_manifest_versions']
if not max_versions:
return # this just means that there's no configured limit to the number of versions
manifests = sorted(self._query(MANIFEST_PREFIX))
for manifest in manifests[:-max_versions]:
ts = manifest.split('.', 1)[1]
self._delete(manifest)
self._delete(MANIFEST_KEY_FILE.format(ts=ts))
def do_cleanup(
self,
dry_run: bool,
preserve_scratch: bool,
) -> None:
""" Ensure that the backup store gets cleaned up appropriately before we shut down; this
can be called as a signal handler, hence the first two arguments. Otherwise this should
be called whenever we lock the store.
:param signum: if called as a signal handler, the signal num; otherwise None
:param frame: if called as a signal handler, the stack trace; otherwise None
:param dry_run: whether to actually save any data or not
:param preserve_scratch: whether to clean up the scratch directory before we exit; mainly
used for debugging purposes
"""
if not self._manifest:
return
if not self._manifest.changed: # test_m1_crash_before_save
logger.info('No changes detected; nothing to do')
elif not dry_run:
lock_manifest(
self._manifest,
self.config.read('private_key_filename', default=''),
self._save,
self._load,
self.options,
)
self.rotate_manifests()
if not preserve_scratch:
rmtree(get_scratch_dir(), ignore_errors=True)
self._manifest = None # test_m1_crash_after_save
def _write_copy(
self,
abs_file_name: str,
new_sha: str,
file_obj: IOIter,
force_copy: bool,
dry_run: bool,
) -> ManifestEntry:
logger.info(f'Saving a new copy of {abs_file_name}')
entry_data = None
if not force_copy:
entry_data = self._find_existing_entry_data(new_sha) # test_f3_file_changed_while_saving
key_pair, base_sha, base_key_pair = entry_data or (generate_key_pair(), None, None)
new_entry = ManifestEntry( # test_m2_crash_before_file_save
abs_file_name,
new_sha,
base_sha,
file_obj.uid,
file_obj.gid,
file_obj.mode,
key_pair,
base_key_pair,
)
if not dry_run and not entry_data:
signature = self.save(file_obj, new_entry.sha, key_pair)
new_entry.key_pair = key_pair + signature # append the HMAC before writing to db
return new_entry
def _write_diff(
self,
abs_file_name: str,
new_sha: str,
curr_entry: ManifestEntry,
file_obj: IOIter,
dry_run: bool,
) -> ManifestEntry:
logger.info(f'Saving a diff for {abs_file_name}')
entry_data = self._find_existing_entry_data(new_sha)
# If the current entry is itself a diff, get its base; otherwise, this
# entry becomes the base
if entry_data:
key_pair, base_sha, base_key_pair = entry_data
elif curr_entry.base_sha:
key_pair = generate_key_pair()
base_sha = curr_entry.base_sha
base_key_pair = curr_entry.base_key_pair
else:
key_pair = generate_key_pair()
base_sha = curr_entry.sha
base_key_pair = curr_entry.key_pair
# compute a diff between the version we've previously backed up and the new version
new_entry = ManifestEntry(
abs_file_name,
new_sha,
base_sha,
file_obj.uid,
file_obj.gid,
file_obj.mode,
key_pair,
base_key_pair,
)
if not entry_data:
assert base_sha and base_key_pair
with IOIter() as orig_file, IOIter() as diff_file:
orig_file = self.load(base_sha, orig_file, base_key_pair)
try:
fd_diff = compute_diff(
orig_file,
file_obj,
diff_file,
self.options['discard_diff_percentage'],
)
except DiffTooLargeException:
logger.info('The computed diff was too large; saving a copy instead.')
logger.info(
'(you can configure this threshold with the discard_diff_percentage option)'
)
file_obj.fd.seek(0)
return self._write_copy(abs_file_name, new_sha, file_obj, False, dry_run)
new_entry.sha = new_sha
if not dry_run:
signature = self.save(fd_diff, new_entry.sha, key_pair)
new_entry.key_pair = key_pair + signature
return new_entry
def _find_existing_entry_data(
self,
sha: str,
) -> Optional[Tuple[bytes, Optional[str], Optional[bytes]]]:
entries = self.manifest.get_entries_by_sha(sha)
if entries:
assert len({e.key_pair for e in entries}) == 1
logger.debug('Found pre-existing sha in the manifest, using that data')
return entries[0].key_pair, entries[0].base_sha, entries[0].base_key_pair
else:
return None
@abstractmethod
def _save(self, src: IOIter, dest: str) -> None: # pragma: no cover
pass
@abstractmethod
def _load(self, path: str, tmpfile: IOIter) -> IOIter: # pragma: no cover
pass
@abstractmethod
def _query(self, prefix: str) -> List[str]: # pragma: no cover
pass
@abstractmethod
def _delete(self, filename: str) -> None: # pragma: no cover
pass
@property
def manifest(self) -> Manifest:
""" Wrapper around the manifest to make sure we've unlocked it in a
with unlock()... block
"""
if not self._manifest:
raise ManifestLockedException('The manifest is currently locked')
return self._manifest
@property
def options(self) -> OptionsDict:
try:
options = self.config.read_list('options', default=[{}])[0]
except IndexError:
options = dict()
return {**DEFAULT_OPTIONS, **options} # type: ignore
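# A hedged sketch of the subclassing contract described in the BackupStore docstring (a
# hypothetical in-memory store, not one of backuppy's real backends): a concrete store only has
# to define how encrypted blobs are physically written, read, listed, and deleted.
#
#     class InMemoryBackupStore(BackupStore):
#         def __init__(self, backup_name: str) -> None:
#             super().__init__(backup_name)
#             self._blobs: dict = {}
#
#         def _save(self, src: IOIter, dest: str) -> None:
#             ...  # persist the (already compressed and encrypted) scratch file under the key `dest`
#
#         def _load(self, path: str, tmpfile: IOIter) -> IOIter:
#             ...  # copy the stored blob for `path` into `tmpfile` and return it
#
#         def _query(self, prefix: str) -> List[str]:
#             return [key for key in self._blobs if key.startswith(prefix)]
#
#         def _delete(self, filename: str) -> None:
#             self._blobs.pop(filename, None)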
def _cleanup_and_exit(signum: int, frame: FrameType, dry_run: bool, preserve_scratch: bool) -> None:
""" Signal handler to safely clean up after Ctrl-C or SIGTERM; this minimizes the amount of
duplicate work we have to do in the event that we cancel the backup partway through
"""
signal.signal(signum, signal.SIG_IGN)
logger.info(f'Received signal {signum}, cleaning up the backup store')
if _UNLOCKED_STORE:
try:
_UNLOCKED_STORE.do_cleanup(dry_run, preserve_scratch)
except Exception as e:
logger.exception(f'Shutdown was requested, but there was an error cleaning up: {str(e)}')
sys.exit(1)
logger.info('Cleanup complete; shutting down')
sys.exit(0)
def _register_unlocked_store(store: BackupStore, dry_run: bool, preserve_scratch: bool) -> None:
global _UNLOCKED_STORE
_UNLOCKED_STORE = store
sig_handler = partial(
_cleanup_and_exit,
dry_run=dry_run,
preserve_scratch=preserve_scratch
)
for sig in _SIGNALS_TO_HANDLE:
signal.signal(sig, sig_handler)
def _unregister_store() -> None:
global _UNLOCKED_STORE
_UNLOCKED_STORE = None
for sig in _SIGNALS_TO_HANDLE:
signal.signal(sig, signal.SIG_DFL)
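# Illustrative sketch (added; not part of the original module): how the two
# helpers above are meant to bracket work on an unlocked store. `store` is a
# hypothetical unlocked BackupStore instance obtained elsewhere in this module.
def _example_signal_guard(store: BackupStore, dry_run: bool = False, preserve_scratch: bool = False) -> None:
    _register_unlocked_store(store, dry_run, preserve_scratch)
    try:
        pass  # ... perform the backup while SIGINT/SIGTERM trigger safe cleanup ...
    finally:
        _unregister_store()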
|
# Copyright (c) 2008, Kundan Singh. All rights reserved. See LICENSING for details.
# @implements RFC3263 (Locating SIP servers)
'''
Uses DNS to resolve a domain name into SIP servers using NAPTR, SRV and A/AAAA records.
TODO: (1) need to make it multitask compatible or have a separate thread, (2) need to return priority and weight.
>>> print resolve('sip:192.1.2.3') # with numeric IP
[('192.1.2.3', 5060, 'udp'), ('192.1.2.3', 5060, 'tcp'), ('192.1.2.3', 5061, 'tls')]
>>> print resolve('sip:192.1.2.3;maddr=192.3.3.3') # and maddr param
[('192.3.3.3', 5060, 'udp'), ('192.3.3.3', 5060, 'tcp'), ('192.3.3.3', 5061, 'tls')]
>>> print resolve('sip:192.1.2.3:5062;transport=tcp') # and port, transport param
[('192.1.2.3', 5062, 'tcp')]
>>> print resolve('sips:192.1.2.3') # and sips
[('192.1.2.3', 5061, 'tls')]
>>> print resolve('sips:192.1.2.3:5062') # and sips, port
[('192.1.2.3', 5062, 'tls')]
>>> print resolve('sip:39peers.net') # with non-numeric without NAPTR/SRV
[('74.220.215.84', 5060, 'udp'), ('74.220.215.84', 5060, 'tcp'), ('74.220.215.84', 5061, 'tls')]
>>> print resolve('sip:39peers.net:5062') # and port
[('74.220.215.84', 5062, 'udp'), ('74.220.215.84', 5062, 'tcp'), ('74.220.215.84', 5062, 'tls')]
>>> print resolve('sip:39peers.net;transport=tcp') # and transport
[('74.220.215.84', 5060, 'tcp')]
>>> print resolve('sips:39peers.net') # and sips
[('74.220.215.84', 5061, 'tls')]
>>> print resolve('sip:iptel.org') # with no NAPTR but has SRV records
[('217.9.36.145', 5060, 'udp'), ('217.9.36.145', 5060, 'tcp')]
>>> print resolve('sips:iptel.org') # and sips
[('217.9.36.145', 5061, 'tls')]
>>> print resolve('sip:columbia.edu') # with one NAPTR and two SRV records
[('128.59.59.229', 5060, 'udp'), ('128.59.59.208', 5060, 'udp')]
>>> print resolve('sips:columbia.edu') # and sips (no NAPTR for sips)
[('128.59.48.24', 5061, 'tls')]
>>> print resolve('sip:adobe.com') # with multiple NAPTR and multiple SRV
[('192.150.12.115', 5060, 'udp')]
>>> print resolve('sip:adobe.com', supported=('tcp', 'tls')) # if udp is not supported
[('192.150.12.115', 5060, 'tcp')]
>>> print resolve('sips:adobe.com') # with multiple NAPTR and multiple SRV
[('192.150.12.115', 5061, 'tls')]
>>> try: resolve('sip:twilio.com') # with incorrectly configured SRV
... except: print 'error'
error
'''
import sys, os, time, random
if __name__ == '__main__': # hack to add other libraries in the sys.path
f = os.path.dirname(sys.path.pop(0))
sys.path.append(os.path.join(f, 'external'))
if os.name == 'nt': # on windows import w32util and use RegistryResolve
import w32util
_nameservers = w32util.RegistryResolve()
else: _nameservers = None
import dns
from std.rfc2396 import URI, isIPv4
_debug = False; # enable debug trace or not
_resolver, _cache = None, {} # Name servers, resolver and DNS cache (plus negative cache)
_proto = {'udp': ('sip+d2u', 5060), 'tcp': ('sip+d2t', 5060), 'tls': ('sips+d2t', 5061), 'sctp': ('sip+d2s', 5060)} # map from transport to details
_rproto = dict([(x[1][0], x[0]) for x in _proto.iteritems()]) # reverse mapping {'sip+d2u': 'udp', ...}
_xproto = dict([(x[0], '_%s._%s'%(x[1][0].split('+')[0], x[0] if x[0] != 'tls' else 'tcp')) for x in _proto.iteritems()]) # mapping {'udp' : '_sip._udp', ...}
_rxproto = dict([(x[1], x[0]) for x in _xproto.iteritems()]) # mapping { '_sips._tcp': 'tls', ...}
_zxproto = dict([(x[0], _proto[x[1]]) for x in _rxproto.iteritems()]) # mapping { '_sips._tcp': ('sip+d2t, 5061), ...}
_group = lambda x: sorted(x, lambda a,b: a[1]-b[1]) # sort a list of tuples based on priority
def _query(key, negTimeout=60): # key is (target, type)
'''Perform a single DNS query, and return the ANSWER section. Uses internal cache to avoid repeating the queries.
The timeout of the cache entry is determined by TTL obtained in the results. It always returns a list, even if empty.'''
global _resolver; resolver = _resolver or dns.Resolver(_nameservers)
    if key in _cache and _cache[key][1] >= time.time(): return random.shuffle(_cache[key][0]) or _cache[key][0] # return a still-valid cached answer, reshuffled
try:
raw = resolver.Raw(key[0], key[1], dns.C_IN, recursion=True, proto=None)
if raw and raw['HEADER']['OPCODES']['TC']: # if truncated, try with TCP
raw = resolver.Raw(key[0], key[1], dns.C_IN, recursion=False, proto='tcp')
answer = raw and raw['HEADER']['ANCOUNT'] > 0 and raw['ANSWER'] or []; random.shuffle(answer)
except Exception, e:
if _debug: print '_query(', key, ') exception=', e
answer = []
_cache[key] = (answer, time.time() + min([(x['TTL'] if 'TTL' in x else negTimeout) for x in answer] + [negTimeout]))
return answer
# @implements RFC3263 P1L27-P1L32
def resolve(uri, supported=('udp', 'tcp', 'tls'), secproto=('tls',)):
'''Resolve a URI using RFC3263 to list of (IP address, port) tuples each with its order, preference, transport and
TTL information. The application can supply a list of supported protocols if needed.'''
if not isinstance(uri, URI): uri = URI(uri)
transport = uri.param['transport'] if 'transport' in uri.param else None
target = uri.param['maddr'] if 'maddr' in uri.param else uri.host
numeric, port, naptr, srv, result = isIPv4(target), uri.port, None, None, None
if uri.secure: supported = secproto # only support secproto for "sips"
#@implements rfc3263 P6L10-P8L32
if transport: transports = (transport,) if transport in supported else () # only the given transport is used
elif numeric or port is not None: transports = supported
else:
naptr = _query((target, dns.T_NAPTR))
if naptr: # find the first that is supported
ordered = filter(lambda r: r[1] in supported, sorted(map(lambda r: (r['RDATA']['ORDER'], _rproto.get(r['RDATA']['SERVICE'].lower(), ''), r), naptr), lambda a,b: a[0]-b[0])) # filter out unsupported transports
if ordered:
selected = filter(lambda r: r[0] == ordered[0][0], ordered) # keep only top-ordered values, ignore rest
transports, naptr = map(lambda r: r[1], selected), map(lambda r: r[2], selected) # unzip to transports and naptr values
else: transports, naptr = supported, None # assume failure if not found; clear the naptr response
if not naptr: # do not use "else", because naptr may be cleared in "if"
srv = filter(lambda r: r[1], map(lambda p: (_rxproto.get(p, ''), _query(('%s.%s'%(p, target), dns.T_SRV))), map(lambda t: _xproto[t], supported)))
if srv: transports = map(lambda s: s[0], srv)
else: transports = supported
#@implements rfc3263 P8L34-P9L31
if numeric: result = map(lambda t: (target, port or _proto[t][1], t), transports)
elif port: result = sum(map(lambda t: map(lambda r: (r['RDATA'], port, t), _query((target, dns.T_A))), transports), [])
else:
service = None
if naptr: service = sorted(map(lambda x: (x['RDATA']['REPLACEMENT'].lower(), x['RDATA']['ORDER'], x['RDATA']['PREFERENCE'], x['RDATA']['SERVICE'].lower()), naptr), lambda a,b: a[1]-b[1])
elif transport: service = [('%s.%s'%(_xproto[transport], target), 0, 0, _proto[transport][0])]
if not srv:
srv = filter(lambda y: y[1], map(lambda s: (_rproto[s[3].lower()], _query((s[0], dns.T_SRV))), service)) if service else []
if srv:
out = list(sorted(sum(map(lambda s: map(lambda r: (r['RDATA']['DOMAIN'].lower(), r['RDATA']['PRIORITY'], r['RDATA']['WEIGHT'], r['RDATA']['PORT'], s[0]), s[1]), srv), []), lambda a,b: a[1]-b[1]))
result = sum(map(lambda x: map(lambda y: (y['RDATA'], x[1], x[2]), (_query((x[0], dns.T_A)) or [])), map(lambda r: (r[0], r[3], r[4]), out)), [])
return result or map(lambda x: (x[0], port or _proto[x[1]][1], x[1]), sum(map(lambda b: map(lambda a: (a, b), map(lambda x: x['RDATA'], _query((target, dns.T_A)))), transports), [])) # finally do A record on target, if nothing else worked
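# Illustrative sketch (added; not part of the original module): one way a caller
# might consume the (host, port, transport) tuples returned by resolve(),
# preferring the first UDP candidate.
def example_first_udp(uri):
    for host, port, transport in resolve(uri):
        if transport == 'udp':
            return (host, port)
    return None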
if __name__ == '__main__': # Unit test of this module
import doctest; doctest.testmod()
|
# Python program to implement server side of chat room.
import socket
import select
import sys
from thread import *
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# checks whether sufficient arguments have been provided
if len(sys.argv) != 3:
print "Correct usage: script, IP address, port number"
exit()
# takes the first argument from command prompt as IP address
IP_address = str(sys.argv[1])
# takes second argument from command prompt as port number
Port = int(sys.argv[2])
server.bind((IP_address, Port))
server.listen(100)
list_of_clients = []
def clientthread(conn, addr):
# sends a message to the client whose user object is conn
conn.send("Welcome to this chatroom!")
while True:
try:
message = conn.recv(2048)
if message:
print "<" + addr[0] + "> " + message
# Calls broadcast function to send message to all
message_to_send = "<" + addr[0] + "> " + message
broadcast(message_to_send, conn)
else:
remove(conn)
except:
continue
def broadcast(message, connection):
for clients in list_of_clients:
if clients!=connection:
try:
clients.send(message)
except:
clients.close()
# if the link is broken, we remove the client
remove(clients)
def remove(connection):
if connection in list_of_clients:
list_of_clients.remove(connection)
while True:
conn, addr = server.accept()
list_of_clients.append(conn)
# prints the address of the user that just connected
print addr[0] + " connected"
    # creates an individual thread for every user
# that connects
start_new_thread(clientthread,(conn,addr))
conn.close()
server.close()
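# Illustrative client-side sketch (added for context; not part of the original
# script). A matching Python 2 client would connect to the same (IP, port) and
# multiplex the socket and stdin with select, mirroring the server loop above.
def example_client(ip_address, port):
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((ip_address, port))
    while True:
        ready, _, _ = select.select([sys.stdin, client], [], [])
        for source in ready:
            if source is client:
                # a message relayed by the server
                print client.recv(2048)
            else:
                # a line typed by the user
                client.send(sys.stdin.readline())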
|
"""
Number letter counts
Problem 17
https://projecteuler.net/problem=17
"""
def sol():
under_ten = get_under_ten()
total_of_one_to_ninety_nine = get_total_of_one_to_ninety_nine(under_ten)
total_of_one_hundred_until_thousand = get_total_of_one_hundred_until_thousand(
under_ten, total_of_one_to_ninety_nine
)
print(
total_of_one_to_ninety_nine
+ total_of_one_hundred_until_thousand
+ len("one")
+ len("thousand")
)
def get_under_ten():
return [
"one",
"two",
"three",
"four",
"five",
"six",
"seven",
"eight",
"nine",
]
def get_total_of_one_to_ninety_nine(under_ten):
ten_to_nineteen = [
"ten",
"eleven",
"twelve",
"thirteen",
"fourteen",
"fifteen",
"sixteen",
"seventeen",
"eighteen",
"nineteen",
]
twenty_to_ninety_step_10 = [
"twenty",
"thirty",
"forty",
"fifty",
"sixty",
"seventy",
"eighty",
"ninety",
]
total_length_of_under_ten = sum([len(i) for i in under_ten])
total_length_of_ten_to_nineteen = sum([len(i) for i in ten_to_nineteen])
total_length_of_twenty_to_ninety_nine = 0
for ten_time in twenty_to_ninety_step_10:
tmp = len(ten_time) * 10
tmp += total_length_of_under_ten
total_length_of_twenty_to_ninety_nine += tmp
return (
total_length_of_under_ten
+ total_length_of_ten_to_nineteen
+ total_length_of_twenty_to_ninety_nine
)
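# Counting note (added explanation for the function below): for each word X in
# "one".."nine", the words "X hundred" occur in all 100 numbers X00-X99, the
# word "and" occurs in the 99 numbers X01-X99, and every number name from 1-99
# is spelled out exactly once, which is why tmp accumulates those three terms.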
def get_total_of_one_hundred_until_thousand(under_ten, total_of_one_to_ninety_nine):
result = 0
hundred_length = len("hundred")
and_length = len("and")
for num in under_ten:
tmp = (len(num) + len("hundred")) * 100
tmp += and_length * 99
tmp += total_of_one_to_ninety_nine
result += tmp
return result
if __name__ == "__main__":
sol()
|
#!/usr/bin/python
"""
This example shows how to create an empty Mininet object
(without a topology object) and add nodes to it manually.
"""
from mininet.net import Mininet, MininetWithControlNet
from mininet.node import Controller, RemoteController
from mininet.cli import CLI
from mininet.log import setLogLevel, info
class InbandController( RemoteController ):
def checkListening( self ):
"Overridden to do nothing."
return
def netWithVNFs(netconf = False):
"Create an empty network and add nodes to it."
#ctl = InbandController( 'ctl', ip='192.168.123.1' )
#ctl = InbandController( 'ctl', ip='127.0.0.1' )
#net = MininetWithControlNet( )
net = MininetWithControlNet( controller=Controller, autoSetMacs=True )
#net = Mininet( controller=Controller )
info( '*** Adding controller\n' )
ctl = net.addController( 'c0' , controller=RemoteController )
#import pdb; pdb.set_trace();
info( '*** Adding hosts \n' )
h1 = net.addHost( 'h1')
h2 = net.addHost( 'h2')
info( '*** Adding VNFs \n' )
if netconf:
ee1 = net.addEE( 'ee1' )
ee1.setVNF(vnf_name='netconf')
ee2 = net.addEE( 'ee2' )
ee2.setVNF(vnf_name='netconf')
#[ exe1_sw, exe1_container ] = net.addManagedExe( 'exe1', nintf=5)
#exe1_container.cmd = netconf.makeNetConfCmd()
else:
ee1 = net.addEE( 'ee1',cpu=0.1)
#ee1.setVNF(vnf_name='fakeLoad', cpu='8', mem='5MB')
ee1.setVNF(vnf_name='simpleForwarder', device=ee1.name+'_eth1',name=ee1.name)
ee2 = net.addEE( 'ee2',cpu=0.1)
#example for NAT with two ports connected to internal hosts (private addresses) and one port connected to the Internet (public address)
device=[{'index':0,'name':'eth1','ip1':'1.0.0.1','ip2':'1.0.0.10'},{'index':1,'name':'eth2','ip1':'1.0.0.20','ip2':'1.0.0.30'}]
public={'index':2,'name':'eth2'}
ee2.setVNF(vnf_name='nat',device=device,public=public)
# ee2.setVNF(vnf_name='simpleObservationPoint', name=ee2.name)
#ee2.setVNF(vnf_name='fakeLoad', cpu='8', mem='5MB')
#ee2.setVNF(vnf_name='lookbusy',
# mem_util='5MB', cpu_util='8-20', cpu_mode='curve',
# cpu_curve_period='5m', cpu_curve_peak='2m' )
info( '*** Adding switches\n' )
s3 = net.addSwitch( 's3' )
s4 = net.addSwitch( 's4' )
info( '*** Creating links\n' )
net.addLink( h1, s3 )
net.addLink( h2, s4 )
net.addLink( s3, s4 )
if netconf:
net.addLink( exe1_sw, s3 )
else:
net.addLink( ee1, s3 )
net.addLink( ee2, s4 )
info( '*** Starting network\n' )
net.start()
info( '*** Running CLI\n' )
CLI( net )
info( '*** Stopping network' )
net.stop()
def add_VNF():
""" add VNFs to catalog (required parameters should be given) """
from mininet.vnfcatalogiminds import Catalog
#1. First single Click elements are added to the DB
Catalog().add_VNF(vnf_name='FromDevice',vnf_type='Click')
Catalog().add_VNF(vnf_name='ToDevice',vnf_type='Click')
Catalog().add_VNF(vnf_name='Queue',vnf_type='Click')
Catalog().add_VNF(vnf_name='Tee',vnf_type='Click')
#Catalog().add_VNF(vnf_name='Counter',vnf_type='Click',clickPath='/home/click',
# clickSource=['elements/standard/counter.cc','elements/standard/counter.cc'])
Catalog().add_VNF(vnf_name='Counter',vnf_type='Click')
Catalog().add_VNF(vnf_name='Classifier',vnf_type='Click')
Catalog().add_VNF(vnf_name='IPClassifier',vnf_type='Click')
Catalog().add_VNF(vnf_name='ICMPPingSource',vnf_type='Click')
Catalog().add_VNF(vnf_name='ARPQuerier',vnf_type='Click')
Catalog().add_VNF(vnf_name='AggregateIPFlows',vnf_type='Click')
Catalog().add_VNF(vnf_name='RFC2507Comp',vnf_type='Click')
Catalog().add_VNF(vnf_name='RFC2507Decomp',vnf_type='Click')
Catalog().add_VNF(vnf_name='IPAddRewriter',vnf_type='Click')
#2. Then the VNFs composed of several Click elements are added to the DB
Catalog().add_VNF(vnf_name='simpleForwarder',vnf_type='Click',description='receive on the data interface and loop back the packet')
Catalog().add_VNF(vnf_name='simpleObservationPoint',vnf_type='Click',description='A simple observation point in click')
Catalog().add_VNF(vnf_name='headerCompressor',vnf_type='Click',description='Compress IPv4/TCP headers as defined in RFC2507')
Catalog().add_VNF(vnf_name='headerDecompressor',vnf_type='Click',description='Decompress IPv4/TCP headers as defined in RFC2507')
Catalog().add_VNF(vnf_name='nat',vnf_type='Click',description='Provide the functionality of basic network address translator')
if __name__ == '__main__':
add_VNF()
setLogLevel( 'info' )
netWithVNFs()
|
import logging
import amrlib
import penman
import penman.models.noop
import re
from amrlib.alignments.rbw_aligner import RBWAligner
from amrlib.graph_processing.annotator import add_lemmas, load_spacy
from amratom.atomese import is_variable, is_amrset_name
_spacy_model = 'en_core_web_md'
_penman_model = penman.models.noop.NoOpModel()
def _load_spacy():
#return spacy.load(_spacy_model)
    # There is no API to inject one's own model into the amrlib pipeline. This
    # code is kept to avoid reimplementing the internal `add_lemmas` logic
    # with a separate spaCy model.
load_spacy(_spacy_model)
from amrlib.graph_processing.annotator import spacy_nlp
logging.getLogger(__name__).debug("_load_spacy(): %s is loaded",
spacy_nlp.path)
return spacy_nlp
_stog_model_cache = None
_gtos_model_cache = None
_spacy_model_cache = None
def load_models():
global _stog_model_cache
if _stog_model_cache is None:
_stog_model_cache = amrlib.load_stog_model()
global _gtos_model_cache
if _gtos_model_cache is None:
_gtos_model_cache = amrlib.load_gtos_model()
global _spacy_model_cache
if _spacy_model_cache is None:
_spacy_model_cache = _load_spacy()
amrlib.setup_spacy_extension()
class AmrProcessor:
def __init__(self):
self.log = logging.getLogger(__name__ + '.' + type(self).__name__)
load_models()
global _spacy_model_cache
self.nlp = _spacy_model_cache
global _gtos_model_cache
self.gtos = _gtos_model_cache
def utterance_to_amr(self, utterance, indent=-1):
doc = self.nlp(utterance)
amrs = doc._.to_amr()
sents = doc.sents
triples_proc = []
for p in zip(amrs, sents):
# Entry point (amr text->triples); can be replaced with penman.decode
triples, top = self._add_pos_tags(doc, p[0], p[1], indent)
# further triples processing
triples_proc += self._sentence_splitter(triples, top)
return list(map(lambda ts: penman.encode(penman.Graph(ts), indent=indent, model=_penman_model),
triples_proc))
def amr_to_utterance(self, amr):
return self.gtos.generate([amr], use_tense=False)[0][0]
def triples_to_utterance(self, triples):
return self.amr_to_utterance(penman.encode(penman.Graph(triples)))
def _add_pos_tags(self, doc, amr, sent, indent):
self.log.debug('_add_pos_tags: amr: %s, sent: %s', amr, sent)
graph = add_lemmas(amr, snt_key='snt')
aligner = RBWAligner.from_penman_w_json(graph)
graph = aligner.get_penman_graph()
triples = graph.triples
self.log.debug('_add_pos_tags: alignments: %s',
penman.surface.alignments(graph))
for triple, alignment in penman.surface.alignments(graph).items():
pos_tag = doc[sent.start + alignment.indices[0]].tag_
triples.append((triple[0], ':pos', '"' + pos_tag + '"'))
self.log.debug('_add_pos_tags: triples: %s', triples)
return triples, graph.top
def _child_lnodes_rec(self, triples, parent):
'''
Returns all those nodes (possibly duplicated) in the subgraph (including `parent`)
which appear on the left side of triples (i.e. not leaves)
'''
grandchildren = []
isOnLeft = False
for t in triples:
if t[0] == parent:
isOnLeft = True
grandchildren += self._child_lnodes_rec(triples, t[2])
return [parent] + grandchildren if isOnLeft else []
def _sentence_splitter(self, triples, top):
top_roles = []
top_concept = None
for triple in triples:
if triple[0] == top:
if triple[1] == ':instance':
top_concept = triple[2]
elif triple[1] != ':pos':
top_roles += [(triple[1], triple[2])]
if top_concept == 'and':
expected = 'op'
elif top_concept == 'multi-sentence':
expected = 'snt'
else: return [triples]
# Just checking that there are no unexpected roles
for r in top_roles:
if not expected in r[0]:
logging.getLogger(__name__).debug("_sentence_splitter(): WARNING - unexpected role %s for %s",
r[0], top_concept)
return [triples]
subgraphs = [[[], self._child_lnodes_rec(triples, r[1])] for r in top_roles]
for t in triples:
for s in subgraphs:
if t[0] in s[1]:
s[0] += [t]
return [s[0] for s in subgraphs]
def is_instance(triple):
return triple[1] == ':instance'
def is_unknown(triple):
return is_instance(triple) and triple[2] == 'amr-unknown'
def _remove_postfix(word):
pos = word.rfind('-')
return word[:pos] if pos > 0 else word
def triple_to_string(triple):
source, role, target = triple
return '(' + source + ', ' + role + ', ' + target + ')'
def triple_from_string(line):
left = line.find('(')
right = line.rfind(')')
source, role, target = line[left+1:right].split(', ')
return (source, role, target)
class AmrInstanceDict:
def __init__(self, id_generator):
self.log = logging.getLogger(__name__ + '.AmrInstanceDict')
self.id_generator = id_generator
self.instance_by_node = {}
self.instance_triples = []
def add_graph(self, graph):
for triple in filter(is_instance, graph.triples):
self.log.debug('triple: %s', triple)
(source, role, target) = triple
instance = self._get_unique_instance(target)
self.instance_by_node[source] = instance
self.instance_triples.append((instance, role, target))
def _get_unique_instance(self, target):
id = self.id_generator()
return target + '-' + '{:06d}'.format(id)
def get_instance_triples(self):
return self.instance_triples
def map_node_to_instance(self, node):
if node in self.instance_by_node:
return self.instance_by_node[node]
else:
return node
_number_or_string_pattern = re.compile(r'\d+(\.\d+)?|"[^\"]+"|-|\+')
def is_amr_set(triple):
return triple[1] == ':amr-set'
def is_const( word):
return _number_or_string_pattern.fullmatch(word)
_roles_with_attrs_at_right = { ':mode', ':pos', ':polarity' }
class PatternInstanceDict(AmrInstanceDict):
def __init__(self, id_generator):
super().__init__(id_generator)
self.log = logging.getLogger(__name__ + '.PatternInstanceDict')
def add_graph(self, graph):
for triple in filter(is_instance, graph.triples):
node, instance_role, concept = triple
assert not(is_variable(node) and is_amrset_name(concept)), (
'($var / @amrset) is not supported')
assert not(node == '-' and is_variable(concept)), (
'(- / $var) is not supported')
if concept is None:
continue
if concept == '-':
self.instance_by_node[node] = node
continue
instance = self._get_unique_instance(concept)
self.instance_by_node[node] = instance
self.instance_triples.append((instance, instance_role, concept))
for triple in filter(lambda x: not is_instance(x), graph.triples):
self.log.debug('triple: %s', triple)
source, role, target = triple
self._add_instance(source, role, True)
self._add_instance(target, role, False)
def _add_instance(self, concept, role, is_source):
if concept in self.instance_by_node:
return
elif is_variable(concept):
self.instance_by_node[concept] = concept
return
elif is_amrset_name(concept):
if role == ':amr-set' and is_source:
self.instance_by_node[concept] = concept
return
elif is_const(concept):
return
elif not is_source and role in _roles_with_attrs_at_right:
            self.log.warning('Concept node is generated for a possible attribute; '
                             'please use (%s / -) if it is not expected', concept)
instance = self._get_unique_instance(concept)
self.instance_by_node[concept] = instance
self.instance_triples.append((instance, ":instance", concept))
def _get_unique_instance(self, target):
if is_variable(target) or is_amrset_name(target):
return super()._get_unique_instance(target[1:])
else:
return super()._get_unique_instance(target)
class ParsedAmr:
def __init__(self, top, triples):
self.top = top
self.triples = triples
def __iter__(self):
return self.triples.__iter__()
def get_top(self):
return self.top
class TripleProcessor:
def __init__(self, instance_dict_constr=AmrInstanceDict):
self.log = logging.getLogger(__name__ + '.TripleProcessor')
self.next_id = 0
self.instance_dict_constr = instance_dict_constr
def _process_relation(self, triple, amr_instances):
self.log.debug('_process_relation: triple: %s', triple)
(source, role, target) = triple
source = amr_instances.map_node_to_instance(source)
target = amr_instances.map_node_to_instance(target)
return (source, role, target)
def add_triple(self, triple):
source, role, target = triple
if role == ':instance':
self._add_variable(source)
def _add_variable(self, text):
id = int(text.split('-')[-1])
if self.next_id <= id:
self.next_id = id + 1
def _next_id(self):
id = self.next_id
self.next_id += 1
return id
def amr_to_triples(self, amr):
graph = penman.decode(amr, model=_penman_model)
return self._graph_to_triples(graph)
def file_to_triples(self, file):
for graph in penman.iterdecode(file, model=_penman_model):
for triple in self._graph_to_triples(graph):
yield triple
def _graph_to_triples(self, graph):
sentence_vars = {}
amr_instances = self.instance_dict_constr(lambda: self._next_id())
amr_instances.add_graph(graph)
top = graph.top
top = amr_instances.map_node_to_instance(top)
return ParsedAmr(top, self._triples_generator(amr_instances, graph))
def _triples_generator(self, amr_instances, graph):
for triple in amr_instances.get_instance_triples():
yield triple
for triple in filter(lambda x: not is_instance(x), graph.triples):
yield self._process_relation(triple, amr_instances)
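# Illustrative usage sketch (added; an assumption about how the classes above
# fit together, not part of the original module). The sample sentence is
# arbitrary.
def _example_pipeline(utterance='The boy wants to go.'):
    processor = AmrProcessor()
    amr_strings = processor.utterance_to_amr(utterance)        # one AMR string per sentence
    parsed = TripleProcessor().amr_to_triples(amr_strings[0])   # ParsedAmr, iterable of triples
    return amr_strings, list(parsed)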
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 30 13:19:29 2014
@author: enrico.giampieri2
"""
import unittest
import sympy
from utils import Counter
from utils import variazione
from utils import shift
from utils import WRGnumpy
from CME import CME
if __name__ == '__main__':
unittest.main() |
from functools import reduce
import pandas as pd
import glob
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import datetime as dt
import os
def cleanData(imports):
outerjoin = reduce(lambda a, b: pd.merge(a, b, on='ID', how='outer'), imports)
outerjoin.set_index('ID', drop=True, inplace=True)
outerjoin.columns = [str(i+1) for i in range(0, len(outerjoin.columns))]
outerjoin = outerjoin.loc[:, '1':'400'].apply(pd.to_numeric, errors='coerce')
outerjoin.fillna(1, inplace=True)
outerjoin.replace(0, 1, inplace=True)
return outerjoin
def importCSV(kfilepath, tfilepath, catfilepath):
print("Importing CSV files...")
ids = glob.glob(kfilepath + "*.csv")
outerjoinln = cleanData([pd.read_csv(file, delimiter=',', header=None, names=['ID', 'ttest'], usecols=[0,1],
quotechar='"') for file in ids])
outerjoinsp = cleanData([pd.read_csv(file, delimiter=',', header=None, names=['ID', 'ttest'], usecols=[0,2],
quotechar='"') for file in ids])
test = pd.read_csv(tfilepath, delimiter=',', header=None, names=['ID', 'lnnsaf', 'spc'], quotechar='"')
test.set_index('ID', drop=True, inplace=True)
label = pd.read_csv(catfilepath, delimiter=',', header=None)
return outerjoinln, outerjoinsp, test, label
def individualForests(kerberusln, kerberussp, testdata, type, time):
print("Producing " + type + " forests")
count = 0
predictiontest = []
if type == "Isolation":
forest = IsolationForest(max_samples='auto', contamination=0.2)
os.mkdir('src/output/isolationforest/' + time)
foldername = 'src/output/isolationforest/' + time + '/'
elif type == "RandomClassifier":
forest = RandomForestClassifier(n_estimators=100)
os.mkdir('src/output/randomclassifierforest/' + time)
foldername = 'src/output/randomclassifierforest/' + time + '/'
categories = pd.read_csv('src/output/permcategories/permcategories.csv', delimiter=',', header=None)
    # This section produces 400 separate forests, one for every t-test combination,
    # indexed by protein ID
for index, row in testdata.iterrows(): # index = ID and row = all 400 values
count += 1
if index not in kerberusln.index.values:
print("Test not in 400Data...? Why?")
continue
foresttrain = pd.concat([kerberusln.loc[index], kerberussp.loc[index]], axis=1)
foresttrain.columns = ["lnNSAF", "SpC"]
foresttrain.fillna(1, inplace=True)
foresttrain.replace(' NaN', 1, inplace=True)
foresttrain.drop(foresttrain[(foresttrain.lnNSAF == 1) | (foresttrain.SpC == 1) |
(foresttrain.lnNSAF == 0) | (foresttrain.SpC == 0)].index, inplace=True)
test = pd.DataFrame(testdata.loc[index].values.reshape(1, -1))
test.columns = ["lnNSAF", "SpC"]
test.fillna(1, inplace=True)
test.replace(' NaN', 1, inplace=True)
test.drop(test[(test.lnNSAF == 1) | (test.SpC == 1) | (test.lnNSAF == 0) | (test.SpC == 0)].index, inplace=True)
try:
print("Tree " + str(count) + " of " + str(len(testdata.index)) + " completed.")
if type == "Isolation":
forest.fit(foresttrain)
predictiontest.append(forest.predict(test)) # This tells us whether the test is an outlier/in
elif type == "RandomClassifier":
forest.fit(foresttrain, foresttrain)
predictiontest.append(forest.predict(test))
except ValueError:
print("Warning: Incompatible Test @: " + str(count))
test.to_csv(str("src/output/error/" + str(count) + "-Test.csv"), sep=',')
foresttrain.to_csv(str("src/output/error/" + str(count) + "-Train.csv"), sep=',')
continue
plt.clf()
index = index.replace("|", "-")
plt.title(index + " " + type)
xx, yy = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 50))
z = forest.decision_function(np.c_[xx.ravel(), yy.ravel()])
z = z.reshape(xx.shape)
plt.contourf(xx, yy, z, cmap=plt.cm.Greens_r)
b1 = plt.scatter(foresttrain["lnNSAF"], foresttrain["SpC"], c='black', s=1, edgecolor='k')
b2 = plt.scatter(test["lnNSAF"], test["SpC"], c='yellow', s=20)
plt.legend([b1, b2], ["training observations", "new observations"], loc="upper right")
plt.xlabel("TTest from lnNSAF")
plt.ylabel("TTest from SpC")
plt.yticks(np.arange(0, 1, step=0.05))
plt.xticks(np.arange(0, 1, step=0.05), rotation=75)
plt.tight_layout()
plt.savefig(foldername + index + '.jpg')
plt.close("all")
def constructDataFrames(kerberusln, kerberussp, testdata):
print("Making the Train and Test Dataframes for the Mega Forest...")
totaldf = pd.concat([pd.Series(kerberusln.values.ravel()), pd.Series(kerberussp.values.ravel())], axis=1)
totaldf.columns = ["lnNSAF", "SpC"]
totaldf.replace(' NaN', 1, inplace=True)
totaldf.replace('NaN', 1, inplace=True)
totaldf.fillna(1, inplace=True)
totaldf.drop(totaldf[(totaldf.lnNSAF == 1) | (totaldf.SpC == 1) |
(totaldf.lnNSAF == 0) | (totaldf.SpC == 0) |
(totaldf.lnNSAF.dtype == np.str) | (totaldf.SpC.dtype == np.str)].index, inplace=True)
totaldf.reset_index(drop=True, inplace=True)
testdf = testdata
testdf.columns = ["lnNSAF", "SpC"]
testdf.replace(' NaN', 1, inplace=True)
testdf.replace('NaN', 1, inplace=True)
testdf.fillna(1, inplace=True)
testdf.drop(testdf[(testdf.lnNSAF == 1) | (testdf.SpC == 1) |
(testdf.lnNSAF == 0) | (testdf.SpC == 0) |
(testdf.lnNSAF.dtype == np.str) | (testdf.SpC.dtype == np.str)].index, inplace=True)
testdf.reset_index(drop=True, inplace=True)
return totaldf, testdf
def megaIsolationForest(totaldf, testdf):
forest = IsolationForest(max_samples='auto')
# This section computes a mega-forest.
print("Now constructing the mega forest")
forest.fit(totaldf)
predictiontest = forest.predict(testdf)
plt.clf()
plt.title("Isolation Forest")
xx, yy = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 50))
z = forest.decision_function(np.c_[xx.ravel(), yy.ravel()])
z = z.reshape(xx.shape)
plt.contourf(xx, yy, z, cmap=plt.cm.Greens_r)
colours = ['yellow' if test == 1 else 'red' for test in predictiontest]
b1 = plt.scatter(totaldf["lnNSAF"], totaldf["SpC"], c='black', s=.1)
b2 = plt.scatter(testdf["lnNSAF"], testdf["SpC"], c=colours, s=30, edgecolor='k')
plt.legend([b1, b2], ["Training", "Test Inlier"], loc="center left", bbox_to_anchor=(1.05, 1))
plt.plot((0.05, 0.05), (0, 1), c='blue') # (x1 x2), (y1 y2)
plt.plot((0, 1), (0.05, 0.05), c='blue')
plt.xlabel("TTest from lnNSAF")
plt.ylabel("TTest from SpC")
plt.yticks(np.arange(0, 1, step=0.05))
plt.xticks(np.arange(0, 1, step=0.05), rotation=75)
plt.show()
plt.close("all")
def multiLabelMultiClassifier(kerberusln, kerberussp, testdata, labels):
forestcollection = []
types = ["KNeighbors", "Extra Trees", "Random Forest", "Decision Tree"]
widths = [0.8, 0.65, 0.5, 0.35]
colours = ['#0a3052', '#161fc0', '#486bea', '#7fbbf0']
# This is a second round of data cleaning
traindatalist = []
testdatalist = []
for index, row in testdata.iterrows(): # index = ID and row = all 400 values
if index not in kerberusln.index.values:
print("Test not in 400Data...? Why?")
continue
foresttrain = pd.concat([kerberusln.loc[index], kerberussp.loc[index]], axis=1)
for i in range(0, 12):
foresttrain["Label " + str(i + 1)] = labels.iloc[:, i].astype(str).values
foresttrain.columns = ["lnNSAF", "SpC", *list("Label " + str(i) for i in range(1, 13))]
foresttrain.fillna(1, inplace=True)
foresttrain.replace(' NaN', 1, inplace=True)
foresttrain.drop(foresttrain[(foresttrain.lnNSAF == 1) | (foresttrain.SpC == 1) |
(foresttrain.lnNSAF == 0) | (foresttrain.SpC == 0)].index, inplace=True)
test = pd.DataFrame(testdata.loc[index].values.reshape(1, -1))
test.columns = ["lnNSAF", "SpC"]
test.fillna(1, inplace=True)
test.replace(' NaN', 1, inplace=True)
test.drop(test[(test.lnNSAF == 1) | (test.SpC == 1) | (test.lnNSAF == 0) | (test.SpC == 0)].index, inplace=True)
traindatalist.append(foresttrain)
testdatalist.append(test)
# This is where the forest is constructed and executed
for count, foresttype in enumerate([
KNeighborsClassifier(n_jobs=-1),
#ExtraTreesClassifier(testdata.shape[0], n_jobs=-1),
#RandomForestClassifier(testdata.shape[0], n_jobs=-1),
DecisionTreeClassifier()]):
print("Currently Conducting: " + types[count])
testsize = testdata.shape[0]
testindexes = [i for i in range(len(testdatalist))]
totaloutput = np.zeros(12)
for i in testindexes:
totaloutput = totaloutput + performClassifierProcess(testdatalist[i], traindatalist[i], foresttype, testsize, i)
forestcollection.append(list(map(lambda x: x / int(testdata.shape[0]), totaloutput)))
print(forestcollection)
plt.clf()
for i, test in enumerate(forestcollection):
plt.bar(np.arange(len(test)), test, widths[i], alpha=0.75, color=colours[i])
plt.xticks(np.arange(len(test)),
['R{}'.format(i + 1) for i in range(len(test))])
axes = plt.gca()
axes.set_ylim([0, 1])
plt.legend(types)
plt.plot(np.arange(12), np.average(np.asarray(forestcollection), axis=0), c='black', linewidth=2)
plt.plot([0, 11], [.5, .5], '--', c='red', linewidth=1)
plt.show()
def performClassifierProcess(testdatalist, traindatalist, foresttype, size, chunk):
print("Producing Classifier: " + str(chunk) + " of " + str(size))
try:
foresttype.fit(traindatalist.loc[:, "lnNSAF":"SpC"], traindatalist.loc[:, "Label 1":"Label 12"])
prediction = pd.DataFrame(foresttype.predict(testdatalist)).apply(lambda x: int(x))
return np.array(prediction)
except ValueError:
print("Warning: Incompatible Test @: " + str(chunk + 1))
return np.zeros((12,), dtype=int)
if __name__ == '__main__':
time = str(dt.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
Kerberusdataln, Kerberusdatasp, Testdata, Labels = importCSV("src/output/400/", "src/output/6by6/result.csv",
"src/output/permcategories/permtotal.csv")
#multiLabelMultiClassifier(Kerberusdataln, Kerberusdatasp, Testdata, Labels)
individualForests(Kerberusdataln, Kerberusdatasp, Testdata, "Isolation", time)
#individualForests(Kerberusdataln, Kerberusdatasp, Testdata, "RandomClassifier", time)
#TotalDF, TestDF = constructDataFrames(Kerberusdataln, Kerberusdatasp, Testdata)
#megaIsolationForest(TotalDF, TestDF)
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from typing import Optional, Sequence
import numpy as np
from fastmri.data.subsample import MaskFunc, RandomMaskFunc
def create_mask_for_mask_type(
mask_type_str: str,
center_fractions: Sequence[float],
accelerations: Sequence[int],
skip_low_freqs: bool,
) -> MaskFunc:
"""
Creates a mask of the specified type.
    Args:
        mask_type_str: The mask type to create; one of "random" or
            "adaptive_equispaced_fraction".
        center_fractions: What fraction of the center of k-space to include.
accelerations: What accelerations to apply.
skip_low_freqs: Whether to skip already sampled low-frequency lines
for the purposes of determining where equispaced lines should be.
Set this `True` to guarantee the same number of sampled lines for
all masks with a given (acceleration, center_fraction) setting.
Returns:
A mask func for the target mask type.
"""
if mask_type_str == "random":
return RandomMaskFunc(center_fractions, accelerations)
elif mask_type_str == "adaptive_equispaced_fraction":
return EquispacedMaskFractionFunc(
center_fractions, accelerations, skip_low_freqs
)
else:
raise ValueError(f"{mask_type_str} not supported")
class EquispacedMaskFractionFunc(MaskFunc):
"""
Equispaced mask with strictly exact acceleration matching.
The mask selects a subset of columns from the input k-space data. If the
k-space data has N columns, the mask picks out:
1. N_low_freqs = (N * center_fraction) columns in the center
corresponding to low-frequencies.
2. The other columns are selected with equal spacing at a proportion
that reaches the desired acceleration rate taking into consideration
the number of low frequencies. This ensures that the expected number
of columns selected is equal to (N / acceleration)
It is possible to use multiple center_fractions and accelerations, in which
case one possible (center_fraction, acceleration) is chosen uniformly at
    random each time the EquispacedMaskFractionFunc object is called.
Note that this function may not give equispaced samples (documented in
https://github.com/facebookresearch/fastMRI/issues/54), which will require
modifications to standard GRAPPA approaches. Nonetheless, this aspect of
the function has been preserved to match the public multicoil data.
"""
def __init__(
self,
center_fractions: Sequence[float],
accelerations: Sequence[int],
skip_low_freqs: bool = False,
):
"""
Args:
center_fractions: Fraction of low-frequency columns to be retained.
If multiple values are provided, then one of these numbers is
chosen uniformly each time.
accelerations: Amount of under-sampling. This should have the same
length as center_fractions. If multiple values are provided,
then one of these is chosen uniformly each time.
skip_low_freqs: Whether to skip already sampled low-frequency lines
for the purposes of determining where equispaced lines should
be. Set this `True` to guarantee the same number of sampled
lines for all masks with a given (acceleration,
center_fraction) setting.
"""
super().__init__(center_fractions, accelerations)
self.skip_low_freqs = skip_low_freqs
def calculate_acceleration_mask(
self,
num_cols: int,
acceleration: int,
offset: Optional[int],
num_low_frequencies: int,
) -> np.ndarray:
"""
Produce mask for non-central acceleration lines.
Args:
num_cols: Number of columns of k-space (2D subsampling).
acceleration: Desired acceleration rate.
offset: Offset from 0 to begin masking. If no offset is specified,
then one is selected randomly.
num_low_frequencies: Number of low frequencies. Used to adjust mask
to exactly match the target acceleration.
Returns:
A mask for the high spatial frequencies of k-space.
"""
mask = np.zeros(num_cols)
pad = (num_cols - num_low_frequencies + 1) // 2
# determine acceleration rate by adjusting for the number of low frequencies
adjusted_accel = (acceleration * (num_low_frequencies - num_cols)) / (
num_low_frequencies * acceleration - num_cols
)
offset = self.rng.randint(0, round(adjusted_accel) - 1)
# Select samples from the remaining columns
accel_samples = np.arange(
offset, num_cols - num_low_frequencies - 1, adjusted_accel
)
accel_samples = np.around(accel_samples).astype(int)
skip = (
num_low_frequencies # Skip low freq AND optionally lines right next to it
)
for sample in accel_samples:
if sample < pad:
mask[sample] = True
else: # sample is further than center, so skip low_freqs
mask[int(sample + skip)] = True
return mask
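# Worked example (added for illustration; the numbers are hypothetical): with
# num_cols=368, acceleration=4 and num_low_frequencies=30,
#   adjusted_accel = 4 * (30 - 368) / (30 * 4 - 368) = -1352 / -248 ~= 5.45,
# so roughly (368 - 30) / 5.45 ~= 62 equispaced lines are added to the 30
# centre lines, giving about 92 ~= 368 / 4 sampled columns, i.e. the target
# acceleration is matched in expectation.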
|
from datetime import timedelta
import pandas.util.testing as tm
from pandas import TimedeltaIndex, timedelta_range, compat, Index, Timedelta
class TestTimedeltaIndex(tm.TestCase):
_multiprocess_can_split_ = True
def test_insert(self):
idx = TimedeltaIndex(['4day', '1day', '2day'], name='idx')
result = idx.insert(2, timedelta(days=5))
exp = TimedeltaIndex(['4day', '1day', '5day', '2day'], name='idx')
self.assert_index_equal(result, exp)
# insertion of non-datetime should coerce to object index
result = idx.insert(1, 'inserted')
expected = Index([Timedelta('4day'), 'inserted', Timedelta('1day'),
Timedelta('2day')], name='idx')
self.assertNotIsInstance(result, TimedeltaIndex)
tm.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
idx = timedelta_range('1day 00:00:01', periods=3, freq='s', name='idx')
# preserve freq
expected_0 = TimedeltaIndex(['1day', '1day 00:00:01', '1day 00:00:02',
'1day 00:00:03'],
name='idx', freq='s')
expected_3 = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:04'],
name='idx', freq='s')
# reset freq to None
expected_1_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:01',
'1day 00:00:02', '1day 00:00:03'],
name='idx', freq=None)
expected_3_nofreq = TimedeltaIndex(['1day 00:00:01', '1day 00:00:02',
'1day 00:00:03', '1day 00:00:05'],
name='idx', freq=None)
cases = [(0, Timedelta('1day'), expected_0),
(-3, Timedelta('1day'), expected_0),
(3, Timedelta('1day 00:00:04'), expected_3),
(1, Timedelta('1day 00:00:01'), expected_1_nofreq),
(3, Timedelta('1day 00:00:05'), expected_3_nofreq)]
for n, d, expected in cases:
result = idx.insert(n, d)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
def test_delete(self):
idx = timedelta_range(start='1 Days', periods=5, freq='D', name='idx')
        # preserve freq
expected_0 = timedelta_range(start='2 Days', periods=4, freq='D',
name='idx')
expected_4 = timedelta_range(start='1 Days', periods=4, freq='D',
name='idx')
# reset freq to None
expected_1 = TimedeltaIndex(
['1 day', '3 day', '4 day', '5 day'], freq=None, name='idx')
cases = {0: expected_0,
-5: expected_0,
-1: expected_4,
4: expected_4,
1: expected_1}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on the numpy version
result = idx.delete(5)
def test_delete_slice(self):
idx = timedelta_range(start='1 days', periods=10, freq='D', name='idx')
        # preserve freq
expected_0_2 = timedelta_range(start='4 days', periods=7, freq='D',
name='idx')
expected_7_9 = timedelta_range(start='1 days', periods=7, freq='D',
name='idx')
# reset freq to None
expected_3_5 = TimedeltaIndex(['1 d', '2 d', '3 d',
'7 d', '8 d', '9 d', '10d'],
freq=None, name='idx')
cases = {(0, 1, 2): expected_0_2,
(7, 8, 9): expected_7_9,
(3, 4, 5): expected_3_5}
for n, expected in compat.iteritems(cases):
result = idx.delete(n)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
result = idx.delete(slice(n[0], n[-1] + 1))
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(result.freq, expected.freq)
|
"""
Based on augmented_lstm.py and stacked_bidirectional_lstm.py from AllenNLP 0.8.3
"""
from typing import Optional, Tuple, Type, List, Dict, Any
import torch
from allennlp.nn.util import get_dropout_mask
from modules.stack_rnn_cell import StackRnnCellBase, StackLstmCell
class StackRnn(torch.nn.Module):
"""
    A standard stacked LSTM in which the output of each layer is fed as the
    input to the next layer. The only difference between this and a regular
    stacked LSTM is the application of variational dropout to the hidden
    states and outputs of each layer apart from the last layer of the LSTM.
    Note that this will be slower, as it doesn't use CUDNN.
Parameters
----------
input_size : int, required
The dimension of the inputs to the LSTM.
hidden_size : int, required
The dimension of the outputs of the LSTM.
num_layers : int, required
        The number of stacked LSTM layers to use.
recurrent_dropout_probability: float, optional (default = 0.0)
The recurrent dropout probability to be used in a dropout scheme as
stated in `A Theoretically Grounded Application of Dropout in Recurrent
Neural Networks <https://arxiv.org/abs/1512.05287>`_ .
layer_dropout_probability: float, optional (default = 0.0)
The layer wise dropout probability to be used in a dropout scheme as
stated in `A Theoretically Grounded Application of Dropout in
Recurrent Neural Networks <https://arxiv.org/abs/1512.05287>`_ .
use_highway: bool, optional (default = True)
Whether or not to use highway connections between layers. This effectively involves
reparameterising the normal output of an LSTM as::
gate = sigmoid(W_x1 * x_t + W_h * h_t)
output = gate * h_t + (1 - gate) * (W_x2 * x_t)
"""
def __init__(self,
input_size: int,
hidden_size: int,
num_layers: int,
use_highway: bool = True,
recurrent_dropout_probability: float = 0.0,
layer_dropout_probability: float = 0.0,
same_dropout_mask_per_instance: bool = True,
cell: Type[StackRnnCellBase] = StackLstmCell) -> None:
super(StackRnn, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.recurrent_dropout_probability = recurrent_dropout_probability
self.layer_dropout_probability = layer_dropout_probability
self.same_dropout_mask_per_instance = same_dropout_mask_per_instance
layers = []
rnn_input_size = input_size
for layer_index in range(num_layers):
layer = cell(rnn_input_size,
hidden_size,
use_highway=use_highway,
use_input_projection_bias=True)
rnn_input_size = hidden_size
self.add_module('layer_{}'.format(layer_index), layer)
layers.append(layer)
self.rnn_layers = layers
self.stacks: List[List[Dict[str, Any]]] = []
self.push_buffer: List[Optional[Dict[str, Any]]] = []
if self.same_dropout_mask_per_instance:
self.layer_dropout_mask: Optional[torch.Tensor] = None
self.recurrent_dropout_mask: Optional[torch.Tensor] = None
def reset_stack(self,
num_stacks: int) -> None:
self.stacks = [[] for _ in range(num_stacks)]
self.push_buffer = [None for _ in range(num_stacks)]
if self.same_dropout_mask_per_instance:
if 0.0 < self.layer_dropout_probability < 1.0:
self.layer_dropout_mask = [[get_dropout_mask(self.layer_dropout_probability,
torch.ones(layer.hidden_size,
device=self.layer_0.input_linearity.weight.device))
for _ in range(num_stacks)] for layer in self.rnn_layers]
self.layer_dropout_mask = torch.stack([torch.stack(l) for l in self.layer_dropout_mask])
else:
self.layer_dropout_mask = None
if 0.0 < self.recurrent_dropout_probability < 1.0:
self.recurrent_dropout_mask = [[get_dropout_mask(self.recurrent_dropout_probability,
torch.ones(self.hidden_size,
device=self.layer_0.input_linearity.weight.device))
for _ in range(num_stacks)] for _ in range(self.num_layers)]
self.recurrent_dropout_mask = torch.stack([torch.stack(l) for l in self.recurrent_dropout_mask])
else:
self.recurrent_dropout_mask = None
def push(self,
stack_index: int,
input: torch.Tensor,
extra: Optional[Dict[str, Any]] = None) -> None:
if self.push_buffer[stack_index] is not None:
self._apply_push()
self.push_buffer[stack_index] = {'stack_rnn_input': input}
if extra is not None:
self.push_buffer[stack_index].update(extra)
def pop(self,
stack_index: int) -> Dict[str, Any]:
if self.push_buffer[stack_index] is not None:
self._apply_push()
return self.stacks[stack_index].pop(-1)
def pop_penult(self,
stack_index: int) -> Dict[str, Any]:
if self.push_buffer[stack_index] is not None:
self._apply_push()
stack_0 = self.get_stack(stack_index)[-1]
stack_0_emb, stack_0_token = stack_0['stack_rnn_input'], stack_0['token']
self.stacks[stack_index].pop(-1)
rnt = self.stacks[stack_index].pop(-1)
self.push(stack_index, stack_0_emb, {'token': stack_0_token})
return rnt
def get_stack(self,
stack_index: int) -> List[Dict[str, Any]]:
if self.push_buffer[stack_index] is not None:
self._apply_push()
return self.stacks[stack_index]
def get_stacks(self) -> List[List[Dict[str, Any]]]:
self._apply_push()
return self.stacks
def get_len(self,
stack_index: int) -> int:
return len(self.get_stack(stack_index))
def get_output(self,
stack_index: int) -> torch.Tensor:
return self.get_stack(stack_index)[-1]['stack_rnn_output']
def _apply_push(self) -> None:
index_list = []
inputs = []
initial_state = []
layer_dropout_mask = []
recurrent_dropout_mask = []
for i, (stack, buffer) in enumerate(zip(self.stacks, self.push_buffer)):
if buffer is not None:
index_list.append(i)
inputs.append(buffer['stack_rnn_input'].unsqueeze(0))
if len(stack) > 0:
initial_state.append(
(stack[-1]['stack_rnn_state'].unsqueeze(1), stack[-1]['stack_rnn_memory'].unsqueeze(1)))
else:
initial_state.append(
(buffer['stack_rnn_input'].new_zeros(self.num_layers, 1, self.hidden_size),) * 2)
if self.same_dropout_mask_per_instance:
if self.layer_dropout_mask is not None:
layer_dropout_mask.append(self.layer_dropout_mask[:, i].unsqueeze(1))
if self.recurrent_dropout_mask is not None:
recurrent_dropout_mask.append(self.recurrent_dropout_mask[:, i].unsqueeze(1))
else:
if 0.0 < self.layer_dropout_probability < 1.0:
layer_dropout_mask.append(get_dropout_mask(self.layer_dropout_probability,
torch.ones(self.num_layers, 1, self.hidden_size,
device=self.layer_0.input_linearity.weight.device)))
if 0.0 < self.recurrent_dropout_probability < 1.0:
recurrent_dropout_mask.append(get_dropout_mask(self.recurrent_dropout_probability,
torch.ones(self.num_layers, 1, self.hidden_size,
device=self.layer_0.input_linearity.weight.device)))
if len(layer_dropout_mask) == 0:
layer_dropout_mask = None
if len(recurrent_dropout_mask) == 0:
recurrent_dropout_mask = None
if len(index_list) > 0:
inputs = torch.cat(inputs, 0)
initial_state = list(torch.cat(t, 1) for t in zip(*initial_state))
if layer_dropout_mask is not None:
layer_dropout_mask = torch.cat(layer_dropout_mask, 1)
if recurrent_dropout_mask is not None:
recurrent_dropout_mask = torch.cat(recurrent_dropout_mask, 1)
output_state, output_memory = self._forward(inputs, initial_state, layer_dropout_mask,
recurrent_dropout_mask)
for i, stack_index in enumerate(index_list):
output = {
'stack_rnn_state': output_state[:, i, :],
'stack_rnn_memory': output_memory[:, i, :],
'stack_rnn_output': output_state[-1, i, :]
}
output.update(self.push_buffer[stack_index])
self.stacks[stack_index].append(output)
self.push_buffer[stack_index] = None
def _forward(self, # pylint: disable=arguments-differ
inputs: torch.Tensor,
initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]],
layer_dropout_mask: Optional[torch.Tensor] = None,
recurrent_dropout_mask: Optional[torch.Tensor] = None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Parameters
----------
inputs : torch.Tensor, required.
A batch first torch.Tensor to run the stacked LSTM over.
initial_state : Tuple[torch.Tensor, torch.Tensor], optional, (default = None)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. Each tensor has shape (num_layers, batch_size, hidden_size).
Returns
-------
final_states: torch.Tensor
The per-layer final (state, memory) states of the LSTM, each with shape
(num_layers, batch_size, hidden_size).
"""
# if not initial_state:
# hidden_states = [None] * len(self.lstm_layers)
# elif initial_state[0].size()[0] != len(self.lstm_layers):
# raise ConfigurationError("Initial states were passed to forward() but the number of "
# "initial states does not match the number of layers.")
# else:
hidden_states = list(zip(initial_state[0].split(1, 0),
initial_state[1].split(1, 0)))
previous_output = inputs
final_h = []
final_c = []
for i, state in enumerate(hidden_states):
layer = getattr(self, 'layer_{}'.format(i))
# The state is duplicated to mirror the Pytorch API for LSTMs.
if self.training:
if self.same_dropout_mask_per_instance:
if layer_dropout_mask is not None and i > 0:
previous_output = previous_output * layer_dropout_mask[i - 1]
if recurrent_dropout_mask is not None:
state = (state[0] * recurrent_dropout_mask[i], state[1])
else:
pass
final_state = layer(previous_output, state)
previous_output = final_state[0].squeeze(0)
final_h.append(final_state[0])
final_c.append(final_state[1])
final_h = torch.cat(final_h, dim=0)
final_c = torch.cat(final_c, dim=0)
final_state_tuple = (final_h, final_c)
return final_state_tuple
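# Illustrative usage sketch (added; an assumption about typical driver code for
# the stack RNN above -- the sizes and the dummy embedding are made up).
def _example_stack_rnn():
    rnn = StackRnn(input_size=8, hidden_size=16, num_layers=2)
    rnn.reset_stack(num_stacks=1)
    token_embedding = torch.zeros(8)                 # stand-in for a real embedding
    rnn.push(0, token_embedding, extra={'token': 'hello'})
    top_hidden = rnn.get_output(0)                   # hidden state of the stack top
    popped = rnn.pop(0)                              # dict with stack_rnn_state/memory/output
    return top_hidden, popped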
|
'''
Function Name : DirectoryTraversal, DisplayDuplicate
Description : This application ties the other modules together to find and remove duplicate files.
Function Date : 25 June 2021
Function Author : Prasad Dangare
Input : Name of the target directory
Output : Deletes duplicate files, creates a log file, and sends it by mail to the user / client.
'''
# ===================
# Imports
# ===================
import os
from sys import *
import CheckSum, log_File, Mailing, Connection
from datetime import *
# ==============================
# Directory Traversal Operations
# ==============================
def DirectoryTraversal(path):
print("\nContents of the Directory are : ")
dictionary = {}
for Folder, SubFolder, Filename in os.walk(path):
print("\nDirectory Name is : " + Folder)
for sub in SubFolder:
print("Subfolder of " + Folder +" is "+ sub)
for file in Filename:
print("File Name is : " + file)
actualpath = os.path.join(Folder, file)
hash = CheckSum.calculateCheckSum(actualpath)
if hash in dictionary:
dictionary[hash].append(actualpath)
else:
dictionary[hash] = [actualpath]
current_date_time = datetime.now().strftime("%d_%m_%Y-%I_%M")+".txt"
log_File.Writing(dictionary, current_date_time)
Show = DisplayDuplicate(dictionary)
Mailing.mail(current_date_time)
return dictionary
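# Illustrative note (added): the dictionary built above maps a file checksum to
# every path that holds identical content, e.g.
#   { 'd41d8cd9...': ['dir/a.txt', 'dir/sub/b.txt'] }
# so any value list longer than one marks a group of duplicates.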
# ==================================
# Display Directory Files Operations
# ==================================
def DisplayDuplicate(dictionary):
output = list(filter(lambda x : len(x) > 1, dictionary.values()))
if(len(output) > 0):
print("\nThere Are Duplicate Files")
else:
print("\nThere Are No Dupicate Files")
return
print("\nList Of Duplicate Files Are : \n")
i = 0
icnt = 0
for result in output:
icnt = 0
print(result)
for path in result:
icnt += 1 # skip 1 file
if icnt >= 2:
i += 1
#print("%s"%path)
os.remove(path)
                # the duplicate copies (all but the first file in each group) are deleted above
print("\nNumber Of Duplicate Files Deleted ", i)
# =======================
# Entry Point
# =======================
def main():
print("\n\t_____Duplicate File Removing Script_____\n\t")
if(len(argv) != 2):
print("Error : Invalid Number Of Arguments")
exit()
if((argv[1] == "-h") or (argv[1] == "-H")):
print("It Is A Directory Cleaner Script")
exit()
if((argv[1] == "-u") or (argv[1] == "-U")):
print("Usage : Provide Absolute Path Of The Target Directory")
exit()
path = os.path.join(os.getcwd(), argv[1])
if((os.path.isdir(path) == True) and (Connection.Check_Connection() == True)):
DirectoryTraversal(argv[1])
# ===================
# Starter
# ===================
if __name__=="__main__":
main() |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlgorithmGoodsInfo(object):
def __init__(self):
self._algorithm_goods_id = None
self._gif_file_id = None
self._pic_file_id = None
self._three_dimension = None
self._thumbnails = None
@property
def algorithm_goods_id(self):
return self._algorithm_goods_id
@algorithm_goods_id.setter
def algorithm_goods_id(self, value):
self._algorithm_goods_id = value
@property
def gif_file_id(self):
return self._gif_file_id
@gif_file_id.setter
def gif_file_id(self, value):
self._gif_file_id = value
@property
def pic_file_id(self):
return self._pic_file_id
@pic_file_id.setter
def pic_file_id(self, value):
self._pic_file_id = value
@property
def three_dimension(self):
return self._three_dimension
@three_dimension.setter
def three_dimension(self, value):
self._three_dimension = value
@property
def thumbnails(self):
return self._thumbnails
@thumbnails.setter
def thumbnails(self, value):
self._thumbnails = value
def to_alipay_dict(self):
params = dict()
if self.algorithm_goods_id:
if hasattr(self.algorithm_goods_id, 'to_alipay_dict'):
params['algorithm_goods_id'] = self.algorithm_goods_id.to_alipay_dict()
else:
params['algorithm_goods_id'] = self.algorithm_goods_id
if self.gif_file_id:
if hasattr(self.gif_file_id, 'to_alipay_dict'):
params['gif_file_id'] = self.gif_file_id.to_alipay_dict()
else:
params['gif_file_id'] = self.gif_file_id
if self.pic_file_id:
if hasattr(self.pic_file_id, 'to_alipay_dict'):
params['pic_file_id'] = self.pic_file_id.to_alipay_dict()
else:
params['pic_file_id'] = self.pic_file_id
if self.three_dimension:
if hasattr(self.three_dimension, 'to_alipay_dict'):
params['three_dimension'] = self.three_dimension.to_alipay_dict()
else:
params['three_dimension'] = self.three_dimension
if self.thumbnails:
if hasattr(self.thumbnails, 'to_alipay_dict'):
params['thumbnails'] = self.thumbnails.to_alipay_dict()
else:
params['thumbnails'] = self.thumbnails
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlgorithmGoodsInfo()
if 'algorithm_goods_id' in d:
o.algorithm_goods_id = d['algorithm_goods_id']
if 'gif_file_id' in d:
o.gif_file_id = d['gif_file_id']
if 'pic_file_id' in d:
o.pic_file_id = d['pic_file_id']
if 'three_dimension' in d:
o.three_dimension = d['three_dimension']
if 'thumbnails' in d:
o.thumbnails = d['thumbnails']
return o
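# Illustrative round-trip sketch (added): plain string values pass unchanged
# through from_alipay_dict / to_alipay_dict; unset fields are simply omitted.
#
#   info = AlgorithmGoodsInfo.from_alipay_dict({'algorithm_goods_id': '123',
#                                               'pic_file_id': 'abc'})
#   info.to_alipay_dict()   # -> {'algorithm_goods_id': '123', 'pic_file_id': 'abc'}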
|
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), "../../../")))
sys.path.insert(0, os.path.abspath(
os.path.join(os.getcwd(), "../../../../FedML")))
from fedml_core.distributed.client.client_manager import ClientManager
class CommunicationManager(ClientManager):
def __init__(self, args, comm, rank, size, backend):
super().__init__(args, comm, rank, size, backend)
def register_message_receive_handlers(self):
pass
def run(self):
super().run()
|
from django.middleware.csrf import CsrfMiddleware, CsrfViewMiddleware, CsrfResponseMiddleware
from django.views.decorators.csrf import csrf_exempt, csrf_view_exempt, csrf_response_exempt
import warnings
warnings.warn("This import for CSRF functionality is deprecated. Please use django.middleware.csrf for the middleware and django.views.decorators.csrf for decorators.",
PendingDeprecationWarning
)
|
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for mathematics_dataset.modules.arithmetic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import absltest
from mathematics_dataset.modules import arithmetic
import sympy
class ArithmeticTest(absltest.TestCase):
def testSurdCoefficients(self):
exp = sympy.sympify('1')
self.assertEqual(arithmetic._surd_coefficients(exp),
(1, 0))
exp = sympy.sympify('1/2')
self.assertEqual(arithmetic._surd_coefficients(exp),
(1/2, 0))
exp = sympy.sympify('sqrt(2)')
self.assertEqual(arithmetic._surd_coefficients(exp),
(0, 1))
exp = sympy.sympify('3*sqrt(2)')
self.assertEqual(arithmetic._surd_coefficients(exp),
(0, 3))
exp = sympy.sympify('3*sqrt(5)/2')
self.assertEqual(arithmetic._surd_coefficients(exp),
(0, 3/2))
exp = sympy.sympify('1 + 3 * sqrt(2)')
self.assertEqual(arithmetic._surd_coefficients(exp),
(1, 3))
exp = sympy.sympify('1/2 + 3 * sqrt(5) / 2')
self.assertEqual(arithmetic._surd_coefficients(exp),
(1/2, 3/2))
exp = sympy.sympify('sqrt(2)/(-1 + 2*sqrt(2))**2')
self.assertEqual(arithmetic._surd_coefficients(exp),
(8/49, 9/49))
if __name__ == '__main__':
absltest.main()
|
#!/usr/bin/python3
import pandas as pd
import re
import numpy as np
import os
import sys
from collections import OrderedDict
parent_path = os.path.realpath(os.pardir)
if sys.platform.startswith('win') or sys.platform.startswith('cygwin'):
seseds_path = os.path.join(parent_path, 'MCM-ICM-2018-Problem-C\\data\\csv\\seseds.csv')
msncodes_path = os.path.join(parent_path, 'MCM-ICM-2018-Problem-C\\data\\csv\\msncodes.csv')
elif sys.platform.startswith('darwin') or sys.platform.startswith('linux'):
seseds_path = os.path.join(parent_path, 'MCM-ICM-2018-Problem-C/data/csv/seseds.csv')
msncodes_path = os.path.join(parent_path, 'MCM-ICM-2018-Problem-C/data/csv/msncodes.csv')
else:
pass
seseds = pd.read_csv(seseds_path, skiprows=None, engine='c', low_memory=True)
msncodes = pd.read_csv(msncodes_path, skiprows=None, engine='c', low_memory=True)
# print(seseds)
# print(msncodes)
# print(type(msncodes))  # pandas DataFrame, not a dict
# for key in msncodes.keys():
# print(msncodes[key])
msn = []
description = []
unit = []
for i in range(len(msncodes["Description"])):
if not re.search("price", msncodes["Description"][i]) and not re.search("expenditures", msncodes["Description"][i]) and \
not re.search("production", msncodes["Description"][i]) and not re.search("imported", msncodes["Description"][i]) and \
not re.search("imports", msncodes["Description"][i]) and not re.search("exported", msncodes["Description"][i]) and \
not re.search("exported", msncodes["Description"][i]) and not re.search("Factor", msncodes["Description"][i]):
msn.append(msncodes["MSN"][i])
description.append(msncodes["Description"][i])
unit.append(msncodes["Unit"][i])
comp_data = OrderedDict()
item_dict = OrderedDict()
item_dict["MSN"] = msn
item_dict["Description"] = description
item_dict["Unit"] = unit
comp_data = pd.DataFrame(item_dict)
# data_frame.to_csv("C:\\Users\\THINKPAD\\PycharmProjects\\MCM-ICM2018\\data\\test.csv",index=False,index_label=False,sep=',')
comp_data.to_csv("data/csv/test.csv", index=False, index_label=False, sep=',')
print(comp_data)
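# Illustrative sketch (added, not used above): the chained re.search calls in
# the filtering loop are equivalent to testing a tuple of excluded keywords.
# The names EXCLUDED_KEYWORDS and keep_description are hypothetical.
EXCLUDED_KEYWORDS = ("price", "expenditures", "production", "imported",
                     "imports", "exported", "Factor")

def keep_description(description):
    """Return True when none of the excluded keywords appear in the description."""
    return not any(re.search(word, description) for word in EXCLUDED_KEYWORDS)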
|
# Copyright (c) 2017, Michael Boyle
# See LICENSE file for details: <https://github.com/moble/quaternion/blob/master/LICENSE>
from __future__ import division, print_function, absolute_import
import numpy as np
from quaternion.numba_wrapper import njit, jit, xrange
def derivative(f, t):
"""Fourth-order finite-differencing with non-uniform time steps
The formula for this finite difference comes from Eq. (A 5b) of "Derivative formulas and errors for non-uniformly
spaced points" by M. K. Bowen and Ronald Smith. As explained in their Eqs. (B 9b) and (B 10b), this is a
fourth-order formula -- though that's a squishy concept with non-uniform time steps.
TODO: If there are fewer than five points, the function should revert to simpler (lower-order) formulas.
"""
dfdt = np.empty_like(f)
if (f.ndim == 1):
_derivative(f, t, dfdt)
elif (f.ndim == 2):
_derivative_2d(f, t, dfdt)
elif (f.ndim == 3):
_derivative_3d(f, t, dfdt)
else:
raise NotImplementedError("Taking derivatives of {0}-dimensional arrays is not yet implemented".format(f.ndim))
return dfdt
@njit
def _derivative(f, t, dfdt):
for i in xrange(2):
t_i = t[i]
t1 = t[0]
t2 = t[1]
t3 = t[2]
t4 = t[3]
t5 = t[4]
h1 = t1 - t_i
h2 = t2 - t_i
h3 = t3 - t_i
h4 = t4 - t_i
h5 = t5 - t_i
h12 = t1 - t2
h13 = t1 - t3
h14 = t1 - t4
h15 = t1 - t5
h23 = t2 - t3
h24 = t2 - t4
h25 = t2 - t5
h34 = t3 - t4
h35 = t3 - t5
h45 = t4 - t5
dfdt[i] = (-((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[0]
+ ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[1]
- ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[2]
+ ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[3]
- ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[4])
for i in xrange(2, len(t) - 2):
t1 = t[i - 2]
t2 = t[i - 1]
t3 = t[i]
t4 = t[i + 1]
t5 = t[i + 2]
h1 = t1 - t3
h2 = t2 - t3
h4 = t4 - t3
h5 = t5 - t3
h12 = t1 - t2
h13 = t1 - t3
h14 = t1 - t4
h15 = t1 - t5
h23 = t2 - t3
h24 = t2 - t4
h25 = t2 - t5
h34 = t3 - t4
h35 = t3 - t5
h45 = t4 - t5
dfdt[i] = (-((h2 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[i - 2]
+ ((h1 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[i - 1]
- ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[i]
+ ((h1 * h2 * h5) / (h14 * h24 * h34 * h45)) * f[i + 1]
- ((h1 * h2 * h4) / (h15 * h25 * h35 * h45)) * f[i + 2])
for i in xrange(len(t) - 2, len(t)):
t_i = t[i]
t1 = t[-5]
t2 = t[-4]
t3 = t[-3]
t4 = t[-2]
t5 = t[-1]
h1 = t1 - t_i
h2 = t2 - t_i
h3 = t3 - t_i
h4 = t4 - t_i
h5 = t5 - t_i
h12 = t1 - t2
h13 = t1 - t3
h14 = t1 - t4
h15 = t1 - t5
h23 = t2 - t3
h24 = t2 - t4
h25 = t2 - t5
h34 = t3 - t4
h35 = t3 - t5
h45 = t4 - t5
dfdt[i] = (-((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[-5]
+ ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[-4]
- ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[-3]
+ ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[-2]
- ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[-1])
return
@njit
def _derivative_2d(f, t, dfdt):
for i in xrange(2):
t_i = t[i]
t1 = t[0]
t2 = t[1]
t3 = t[2]
t4 = t[3]
t5 = t[4]
h1 = t1 - t_i
h2 = t2 - t_i
h3 = t3 - t_i
h4 = t4 - t_i
h5 = t5 - t_i
h12 = t1 - t2
h13 = t1 - t3
h14 = t1 - t4
h15 = t1 - t5
h23 = t2 - t3
h24 = t2 - t4
h25 = t2 - t5
h34 = t3 - t4
h35 = t3 - t5
h45 = t4 - t5
for k in xrange(f.shape[1]):
dfdt[i, k] = (
-((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[0, k]
+ ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[1, k]
- ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[2, k]
+ ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[3, k]
- ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[4, k])
for i in xrange(2, len(t) - 2):
t1 = t[i - 2]
t2 = t[i - 1]
t3 = t[i]
t4 = t[i + 1]
t5 = t[i + 2]
h1 = t1 - t3
h2 = t2 - t3
h4 = t4 - t3
h5 = t5 - t3
h12 = t1 - t2
h13 = t1 - t3
h14 = t1 - t4
h15 = t1 - t5
h23 = t2 - t3
h24 = t2 - t4
h25 = t2 - t5
h34 = t3 - t4
h35 = t3 - t5
h45 = t4 - t5
for k in xrange(f.shape[1]):
dfdt[i, k] = (-((h2 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[i - 2, k]
+ ((h1 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[i - 1, k]
- ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35))
* f[i, k]
+ ((h1 * h2 * h5) / (h14 * h24 * h34 * h45)) * f[i + 1, k]
- ((h1 * h2 * h4) / (h15 * h25 * h35 * h45)) * f[i + 2, k])
for i in xrange(len(t) - 2, len(t)):
t_i = t[i]
t1 = t[-5]
t2 = t[-4]
t3 = t[-3]
t4 = t[-2]
t5 = t[-1]
h1 = t1 - t_i
h2 = t2 - t_i
h3 = t3 - t_i
h4 = t4 - t_i
h5 = t5 - t_i
h12 = t1 - t2
h13 = t1 - t3
h14 = t1 - t4
h15 = t1 - t5
h23 = t2 - t3
h24 = t2 - t4
h25 = t2 - t5
h34 = t3 - t4
h35 = t3 - t5
h45 = t4 - t5
for k in xrange(f.shape[1]):
dfdt[i, k] = (
-((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[-5, k]
+ ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[-4, k]
- ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[-3, k]
+ ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[-2, k]
- ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[-1, k])
return
@njit
def _derivative_3d(f, t, dfdt):
for i in xrange(2):
t_i = t[i]
t1 = t[0]
t2 = t[1]
t3 = t[2]
t4 = t[3]
t5 = t[4]
h1 = t1 - t_i
h2 = t2 - t_i
h3 = t3 - t_i
h4 = t4 - t_i
h5 = t5 - t_i
h12 = t1 - t2
h13 = t1 - t3
h14 = t1 - t4
h15 = t1 - t5
h23 = t2 - t3
h24 = t2 - t4
h25 = t2 - t5
h34 = t3 - t4
h35 = t3 - t5
h45 = t4 - t5
for k in xrange(f.shape[1]):
            for m in xrange(f.shape[2]):
dfdt[i, k, m] = (
-((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[0, k, m]
+ ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[1, k, m]
- ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[2, k, m]
+ ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[3, k, m]
- ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[4, k, m])
for i in xrange(2, len(t) - 2):
t1 = t[i - 2]
t2 = t[i - 1]
t3 = t[i]
t4 = t[i + 1]
t5 = t[i + 2]
h1 = t1 - t3
h2 = t2 - t3
h4 = t4 - t3
h5 = t5 - t3
h12 = t1 - t2
h13 = t1 - t3
h14 = t1 - t4
h15 = t1 - t5
h23 = t2 - t3
h24 = t2 - t4
h25 = t2 - t5
h34 = t3 - t4
h35 = t3 - t5
h45 = t4 - t5
for k in xrange(f.shape[1]):
            for m in xrange(f.shape[2]):
dfdt[i, k, m] = (-((h2 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[i - 2, k, m]
+ ((h1 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[i - 1, k, m]
- ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35))
* f[i, k, m]
+ ((h1 * h2 * h5) / (h14 * h24 * h34 * h45)) * f[i + 1, k, m]
- ((h1 * h2 * h4) / (h15 * h25 * h35 * h45)) * f[i + 2, k, m])
for i in xrange(len(t) - 2, len(t)):
t_i = t[i]
t1 = t[-5]
t2 = t[-4]
t3 = t[-3]
t4 = t[-2]
t5 = t[-1]
h1 = t1 - t_i
h2 = t2 - t_i
h3 = t3 - t_i
h4 = t4 - t_i
h5 = t5 - t_i
h12 = t1 - t2
h13 = t1 - t3
h14 = t1 - t4
h15 = t1 - t5
h23 = t2 - t3
h24 = t2 - t4
h25 = t2 - t5
h34 = t3 - t4
h35 = t3 - t5
h45 = t4 - t5
for k in xrange(f.shape[1]):
            for m in xrange(f.shape[2]):
dfdt[i, k, m] = (
-((h2 * h3 * h4 + h2 * h3 * h5 + h2 * h4 * h5 + h3 * h4 * h5) / (h12 * h13 * h14 * h15)) * f[-5, k, m]
+ ((h1 * h3 * h4 + h1 * h3 * h5 + h1 * h4 * h5 + h3 * h4 * h5) / (h12 * h23 * h24 * h25)) * f[-4, k, m]
- ((h1 * h2 * h4 + h1 * h2 * h5 + h1 * h4 * h5 + h2 * h4 * h5) / (h13 * h23 * h34 * h35)) * f[-3, k, m]
+ ((h1 * h2 * h3 + h1 * h2 * h5 + h1 * h3 * h5 + h2 * h3 * h5) / (h14 * h24 * h34 * h45)) * f[-2, k, m]
- ((h1 * h2 * h3 + h1 * h2 * h4 + h1 * h3 * h4 + h2 * h3 * h4) / (h15 * h25 * h35 * h45)) * f[-1, k, m])
return
# @njit('void(f8[:,:], f8[:], f8[:,:])')
@jit
def indefinite_integral(f, t):
Sfdt = np.empty_like(f)
Sfdt[0] = 0.0
for i in xrange(1, len(t)):
for j in xrange(f.shape[1]):
Sfdt[i, j] = Sfdt[i - 1, j] + (f[i, j] + f[i - 1, j]) * ((t[i] - t[i - 1]) / 2.0)
return Sfdt
#@njit('void(f8[:,:], f8[:], f8[:])')
@jit
def definite_integral(f, t):
Sfdt = np.zeros_like(f)
Sfdt[1:, ...] = (f[1:, ...] + f[:-1, ...]) * ((t[1:] - t[:-1]) / 2.0)
return np.sum(Sfdt)
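if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the library): differentiate a
    # sampled sine on a non-uniform grid and compare against the exact cosine.
    t_demo = np.sort(np.random.uniform(0.0, 2.0 * np.pi, 200))
    f_demo = np.sin(t_demo)[:, np.newaxis]   # 2-d input, shape (200, 1)
    dfdt_demo = derivative(f_demo, t_demo)   # fourth-order finite difference
    print("max abs error vs cos(t):",
          np.max(np.abs(dfdt_demo[:, 0] - np.cos(t_demo))))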
|
expected_output = {
'dynamic_nat_entries': '0 entries',
'flow_record': 'Disabled',
'nat_type': 'Static',
'netflow_type': 'NA',
'static_nat_entries': '0 entries',
'total_hw_resource_tcam': '26 of 27648 /0 .09% utilization',
'total_nat_entries': '0 entries'
} |
from django.contrib import admin
from .models import BookAppoinment
# Register your models here.
admin.site.register(BookAppoinment)
|
{
'French': 'fransè',
'Haitian Creole': 'kreyòl ayisyen',
'Haitian French': 'fransè ayisyen',
}
|
class NotSet(object):
"""Sentinel value."""
def __eq__(self, other):
return self is other
def __repr__(self):
return 'NotSet'
class StateNotSet(object):
def __eq__(self, other):
return self is other
def __repr__(self):
return 'NotSet'
def value(self):
return 0
class StateSet(object):
def __eq__(self, other):
return self is other
def __repr__(self):
return 'Set'
def value(self):
return 1
class StateCleared(object):
def __eq__(self, other):
return self is other
def __repr__(self):
return 'Cleared'
def value(self):
return 2
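# Illustrative sketch (added, hypothetical names): the usual way a sentinel such
# as NotSet is used -- as a default that can be told apart from an explicit None
# passed by the caller.
NOT_SET = NotSet()

def get_option(options, key, default=NOT_SET):
    """Return options[key]; fall back to default, or raise if no default was given."""
    value = options.get(key, NOT_SET)
    if value is NOT_SET:
        if default is NOT_SET:
            raise KeyError(key)
        return default
    return value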
|
from IPython.html.services.contents.filemanager import FileContentsManager
c.ServerApp.jpserver_extensions = {"jupyterlab_hubshare": True}
c.HubShare.file_path_template = "{user}/{path}"
c.HubShare.contents_manager = {
"manager_cls": FileContentsManager,
"kwargs": {"root_dir": "/tmp/"},
}
|
"""Generated client library for run version v1alpha1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.run.v1alpha1 import run_v1alpha1_messages as messages
class RunV1alpha1(base_api.BaseApiClient):
"""Generated client library for service run version v1alpha1."""
MESSAGES_MODULE = messages
BASE_URL = 'https://run.googleapis.com/'
MTLS_BASE_URL = 'https://run.mtls.googleapis.com/'
_PACKAGE = 'run'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
_VERSION = 'v1alpha1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'RunV1alpha1'
_URL_VERSION = 'v1alpha1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new run handle."""
url = url or self.BASE_URL
super(RunV1alpha1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.namespaces_domainmappings = self.NamespacesDomainmappingsService(self)
self.namespaces_jobs = self.NamespacesJobsService(self)
self.namespaces = self.NamespacesService(self)
self.projects_locations_domainmappings = self.ProjectsLocationsDomainmappingsService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
class NamespacesDomainmappingsService(base_api.BaseApiService):
"""Service class for the namespaces_domainmappings resource."""
_NAME = 'namespaces_domainmappings'
def __init__(self, client):
super(RunV1alpha1.NamespacesDomainmappingsService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a new domain mapping.
Args:
request: (RunNamespacesDomainmappingsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DomainMapping) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='apis/domains.cloudrun.com/v1alpha1/namespaces/{namespacesId}/domainmappings',
http_method='POST',
method_id='run.namespaces.domainmappings.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='apis/domains.cloudrun.com/v1alpha1/{+parent}/domainmappings',
request_field='domainMapping',
request_type_name='RunNamespacesDomainmappingsCreateRequest',
response_type_name='DomainMapping',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Rpc to delete a domain mapping.
Args:
request: (RunNamespacesDomainmappingsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='apis/domains.cloudrun.com/v1alpha1/namespaces/{namespacesId}/domainmappings/{domainmappingsId}',
http_method='DELETE',
method_id='run.namespaces.domainmappings.delete',
ordered_params=['name'],
path_params=['name'],
query_params=['apiVersion', 'kind', 'orphanDependents', 'propagationPolicy'],
relative_path='apis/domains.cloudrun.com/v1alpha1/{+name}',
request_field='',
request_type_name='RunNamespacesDomainmappingsDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Rpc to get information about a domain mapping.
Args:
request: (RunNamespacesDomainmappingsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DomainMapping) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='apis/domains.cloudrun.com/v1alpha1/namespaces/{namespacesId}/domainmappings/{domainmappingsId}',
http_method='GET',
method_id='run.namespaces.domainmappings.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='apis/domains.cloudrun.com/v1alpha1/{+name}',
request_field='',
request_type_name='RunNamespacesDomainmappingsGetRequest',
response_type_name='DomainMapping',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Rpc to list domain mappings.
Args:
request: (RunNamespacesDomainmappingsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListDomainMappingsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='apis/domains.cloudrun.com/v1alpha1/namespaces/{namespacesId}/domainmappings',
http_method='GET',
method_id='run.namespaces.domainmappings.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['continue_', 'fieldSelector', 'includeUninitialized', 'labelSelector', 'limit', 'resourceVersion', 'watch'],
relative_path='apis/domains.cloudrun.com/v1alpha1/{+parent}/domainmappings',
request_field='',
request_type_name='RunNamespacesDomainmappingsListRequest',
response_type_name='ListDomainMappingsResponse',
supports_download=False,
)
def ReplaceDomainMapping(self, request, global_params=None):
r"""Rpc to replace a domain mapping. Only the spec and metadata labels and annotations are modifiable. After the Update request, Cloud Run will work to make the 'status' match the requested 'spec'. May provide metadata.resourceVersion to enforce update from last read for optimistic concurrency control.
Args:
request: (RunNamespacesDomainmappingsReplaceDomainMappingRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DomainMapping) The response message.
"""
config = self.GetMethodConfig('ReplaceDomainMapping')
return self._RunMethod(
config, request, global_params=global_params)
ReplaceDomainMapping.method_config = lambda: base_api.ApiMethodInfo(
flat_path='apis/domains.cloudrun.com/v1alpha1/namespaces/{namespacesId}/domainmappings/{domainmappingsId}',
http_method='PUT',
method_id='run.namespaces.domainmappings.replaceDomainMapping',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='apis/domains.cloudrun.com/v1alpha1/{+name}',
request_field='domainMapping',
request_type_name='RunNamespacesDomainmappingsReplaceDomainMappingRequest',
response_type_name='DomainMapping',
supports_download=False,
)
class NamespacesJobsService(base_api.BaseApiService):
"""Service class for the namespaces_jobs resource."""
_NAME = 'namespaces_jobs'
def __init__(self, client):
super(RunV1alpha1.NamespacesJobsService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Create a job.
Args:
request: (RunNamespacesJobsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='apis/run.googleapis.com/v1alpha1/namespaces/{namespacesId}/jobs',
http_method='POST',
method_id='run.namespaces.jobs.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='apis/run.googleapis.com/v1alpha1/{+parent}/jobs',
request_field='job',
request_type_name='RunNamespacesJobsCreateRequest',
response_type_name='Job',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Delete a job.
Args:
request: (RunNamespacesJobsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='apis/run.googleapis.com/v1alpha1/namespaces/{namespacesId}/jobs/{jobsId}',
http_method='DELETE',
method_id='run.namespaces.jobs.delete',
ordered_params=['name'],
path_params=['name'],
query_params=['apiVersion', 'kind', 'propagationPolicy'],
relative_path='apis/run.googleapis.com/v1alpha1/{+name}',
request_field='',
request_type_name='RunNamespacesJobsDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Get information about a job.
Args:
request: (RunNamespacesJobsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Job) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='apis/run.googleapis.com/v1alpha1/namespaces/{namespacesId}/jobs/{jobsId}',
http_method='GET',
method_id='run.namespaces.jobs.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='apis/run.googleapis.com/v1alpha1/{+name}',
request_field='',
request_type_name='RunNamespacesJobsGetRequest',
response_type_name='Job',
supports_download=False,
)
def List(self, request, global_params=None):
r"""List jobs.
Args:
request: (RunNamespacesJobsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListJobsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='apis/run.googleapis.com/v1alpha1/namespaces/{namespacesId}/jobs',
http_method='GET',
method_id='run.namespaces.jobs.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['continue_', 'fieldSelector', 'includeUninitialized', 'labelSelector', 'limit', 'resourceVersion', 'watch'],
relative_path='apis/run.googleapis.com/v1alpha1/{+parent}/jobs',
request_field='',
request_type_name='RunNamespacesJobsListRequest',
response_type_name='ListJobsResponse',
supports_download=False,
)
class NamespacesService(base_api.BaseApiService):
"""Service class for the namespaces resource."""
_NAME = 'namespaces'
def __init__(self, client):
super(RunV1alpha1.NamespacesService, self).__init__(client)
self._upload_configs = {
}
class ProjectsLocationsDomainmappingsService(base_api.BaseApiService):
"""Service class for the projects_locations_domainmappings resource."""
_NAME = 'projects_locations_domainmappings'
def __init__(self, client):
super(RunV1alpha1.ProjectsLocationsDomainmappingsService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a new domain mapping.
Args:
request: (RunProjectsLocationsDomainmappingsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DomainMapping) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/domainmappings',
http_method='POST',
method_id='run.projects.locations.domainmappings.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1alpha1/{+parent}/domainmappings',
request_field='domainMapping',
request_type_name='RunProjectsLocationsDomainmappingsCreateRequest',
response_type_name='DomainMapping',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Rpc to delete a domain mapping.
Args:
request: (RunProjectsLocationsDomainmappingsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/domainmappings/{domainmappingsId}',
http_method='DELETE',
method_id='run.projects.locations.domainmappings.delete',
ordered_params=['name'],
path_params=['name'],
query_params=['apiVersion', 'kind', 'orphanDependents', 'propagationPolicy'],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='RunProjectsLocationsDomainmappingsDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Rpc to get information about a domain mapping.
Args:
request: (RunProjectsLocationsDomainmappingsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DomainMapping) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/domainmappings/{domainmappingsId}',
http_method='GET',
method_id='run.projects.locations.domainmappings.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='RunProjectsLocationsDomainmappingsGetRequest',
response_type_name='DomainMapping',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Rpc to list domain mappings.
Args:
request: (RunProjectsLocationsDomainmappingsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListDomainMappingsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/domainmappings',
http_method='GET',
method_id='run.projects.locations.domainmappings.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['continue_', 'fieldSelector', 'includeUninitialized', 'labelSelector', 'limit', 'resourceVersion', 'watch'],
relative_path='v1alpha1/{+parent}/domainmappings',
request_field='',
request_type_name='RunProjectsLocationsDomainmappingsListRequest',
response_type_name='ListDomainMappingsResponse',
supports_download=False,
)
def ReplaceDomainMapping(self, request, global_params=None):
r"""Rpc to replace a domain mapping. Only the spec and metadata labels and annotations are modifiable. After the Update request, Cloud Run will work to make the 'status' match the requested 'spec'. May provide metadata.resourceVersion to enforce update from last read for optimistic concurrency control.
Args:
request: (RunProjectsLocationsDomainmappingsReplaceDomainMappingRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DomainMapping) The response message.
"""
config = self.GetMethodConfig('ReplaceDomainMapping')
return self._RunMethod(
config, request, global_params=global_params)
ReplaceDomainMapping.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/domainmappings/{domainmappingsId}',
http_method='PUT',
method_id='run.projects.locations.domainmappings.replaceDomainMapping',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='domainMapping',
request_type_name='RunProjectsLocationsDomainmappingsReplaceDomainMappingRequest',
response_type_name='DomainMapping',
supports_download=False,
)
class ProjectsLocationsService(base_api.BaseApiService):
"""Service class for the projects_locations resource."""
_NAME = 'projects_locations'
def __init__(self, client):
super(RunV1alpha1.ProjectsLocationsService, self).__init__(client)
self._upload_configs = {
}
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = 'projects'
def __init__(self, client):
super(RunV1alpha1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
|
import mock
import pyconfig
from unittest.case import SkipTest
from humbledb import Mongo, Document, _version
from humbledb.errors import DatabaseMismatch, ConnectionFailure
from ..util import database_name, ok_, eq_, DBTest, raises
def teardown():
DBTest.connection.drop_database(database_name())
def test_new():
eq_(DBTest, DBTest())
@raises(TypeError)
def test_missing_config_host():
class Test(Mongo):
config_port = 27017
@raises(TypeError)
def test_missing_config_port():
class Test(Mongo):
config_host = 'localhost'
def test_reload():
with mock.patch.object(DBTest, '_new_connection') as _new_conn:
pyconfig.reload()
_new_conn.assert_called_once()
# Have to reload again to get real connection instance back
pyconfig.reload()
@raises(RuntimeError)
def test_nested_conn():
with DBTest:
with DBTest:
pass
def test_harmless_end():
# This shouldn't raise any errors
DBTest.end()
DBTest.start()
DBTest.end()
DBTest.end()
def test_replica_works_for_versions_between_2_1_and_2_4():
if _version._lt('2.1') or _version._gte('2.4'):
raise SkipTest
with mock.patch('pymongo.ReplicaSetConnection') as replica:
class Replica(Mongo):
config_host = 'localhost'
config_port = 27017
config_replica = 'test'
with Replica:
pass
replica.assert_called_once()
def test_replica_works_for_versions_after_2_4():
if _version._lt('2.4'):
raise SkipTest
if _version._gte('3'):
raise SkipTest
with mock.patch('pymongo.MongoReplicaSetClient') as replica:
class Replica(Mongo):
config_host = 'localhost'
config_port = 27017
config_replica = 'test'
with Replica:
pass
replica.assert_called_once()
@raises(TypeError)
def test_replica_errors_for_versions_before_2_1():
if _version._gte('2.1'):
raise SkipTest
class Replica(Mongo):
config_host = 'localhost'
config_port = 27017
config_replica = 'test'
def test_reconnect():
with mock.patch.object(DBTest, '_new_connection') as _new_conn:
DBTest.reconnect()
_new_conn.assert_called_once()
# Have to reconnect again to get real connection instance back
DBTest.reconnect()
def test_mongo_uri_with_database():
if _version._lt('2.6.0'):
raise SkipTest("Needs version 2.6.0 or later")
host = pyconfig.get('humbledb.test.db.host', 'localhost')
port = pyconfig.get('humbledb.test.db.port', 27017)
uri = 'mongodb://{}:{}/{}'.format(host, port, database_name())
class DBuri(Mongo):
config_uri = uri
with DBuri:
eq_(DBuri.database.name, database_name())
eq_(Mongo.context.database.name, database_name())
@raises(DatabaseMismatch)
def test_mongo_uri_database_with_conflict_raises_error():
if _version._lt('2.6.0'):
raise SkipTest("Needs version 2.6.0 or later")
host = pyconfig.get('humbledb.test.db.host', 'localhost')
port = pyconfig.get('humbledb.test.db.port', 27017)
uri = 'mongodb://{}:{}/{}'.format(host, port, database_name())
class DBuri(Mongo):
config_uri = uri
from humbledb import Document
class TestDoc(Document):
config_database = database_name() + '_is_different'
config_collection = 'test'
with DBuri:
TestDoc.find()
@raises(TypeError)
def test_mongo_client_with_ssl_before_2_1():
if _version._gte('2.1'):
raise SkipTest("Only test this with version 2.1 or earlier.")
class SSLMongo(Mongo):
config_host = 'localhost'
config_port = 27017
config_ssl = True
def test_mongo_client_with_ssl_after_2_1():
if _version._lt('2.1'):
raise SkipTest("This test requires version 2.1 or later.")
class SSLMongo(Mongo):
config_host = 'localhost'
config_port = 27017
config_ssl = True
config_mongo_client = ({'serverSelectionTimeoutMS': 300} if
_version._gte('3.0') else {})
class SomeDoc(Document):
config_database = database_name()
config_collection = 'ssl_collection'
name = 'n'
try:
SomeDoc.insert
except:
raise
try:
import socket
socket.setdefaulttimeout(3)
with SSLMongo:
SomeDoc.insert({SomeDoc.name:'foobar'})
ok_(SomeDoc.find({SomeDoc.name:'foobar'}))
except ConnectionFailure as err:
raise SkipTest("SSL may not be enabled on mongodb server: %r" % err)
|
B_35_01_11 = {0: {'A': 0.282, 'C': -0.023, 'E': -0.036, 'D': 0.029, 'G': 0.081, 'F': -0.29, 'I': -0.259, 'H': -0.031, 'K': 0.25, 'M': -0.315, 'L': -0.34, 'N': -0.029, 'Q': 0.063, 'P': 0.313, 'S': 0.153, 'R': 0.507, 'T': 0.079, 'W': -0.183, 'V': -0.151, 'Y': -0.102}, 1: {'A': -0.243, 'C': 0.021, 'E': 0.07, 'D': 0.048, 'G': -0.002, 'F': 0.075, 'I': -0.06, 'H': 0.181, 'K': 0.112, 'M': 0.031, 'L': -0.11, 'N': 0.056, 'Q': 0.007, 'P': -0.452, 'S': 0.096, 'R': 0.24, 'T': -0.032, 'W': 0.089, 'V': -0.244, 'Y': 0.117}, 2: {'A': -0.132, 'C': 0.009, 'E': 0.031, 'D': 0.046, 'G': 0.029, 'F': -0.066, 'I': -0.131, 'H': 0.065, 'K': 0.046, 'M': -0.021, 'L': -0.093, 'N': 0.079, 'Q': 0.137, 'P': 0.06, 'S': -0.028, 'R': 0.036, 'T': -0.06, 'W': 0.06, 'V': -0.196, 'Y': 0.127}, 3: {'A': -0.171, 'C': -0.135, 'E': 0.007, 'D': -0.002, 'G': -0.031, 'F': -0.469, 'I': -0.475, 'H': 0.303, 'K': 0.147, 'M': 0.012, 'L': -0.251, 'N': 0.291, 'Q': 0.379, 'P': -0.659, 'S': 0.485, 'R': 0.325, 'T': 0.264, 'W': 0.023, 'V': -0.333, 'Y': 0.289}, 4: {'A': 0.027, 'C': -0.008, 'E': -0.015, 'D': 0.001, 'G': 0.001, 'F': -0.077, 'I': -0.021, 'H': -0.003, 'K': 0.007, 'M': -0.018, 'L': -0.026, 'N': 0.017, 'Q': 0.03, 'P': 0.037, 'S': 0.031, 'R': 0.013, 'T': 0.039, 'W': -0.017, 'V': 0.01, 'Y': -0.028}, 5: {'A': -0.467, 'C': -0.031, 'E': -0.017, 'D': -0.068, 'G': 0.007, 'F': 0.319, 'I': 0.167, 'H': 0.079, 'K': 0.198, 'M': 0.105, 'L': 0.206, 'N': -0.001, 'Q': 0.03, 'P': -0.014, 'S': -0.438, 'R': 0.213, 'T': -0.448, 'W': 0.136, 'V': -0.174, 'Y': 0.199}, 6: {'A': 0.21, 'C': -0.123, 'E': 0.07, 'D': -0.004, 'G': -0.134, 'F': 0.083, 'I': 0.226, 'H': -0.206, 'K': 0.062, 'M': 0.025, 'L': 0.264, 'N': -0.334, 'Q': 0.086, 'P': 0.308, 'S': -0.414, 'R': 0.14, 'T': -0.132, 'W': -0.134, 'V': 0.251, 'Y': -0.245}, 7: {'A': 0.432, 'C': 0.028, 'E': 0.013, 'D': 0.019, 'G': 0.094, 'F': -0.106, 'I': -0.291, 'H': -0.095, 'K': -0.221, 'M': 0.019, 'L': 0.264, 'N': 0.237, 'Q': -0.058, 'P': 0.019, 'S': 0.294, 'R': -0.51, 'T': 0.215, 'W': -0.19, 'V': 0.255, 'Y': -0.418}, 8: {'A': -0.15, 'C': -0.04, 'E': 0.06, 'D': 0.138, 'G': 0.028, 'F': -0.318, 'I': -0.379, 'H': 0.143, 'K': -0.037, 'M': -0.103, 'L': -0.207, 'N': 0.172, 'Q': 0.187, 'P': 0.082, 'S': 0.239, 'R': 0.049, 'T': 0.259, 'W': 0.084, 'V': -0.306, 'Y': 0.097}, 9: {'A': -0.067, 'C': -0.057, 'E': -0.165, 'D': -0.106, 'G': 0.067, 'F': -0.118, 'I': -0.099, 'H': 0.096, 'K': 0.139, 'M': 0.042, 'L': -0.001, 'N': 0.081, 'Q': 0.06, 'P': -0.187, 'S': 0.145, 'R': 0.136, 'T': 0.092, 'W': -0.015, 'V': 0.004, 'Y': -0.047}, 10: {'A': 0.131, 'C': -0.057, 'E': 0.023, 'D': -0.014, 'G': -0.004, 'F': -0.381, 'I': 0.366, 'H': -0.196, 'K': 0.127, 'M': 0.063, 'L': 0.166, 'N': -0.038, 'Q': 0.24, 'P': 0.253, 'S': 0.099, 'R': 0.043, 'T': 0.049, 'W': -0.181, 'V': 0.212, 'Y': -0.9}, -1: {'con': 4.27325}} |
from typing import Dict, Set, List, Tuple
import pkgutil
import sys
import typing
from importlib import import_module, invalidate_caches
from databases import Database
from .migration import Migration
def build_dependants(dependencies: Dict[str, Set[str]]) -> Dict[str, Set[str]]:
"""
Given a dependencies mapping, return the reversed dependants dictionary.
"""
dependants = {name: set() for name in dependencies.keys()}
for child, parents in dependencies.items():
for parent in parents:
dependants[parent].add(child)
return dependants
def order_dependencies(
dependencies: Dict[str, Set[str]], dependants: Dict[str, Set[str]]
) -> List[str]:
"""
Given the dependencies and dependants mappings, return an ordered list
of the dependencies.
"""
# The root nodes are the only ones with no dependencies.
root_nodes = sorted([name for name, deps in dependencies.items() if not deps])
ordered = list(root_nodes)
seen = set(root_nodes)
children = set()
for node in root_nodes:
children |= dependants[node]
while children:
for node in sorted(children):
if dependencies[node].issubset(seen):
ordered.append(node)
seen.add(node)
children.remove(node)
children |= dependants[node]
break
else:
raise Exception()
return ordered
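# Worked example (illustrative, added): with three migrations where "0002"
# depends on "0001" and "0003" depends on both, the ordering resolves as:
#
#   deps = {"0001": set(), "0002": {"0001"}, "0003": {"0001", "0002"}}
#   order_dependencies(deps, build_dependants(deps))
#   # -> ["0001", "0002", "0003"]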
def load_migrations(applied: Set[str], dir_name: str) -> List[Migration]:
migration_classes = {}
dependencies = {}
if "." not in sys.path:
sys.path.insert(0, ".")
names = [name for _, name, is_pkg in pkgutil.iter_modules([dir_name])]
for name in names:
module = import_module(f"{dir_name}.{name}")
migration_cls = getattr(module, "Migration")
migration_classes[name] = migration_cls
dependencies[name] = set(migration_cls.dependencies)
dependants = build_dependants(dependencies)
names = order_dependencies(dependencies, dependants)
migrations = []
for name in names:
migration_cls = migration_classes[name]
is_applied = name in applied
dependant_list = sorted(dependants[name])
migration = migration_cls(
name=name, is_applied=is_applied, dependants=dependant_list
)
migrations.append(migration)
return migrations
|
import unittest
from .. api_client import ApcUrl, UrlBadParam, IncompleteUrl
class TestApcUrl(unittest.TestCase):
def test_init(self):
url = "/testing"
u = ApcUrl(url)
self.assertEquals( u.base_url, url)
def test_params_1(self):
u = ApcUrl("/testing/%%key%%")
self.assertEquals(u.params(key='val').url(), '/testing/val')
def test_params_2(self):
u = ApcUrl('/testing/%%key%%/%%api%%/more_testing')
full_url = u.params(key="AAA",api="BBB").url()
self.assertEquals(full_url, '/testing/AAA/BBB/more_testing')
def test_params_ex(self):
u = ApcUrl("/testing/%%key%%")
with self.assertRaises(UrlBadParam):
u.params(bad_key='testing')
def test_url(self):
u = "one/two/three"
self.assertEquals( ApcUrl(u).url(), u )
def test_url_ex(self):
u = ApcUrl('/%%one%%/%%two%%/three').params(two='testing')
with self.assertRaises(IncompleteUrl): u.url()
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Prepare dataset for NCF.
Load the training dataset and evaluation dataset from csv file into memory.
Prepare input for model training and evaluation.
"""
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from official.recommendation import constants # pylint: disable=g-bad-import-order
# The buffer size for shuffling train dataset.
_SHUFFLE_BUFFER_SIZE = 1024
class NCFDataSet(object):
"""A class containing data information for model training and evaluation."""
def __init__(self, train_data, num_users, num_items, num_negatives,
true_items, all_items, all_eval_data):
"""Initialize NCFDataset class.
Args:
train_data: A list containing the positive training instances.
num_users: An integer, the number of users in training dataset.
num_items: An integer, the number of items in training dataset.
num_negatives: An integer, the number of negative instances for each user
in train dataset.
true_items: A list, the ground truth (positive) items of users for
evaluation. Each entry is a latest positive instance for one user.
all_items: A nested list, all items for evaluation, and each entry is the
evaluation items for one user.
all_eval_data: A numpy array of eval/test dataset.
"""
self.train_data = train_data
self.num_users = num_users
self.num_items = num_items
self.num_negatives = num_negatives
self.eval_true_items = true_items
self.eval_all_items = all_items
self.all_eval_data = all_eval_data
def load_data(file_name):
"""Load data from a csv file which splits on tab key."""
lines = tf.gfile.Open(file_name, "r").readlines()
# Process the file line by line
def _process_line(line):
return [int(col) for col in line.split("\t")]
data = [_process_line(line) for line in lines]
return data
def data_preprocessing(train_fname, test_fname, test_neg_fname, num_negatives):
"""Preprocess the train and test dataset.
In data preprocessing, the training positive instances are loaded into memory
for random negative instance generation in each training epoch. The test
  dataset is generated from test positive and negative instances.
Args:
train_fname: A string, the file name of training positive dataset.
test_fname: A string, the file name of test positive dataset. Each user has
one positive instance.
test_neg_fname: A string, the file name of test negative dataset. Each user
has 100 negative instances by default.
num_negatives: An integer, the number of negative instances for each user
in train dataset.
Returns:
ncf_dataset: A NCFDataset object containing information about training and
evaluation/test dataset.
"""
# Load training positive instances into memory for later train data generation
train_data = load_data(train_fname)
# Get total number of users in the dataset
num_users = len(np.unique(np.array(train_data)[:, 0]))
  # Load the test positive and negative instances
test_ratings = load_data(test_fname)
test_negatives = load_data(test_neg_fname)
# Get the total number of items in both train dataset and test dataset (the
# whole dataset)
num_items = len(
set(np.array(train_data)[:, 1]) | set(np.array(test_ratings)[:, 1]))
# Generate test instances for each user
true_items, all_items = [], []
all_test_data = []
for idx in range(num_users):
items = test_negatives[idx]
rating = test_ratings[idx]
user = rating[0] # User
true_item = rating[1] # Positive item as ground truth
# All items with first 100 as negative and last one positive
items.append(true_item)
users = np.full(len(items), user, dtype=np.int32)
users_items = list(zip(users, items)) # User-item list
true_items.append(true_item) # all ground truth items
all_items.append(items) # All items (including positive and negative items)
all_test_data.extend(users_items) # Generate test dataset
# Create NCFDataset object
ncf_dataset = NCFDataSet(
train_data, num_users, num_items, num_negatives, true_items, all_items,
np.asarray(all_test_data)
)
return ncf_dataset
def generate_train_dataset(train_data, num_items, num_negatives):
"""Generate train dataset for each epoch.
Given positive training instances, randomly generate negative instances to
form the training dataset.
Args:
train_data: A list of positive training instances.
num_items: An integer, the number of items in positive training instances.
num_negatives: An integer, the number of negative training instances
following positive training instances. It is 4 by default.
Returns:
A numpy array of training dataset.
"""
all_train_data = []
# A set with user-item tuples
train_data_set = set((u, i) for u, i, _ in train_data)
for u, i, _ in train_data:
# Positive instance
all_train_data.append([u, i, 1])
# Negative instances, randomly generated
for _ in xrange(num_negatives):
j = np.random.randint(num_items)
while (u, j) in train_data_set:
j = np.random.randint(num_items)
all_train_data.append([u, j, 0])
return np.asarray(all_train_data)
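# Illustrative note (added): with num_negatives=2 and num_items=5, a single
# positive instance (user=0, item=3) could expand to rows such as
#   [[0, 3, 1], [0, 1, 0], [0, 4, 0]]
# i.e. one positive row followed by two randomly drawn negative rows whose
# (user, item) pairs do not occur in the positive training set.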
def input_fn(training, batch_size, ncf_dataset, repeat=1):
"""Input function for model training and evaluation.
The train input consists of 1 positive instance (user and item have
interactions) followed by some number of negative instances in which the items
are randomly chosen. The number of negative instances is "num_negatives" which
is 4 by default. Note that for each epoch, we need to re-generate the negative
instances. Together with positive instances, they form a new train dataset.
Args:
training: A boolean flag for training mode.
batch_size: An integer, batch size for training and evaluation.
ncf_dataset: An NCFDataSet object, which contains the information about
training and test data.
repeat: An integer, how many times to repeat the dataset.
Returns:
dataset: A tf.data.Dataset object containing examples loaded from the files.
"""
# Generate random negative instances for training in each epoch
if training:
train_data = generate_train_dataset(
ncf_dataset.train_data, ncf_dataset.num_items,
ncf_dataset.num_negatives)
# Get train features and labels
train_features = [
(constants.USER, np.expand_dims(train_data[:, 0], axis=1)),
(constants.ITEM, np.expand_dims(train_data[:, 1], axis=1))
]
train_labels = [
(constants.RATING, np.expand_dims(train_data[:, 2], axis=1))]
dataset = tf.data.Dataset.from_tensor_slices(
(dict(train_features), dict(train_labels))
)
dataset = dataset.shuffle(buffer_size=_SHUFFLE_BUFFER_SIZE)
else:
# Create eval/test dataset
test_user = ncf_dataset.all_eval_data[:, 0]
test_item = ncf_dataset.all_eval_data[:, 1]
test_features = [
(constants.USER, np.expand_dims(test_user, axis=1)),
(constants.ITEM, np.expand_dims(test_item, axis=1))]
dataset = tf.data.Dataset.from_tensor_slices(dict(test_features))
# Repeat and batch the dataset
dataset = dataset.repeat(repeat)
dataset = dataset.batch(batch_size)
# Prefetch to improve speed of input pipeline.
dataset = dataset.prefetch(1)
return dataset
|
import re
import urllib2
from django.http import HttpRequest, HttpResponse
from django.shortcuts import redirect
# {STIX/ID Alias}:{type}-{GUID}
_STIX_ID_REGEX = r"[a-z][\w\d-]+:[a-z]+-[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}"
_OBJECT_ID_MATCHER = re.compile("%s$" % _STIX_ID_REGEX, re.IGNORECASE)
_URL_OBJECT_ID_MATCHER = re.compile(r".*/(%s)/?$" % _STIX_ID_REGEX, re.IGNORECASE)
_STIX_TYPE_ID_REGEX = r"[a-z][\w\d-]+:([a-z]+)-[a-f\d]{8}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{4}-[a-f\d]{12}"
_OBJECT_TYPE_ID_MATCHER = re.compile("%s$" % _STIX_TYPE_ID_REGEX, re.IGNORECASE)
def is_valid_stix_id(candidate_stix_id):
match = _OBJECT_ID_MATCHER.match(candidate_stix_id)
return match is not None
def get_type_string(stix_id):
if not is_valid_stix_id(stix_id):
return ''
    match = _OBJECT_TYPE_ID_MATCHER.match(stix_id)
if not match:
return ''
try:
return match.group(1)
except:
return ''
def find_id(request):
"""
Discovers a STIX id from the request's referrer.
Args:
request (HttpRequest): the request to inspect
Returns:
string: A STIX id
"""
def has_single_match(match_result):
return match_result is not None and len(match_result.groups()) == 1
referer = urllib2.unquote(request.META.get("HTTP_REFERER", ""))
match = _URL_OBJECT_ID_MATCHER.match(referer)
id_ = None
if has_single_match(match):
id_ = match.group(1)
return id_
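# Illustrative example (added, hypothetical URL): a referrer such as
#   "https://host/object/example:indicator-11111111-2222-3333-4444-555555555555/"
# matches _URL_OBJECT_ID_MATCHER, so find_id() returns
#   "example:indicator-11111111-2222-3333-4444-555555555555".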
def discover(request, matched_to, failed_to):
"""
Discovers a STIX id from the request's referrer, returning an appropriate redirect response.
Args:
request (HttpRequest): the request to inspect
matched_to (Any): model, view name or url redirected to if discovery is successful. `id_` contains the STIX id
failed_to (Any): model, view name or url redirected to if discovery fails
Returns:
HttpResponse: A redirect response
"""
id_ = find_id(request)
if id_:
response = redirect(matched_to, id_=id_)
else:
response = redirect(failed_to)
return response
|
"""
https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-tree/
Given a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.
According to the definition of LCA on Wikipedia: “The lowest common ancestor is defined between two nodes p and q as the lowest node in T that has both p and q as descendants (where we allow a node to be a descendant of itself).”
Given the following binary tree: root = [3,5,1,6,2,0,8,null,null,7,4]
Example 1:
Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1
Output: 3
Explanation: The LCA of nodes 5 and 1 is 3.
Example 2:
Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4
Output: 5
Explanation: The LCA of nodes 5 and 4 is 5, since a node can be a descendant of itself according to the LCA definition.
Note:
All of the nodes' values will be unique.
p and q are different and both values will exist in the binary tree.
"""
# the two solutions are provided by the solution set of the question.
# the code and algorithms are very elegant.
# time complexity: O(n), space complexity: O(n)
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
        # The main idea of the first algorithm: recurse through the tree and flag a node when p or q
        # is found at it or in one of its subtrees; the node where at least two of (itself, left
        # subtree, right subtree) report a hit (mid + left + right >= 2) is the LCA.
"""
recursive
if not root:
return None
if root.val == q.val or root.val == p.val:
return root
self.answer = None
def recurse_tree(node):
if node is None:
return False
if node.val == p.val or node.val == q.val:
mid = True
else:
mid = False
left = recurse_tree(node.left)
right= recurse_tree(node.right)
if mid + left + right >= 2:
self.answer = node
return left or mid or right
recurse_tree(root)
return self.answer
"""
        # The second algorithm traverses the tree iteratively until both p and q have been visited.
        # While traversing, we record the parent of every node we see.
        # Starting from p, we follow the parent pointers to collect the set of p's ancestors (p included).
        # Finally, we walk up from q; the first of q's ancestors that also appears in p's ancestor set
        # is the lowest common ancestor.
stack = [root]
parent = {root:None}
while p not in parent or q not in parent:
node = stack.pop()
if node.left:
stack.append(node.left)
parent[node.left] = node
if node.right:
stack.append(node.right)
parent[node.right] = node
p_parents = set()
while p:
p_parents.add(p)
p = parent[p]
while q not in p_parents:
q = parent[q]
return q
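if __name__ == "__main__":
    # Minimal self-check (added for illustration): rebuild the tree from the
    # docstring, root = [3,5,1,6,2,0,8,null,null,7,4], and verify Example 1.
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    nodes = {v: TreeNode(v) for v in (3, 5, 1, 6, 2, 0, 8, 7, 4)}
    nodes[3].left, nodes[3].right = nodes[5], nodes[1]
    nodes[5].left, nodes[5].right = nodes[6], nodes[2]
    nodes[1].left, nodes[1].right = nodes[0], nodes[8]
    nodes[2].left, nodes[2].right = nodes[7], nodes[4]
    lca = Solution().lowestCommonAncestor(nodes[3], nodes[5], nodes[1])
    print(lca.val)  # expected: 3 (the LCA of nodes 5 and 1)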
|
#!/usr/bin/env python3
import numpy as np
import pyboof as pb
from pyboof.swing import visualize_matches
# Load two images
image0 = pb.load_single_band("../data/example/stitch/cave_01.jpg", np.uint8)
image1 = pb.load_single_band("../data/example/stitch/cave_02.jpg", np.uint8)
# Set up the SURF fast-Hessian feature detector. Reduce the number of detected features
# by enforcing a minimum spacing between features and capping how many are kept per scale.
config_fh = pb.ConfigFastHessian()
config_fh.extractRadius = 4
config_fh.maxFeaturesPerScale = 300
# Create the detector and use default for everything else
feature_detector = pb.FactoryDetectDescribe(np.uint8).createSurf(config_detect=config_fh)
# Detect features in the first image
locs0, desc0 = feature_detector.detect(image0)
locs1, desc1 = feature_detector.detect(image1)
print("Detected {:4d} features in image 0".format(len(desc0)))
print(" {:4d} image 1".format(len(desc1)))
config_greedy = pb.ConfigAssociateGreedy()
config_greedy.forwardsBackwards = True
config_greedy.scoreRatioThreshold = 0.95
factory_association = pb.FactoryAssociate()
factory_association.set_score(pb.AssocScoreType.DEFAULT, feature_detector.get_descriptor_type())
associator = factory_association.greedy(config_greedy)
associator.set_source(desc0)
associator.set_destination(desc1)
matches = associator.associate()
print("Associated {} features".format(len(matches)))
# Visualize the images using a Java function
visualize_matches(image0, image1, locs0, locs1, associator.get_java_matches())
# TODO add support for python to java formatted matches
input("Press any key to exit")
|
# Testing raytracing functions against lenstools
import tensorflow as tf
import numpy as np
from numpy.testing import assert_allclose
from scipy.interpolate import InterpolatedUnivariateSpline as iuspline
from scipy.ndimage import fourier_gaussian
import flowpm
import flowpm.constants as constants
import lenstools as lt
import bigfile
import os
import astropy.units as u
data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
np.random.seed(0)
bs = 200
nc = 64
plane_resolution = 64
field = 5
npix = 64
class FlowPMSnapshot(lt.simulations.nbody.NbodySnapshot):
"""
A class that handles FlowPM simulation snapshots for lenstools
"""
_header_keys = [
'masses', 'num_particles_file', 'num_particles_total', 'box_size',
'num_files', 'Om0', 'Ode0', 'h'
]
############################
#Open the file with bigfile#
############################
@classmethod
def open(cls, filename, pool=None, header_kwargs=dict(), **kwargs):
if bigfile is None:
raise ImportError("bigfile must be installed!")
fp = bigfile.BigFile(cls.buildFilename(filename, pool, **kwargs))
return cls(fp, pool, header_kwargs=header_kwargs)
###################################################################################
######################Abstract method implementation###############################
###################################################################################
@classmethod
def buildFilename(cls, root, pool):
return root
@classmethod
def int2root(cls, name, n):
return name
def getHeader(self):
#Initialize header
header = dict()
bf_header = self.fp["Header"].attrs
###############################################
#Translate FlowPM header into lenstools header#
###############################################
#Number of particles/files
header["num_particles_file"] = bf_header["NC"][0]**3
header["num_particles_total"] = header["num_particles_file"]
header["num_files"] = 1
#Cosmology
header["Om0"] = bf_header["OmegaCDM"][0] + bf_header["OmegaB"][0]
header["Ode0"] = 1. - header["Om0"]
header["w0"] = -1.
header["wa"] = 0.
header["h"] = bf_header["h"][0]
header["redshift"] = 1 / (bf_header["Time"][0]) - 1
header["comoving_distance"] = bf_header["comoving_distance"][0] * 1.0e3
header["scale_factor"] = bf_header["Time"][0]
#Box size in kpc/h
header["box_size"] = bf_header["BoxSize"][0] * 1.0e3
header["box_size_mpch"] = bf_header["BoxSize"][0]
#Plane Resolution
header["nc"] = bf_header["NC"][0]
#Masses
header["masses"] = np.array(
[0., bf_header["M0"][0] * header["h"], 0., 0., 0., 0.])
#################
return header
def setLimits(self):
if self.pool is None:
self._first = None
self._last = None
else:
#Divide equally between tasks
Nt, Np = self.pool.size + 1, bigfile.BigData(self.fp).size
part_per_task = Np // Nt
self._first = part_per_task * self.pool.rank
self._last = part_per_task * (self.pool.rank + 1)
#Add the remainder to the last task
if (Np % Nt) and (self.pool.rank == Nt - 1):
self._last += Np % Nt
def getPositions(self, first=None, last=None, save=True):
#Get data pointer
data = self.fp
#Read in positions in Mpc/h
if (first is None) or (last is None):
positions = (data["0/Position"][:] +
np.array([
0.5 / self.header["nc"] * self.header["box_size_mpch"],
0.5 / self.header["nc"] * self.header["box_size_mpch"], 0
],
dtype=np.float32)) * self.Mpc_over_h
else:
positions = data["0/Position"][first:last] * self.Mpc_over_h
#Enforce periodic boundary conditions
for n in (0, 1):
positions[:, n][positions[:, n] < 0] += self.header["box_size"]
positions[:, n][
positions[:, n] > self.header["box_size"]] -= self.header["box_size"]
#Maybe save
if save:
self.positions = positions
#Initialize useless attributes to None
self.weights = None
self.virial_radius = None
self.concentration = None
#Return
return positions
def test_density_plane(return_results=False):
""" Tests cutting density planes from snapshots against lenstools
"""
klin = np.loadtxt(data_path + '/flowpm/data/Planck15_a1p00.txt').T[0]
plin = np.loadtxt(data_path + '/flowpm/data/Planck15_a1p00.txt').T[1]
ipklin = iuspline(klin, plin)
cosmo = flowpm.cosmology.Planck15()
a0 = 0.9
r0 = flowpm.background.rad_comoving_distance(cosmo, a0)
# Create a state vector
initial_conditions = flowpm.linear_field(nc, bs, ipklin, batch_size=2)
state = flowpm.lpt_init(cosmo, initial_conditions, a0)
# Export the snapshot
flowpm.io.save_state(
cosmo,
state,
a0, [nc, nc, nc], [bs, bs, bs],
'snapshot_density_testing',
attrs={'comoving_distance': r0})
# Reload the snapshot with lenstools
snapshot = FlowPMSnapshot.open('snapshot_density_testing')
# Cut a lensplane in the middle of the volume
lt_plane, resolution, NumPart = snapshot.cutPlaneGaussianGrid(
normal=2,
plane_resolution=plane_resolution,
center=(bs / 2) * snapshot.Mpc_over_h,
thickness=(bs / 4) * snapshot.Mpc_over_h,
left_corner=np.zeros(3) * snapshot.Mpc_over_h,
smooth=None,
kind='density')
# Cut the same lensplane with flowpm
fpm_plane = flowpm.raytracing.density_plane(
state, nc, center=nc / 2, width=nc / 4, plane_resolution=plane_resolution)
# Apply additional normalization terms to match lenstools definitions
constant_factor = 3 / 2 * cosmo.Omega_m * (constants.H0 / constants.c)**2
density_normalization = bs / 4 * r0 / a0
fpm_plane = fpm_plane * density_normalization * constant_factor
# Checking first the mean value, which accounts for any normalization
# issues
assert_allclose(np.mean(fpm_plane[0]), np.mean(lt_plane), rtol=1e-5)
# To check pixelwise difference, we need to do some smoothing as lenstools and
# flowpm use different painting kernels
smooth_lt_plane = np.fft.ifft2(fourier_gaussian(np.fft.fft2(lt_plane),
3)).real
smooth_fpm_plane = np.fft.ifft2(
fourier_gaussian(np.fft.fft2(fpm_plane[0]), 3)).real
assert_allclose(smooth_fpm_plane, smooth_lt_plane, rtol=2e-2)
if return_results:
return fpm_plane, lt_plane, smooth_fpm_plane, smooth_lt_plane
def test_convergence_Born(return_results=False):
""" This function tests that given a set of density planes,
both lenstools and flowpm recover the same convergence maps in
angular coordinates.
"""
klin = np.loadtxt(data_path + '/flowpm/data/Planck15_a1p00.txt').T[0]
plin = np.loadtxt(data_path + '/flowpm/data/Planck15_a1p00.txt').T[1]
ipklin = iuspline(klin, plin)
cosmo = flowpm.cosmology.Planck15()
a0 = 0.9
# Create a state vector
initial_conditions = flowpm.linear_field([nc, nc, 10 * nc], [bs, bs, 10 * bs],
ipklin,
batch_size=2)
state = flowpm.lpt_init(cosmo, initial_conditions, a0)
r = tf.linspace(0., 10 * bs, 11)
r_center = 0.5 * (r[1:] + r[:-1])
a_center = flowpm.background.a_of_chi(cosmo, r_center)
constant_factor = 3 / 2 * cosmo.Omega_m * (constants.H0 / constants.c)**2
# To make it convenient to access simulation properties in lenstools
  # let's quickly export and reload the sim
# TODO: remove the need for this!
flowpm.io.save_state(
cosmo,
state,
a0, [nc, nc, 10 * nc], [bs, bs, 10 * bs],
'snapshot_born_testing',
attrs={'comoving_distance': r_center[0]})
# Reload the snapshot with lenstools
snapshot = FlowPMSnapshot.open('snapshot_born_testing')
# Get some density planes and create lenstool tracer
lensplanes = []
tracer = lt.simulations.RayTracer(lens_type=lt.simulations.DensityPlane)
for i in range(len(r_center)):
plane = flowpm.raytracing.density_plane(
state, [nc, nc, 10 * nc],
r_center[i] / bs * nc,
width=nc,
plane_resolution=plane_resolution)
r, a, p = r_center[i], a_center[i], plane[0]
lensplanes.append((r, a, plane))
density_normalization = bs * r / a
    # We upsample the lensplanes before giving them to lenstools because
    # lenstools uses a different kind of interpolation when converting from
    # comoving coordinates to angular coordinates; a larger plane keeps that
    # interpolation error small.
p = tf.image.resize(
tf.reshape(p, [1, plane_resolution, plane_resolution, 1]), [2048, 2048])
p = (p[0, :, :, 0] * constant_factor * density_normalization).numpy()
p = p - np.mean(p)
lt_plane = lt.simulations.DensityPlane(
p,
angle=snapshot.header["box_size"],
redshift=1 / a - 1,
cosmology=snapshot.cosmology)
tracer.addLens(lt_plane)
# Adding dummy lensplane at the end
tracer.addLens(
lt.simulations.DensityPlane(
np.zeros((2048, 2048)),
angle=snapshot.header["box_size"],
redshift=0.99,
cosmology=snapshot.cosmology))
tracer.addLens(
lt.simulations.DensityPlane(
np.zeros((2048, 2048)),
angle=snapshot.header["box_size"],
redshift=2,
cosmology=snapshot.cosmology))
tracer.reorderLenses()
  # Create an array of coordinates at which to retrieve the convergence maps
xgrid, ygrid = np.meshgrid(
np.linspace(0, field, npix, endpoint=False), # range of X coordinates
np.linspace(0, field, npix, endpoint=False)) # range of Y coordinates
coords = np.stack([xgrid, ygrid], axis=0) * u.deg
c = coords.reshape([2, -1]).T
  # Compute convergence map with lenstools
lt_map = tracer.convergenceBorn(coords, z=1.0)
  # Compute convergence map with flowpm
fpm_map = flowpm.raytracing.convergenceBorn(
cosmo, lensplanes, bs / nc, bs, c.to(u.rad), z_source=tf.ones([1]))
# Comparing the final maps
assert_allclose(
lt_map, fpm_map[0].numpy().reshape([npix, npix, -1])[:, :, -1], atol=5e-4)
if return_results:
return lt_map, fpm_map
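if __name__ == "__main__":
  # Minimal convenience sketch (not part of the original tests, which are meant
  # to be collected by pytest): run both lenstools comparisons directly.
  test_density_plane()
  test_convergence_Born()
  print("Both lenstools comparison tests passed.")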
|
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import numpy as np
import logging
import os.path
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
def fit_tophat(x, y, verify=False):
"""
Fit the x and y data to a tophat function, returning:
        base_level - the y-value outside the tophat
        hat_level - the y-value inside the tophat
hat_start - start of the tophat
hat_end - end of the tophat
:param x: iterable of x values
:param y: corresponding iterable of y values
:param verify: Show a plot of the fit, blocking progress until it is dismissed
:return: (base_level, hat_level, hat_start, hat_end)
"""
# TODO Try to spot close to zero height tophats, which may confuse the algorithm
def top_hat(x, base_level, hat_level, hat_start, hat_end):
return np.where((hat_start < x) & (x < hat_end), hat_level, base_level)
gradient = list(get_derivative(y, x))
max_gradient = max(gradient)
min_gradient = min(gradient)
    # The tophat could be upside down, so we don't know which of these steps
    # comes first in the x direction
max_gradient_index = gradient.index(max_gradient)
min_gradient_index = gradient.index(min_gradient)
step_indices = (max_gradient_index, min_gradient_index)
max_gradient_x = x[max_gradient_index]
min_gradient_x = x[min_gradient_index]
step_xs = (max_gradient_x, min_gradient_x)
base_level = np.mean(y[:min(step_indices)])
hat_level = np.mean(y[min(*step_indices):max(*step_indices)])
hat_start = min(*step_xs)
hat_end = max(*step_xs)
if verify:
plt.close('all')
plt.figure(figsize=(8, 5))
plt.plot(x, y)
plt.plot(x, top_hat(x, base_level, hat_level, hat_start, hat_end))
plt.show()
return base_level, hat_level, hat_start, hat_end
def find_peaks(x, y, threshold=0, verify=False):
"""
    Count significant spikes in y values above threshold, for some definition
    of "significant". This is a very naive approach - any time the trace rises
    above the threshold and then drops back below it, take the highest value
    in between as the peak. It works so far. scipy.signal.find_peaks may provide
    a more robust approach if needed, using a peak's "prominence".
:param x: list of x values
:param y: list of y values
    :param threshold: y threshold that needs to be crossed to count as a peak
    :param verify: If True, show a plot marking the peaks that were found
:return: list of (idx, x, y) values of peaks at point of maximum y
"""
logger.debug('Peak threshold is {}'.format(threshold))
in_peak = False
peak = [] # list of data points that are part of a peak
    peaks = []  # list of peaks
for idx, data_point in enumerate(zip(x, y)):
if in_peak:
if data_point[1] > threshold:
peak.append((idx, data_point[0], data_point[1]))
else:
in_peak = False
peaks.append(peak)
elif data_point[1] > threshold:
in_peak = True
peak = [(idx, *data_point)]
# print([peak[0] for peak in peaks])
# if len(peaks) > 0:
# print([max(peak, key=lambda d: d[1]) for peak in peaks])
# else:
# print('No peaks')
# print(len(peaks))
maximums = [max(peak, key=lambda d: d[2]) for peak in peaks]
if verify:
plt.close(fig='all') # Make sure there are no unwanted figures
plt.figure(figsize=(16, 10))
plt.plot(x, y)
for m in maximums:
plt.axvline(x=m[1], color='red')
plt.show()
logger.info('Found {} peaks'.format(len(maximums)))
return maximums
def get_derivative(y, x):
"""
    Return the numerical derivative of the data, dy/dx
:param y: y values list
:param x: x values list
:return: dy/dx
"""
return np.gradient(y, x)
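if __name__ == '__main__':
    # Illustrative usage sketch (not part of the original module): build a noisy
    # synthetic tophat, fit it, and count the peaks above a threshold. Only numpy
    # and the functions defined above are assumed.
    xs = np.linspace(0, 10, 1000)
    ys = np.where((3 < xs) & (xs < 7), 5.0, 1.0) + np.random.normal(0, 0.05, xs.size)
    print(fit_tophat(xs, ys, verify=False))
    print(find_peaks(xs, ys, threshold=3))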
|
from google.appengine.ext import db
# Database
# font db
class EnFamily(db.Model):
# width = x A width / cap height
# x height = x height / cap height
# m serif = serif length of m 500% chrome screen - font-size 20px
# h_stem_horizontal_balance = H horizontal height / H stem width
# o_stroke_axis = angle 0 - vertical
# p_descender = length of p descender / cap height
# i_line_thickness: thickness of i 500% chrome screen - font-size 20px
name = db.StringProperty(required=True)
style = db.StringProperty(required=True)
category = db.StringProperty(required=True,
choices=('sans', 'serif', 'other'))
width = db.FloatProperty(required=True)
w_width = db.FloatProperty(required=True)
x_height = db.FloatProperty(required=True)
m_serif = db.FloatProperty(required=True)
h_stem_horizontal_balance = db.FloatProperty(required=True)
o_stroke_axis = db.FloatProperty(required=True)
p_descender = db.FloatProperty(required=True)
i_line_thickness = db.FloatProperty(required=True)
design = db.StringProperty(required=True)
# aggregation of width, x-height, descender
distance_v = db.FloatProperty(required=True)
# aggregation of m_serif, stem-horizontal, stroke axis, line thickness
distance_h = db.FloatProperty(required=True)
# sum of abs of distance_v and distance_h
distance = db.FloatProperty(required=True)
lang = db.StringProperty(required=True)
    def get_position(self):
        position_style = 'left: '+str(self.distance_h*8+50)+'%;top :'+str(self.distance_v*8+50)+'%'
        return position_style
    def get_svg(self):
        svg = 'font_en'+self.style+'.svg'
        return svg
    def get_num(self):
        category = self.category
        width = (self.width + self.w_width)/2
        x_height = self.x_height
        m_serif = self.m_serif
        h_stem_horizontal_balance = self.h_stem_horizontal_balance
        o_stroke_axis = self.o_stroke_axis
        p_descender = self.p_descender
        i_line_thickness = self.i_line_thickness
num1 = width
num2 = x_height + p_descender
num3 = h_stem_horizontal_balance
num4 = o_stroke_axis+m_serif + m_serif*5
num5 = i_line_thickness
return category, num1, num2, num3, num4, num5
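# Illustrative sketch (not part of the original module): creating an entity and
# reading its derived metrics. Every value below is made up for demonstration.
# example_font = EnFamily(
#     name='Example Sans', style='regular', category='sans',
#     width=0.55, w_width=0.60, x_height=0.52, m_serif=0.0,
#     h_stem_horizontal_balance=1.1, o_stroke_axis=0.0, p_descender=0.22,
#     i_line_thickness=2.0, design='grotesque', distance_v=0.1, distance_h=-0.2,
#     distance=0.3, lang='en')
# example_font.put()
# print(example_font.get_position(), example_font.get_num())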
|
import os
import sys
import urllib2
import urlparse
import json
import base64
import time
from boto.s3.key import Key
import boto.s3
import logging
from selenium import webdriver
logger = logging.getLogger('sitetest')
def test_screenshots(set, credentials, options, test_category_id, batch_id, max_test_count=20, verbose=False):
use_browserstack = False # True if('browserstack' in credentials and 'USERNAME' in credentials['browserstack']) else False
if max_test_count is None:
max_test_count = 20
use_basic_auth = False if 'use_basic_auth' not in options else truthy(options['use_basic_auth'])
basic_auth_username = '' if not use_basic_auth else options['basic_auth_username']
basic_auth_password = '' if not use_basic_auth else options['basic_auth_password']
if use_browserstack:
username = credentials['browserstack']['USERNAME']
password = credentials['browserstack']['PASSWORD']
user_data = base64.encodestring('%s:%s' % (username, password)).replace('\n', '')
else:
if use_basic_auth:
profile = webdriver.FirefoxProfile()
profile.set_preference('network.http.phishy-userpass-length', 255)
browser = webdriver.Firefox(firefox_profile=profile)
else:
browser = webdriver.Firefox()
total = len(set.parsed_links)
count = 0
tested_count = 0
for link_url in set.parsed_links:
if verbose:
logger.debug("%s/%s" % (count, total))
count += 1
link = set.parsed_links[link_url]
if link.is_internal_html and not link.skip_test:
if tested_count < max_test_count:
tested_count += 1
if use_browserstack:
browser_data = {
"browsers": [
{"os": "Windows", "os_version": "7", "browser_version": "8.0", "browser": "ie"},
{"os": "Windows", "os_version": "7", "browser_version": "9.0", "browser": "ie"}
],
"url": link.url
}
api_url = 'http://www.browserstack.com/screenshots'
request = urllib2.Request(api_url, json.dumps(browser_data))
request.add_header("Authorization", "Basic %s" % user_data)
request.add_header('Content-Type', 'application/json')
request.add_header('Accept', 'application/json')
# response
urllib2.urlopen(request)
# result = json.load(response)
# TODO -- handle results...
else:
if use_basic_auth:
parsed = urlparse.urlparse(link.url)
updated_location = "%s:%s@%s" % (basic_auth_username, basic_auth_password, parsed.netloc)
parsed = parsed._replace(netloc=updated_location)
updated = urlparse.urlunparse(parsed)
url = updated
else:
url = link.url
browser.get(url)
if 'screenshots' in options:
for key, screenshot in options['screenshots'].iteritems():
screenshots_directory = 'screenshots'
width = screenshot[0]
height = screenshot[1]
browser.set_window_size(width, height)
# delay, allow to render
time.sleep(1)
results_dir = os.path.join(os.path.dirname(__file__), '..', 'test_results', test_category_id, batch_id, screenshots_directory, link.page_slug)
if not os.path.exists(results_dir):
os.makedirs(results_dir)
filename = '%s/%s-%s.png' % (results_dir, width, height)
browser.save_screenshot(filename)
folder = '%s/%s/%s/%s' % (test_category_id, batch_id, screenshots_directory, link.page_slug)
image_url = copy_to_amazon(filename, folder, test_category_id, batch_id, credentials, verbose)
link.screenshots[key] = image_url
browser.quit()
def copy_to_amazon(file_name, folder, test_category_id, batch_id, credentials, verbose):
if 'aws' in credentials and 'AWS_ACCESS_KEY_ID' in credentials['aws']:
AWS_ACCESS_KEY_ID = credentials['aws']['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = credentials['aws']['AWS_SECRET_ACCESS_KEY']
AWS_STORAGE_BUCKET_NAME = credentials['aws']['AWS_STORAGE_BUCKET_NAME']
AWS_RESULTS_PATH = credentials['aws']['AWS_RESULTS_PATH']
base_name = os.path.basename(file_name)
bucket_name = AWS_STORAGE_BUCKET_NAME
conn = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
bucket = conn.get_bucket(bucket_name)
if verbose:
logger.debug('Uploading %s to Amazon S3 from %s' % (base_name, file_name))
def percent_cb(complete, total):
sys.stdout.write('.')
sys.stdout.flush()
k = Key(bucket)
k.key = u"%s/%s/%s" % (AWS_RESULTS_PATH, folder, base_name)
k.set_contents_from_filename(file_name, cb=percent_cb, num_cb=10)
k.set_acl('public-read')
url = "http://s3.amazonaws.com/%s/%s/%s/%s" % (AWS_STORAGE_BUCKET_NAME, AWS_RESULTS_PATH, folder, base_name)
# if verbose:
# logger.debug('Uploaded to %s' % (url))
return url
else:
logger.warn("Warning: AWS API credentials not supplied.")
return None
def truthy(value):
if value == 'True':
return True
elif value == 'False':
return False
return value
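# Illustrative sketch of the data structures test_screenshots() expects, inferred
# from the lookups above; every key and value here is a placeholder, not a real
# credential.
# credentials = {
#     'aws': {
#         'AWS_ACCESS_KEY_ID': '...', 'AWS_SECRET_ACCESS_KEY': '...',
#         'AWS_STORAGE_BUCKET_NAME': 'my-bucket', 'AWS_RESULTS_PATH': 'results',
#     },
#     'browserstack': {'USERNAME': '...', 'PASSWORD': '...'},
# }
# options = {
#     'use_basic_auth': 'False',
#     'screenshots': {'desktop': (1280, 800), 'mobile': (375, 667)},
# }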
|
"""
A python module that generates uni-variate and bi-variate analysis graphs for
exploratory data analysis. Specifically, the module contains functions to
graph histograms for numeric data, bar plots for categorical data, a
correlation matrix heat map, and a scatter pair plot. The graphs produced
are added into the final eda pdf report.
"""
import matplotlib.pyplot as plt
import seaborn as sns
from eda_assistant import _calc_dataframe_statistics
from eda_assistant import _calc_variable_statistics
from eda_assistant import _format_graphs
def plot_numeric_hist(df):
"""
Returns histogram plot(s) for all numeric data in the dataset. If there
are no numeric variables, function will return None.
Parameters:
df (pandas DataFrame): Dataframe to create histogram plot(s) for
numeric variables in the dataset
Returns:
fig (figure): figure containing histogram plot(s) for numeric
variables in df
"""
n = _calc_dataframe_statistics.count_numeric(df)
if n == 0:
return None
else:
fig, ax = plt.subplots(n, 1, figsize=(10, n * 10))
counter = 0
for i in range(len(df.columns)):
col = df.iloc[:, i]
if _calc_variable_statistics.is_numeric(col):
if n == 1:
plot = sns.histplot(data=df, x=col.name)
else:
plot = sns.histplot(data=df, x=col.name, ax=ax[counter])
_format_graphs.format_numeric(fig, plot, col)
counter += 1
return fig
def plot_categorical_bar(df, set_limit=10):
"""
    Returns count bar plot(s) for all categorical data in the dataset. A column
    is treated as categorical when its number of unique values does not exceed
    set_limit, which defaults to 10 because a larger limit would produce a very
    compact, condensed, and messy figure. Function returns None if there are no
    categorical variables.
Parameters:
df (pandas DataFrame): Dataframe to create count bar plot(s) for
categorical variables in dataset
set_limit (int): set limit for number of unique values in a
categorical variable column. Default is set to 10
Returns:
fig (figure): figure containing count bar plot(s) for categorical
variables in df
"""
n = _format_graphs.count_categorical_bar(df)
if n == 0:
return None
else:
fig, ax = plt.subplots(n, 1, figsize=(10, n * 10))
counter = 0
for i in range(len(df.columns)):
col = df.iloc[:, i]
if _format_graphs.is_categorical_bar(col, set_limit=set_limit):
if n == 1:
plot = sns.countplot(data=df, x=col.name)
else:
plot = sns.countplot(data=df, x=col.name, ax=ax[counter])
_format_graphs.format_categorical(fig, plot, col)
counter += 1
return fig
def plot_corr_graph(df):
"""
Returns correlation matrix heat map plot for the dataset. If dataframe is
empty, or if the number of numeric variables in the dataset is less than
or equal to 1, the function returns None.
Parameters:
df (pandas DataFrame): Dataframe to create correlation matrix heat
map plot for the dataset
Returns:
plot_corr (figure): figure containing correlation matrix heat map
plot for df
"""
if len(df) == 0 or _calc_dataframe_statistics.count_numeric(df) <= 1:
return None
else:
corr = df.corr()
plot_corr = sns.heatmap(corr, annot=True, fmt='.0f')
_format_graphs.format_corr()
return plot_corr.figure
def plot_pair_graph(df, set_limit=10):
"""
    Returns scatter pair plots for the dataset. If the dataframe is empty, if
    the number of numeric variables is greater than or equal to set_limit
    (the processing time would be too long in that case), or if the number of
    numeric variables in the dataset is less than or equal to 1, the function
    returns None.
Parameters:
df (pandas DataFrame): Dataframe to create pair plots
for the dataset
set_limit (int): the set limit for the number of numeric
variables in a dataset. Default is set to 10
Returns:
plot_pair (figure): figure containing pair plots for df
"""
if len(df) == 0 \
or _calc_dataframe_statistics.count_numeric(df) >= set_limit \
or _calc_dataframe_statistics.count_numeric(df) <= 1:
return None
else:
plot_pair = sns.pairplot(data=df)
_format_graphs.format_pair(plot_pair, df)
return plot_pair.figure
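# Illustrative usage sketch (assumes pandas and the eda_assistant helpers are
# installed; the DataFrame below is made up for demonstration):
# import pandas as pd
# df = pd.DataFrame({'age': [23, 35, 41, 29],
#                    'height': [1.60, 1.80, 1.70, 1.75],
#                    'group': ['a', 'b', 'a', 'b']})
# hist_fig = plot_numeric_hist(df)
# bar_fig = plot_categorical_bar(df)
# corr_fig = plot_corr_graph(df)
# pair_fig = plot_pair_graph(df)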
|
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 226171178
"""
"""
random actions, total chaos
"""
board = gamma_new(3, 3, 2, 3)
assert board is not None
assert gamma_move(board, 1, 0, 2) == 1
assert gamma_move(board, 2, 1, 0) == 1
assert gamma_busy_fields(board, 2) == 1
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 2, 1, 1) == 1
assert gamma_move(board, 2, 0, 1) == 1
assert gamma_busy_fields(board, 2) == 3
assert gamma_move(board, 1, 1, 2) == 1
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_busy_fields(board, 1) == 3
assert gamma_move(board, 2, 2, 1) == 1
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 2, 2, 2) == 1
assert gamma_move(board, 2, 1, 0) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_busy_fields(board, 2) == 5
assert gamma_golden_possible(board, 2) == 1
board255575207 = gamma_board(board)
assert board255575207 is not None
assert board255575207 == ("112\n" "222\n" "12.\n")
del board255575207
board255575207 = None
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_free_fields(board, 1) == 1
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 1, 1) == 0
assert gamma_free_fields(board, 1) == 1
assert gamma_move(board, 2, 0, 2) == 0
assert gamma_move(board, 2, 2, 2) == 0
assert gamma_busy_fields(board, 2) == 5
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 1, 2) == 0
assert gamma_move(board, 1, 2, 2) == 0
assert gamma_move(board, 2, 0, 0) == 0
assert gamma_busy_fields(board, 2) == 5
board426797504 = gamma_board(board)
assert board426797504 is not None
assert board426797504 == ("112\n" "222\n" "12.\n")
del board426797504
board426797504 = None
gamma_delete(board)
|
import logging
import os
import sys
from kolibri.core.logger.csv_export import classes_info
from kolibri.core.logger.csv_export import csv_file_generator
from kolibri.core.tasks.management.commands.base import AsyncCommand
logger = logging.getLogger(__name__)
class Command(AsyncCommand):
def add_arguments(self, parser):
parser.add_argument(
"-O",
"--output-file",
action="store",
dest="output_file",
default=None,
type=str,
help="The generated file will be saved with this name",
)
parser.add_argument(
"-l",
"--log-type",
action="store",
dest="log_type",
default="session",
choices=classes_info.keys(),
help='Log type to be exported. Valid values are "session" and "summary".',
)
parser.add_argument(
"-w",
"--overwrite",
action="store_true",
dest="overwrite",
default=False,
help="Allows overwritten of the exported file in case it exists",
)
def handle_async(self, *args, **options):
log_type = options["log_type"]
log_info = classes_info[log_type]
if options["output_file"] is None:
filename = log_info["filename"]
else:
filename = options["output_file"]
filepath = os.path.join(os.getcwd(), filename)
queryset = log_info["queryset"]
total_rows = queryset.count()
with self.start_progress(total=total_rows) as progress_update:
try:
for row in csv_file_generator(
log_type, filepath, overwrite=options["overwrite"]
):
progress_update(1)
except (ValueError, IOError) as e:
logger.error("Error trying to write csv file: {}".format(e))
sys.exit(1)
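# Illustrative invocation (the actual command name depends on this module's path
# under management/commands/; "exportlogs" below is only an assumption):
# kolibri manage exportlogs --log-type summary --output-file summary_logs.csv --overwrite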
|
# coding: utf-8
import ast
from functools import partial, wraps
from json import JSONDecodeError
from django.conf import settings
from django.core.exceptions import EmptyResultSet
from django.db import models
from django.db.models import Avg, Count, F, Max, Min, Q, QuerySet, StdDev, Sum, Variance, functions
from rest_framework import serializers, viewsets
from rest_framework.decorators import api_view
from rest_framework.exceptions import NotFound, ValidationError
from rest_framework.relations import PrimaryKeyRelatedField
from rest_framework.response import Response
from common.api.fields import ChoiceDisplayField, ReadOnlyObjectField
from common.utils import (
get_field_by_path,
get_pk_field,
get_prefetchs,
get_related,
json_decode,
parsedate,
prefetch_metadata,
str_to_bool,
)
# Hyperlinked URLs in serializers
HYPERLINKED = settings.REST_FRAMEWORK.get("HYPERLINKED", False)
# Reserved keywords in the API URLs
AGGREGATES = {
"count": Count,
"sum": Sum,
"avg": Avg,
"min": Min,
"max": Max,
"stddev": StdDev,
"variance": Variance,
}
CASTS = {
"bool": models.BooleanField,
"date": models.DateField,
"datetime": models.DateTimeField,
"decimal": models.DecimalField,
"float": models.FloatField,
"int": models.IntegerField,
"str": models.CharField,
"time": models.TimeField,
}
FUNCTIONS = {
"cast": lambda value, cast_type="", *args: (
partial(functions.Cast, output_field=CASTS.get(cast_type, models.CharField)())(value, *args)
),
"coalesce": functions.Coalesce,
"greatest": functions.Greatest,
"least": functions.Least,
"nullif": functions.NullIf,
"extract_year": functions.ExtractIsoYear,
"extract_month": functions.ExtractMonth,
"extract_day": functions.ExtractDay,
"extract_week_day": functions.ExtractIsoWeekDay,
"extract_week": functions.ExtractWeek,
"extract_quarter": functions.ExtractQuarter,
"extract_hour": functions.ExtractHour,
"extract_minute": functions.ExtractMinute,
"extract_second": functions.ExtractSecond,
"trunc_year": functions.TruncYear,
"trunc_month": functions.TruncMonth,
"trunc_week": functions.TruncWeek,
"trunc_quarter": functions.TruncQuarter,
"trunc_date": functions.TruncDate,
"trunc_time": functions.TruncTime,
"trunc_day": functions.TruncDay,
"trunc_hour": functions.TruncHour,
"trunc_minute": functions.TruncMinute,
"trunc_second": functions.TruncSecond,
"abs": functions.Abs,
"acos": functions.ACos,
"asin": functions.ASin,
"atan": functions.ATan,
"atan2": functions.ATan2,
"ceil": functions.Ceil,
"cos": functions.Cos,
"cot": functions.Cot,
"degrees": functions.Degrees,
"exp": functions.Exp,
"floor": functions.Floor,
"ln": functions.Ln,
"log": functions.Log,
"mod": functions.Mod,
"radians": functions.Radians,
"round": functions.Round,
"power": functions.Power,
"sign": functions.Sign,
"sin": functions.Sin,
"sqrt": functions.Sqrt,
"tan": functions.Tan,
"left": functions.Left,
"length": functions.Length,
"lower": functions.Lower,
"lpad": functions.LPad,
"ltrim": functions.LTrim,
"md5": functions.MD5,
"ord": functions.Ord,
"repeat": functions.Repeat,
"replace": functions.Replace,
"reverse": functions.Reverse,
"right": functions.Right,
"rpad": functions.RPad,
"rtrim": functions.RTrim,
"sha1": functions.SHA1,
"sha224": functions.SHA224,
"sha256": functions.SHA256,
"sha384": functions.SHA384,
"sha512": functions.SHA512,
"strindex": functions.StrIndex,
"substr": functions.Substr,
"trim": functions.Trim,
"upper": functions.Upper,
}
RESERVED_QUERY_PARAMS = (
[
"filters",
"fields",
"order_by",
"group_by",
"all",
"display",
"distinct",
"silent",
"simple",
"meta",
"cache",
"timeout",
]
+ list(AGGREGATES.keys())
+ list(FUNCTIONS.keys())
)
def url_value(filter, value):
"""
    Converts a value taken from the URL according to its filter lookup
    :param filter: Filter name (possibly ending with a lookup such as __in or __isnull)
    :param value: Raw value from the URL
    :return: Converted value
"""
if not isinstance(value, str):
return value
try:
value = ast.literal_eval(value)
evaluated = True
except (SyntaxError, ValueError):
evaluated = False
if not filter:
return value
if any(
filter.endswith(lookup)
for lookup in ("__in", "__range", "__hasany", "__hasall", "__has_keys", "__has_any_keys", "__overlap")
):
if evaluated:
if not isinstance(value, (list, set, tuple)):
return (value,)
else:
return value.split(",")
if any(filter.endswith(lookup) for lookup in ("__isnull", "__isempty")):
return str_to_bool(value)
if any(filter.endswith(lookup) for lookup in ("__contains", "__contained_by", "__hasdict", "__indict")):
if not isinstance(value, str):
return value
try:
return json_decode(value)
except (JSONDecodeError, TypeError, ValueError):
if ":" in value:
data = {}
for subvalue in value.split(","):
key, val = subvalue.split(":")
data[key] = val
return data
elif "," in value:
return value.split(",")
return value
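# Illustrative behaviour of url_value(), assuming str_to_bool() from common.utils
# behaves as its name suggests (examples only, not part of the original module):
#   url_value("name__in", "alice,bob")     -> ["alice", "bob"]   (comma split)
#   url_value("id__in", "1,2,3")           -> (1, 2, 3)          (ast.literal_eval)
#   url_value("deleted__isnull", "true")   -> True               (via str_to_bool)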
def parse_filters(filters):
"""
    Parses a string containing conditions in the following format:
    [and|or|not](field__lookup:value[,field__lookup:value])
    Operators can be nested inside a single filter, for example:
    or(and(field_1:value_1,field_2:value_2),and(not(field_3:value_3),field_4:value_4))
    :param filters: Filters as a string
    :return: Combined Django Q conditions
"""
if isinstance(filters, str):
try:
import ast
import re
filters = filters.replace("'", "\\'").replace('"', '\\"')
filters = re.sub(r"([\w.]+):([^,()]*)", r'{"\1":"\2"}', filters)
filters = re.sub(r"(\w+)\(", r'("\1",', filters)
filters = ast.literal_eval(filters)
except Exception as exception:
raise Exception("{filters}: {exception}".format(filters=filters, exception=exception))
if isinstance(filters, dict):
filters = (filters,)
operator = None
elements = []
for filter in filters:
if isinstance(filter, tuple):
elements.append(parse_filters(filter))
elif isinstance(filter, dict):
fields = {}
for key, value in filter.items():
key = key.strip().replace(".", "__")
if value.startswith("[") and value.endswith("]"):
value = F(value[1:-1].replace(".", "__"))
fields[key] = url_value(key, value)
elements.append(Q(**fields))
elif isinstance(filter, str):
operator = filter.lower()
if operator == "or":
q = elements.pop(0)
for element in elements:
q |= element
else:
q = ~elements.pop(0) if operator == "not" else elements.pop(0)
for element in elements:
q &= element
return q
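# Illustrative sketch of the accepted filter syntax (results shown approximately;
# the actual values go through url_value() above):
#   parse_filters('and(name:alice,not(city:Paris))')
#       -> roughly Q(name="alice") & ~Q(city="Paris")
#   parse_filters('or(age__gte:30,age__isnull:True)')
#       -> roughly Q(age__gte=30) | Q(age__isnull=True)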
def to_model_serializer(model, **metadata):
"""
    Decorator binding a model to a serializer definition
    :param model: Model
    :param metadata: Serializer metadata
    :return: Serializer
"""
from common.api.fields import JsonField as ApiJsonField
from common.fields import JsonField as ModelJsonField
def wrapper(serializer):
for field in model._meta.fields:
if "fields" in metadata and field.name not in metadata.get("fields", []):
continue
if "exclude" in metadata and field.name in metadata.get("exclude", []):
continue
            # Inject foreign key identifiers
if HYPERLINKED and field.related_model:
serializer._declared_fields[field.name + "_id"] = serializers.ReadOnlyField()
if "fields" in metadata and "exclude" not in metadata:
metadata["fields"] = list(metadata.get("fields", [])) + [field.name + "_id"]
            # Inject human-readable values for fields with a list of choices
if field.choices:
serializer_field_name = "{}_display".format(field.name)
source_field_name = "get_{}".format(serializer_field_name)
serializer._declared_fields[serializer_field_name] = serializers.CharField(
source=source_field_name, label=field.verbose_name or field.name, read_only=True
)
if "fields" in metadata and "exclude" not in metadata:
metadata["fields"] = list(metadata.get("fields", [])) + [serializer_field_name]
            # Inject data for JSON fields
if isinstance(field, ModelJsonField):
serializer._declared_fields[field.name] = ApiJsonField(
label=field.verbose_name,
help_text=field.help_text,
required=not field.blank,
allow_null=field.null,
read_only=not field.editable,
)
        # Update the serializer metadata
if "fields" not in metadata and "exclude" not in metadata:
metadata.update(fields="__all__")
metadata.update(model=model)
metadata.update(ref_name=model._meta.label)
serializer.Meta = type("Meta", (), metadata)
return serializer
return wrapper
def to_model_viewset(model, serializer, permissions=None, queryset=None, bases=None, **metadata):
"""
    Decorator binding a model and a serializer to a viewset definition
    :param model: Model
    :param serializer: Serializer
    :param permissions: Specific permissions
    :param queryset: Override of the default queryset for the viewset
    :param bases: Classes the default serializer must inherit from
    :param metadata: Serializer metadata
    :return: ViewSet
"""
from common.api.permissions import CommonModelPermissions
def wrapper(viewset):
viewset.queryset = queryset or model.objects.all()
viewset.model = model
viewset.serializer_class = serializer
viewset.simple_serializer = create_model_serializer(model, bases=bases, **metadata)
excludes_many_to_many_from_serializer(viewset.simple_serializer)
viewset.default_serializer = create_model_serializer(model, bases=bases, hyperlinked=False, **metadata)
viewset.permission_classes = permissions or [CommonModelPermissions]
return viewset
return wrapper
def excludes_many_to_many_from_serializer(serializer):
"""
    Excludes many-to-many fields from a model serializer
    :param serializer: Serializer (class)
    :return: Nothing
"""
model = getattr(serializer.Meta, "model", None)
if model is None:
return
fields = getattr(serializer.Meta, "fields", None)
if fields == "__all__":
fields = None
del serializer.Meta.fields
if fields is None:
serializer.Meta.exclude = list(
set(getattr(serializer.Meta, "exclude", [])) | {field.name for field in model._meta.many_to_many}
)
def create_model_serializer(model, bases=None, attributes=None, hyperlinked=True, **metas):
"""
    Creates the ModelSerializer for the model passed as parameter
    :param model: Model to serialize
    :param bases: Classes the serializer must inherit from
    :param attributes: Specific attributes of the serializer
    :param hyperlinked: Enable or disable URL handling for the primary key
    :param metas: Serializer metadata
    :return: serializer
"""
from common.api.serializers import CommonModelSerializer
serializer = type(
"{}GenericSerializer".format(model._meta.object_name), (bases or (CommonModelSerializer,)), (attributes or {})
)
if not hyperlinked:
serializer.serializer_related_field = PrimaryKeyRelatedField
return to_model_serializer(model, **metas)(serializer)
def serializer_factory(excludes):
"""
    Factory providing the two helpers to retrieve the serializer class and a serializer instance
    :param excludes: Fields to exclude from the ModelSerializer, keyed by model
    :return: Serializer class getter, serializer instance getter
"""
def get_serializer_class(model):
return create_model_serializer(model, excludes=excludes.get(model, ()))
def get_serializer(model, *args, **kwargs):
return get_serializer_class(model)(*args, **kwargs)
return get_serializer_class, get_serializer
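# Illustrative usage sketch (``Book`` and ``internal_notes`` are hypothetical):
# get_serializer_class, get_serializer = serializer_factory({Book: ("internal_notes",)})
# BookSerializer = get_serializer_class(Book)
# payload = get_serializer(Book, some_book_instance).data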
def create_model_serializer_and_viewset(
model,
foreign_keys=True,
many_to_many=False,
one_to_one=True,
one_to_many=False,
fks_in_related=False,
null_fks=False,
serializer_base=None,
viewset_base=None,
serializer_data=None,
viewset_data=None,
permissions=None,
queryset=None,
metas=None,
exclude_related=None,
depth=0,
height=1,
_level=0,
_origin=None,
_field=None,
**options
):
"""
    Creates the serializer and viewset classes associated with a model
    :param model: Model
    :param foreign_keys: Fetch foreign key data?
    :param many_to_many: Fetch many-to-many data?
    :param one_to_one: Fetch one-to-one data (depending on depth)?
    :param one_to_many: Fetch one-to-many data (depending on depth)?
    :param fks_in_related: Fetch foreign key data inside reverse relations?
    :param null_fks: Fetch data of foreign keys that may be null?
    :param serializer_base: Classes the serializer must inherit from (dictionary keyed by model)
    :param viewset_base: Classes the viewset must inherit from (dictionary keyed by model)
    :param serializer_data: Extra data to add to the serializer (dictionary keyed by model)
    :param viewset_data: Extra data to add to the viewset (dictionary keyed by model)
    :param permissions: Permissions to check in the viewset
    :param queryset: Override of the queryset in the viewset
    :param metas: Metadata of the dependent serializers (dictionary keyed by model)
    :param exclude_related: Names of reverse relations to exclude
    :param depth: Fetch depth for dependent models
    :param height: Maximum fetch height for foreign keys
    :param _level: Current depth (used by recursion)
    :param _origin: Original model in the recursion, to avoid redundancy (used by recursion)
    :param _field: Field name in the original model (used by recursion)
    :param options: Metadata of the base serializer
    :return: Tuple (serializer, viewset)
"""
object_name = model._meta.object_name
    # Serializer and viewset inheritance
from common.api.serializers import CommonModelSerializer
from common.api.viewsets import CommonModelViewSet
_serializer_base = (serializer_base or {}).get(model, (CommonModelSerializer,))
_viewset_base = (viewset_base or {}).get(model, (CommonModelViewSet,))
    # Add the hyperlinks serializer to the list when hyperlinks are enabled
    _bases = _serializer_base  # The viewsets' default serializer must not inherit from the hyperlinks serializer
    # If neither the serializer nor the viewset is overridden, fall back to the defaults
_serializer_base = _serializer_base or (serializers.ModelSerializer,)
_viewset_base = _viewset_base or (viewsets.ModelViewSet,)
    # Extra data for the serializer and viewset
_serializer_data = (serializer_data or {}).get(model, {}).copy()
_viewset_data = (viewset_data or {}).get(model, {}).copy()
    # Serializer metadata
exclude_related = exclude_related if isinstance(exclude_related, dict) else {model: exclude_related or []}
metadata = (metas or {}).get(model, {})
metadata.update(options)
metadata["extra_kwargs"] = metadata.get("extra_kwargs", {})
    # Checks whether a given field name is included or excluded
def field_allowed(field_name):
return field_name in metadata.get("fields", []) or (
field_name not in metadata.get("exclude", []) and field_name not in exclude_related.get(model, [])
)
    # Create the serializer and the viewset
serializer = to_model_serializer(model, **metadata)(
type(object_name + "Serializer", _serializer_base, _serializer_data)
)
viewset = to_model_viewset(model, serializer, permissions, bases=_bases, **metadata)(
type(object_name + "ViewSet", _viewset_base, _viewset_data)
)
    # Override the default queryset in the viewset
if queryset is not None:
viewset.queryset = queryset
    # Handle foreign keys
relateds = []
prefetchs = []
    prefetchs_metadata = []  # Prefetches used to fetch metadata at each level
excludes = []
for field in model._meta.fields:
if field.primary_key or not field.remote_field or field.related_model is _origin:
continue
        # Check that the field is included and not excluded
if not field_allowed(field.name):
excludes.append(field.name)
continue
        # Add the serializer for the foreign key relation
if (foreign_keys and 0 >= _level > -height) or (fks_in_related and _level > 0):
fk_serializer, fk_viewset = create_model_serializer_and_viewset(
field.related_model,
foreign_keys=foreign_keys,
many_to_many=False,
one_to_one=False,
one_to_many=False,
fks_in_related=False,
null_fks=False,
serializer_base=serializer_base,
viewset_base=viewset_base,
serializer_data=serializer_data,
viewset_data=viewset_data,
exclude_related=exclude_related,
metas=metas,
depth=0,
height=height,
_level=_level - 1,
_origin=model,
_field=field.name,
)
serializer._declared_fields[field.name] = fk_serializer(read_only=True)
relateds.append(field.name)
            # Fetch higher-level relations if needed
field_relateds = get_related(field.related_model, null=null_fks, height=height - 1, _models=[model])
relateds += [
"__".join([field.name, field_related])
for field_related in field_relateds
if field_related not in exclude_related.get(field.related_model, [])
]
elif _level > 0:
            # Foreign keys of reverse relations pointing to the origin model may be null
if field.remote_field and not field.primary_key and field.related_model is _origin:
serializer.Meta.extra_kwargs[field.name] = dict(required=False, allow_null=True)
        # Prefetch metadata
prefetchs_metadata += prefetch_metadata(field.related_model, field.name)
    # Handle many-to-many relations
if many_to_many and depth > _level:
for field in model._meta.many_to_many:
            # Check that the field is included and not excluded
if not field_allowed(field.name):
excludes.append(field.name)
continue
            # Add the serializer for the many-to-many relation
m2m_serializer, m2m_viewset = create_model_serializer_and_viewset(
field.related_model,
foreign_keys=False,
many_to_many=False,
one_to_one=False,
one_to_many=False,
fks_in_related=False,
null_fks=False,
serializer_base=serializer_base,
viewset_base=viewset_base,
serializer_data=serializer_data,
viewset_data=viewset_data,
exclude_related=exclude_related,
metas=metas,
depth=0,
height=0,
_level=0,
_origin=model,
_field=field.name,
)
serializer._declared_fields[field.name] = m2m_serializer(many=True, read_only=True)
prefetchs.append(field.name)
            # Prefetch metadata
prefetchs_metadata += prefetch_metadata(field.related_model, field.name)
else:
        # Exclude many-to-many fields from the serializer
excludes_many_to_many_from_serializer(viewset.serializer_class)
    # Handle one-to-one relations
if one_to_one and depth > _level:
for field in model._meta.related_objects:
if not field.auto_created or not field.one_to_one:
continue
            # Check that the field is included and not excluded
if not field_allowed(field.name):
excludes.append(field.name)
continue
field_name = field.get_accessor_name()
            # Add the serializer for the reverse relation
child_serializer, child_viewset = create_model_serializer_and_viewset(
field.related_model,
foreign_keys=foreign_keys,
many_to_many=many_to_many,
one_to_one=one_to_one,
one_to_many=one_to_many,
fks_in_related=fks_in_related,
null_fks=null_fks,
serializer_base=serializer_base,
viewset_base=viewset_base,
serializer_data=serializer_data,
viewset_data=viewset_data,
exclude_related=exclude_related,
metas=metas,
depth=depth,
height=0,
_level=_level + 1,
_origin=model,
_field=field_name,
)
serializer._declared_fields[field_name] = child_serializer(read_only=True)
relateds.append(field_name)
            # Fetch higher-level relations if needed
field_relateds = get_related(
field.related_model, one_to_one=True, null=null_fks, height=height - 1, _models=[model]
)
relateds += [
"__".join([field_name, field_related])
for field_related in field_relateds
if field_related not in exclude_related.get(field.related_model, [])
]
    # Handle one-to-many relations
if one_to_many and depth > _level:
for field in model._meta.related_objects:
if not field.auto_created or not field.one_to_many:
continue
            # Check that the field is included and not excluded, and that it really is a field
if not field_allowed(field.name):
excludes.append(field.name)
continue
field_name = field.get_accessor_name()
            # Add the serializer for the reverse relation
child_serializer, child_viewset = create_model_serializer_and_viewset(
field.related_model,
foreign_keys=foreign_keys,
many_to_many=many_to_many,
one_to_one=one_to_one,
one_to_many=one_to_many,
fks_in_related=fks_in_related,
null_fks=null_fks,
serializer_base=serializer_base,
viewset_base=viewset_base,
serializer_data=serializer_data,
viewset_data=viewset_data,
exclude_related=exclude_related,
metas=metas,
depth=depth,
height=0,
_level=_level + 1,
_origin=model,
_field=field_name,
)
serializer._declared_fields[field_name] = child_serializer(many=True, read_only=True)
    # Fetch reverse relations
arguments = dict(
depth=depth,
excludes=excludes,
foreign_keys=fks_in_related,
one_to_one=one_to_one,
one_to_many=one_to_many,
many_to_many=many_to_many,
null=null_fks,
)
prefetchs += get_prefetchs(model, **arguments)
prefetchs_metadata += get_prefetchs(model, metadata=True, **arguments)
    # Inject foreign keys into the viewset queryset
if relateds:
viewset.queryset = viewset.queryset.select_related(*relateds)
    # Inject many-to-many and reverse relations into the viewset queryset
if prefetchs:
viewset.queryset = viewset.queryset.prefetch_related(*prefetchs)
viewset.metadata = prefetchs_metadata
return serializer, viewset
def perishable_view(func):
"""
    Decorator enriching the request used by the function with the attributes 'valid' (bool) and 'valid_date' (date),
    as well as the 'valid_filter' dict to apply on select_valid, all read from the query params
    (None when not provided)
    :param func: Function to decorate
    :return: Function with the enriched request
"""
@wraps(func)
def wrapper(item, *args, **kwargs):
# "request = item.request" dans le cas d'une ViewSet, "item" dans le cas d'une api_view
request = item.request if hasattr(item, "request") else item
valid = None
valid_date = None
params = request.data if request.data else request.query_params
if params:
valid = str_to_bool(params.get("valid", None))
valid_date = parsedate(params.get("valid_date", None))
setattr(request, "valid", valid)
setattr(request, "valid_date", valid_date)
setattr(request, "valid_filter", dict(valid=valid, date=valid_date))
return func(item, *args, **kwargs)
return wrapper
def api_view_with_serializer(http_method_names=None, input_serializer=None, serializer=None, validation=True):
"""
    Decorator creating an APIView from a function, driven by a serializer structure
    It replaces the default @api_view decorator provided by Django REST Framework
    :param http_method_names: Supported HTTP methods
    :param input_serializer: Serializer for the input data
    :param serializer: Serializer for the output data
    :param validation: Run validation on the input data? (the request will then carry "validated_data")
    :return: APIView
"""
def decorator(func):
@wraps(func)
def inner_func(request, *args, **kwargs):
result = func(request, *args, **kwargs)
if isinstance(result, Response):
return result
if not serializer:
return Response(result)
try:
many = isinstance(result, (list, QuerySet))
return Response(serializer(result, many=many, context=dict(request=request)).data)
except: # noqa
return Response(result)
view = api_view(http_method_names)(inner_func)
if input_serializer:
view_class = view.view_class
view_class.serializer_class = input_serializer
            # Reuse the serializer accessor methods for the APIView metadata
from rest_framework.generics import GenericAPIView
view_class.get_serializer = GenericAPIView.get_serializer
view_class.get_serializer_context = GenericAPIView.get_serializer_context
view_class.get_serializer_class = GenericAPIView.get_serializer_class
if validation:
# POST
post_handler = getattr(view_class, "post", None)
if post_handler:
def handler(self, request, *args, **kwargs):
serializer_instance = input_serializer(data=request.data)
serializer_instance.is_valid(raise_exception=True)
request.validated_data = serializer_instance.validated_data
return post_handler(self, request, *args, **kwargs)
view_class.post = handler
# PUT
put_handler = getattr(view_class, "put", None)
if put_handler:
def handler(self, request, *args, **kwargs):
partial = kwargs.pop("partial", False)
instance = self.get_object()
serializer_instance = input_serializer(instance, data=request.data, partial=partial)
serializer_instance.is_valid(raise_exception=True)
request.validated_data = serializer_instance.validated_data
                        return put_handler(self, request, *args, **kwargs)
view_class.put = handler
return view
return decorator
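# Illustrative usage sketch (``Book`` and ``BookSerializer`` are hypothetical): the
# decorated function may return a queryset, a list or a Response; when possible the
# result is rendered through the output serializer, otherwise it is returned as-is.
# @api_view_with_serializer(["GET"], serializer=BookSerializer)
# def recent_books(request):
#     return Book.objects.order_by("-pk")[:10]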
def auto_view(
http_method_names=None,
input_serializer=None,
serializer=None,
validation=True,
many=False,
custom_func=None,
query_func=None,
func_args=None,
func_kwargs=None,
):
"""
    Decorator generating the body of an APIView from a QuerySet
    :param http_method_names: Supported HTTP methods
    :param input_serializer: Serializer for the input data
    :param serializer: Serializer for the output data
    :param validation: Run validation on the input data? (the request will then carry "validated_data")
    :param many: Return several items, or a single item (404 when not found)?
    :param custom_func: Optional function transforming the QuerySet
        function(request: Request, queryset: QuerySet) -> Union[QuerySet, Tuple[QuerySet, dict]]
    :param query_func: Function used to fetch the items ('first' or 'all' by default, depending on 'many')
    :param func_args: Optional positional arguments for the fetch function (e.g. for 'latest' or 'earliest')
    :param func_kwargs: Optional keyword arguments for the fetch function
    :return: API View
"""
query_func = (query_func or QuerySet.all) if many else (query_func or QuerySet.first)
func_args = func_args or []
func_kwargs = func_kwargs or {}
def wrapper(func):
@wraps(func)
def wrapped(request, **kwargs):
context = {}
queryset = func(request, **kwargs)
if isinstance(queryset, tuple):
                # (Optional) The function may return a context in addition to its QuerySet
queryset, context = queryset
if custom_func:
queryset = custom_func(request, queryset)
if many and serializer:
return api_paginate(
request,
queryset,
serializer,
context=context,
query_func=query_func,
func_args=func_args,
func_kwargs=func_kwargs,
)
queryset = query_func(queryset, *func_args, **func_kwargs)
if not isinstance(queryset, QuerySet) and not queryset:
raise NotFound()
if not serializer:
return Response(queryset)
return Response(serializer(queryset, context=dict(request=request, **context)).data)
return api_view_with_serializer(
http_method_names=http_method_names,
input_serializer=input_serializer,
serializer=serializer,
validation=validation,
)(wrapped)
return wrapper
def api_paginate(
request,
queryset,
serializer,
pagination=None,
enable_options=True,
context=None,
query_func=None,
func_args=None,
func_kwargs=None,
):
"""
    Adds pagination to the results of a QuerySet rendered through a given serializer
    :param request: HTTP request
    :param queryset: QuerySet
    :param serializer: Serializer
    :param pagination: Pagination class
    :param enable_options: Enable all the filter/sort/aggregation/distinct options
    :param context: Serializer context
    :param query_func: Specific function to run on the QuerySet before pagination
    :param func_args: Positional arguments for that function
    :param func_kwargs: Keyword arguments for that function
    :return: Paginated HTTP response with the results
"""
from common.api.pagination import CustomPageNumberPagination
pagination = pagination or CustomPageNumberPagination
    # Reserved keywords in URLs
default_reserved_query_params = ["format", pagination.page_query_param, pagination.page_size_query_param]
reserved_query_params = default_reserved_query_params + RESERVED_QUERY_PARAMS
url_params = request.query_params.dict()
context = dict(request=request, **(context or {}))
options = dict(aggregates=None, annotates=None, distinct=None, filters=None, order_by=None)
    # Enable options
if enable_options:
        # Search criteria stored in the cache
cache_key = url_params.pop("cache", None)
if cache_key:
from django.core.cache import cache
cache_params = cache.get(settings.API_CACHE_PREFIX + cache_key, {})
new_url_params = {}
new_url_params.update(**cache_params)
new_url_params.update(**url_params)
url_params = new_url_params
new_cache_params = {
key: value for key, value in url_params.items() if key not in default_reserved_query_params
}
if new_cache_params:
from datetime import timedelta
from django.utils.timezone import now
cache_timeout = int(url_params.pop("timeout", settings.API_CACHE_TIMEOUT)) or None
cache.set(settings.API_CACHE_PREFIX + cache_key, new_cache_params, timeout=cache_timeout)
options["cache_expires"] = now() + timedelta(seconds=cache_timeout) if cache_timeout else "never"
cache_url = "{}?cache={}".format(request.build_absolute_uri(request.path), cache_key)
plain_url = cache_url
for key, value in url_params.items():
url_param = "&{}={}".format(key, value)
if key in default_reserved_query_params:
cache_url += url_param
plain_url += url_param
options["cache_data"] = new_cache_params
options["cache_url"] = cache_url
options["raw_url"] = plain_url
        # Silent errors
silent = str_to_bool(url_params.get("silent", ""))
        # Filters (wrapped in a function so it can be called by aggregations without group_by)
def do_filter(queryset):
try:
filters, excludes = {}, {}
for key, value in url_params.items():
key = key.replace(".", "__")
if value.startswith("[") and value.endswith("]"):
value = F(value[1:-1].replace(".", "__"))
if key in reserved_query_params:
continue
if key.startswith("-"):
key = key[1:].strip()
excludes[key] = url_value(key, value)
else:
key = key[1:].strip() if key.startswith("+") else key.strip()
filters[key] = url_value(key, value)
if filters:
queryset = queryset.filter(**filters)
if excludes:
queryset = queryset.exclude(**excludes)
                # Generic filters
others = url_params.get("filters", "")
if others:
queryset = queryset.filter(parse_filters(others))
if filters or excludes or others:
options["filters"] = True
except Exception as error:
if not silent:
raise ValidationError(dict(error="filters: {}".format(error)), code="filters")
options["filters"] = False
if settings.DEBUG:
options["filters_error"] = str(error)
return queryset
# Annotations
annotations = {}
try:
for annotation, function in FUNCTIONS.items():
for field_name in url_params.pop(annotation, "").split(","):
if not field_name:
continue
field_name, *args = field_name.split("|")
function_args = []
for arg in args:
try:
function_args.append(ast.literal_eval(arg))
except (SyntaxError, ValueError):
arg = arg.replace(".", "__")
if any(arg.endswith(":{}".format(cast)) for cast in CASTS):
arg, *junk, cast = arg.split(":")
cast = CASTS.get(cast.lower())
arg = functions.Cast(arg, output_field=cast()) if cast else arg
function_args.append(arg)
field_name = field_name.replace(".", "__")
field = field_name
if any(field_name.endswith(":{}".format(cast)) for cast in CASTS):
field_name, *junk, cast = field_name.split(":")
cast = CASTS.get(cast.lower())
field = functions.Cast(field_name, output_field=cast()) if cast else field_name
annotations[annotation + "__" + field_name] = function(field, *function_args)
if annotations:
queryset = queryset.annotate(**annotations)
options["annotates"] = True
except Exception as error:
if not silent:
raise ValidationError(dict(error="annotates: {}".format(error)), code="annotates")
options["annotates"] = False
if settings.DEBUG:
options["annotates_error"] = str(error)
# Aggregations
aggregations = {}
try:
for aggregate, function in AGGREGATES.items():
for field_name in url_params.get(aggregate, "").split(","):
if not field_name:
continue
distinct = field_name.startswith(" ") or field_name.startswith("+")
field_name = field_name[1:] if distinct else field_name
field_name = field_name.strip().replace(".", "__")
value = field_name
if any(field_name.endswith(":{}".format(cast)) for cast in CASTS):
field_name, *junk, cast = field_name.split(":")
cast = CASTS.get(cast.lower())
value = functions.Cast(field_name, output_field=cast()) if cast else value
aggregations[aggregate + "__" + field_name] = function(value, distinct=distinct)
group_by = url_params.get("group_by", "")
if group_by:
_queryset = queryset.values(*group_by.replace(".", "__").split(","))
if aggregations:
_queryset = _queryset.annotate(**aggregations)
else:
_queryset = _queryset.distinct()
queryset = _queryset
options["aggregates"] = True
elif aggregations:
options["aggregates"] = True
                queryset = do_filter(queryset)  # Apply filters, if any
return queryset.aggregate(**aggregations)
except ValidationError:
raise
except Exception as error:
if not silent:
raise ValidationError(dict(error="aggregates: {}".format(error)), code="aggregates")
options["aggregates"] = False
if settings.DEBUG:
options["aggregates_error"] = str(error)
        # Filters
queryset = do_filter(queryset)
        # Sorting
orders = []
try:
order_by = url_params.get("order_by", "")
if order_by:
for order in order_by.replace(".", "__").split(","):
nulls_first, nulls_last = order.endswith("<"), order.endswith(">")
order = order[:-1] if nulls_first or nulls_last else order
if order.startswith("-"):
orders.append(F(order[1:]).desc(nulls_first=nulls_first, nulls_last=nulls_last))
else:
order = order[1:] if order.startswith("+") or order.startswith(" ") else order
orders.append(F(order).asc(nulls_first=nulls_first, nulls_last=nulls_last))
temp_queryset = queryset.order_by(*orders)
str(temp_queryset.query) # Force SQL evaluation to retrieve exception
queryset = temp_queryset
options["order_by"] = True
except EmptyResultSet:
pass
except Exception as error:
if not silent:
raise ValidationError(dict(error="order_by: {}".format(error)), code="order_by")
options["order_by"] = False
if settings.DEBUG:
options["order_by_error"] = str(error)
# Distinct
distincts = []
try:
distinct = url_params.get("distinct", "")
if distinct:
distincts = distinct.replace(".", "__").split(",")
if str_to_bool(distinct) is not None:
distincts = []
queryset = queryset.distinct(*distincts)
options["distinct"] = True
except EmptyResultSet:
pass
except Exception as error:
if not silent:
raise ValidationError(dict(error="distinct: {}".format(error)), code="distinct")
options["distinct"] = False
if settings.DEBUG:
options["distinct_error"] = str(error)
# Extraction of specific fields
fields = url_params.get("fields", "")
if fields:
# Drop the fetching of related objects
queryset = queryset.select_related(None).prefetch_related(None)
# Specific fields
try:
relateds = set()
field_names = set()
for field in fields.replace(".", "__").split(","):
if not field:
continue
field_names.add(field)
*related, field_name = field.split("__")
if related and field not in annotations:
relateds.add("__".join(related))
if relateds:
queryset = queryset.select_related(*relateds)
if field_names:
queryset = queryset.values(*field_names)
except Exception as error:
if not silent:
raise ValidationError(dict(error="fields: {}".format(error)), code="fields")
# Utility function to add a field to the serializer
def add_field_to_serializer(fields, field_name):
field_name = field_name.strip()
source = field_name.strip().replace(".", "__")
# Specific field in case of an enumeration
choices = getattr(get_field_by_path(queryset.model, field_name), "flatchoices", None)
if choices and str_to_bool(url_params.get("display", "")):
fields[field_name + "_display"] = ChoiceDisplayField(choices=choices, source=source)
# Specific field for displaying the value
fields[field_name] = ReadOnlyObjectField(source=source if "." in field_name else None)
# On-the-fly serializer creation in case of aggregation or field restriction
aggregations = {}
for aggregate in AGGREGATES.keys():
for field in url_params.get(aggregate, "").split(","):
if not field:
continue
field_name = aggregate + "__" + field.strip()
source = field_name.replace(".", "__") if "." in field else None
aggregations[field_name] = serializers.ReadOnlyField(source=source)
# Groupings & aggregations
if "group_by" in url_params or aggregations:
fields = {}
for field in url_params.get("group_by", "").split(","):
add_field_to_serializer(fields, field)
fields.update(aggregations)
fields.update(annotations)
# A serializer with the grouped data is created on the fly
serializer = type(serializer.__name__, (serializers.Serializer,), fields)
# Field restriction
elif "fields" in url_params:
fields = {}
for field in url_params.get("fields", "").split(","):
add_field_to_serializer(fields, field)
fields.update(annotations)
# A serializer with the restricted fields is created on the fly
serializer = type(serializer.__name__, (serializers.Serializer,), fields)
elif annotations:
serializer._declared_fields.update({key: serializers.ReadOnlyField() for key, value in annotations.items()})
# Specific function
if query_func:
func_args = func_args or []
func_kwargs = func_kwargs or {}
queryset = query_func(queryset, *func_args, **func_kwargs)
# Only if all the data is requested
all_data = str_to_bool(url_params.get("all", ""))
if all_data:
return Response(serializer(queryset, context=context, many=True).data)
# Pagination, with the filter/ordering options added to the paginated response
paginator = pagination()
if enable_options and hasattr(paginator, "additional_data"):
paginator.additional_data = dict(options=options)
# Force ordering on the primary key when paginating
if hasattr(queryset, "ordered") and not queryset.ordered:
primary_key = get_pk_field(queryset.model)
queryset = queryset.order_by(
*(getattr(queryset, "_fields", None) or (enable_options and distincts) or [primary_key.name])
)
serializer = serializer(paginator.paginate_queryset(queryset, request), context=context, many=True)
return paginator.get_paginated_response(serializer.data)
def create_api(
*models,
default_config=None,
router=None,
all_serializers=None,
all_viewsets=None,
all_bases_serializers=None,
all_bases_viewsets=None,
all_data_serializers=None,
all_data_viewsets=None,
all_querysets=None,
all_metadata=None,
all_configs=None
):
"""
Crée les APIs REST standard pour les modèles donnés
:param models: Liste des modèles
:param default_config: Configuration par défaut des APIs
:param router: Router existant à mettre à jour
:param all_serializers: Tous les serializers créés jusqu'à présent
:param all_viewsets: Tous les viewsets créés jusqu'à présent
:param all_bases_serializers: Toutes les bases de serializers créées jusqu'à présent
:param all_bases_viewsets: Toutes les bases de viewsets créées jusqu'à présent
:param all_metadata: Toutes les métadonnées créées jusqu'à présent
:param all_data_serializers: Toutes les données de serializers créées jusqu'à présent
:param all_data_viewsets: Toutes les données de viewsets créées jusqu'à présent
:param all_querysets: Toutes les requêtes créées jusqu'à présent
:param all_configs: Toutes les configs créées jusqu'à présent
:return: Router, Serializers, Viewsets
"""
serializers = {}
viewsets = {}
# Fetch the general configuration
from common.api.base import (
CONFIGS,
DEFAULT_CONFIG,
METADATA,
QUERYSETS,
SERIALIZERS,
SERIALIZERS_BASE,
SERIALIZERS_DATA,
VIEWSETS,
VIEWSETS_BASE,
VIEWSETS_DATA,
)
all_serializers = all_serializers or SERIALIZERS
all_viewsets = all_viewsets or VIEWSETS
all_bases_serializers = all_bases_serializers or SERIALIZERS_BASE
all_bases_viewsets = all_bases_viewsets or VIEWSETS_BASE
all_data_serializers = all_data_serializers or SERIALIZERS_DATA
all_data_viewsets = all_data_viewsets or VIEWSETS_DATA
all_querysets = all_querysets or QUERYSETS
all_metadata = all_metadata or METADATA
all_configs = all_configs or CONFIGS
default_config = default_config or DEFAULT_CONFIG
# Create the default serializers and viewsets
for model in models:
if not model:
continue
serializers[model], viewsets[model] = create_model_serializer_and_viewset(
model,
serializer_base=all_bases_serializers,
viewset_base=all_bases_viewsets,
serializer_data=all_data_serializers,
viewset_data=all_data_viewsets,
queryset=all_querysets.get(model, None),
metas=all_metadata,
**all_configs.get(model, default_config or {})
)
# Create the default routes
from rest_framework import routers
router = router or routers.DefaultRouter()
for model, viewset in sorted(viewsets.items(), key=lambda key: key[0]._meta.model_name):
code = model._meta.model_name
router.register(code, viewset, basename=code)
# Update the default serializers and viewsets
all_serializers.update(serializers)
all_viewsets.update(viewsets)
return router, serializers, viewsets
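# Hedged usage sketch (not part of the original module): the model names and the
# "myapp.models" import below are hypothetical. create_api() returns the router
# plus the generated serializers and viewsets keyed by model; the router's urls
# are typically included in urls.py.
def _example_create_api_usage():
    from myapp.models import Author, Book  # hypothetical models
    router, api_serializers, api_viewsets = create_api(Author, Book)
    return router.urls  # e.g. urlpatterns = [path("api/", include(router.urls))]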
def disable_relation_fields(*models, all_metadata=None):
"""
Remplace la liste de choix par un simple champ de saisie pour toutes les relations des modèles donnés
(Permet d'améliorer significativement les performances lors de l'affichage du formulaire dans les APIs)
:param models: Liste des modèles
:param all_metadata: Toutes les métadonnées créées jusqu'à présent
:return: Rien
"""
from common.api.base import METADATA
all_metadata = all_metadata or METADATA
for model in models:
if not model:
continue
metas = {}
for field in model._meta.get_fields():
if field.concrete and not field.auto_created and field.related_model:
metas[field.name] = dict(style={"base_template": "input.html", "placeholder": str(field.verbose_name)})
if metas:
metadata = all_metadata[model] = all_metadata.get(model, {})
extra_kwargs = metadata["extra_kwargs"] = metadata.get("extra_kwargs", {})
for key, value in metas.items():
extra_kwargs[key] = extra_kwargs.get(key, {})
extra_kwargs[key].update(value)
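# Hedged usage sketch (not part of the original module): disable the relation
# choice widgets of hypothetical models before generating their APIs, so the
# browsable API form does not have to load every related row.
def _example_disable_relation_fields_usage():
    from myapp.models import Author, Book  # hypothetical models
    disable_relation_fields(Author, Book)
    return create_api(Author, Book)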
|
from math import sqrt
import sys
import scipy.stats
import seaborn as sns
import pandas as pd
from matplotlib import pyplot as plt
sys.path.append('/home/silvio/git/track-ml-1/utils')
from tracktop import *
original_tracks = sys.argv[1]
reconstructed_tracks = sys.argv[2]
dfOriginal = pd.read_csv(original_tracks)
dfReconstructed = pd.read_csv(reconstructed_tracks)
track_plot(dfReconstructed) #,'teste')
'''
original_tracks = sys.argv[1]
reconstructed_tracks = sys.argv[2]
outputfig = sys.argv[3]
dfOriginal = pd.read_csv(original_tracks)
dfReconstructed = pd.read_csv(reconstructed_tracks)
lines = dfOriginal.shape[0]
columns = dfOriginal.shape[1]
dfEval = pd.DataFrame(index=range(lines),columns=range(29))
ind_dfEval=0
#for hit in range(1, 20):
for hit in range(1, 28):
dataOR=dfOriginal.iloc[:, [ (hit*6)+1,(hit*6)+2,(hit*6)+3 ]]
dataRE=dfReconstructed.iloc[:, [ (hit*6)+1,(hit*6)+2,(hit*6)+3 ]]
dftemp = pd.DataFrame(index=range(lines),columns=range(7))
dftemp[0]=dataOR.iloc[:,[0]]
dftemp[1]=dataOR.iloc[:,[1]]
dftemp[2]=dataOR.iloc[:,[2]]
dftemp[3]=dataRE.iloc[:,[0]]
dftemp[4]=dataRE.iloc[:,[1]]
dftemp[5]=dataRE.iloc[:,[2]]
dftemp[6]=((dftemp[0]-dftemp[3])**2)+((dftemp[1]-dftemp[4])**2)+((dftemp[2]-dftemp[5])**2)
dfEval[ind_dfEval] = dftemp[6]
ind_dfEval=ind_dfEval+1
col = dfEval.loc[: , 0:26]
dfEval[27] = col.mean(axis=1)
dfEval[28] = col.std(axis=1)
print(dfEval.shape)
print(dfEval.iloc[:,[27]].min())
print(dfEval.iloc[:,[27]].max())
plt.figure()
#sns_plot = sns.pairplot(dfEval.iloc[:,[27]]) #, hue='27', size=2.5)
#sns_plot = sns.pairplot(dfEval) #, hue='27', size=2.5)
sns_plot = sns.distplot(dfEval.iloc[:,[27]]);
plt.savefig(outputfig)
'''
|
from locust import HttpLocust, TaskSet, task
import bs4
import random
import sys, os
class WebsiteTasks(TaskSet):
def on_start(self):
res = self.client.get("/runestone/default/user/login")
pq = bs4.BeautifulSoup(res.content, features="lxml")
# Get the csrf key for successful submission
i = pq.select('input[name="_formkey"]')
token = i[0]["value"]
# login a user
try:
user = os.environ["RUNESTONE_TESTUSER"]
pw = os.environ["RUNESTONE_TESTPW"]
except KeyError:
print("ERROR please set RUNESTONE_TESTUSER and RUNESTONE_TESTPW ")
sys.exit(-1)
res = self.client.post(
"/runestone/default/user/login",
{"username": user, "password": pw, "_formkey": token, "_formname": "login"},
)
# Get the index and make a list of all chapters/subchapters
res = self.client.get("/runestone/books/published/fopp/index.html")
pq = bs4.BeautifulSoup(res.content, features="lxml")
pages = pq.select(".toctree-l2 a")
self.bookpages = [p["href"] for p in pages]
@task(5)
def index(self):
self.client.get("/runestone")
@task(20)
def bookpage(self):
# pick a page at random
url = random.choice(self.bookpages)
base = "/runestone/books/published/fopp/"
res = self.client.get(base + url)
pq = bs4.BeautifulSoup(res.content, features="lxml")
# client.get ONLY gets the html, so we need to simulate getting all
# of the static assets ourselves.
for s in pq.select("script"):
if s.has_attr("src"):
if s["src"].startswith(("http", "//")) == False:
js = self.client.get(
base + s["src"].replace("../", ""), name="scripts"
)
for s in pq.select("link"):
if s.has_attr("href"):
if s["href"].startswith(("http", "//")) == False:
css = self.client.get(
base + s["href"].replace("../", ""), name="css"
)
class WebsiteUser(HttpLocust):
host = "http://localhost"
task_set = WebsiteTasks
min_wait = 1000
max_wait = 15000
|
from django.core.urlresolvers import reverse
from django.test import TestCase
class AccountsDeleteViewTestCase(TestCase):
fixtures = ['users_views_testdata.yaml']
def test_delete_non_existent_user(self):
url = reverse('del', args=[99])
resp = self.client.get(url, follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTrue('alert' in resp.context)
self.assertIsNotNone(resp.context['alert'])
|
import numpy as np
import matplotlib.pyplot as plt
# The following helper functions simplify the SPEIS data import from MPT files
def get_boundaries(highest_freq, lowest_freq, f):
if (np.count_nonzero(f[0] >= highest_freq) > 0):
cutoff_start = np.where(f[0] >= highest_freq)[0][-1]
else:
cutoff_start = 0
if (np.count_nonzero(f[0] <= lowest_freq) > 0):
cutoff_end = np.where(f[0] <= lowest_freq)[0][0]
else:
cutoff_end = len(f[0])-1
return cutoff_start, cutoff_end
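# Hedged usage sketch (not part of the original helpers): `demo_f` is assumed to be
# a 2-D array whose first row holds the measured frequencies in descending order,
# which is the layout get_boundaries() expects.
if __name__ == "__main__":
    demo_f = np.array([[1e5, 1e4, 1e3, 1e2, 1e1, 1e0]])
    start, end = get_boundaries(highest_freq=1e4, lowest_freq=1e1, f=demo_f)
    print(start, end)  # indices bounding the 10 kHz .. 10 Hz window (here: 1 4)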
def read_mpt_parameters(filename):
with open(filename, 'r', encoding="latin-1") as input_file:
lines = input_file.readlines()
header_line = lines[1]
header_lines_number = int(header_line.split(":")[1])
# Iterate over the header section containing the relevant information
for i in range(header_lines_number):
if lines[i].startswith("Ei (V)"):
pot_start = float(lines[i].split("(V)")[1])*1E+3
if lines[i].startswith("Ef (V)"):
pot_end = float(lines[i].split("(V)")[1])*1E+3
if lines[i].startswith("N"):
potstepnumber = int(lines[i].strip().split(" ")[-1])+1
pot_step = int(abs(pot_start-pot_end)/(potstepnumber-1))
return pot_start, pot_step, potstepnumber |
"""
返回数据的基类
create by judy 2018/10/22
"""
import datetime
import enum
import io
import json
import os
import threading
from abc import ABCMeta, abstractmethod
import pytz
from commonbaby.helpers import helper_crypto, helper_str
from datacontract.idowndataset import Task
from datacontract.outputdata import (EStandardDataType, OutputData,
OutputDataSeg)
# from idownclient.config_output import muti_seg_count
# The get_write_lines function is deprecated; the muti_seg_count parameter no longer comes from config_output
muti_seg_count = 1000
class EGender(enum.Enum):
"""Unified gender enumeration values"""
Female = 1
Male = 2
Unknown = 3
class EResourceType(enum.Enum):
"""标识资源类型\n
0图片 1视频 2音频 3网站(分享链接) 4其他"""
Picture = 0
Video = 1
Audio = 2
Url = 3
Other_Text = 4
class ESign(enum.Enum):
"""对RESOURCE预定义的sign标记枚举"""
Null = 0
PicUrl = 1 # 标记当前数据为一个头像图片
class DriveTreeData:
"""表示一个目录树结构中的数据(文件夹或文件)。\n
treedataid:当前目录树对象在当前树结构中的的唯一标识\n
path:当前DriveTreeData的完整路径\n"""
__metaclass = ABCMeta
@property
def is_dir(self) -> bool:
"""指示当前数据是否为一个目录"""
return self._is_dir()
@abstractmethod
def _is_dir(self) -> bool:
raise NotImplementedError()
@property
def parent(self):
"""当前DriveTreeData的父目录,
没有父级(本身就是根级)则为None"""
return self._parent
@parent.setter
def parent(self, value):
"""当前DriveTreeData的父目录,
没有父级(本身就是根级)则为None"""
if not isinstance(value, DriveTreeData):
raise Exception(
"Invalid property value set for DriveTreeData.parent")
self._parent = value
def __init__(self, treedataid: str, path: str = '/'):
if treedataid is None:
raise Exception("Invalid param 'treedataid' for DriveTreeData")
if not isinstance(path, str) or path == "":
raise Exception("Invalid param 'path' for DriveTreeData")
self._treedataid: str = treedataid
self._path: str = path
# Parent object
self._parent: DriveTreeData = None
# Child objects
self._subitems: dict = {}
self._subitems_locker = threading.RLock()
# Stores the already-serialized json structure
self._jsondata: dict = {}
self._jsondata_locker = threading.RLock()
self._jsondata_ok: bool = False
def append_item(self, item):
"""Append a child item to the current drive data"""
if not isinstance(item, DriveTreeData):
raise Exception("Invalid param 'item' for DriveTreeData")
item: DriveTreeData = item
with self._subitems_locker:
if self._subitems.__contains__(item._treedataid):
# In general, no cloud drive allows files with the same name in one folder,
# so for now we neither add the item nor raise an error; if identical ids ever occur, handle it then
return
self._subitems[item._treedataid] = item
def _get_tree_json(self) -> dict:
"""Build the json directory-tree structure of the current DriveTreeData"""
if self._jsondata_ok:
return self._jsondata
with self._jsondata_locker:
if self._jsondata_ok:
return self._jsondata
self._jsondata['key'] = self._treedataid
for i in self._get_current_tree_json().items():
key = i[0]
val = i[1]
if self._jsondata.__contains__(key):
continue
self._jsondata[key] = val
if self.is_dir and any(self._subitems):
tmplist: list = []
for i in self._subitems.values():
i: DriveTreeData = i
d: dict = i._get_current_tree_json()
if not isinstance(d, dict):
# raise Exception(
# "Get subitem json dict error, invalid json dict: treedataid={}".
# format(self._treedataid))
continue
if any(d):
tmplist.append(d)
if any(tmplist):
self._jsondata['items'] = tmplist
self._jsondata_ok = True
return self._jsondata
@abstractmethod
def _get_current_tree_json(self) -> dict:
'''Implemented by subclasses: build a dict holding the information of the
current DriveTreeData other than self._treedataid; keep the keys consistent!
For example:\n
{
"name":"xxx",
"url":"xxxx"
}'''
raise NotImplementedError()
class OrderData:
"""Represents data that carries an order dict in the self._order field"""
_transjsonpath = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'translation.json'))
_transjsonloaded: bool = False
_translocker = threading.RLock()
_transjson: dict = None
def __init__(self):
self._order: dict = {}
self._order_locker = threading.Lock()
self._order_res = {}
self._load_transjson()
def _load_transjson(self):
if OrderData._transjsonloaded:
return
with OrderData._translocker:
if OrderData._transjsonloaded:
return
succ = self.__load_transjson()
if succ:
OrderData._transjsonloaded = True
def __load_transjson(self):
with open(OrderData._transjsonpath, 'rb') as f:
string = f.read().decode('utf-8')
OrderData._transjson = json.loads(string)
return True
def append_order(self, **kwargs):
"""将订单以键值对的形式添加到当前数据的详情字典中"""
if kwargs is None or len(kwargs) < 1:
return
with self._order_locker:
self._order.update(kwargs)
def append_orders(self, kwargs: dict):
"""将订单以字典的形式添加到当前数据的详情字典中"""
if not isinstance(kwargs, dict) or len(kwargs) < 1:
return
with self._order_locker:
self._order.update(kwargs)
def _format_order(self):
"""将 self._order 这个字典展平成只有1级的,并
翻译其所有可翻译的键,然后按照标准格式化并返回
纯文本。
例:
{ "orderid":"a", "b":"b", "c": ["c1","c2"], "d": [{"d1":"d1","d2":"d2"}]...}
↓格式化为
订单号:a
b:b
c:["c1","c2"]
d1:d1
d2:d2
"""
dic: dict = OrderData._transjson
if dic is None:
dic = {}
orders = self.__order_iter(self._order)
new_dic = {}
for key, value in orders.items():
if dic.__contains__(key):
new_dic[dic[key]] = orders[key]
else:
new_dic[key] = orders[key]
order_str = ''
for key, value in new_dic.items():
order_str = order_str + key + ':' + str(value) + '\n'
return order_str
def __order_iter(self, order, key='order'):
"""内部格式化函数,递归"""
if isinstance(order, dict):
for key, value in order.items():
if isinstance(value, dict):
self.__order_iter(value, key)
elif isinstance(value, list):
for value1 in value:
if isinstance(value1, dict):
self.__order_iter(value1, key)
elif isinstance(value1, list):
self.__order_iter(value1, key)
else:
self._order_res[key] = value
else:
self._order_res[key] = value
elif isinstance(order, list):
for li in order:
if isinstance(li, dict) or isinstance(li, list):
self.__order_iter(li)
else:
break
self._order_res[key] = order
else:
self._order_res[key] = order
return self._order_res
class DetailedData:
"""表示带有详情self._detail字段的数据"""
def __init__(self):
self._detail: dict = {}
self._detail_locker = threading.Lock()
def append_detail(self, **kwargs):
"""将详情以键值对的形式添加到当前数据的详情字典中"""
if kwargs is None or len(kwargs) < 1:
return
with self._detail_locker:
self._detail.update(kwargs)
def append_details(self, kwargs: dict):
"""将详情以字典的形式添加到当前数据的详情字典中"""
if not isinstance(kwargs, dict) or len(kwargs) < 1:
return
with self._detail_locker:
self._detail.update(kwargs)
class Resource:
"""表示一个资源数据。\n
url:当前资源的唯一标识\n
rsctype:EResourceType资源类型"""
@property
def sign(self):
'''当前Resource资源的特殊标记,统一使用resourcefeedback.ESign枚举值'''
return self.__sign
@sign.setter
def sign(self, value):
'''当前Resource资源的特殊标记,统一使用resourcefeedback.ESign枚举值'''
if not isinstance(value,
ESign) or not self._sign_map.__contains__(value):
raise Exception("Value must be Esign or value is invalid")
self.__sign = value
def __init__(self,
url: str,
rsctype: EResourceType = EResourceType.Other_Text):
if not isinstance(url, str) or url == "":
raise Exception("Invalid param 'url' for Resource")
if not isinstance(rsctype, EResourceType):
raise Exception("Invalid param 'rsctype' for Resource")
self._url: str = url
self._resourcetype: EResourceType = rsctype
self.filename: str = None
self.__sign: ESign = ESign.Null
self._sign_map: dict = {
ESign.Null: None,
ESign.PicUrl: "picurl",
}
class ResourceData:
"""表示一个可能附带资源的数据,即带有resources列表字段的数据类型,
其中可能关联了多个Resource数据。"""
_sign_map: dict = {
ESign.Null: "0",
ESign.PicUrl: "picurl",
}
def __init__(self):
self._resources: list = [] # 格式详情参见数据标准
def append_resource(self, rsc: Resource):
"""将一个RESOURCE资源数据关联到当前数据的resources列表"""
if not issubclass(type(rsc), Resource) or \
helper_str.is_none_or_empty(rsc._url) or \
not isinstance(rsc._resourcetype, EResourceType):
raise Exception(
"Invalid param 'rsc' Resource for append_resource: {}".format(
rsc))
tmp = {
"url": rsc._url,
"type": rsc._resourcetype.value,
}
if isinstance(rsc.sign, ESign) and not rsc.sign == ESign.Null:
tmp['sign'] = ResourceData._sign_map[rsc.sign]
if not helper_str.is_none_or_empty(rsc.filename):
tmp['name'] = rsc.filename
self._resources.append(tmp)
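# Hedged usage sketch (not part of the original module): attach an avatar picture
# resource to a ResourceData container. The URL below is a placeholder.
def _example_append_resource():
    rsc = Resource("http://example.com/avatar.jpg", EResourceType.Picture)
    rsc.sign = ESign.PicUrl
    rsc.filename = "avatar.jpg"
    holder = ResourceData()
    holder.append_resource(rsc)
    # holder._resources now holds [{'url': ..., 'type': 0, 'sign': 'picurl', 'name': 'avatar.jpg'}]
    return holder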
class UniqueData:
"""表示一个可以去重的,有唯一ID标识的数据"""
__metaclass = ABCMeta
def __init__(self, task: Task, apptype: int):
if not isinstance(apptype, int):
raise Exception("AppType is invalid in Uniquedata")
if not isinstance(task, Task):
raise Exception("Task is invalid in Uniquedata")
self._datatype_str_unique: str = type(self).__name__ # 数据类型
self._task: Task = task
self._apptype = apptype
@abstractmethod
def get_write_lines(self) -> str:
raise NotImplementedError()
@abstractmethod
def get_uniqueid(self):
"""子类实现时,返回当前数据的唯一标识id,用于去重数据,和增量下载。
默认实现为返回当前数据的hash值"""
# return helper_crypto.get_md5_from_str(
# self._task.taskid.decode('utf-8'))
hs = self.__hash__()
return hs
@abstractmethod
def get_display_name(self):
"""返回当前数据的显示名称"""
return self.get_uniqueid()
class InnerDataBase(UniqueData, OutputDataSeg):
"""表示多段数据类型的一段数据"""
__metaclass = ABCMeta
def __init__(self, task: Task, apptype: int):
UniqueData.__init__(self, task, apptype)
OutputDataSeg.__init__(self)
# Used to record useful information gathered during crawling
self.remarks = None
def get_write_lines(self) -> str:
"""Return the field content text built from the current data segment"""
lines: str = ''
lines += self._get_write_lines()
if not lines.endswith('\r\n\r\n'):
lines = lines.strip() + '\r\n\r\n'
return lines
@abstractmethod
def _get_write_lines(self) -> str:
"""子类返回当前数据段构建的字段内容文本"""
return ''
@abstractmethod
def get_uniqueid(self):
"""子类实现时,返回当前数据的唯一标识id,用于去重数据,和增量下载"""
return helper_crypto.get_md5_from_str(self.get_write_lines())
def get_output_fields(self) -> dict:
"""返回当前数据段应输出的所有字段字典"""
self._get_output_fields()
return self._fields
@abstractmethod
def _get_output_fields(self) -> dict:
"""子类实现时,返回当前数据段应输出的所有字段字典"""
raise NotImplementedError()
class FeedDataBase(UniqueData, OutputData, OutputDataSeg):
"""
datatype: 数据类型唯一标识\n
suffix: 数据类型后缀\n
task: 关联当前数据的task任务\n
apptype: 当前生成此数据的插件的apptype"""
__metaclass = ABCMeta
@property
def innerdata_len(self):
with self.__innerdata_locker:
return len(self.__innerdatas)
@property
def io_stream(self) -> io.RawIOBase:
"""当前数据的数据体,二进制流"""
return self._get_io_stream()
@io_stream.setter
def io_stream(self, value):
"""当前数据的数据体,二进制流"""
self._set_io_stream(value)
@abstractmethod
def _get_io_stream(self) -> io.RawIOBase:
"""子类实现时,返回当前数据的文件体,二进制流"""
return self._io_stream
@abstractmethod
def _set_io_stream(self, value):
"""子类实现时,设置当前数据的文件体,二进制流"""
self._io_stream = value
def __init__(self,
suffix,
datatype: EStandardDataType,
task: Task,
apptype: int,
clientid: str,
is_muti_seg: bool = False):
UniqueData.__init__(self, task, apptype)
OutputData.__init__(self, self._task.platform, datatype)
OutputDataSeg.__init__(self)
if not isinstance(clientid, str) or clientid == "":
raise Exception("Invalid param 'clientid' for FeedDataBase")
self._clientid: str = clientid
# UTC+8 (Asia/Shanghai) time
self.time = datetime.datetime.now(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S')
if not isinstance(suffix, str) or suffix == "":
raise Exception("Suffix is invalid.")
self._is_muti_seg: bool = False
if isinstance(is_muti_seg, bool):
self._is_muti_seg = is_muti_seg
self._suffix: str = suffix  # File suffix
self.__innerdatas: list = []  # Inner multi-segment data
self.__innerdata_locker = threading.Lock()
self._io_stream = None  # Data stream downloaded from the web
# Always use ha to fetch the length from the response head
# resp = ha.get_response()
# lengthn = resp.headers.get('Content-Length')
# responseio = ResponseIO(resp)
self.stream_length = 0  # Size of the downloaded file stream, used for file-size filtering
self.remarks = None  # Used to record useful information gathered during crawling
def __iter__(self):
return self.__innerdatas.__iter__()
def first_innerdata(self):
"""若当前数据的innerdata集合不为空,则返回Innerdata集合中的第一个元素。
否则返回None"""
if self.innerdata_len > 0:
return self.__innerdatas[0]
return None
def append_innerdata(self, innerdata: InnerDataBase):
"""添加一个内部数据结构体到当前总数据结构体中"""
if not isinstance(innerdata, InnerDataBase):
raise Exception("Inner data is invalid.")
with self.__innerdata_locker:
innerdata.owner_data = self
self.__innerdatas.append(innerdata)
def remove_innerdata(self, innerdata: InnerDataBase):
'''Remove the given InnerDataBase object from the current data's child segment collection'''
if not isinstance(innerdata, InnerDataBase):
return
with self.__innerdata_locker:
if innerdata in self.__innerdatas:
self.__innerdatas.remove(innerdata)
@abstractmethod
def get_output_segs(self) -> iter:
"""子类实现时,返回当前数据包含的数据段iterable"""
if any(self.__innerdatas):
segidx = 1
with self.__innerdata_locker:
for seg in self.__innerdatas:
seg: InnerDataBase = seg
seg.append_to_fields('clientid', self._clientid)
seg.append_to_fields('taskid', self._task.taskid)
seg.append_to_fields('batchid', self._task.batchid)
seg.append_to_fields('apptype', self._apptype)
seg.append_to_fields('time', self.time)
seg.append_to_fields('casenode', self._task.casenode)
seg.append_to_fields('source', self._task.source)
seg.segindex = segidx
segidx += 1
yield seg
else:
self.segindex = 1
if self.owner_data is None:
self.owner_data = self
yield self
def get_output_fields(self) -> dict:
"""返回当前数据段应输出的所有字段字典"""
self.append_to_fields('clientid', self._clientid)
self.append_to_fields('taskid', self._task.taskid)
self.append_to_fields('batchid', self._task.batchid)
self.append_to_fields('apptype', self._apptype)
self.append_to_fields('time', self.time)
self.append_to_fields('casenode', self._task.casenode)
self.append_to_fields('source', self._task.source)
for field in self._task._other_fields.items():
self.append_to_fields(field[0], field[1])
return self._get_output_fields()
@abstractmethod
def _get_output_fields(self) -> dict:
"""子类实现时,返回当前数据段应输出的所有字段字典"""
raise NotImplementedError()
def get_write_lines(self) -> iter:
""""""
lines: str = ''
if any(self.__innerdatas):
with self.__innerdata_locker:
segcount: int = 0
for innerdata in self.__innerdatas:
innerdata: InnerDataBase = innerdata
lines += self._get_common_fields_lines()
lines += innerdata.get_write_lines()
if not lines.endswith('\r\n\r\n'):
lines = lines.strip() + '\r\n\r\n'
segcount += 1
if segcount >= muti_seg_count:
yield lines.encode('utf-8')
lines = ''
segcount = 0
if not helper_str.is_none_or_empty(lines):
yield lines.encode('utf-8')
elif isinstance(self.io_stream, io.IOBase):
lines += self._get_common_fields_lines()
lines += self._get_write_lines()
if not lines.endswith('\r\n\r\n'):
lines = lines.strip() + '\r\n\r\n'
yield lines.encode('utf-8')
else:
lines += self._get_common_fields_lines()
lines += self._get_write_lines()
if not lines.endswith('\r\n\r\n'):
lines = lines.strip() + '\r\n\r\n'
yield lines.encode('utf-8')
@abstractmethod
def _get_write_lines(self) -> str:
"""子类按数据类型返回子类应有的字段"""
return ''
def _get_common_fields_lines(self):
"""以每行一个字段的形式返回当前数据中的基本共有字段"""
lines: str = ''
lines += 'taskid:{}\r\n'.format(self._task.taskid)
lines += 'apptype:{}\r\n'.format(self._task.apptype)
lines += 'time:{}\r\n'.format(self.time)
for field in self._task._other_fields.items():
if field[0] is None or field[1] is None:
continue
lines += "{}:{}\r\n".format(field[0],
helper_str.base64format(field[1]))
return lines
def get_stream(self):
return self.io_stream
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''main.py - Waqas Bhatti ([email protected]) - Aug 2018
License: MIT - see the LICENSE file for the full text.
This is the main file for the authnzerver, a simple authorization and
authentication server backed by SQLite and Tornado for use with the lcc-server.
'''
#############
## LOGGING ##
#############
import logging
# setup a logger
LOGMOD = __name__
#############
## IMPORTS ##
#############
import os
import os.path
import socket
import sys
import signal
import time
from functools import partial
# setup signal trapping on SIGINT
def recv_sigint(signum, stack):
'''
handler function to receive and process a SIGINT
'''
raise KeyboardInterrupt
#####################
## TORNADO IMPORTS ##
#####################
# experimental, probably will remove at some point
try:
import asyncio
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
HAVE_UVLOOP = True
IOLOOP_SPEC = 'uvloop'
except Exception as e:
HAVE_UVLOOP = False
IOLOOP_SPEC = 'asyncio'
import tornado.ioloop
import tornado.httpserver
import tornado.web
import tornado.options
from tornado.options import define, options
import multiprocessing as mp
###############################
### APPLICATION SETUP BELOW ###
###############################
modpath = os.path.abspath(os.path.dirname(__file__))
# define our commandline options
# the port to serve on
# indexserver will serve on 12600-12604 by default
define('port',
default=12600,
help='Run on the given port.',
type=int)
# the address to listen on
define('serve',
default='127.0.0.1',
help='Bind to given address and serve content.',
type=str)
# whether to run in debugmode or not
define('debugmode',
default=0,
help='start up in debug mode if set to 1.',
type=int)
# number of background threads in the pool executor
define('backgroundworkers',
default=4,
help=('number of background workers to use '),
type=int)
# basedir is the directory at the root where all LCC collections are stored this
# contains subdirs for each collection and a lcc-collections.sqlite file that
# contains info on all collections.
define('basedir',
default=os.getcwd(),
help=('The base directory of the light curve collections.'),
type=str)
# the path to the authentication DB
define('authdb',
default=None,
help=('An SQLAlchemy database URL to override the use of '
'the local authentication DB. '
'This should be in the form discussed at: '
'https://docs.sqlalchemy.org/en/latest'
'/core/engines.html#database-urls'),
type=str)
# the path to the cache directory used by indexserver
define('cachedir',
default='/tmp/lccserver-cache',
help=('Path to the cache directory used by the main LCC-Server '
'indexserver process as defined in its site-info.json config '
'file.'),
type=str)
define('sessionexpiry',
default=7,
help=('This tells the lcc-server the session-expiry time in days.'),
type=int)
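# Hedged example invocation (illustration only; the module path and directories
# below are placeholders, not taken from the original source):
#
#   python -m authnzerver.main --port=12600 --serve=127.0.0.1 \
#       --basedir=/path/to/lcc-basedir --backgroundworkers=4 --sessionexpiry=7
#
# Every flag shown corresponds to one of the define() options above.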
#######################
## UTILITY FUNCTIONS ##
#######################
def setup_auth_worker(authdb_path,
fernet_secret):
'''This stores secrets and the auth DB path in the worker loop's context.
The worker will then open the DB and set up its Fernet instance by itself.
'''
# unregister interrupt signals so they don't get to the worker
# and the executor can kill them cleanly (hopefully)
signal.signal(signal.SIGINT, signal.SIG_IGN)
currproc = mp.current_process()
currproc.auth_db_path = authdb_path
currproc.fernet_secret = fernet_secret
def close_authentication_database():
'''This is used to close the authentication database when the worker loop
exits.
'''
currproc = mp.current_process()
if getattr(currproc, 'table_meta', None):
del currproc.table_meta
if getattr(currproc, 'connection', None):
currproc.connection.close()
del currproc.connection
if getattr(currproc, 'engine', None):
currproc.engine.dispose()
del currproc.engine
print('Shutting down database engine in process: %s' % currproc.name,
file=sys.stdout)
##########
## MAIN ##
##########
def main():
# parse the command line
tornado.options.parse_command_line()
DEBUG = True if options.debugmode == 1 else False
# get a logger
LOGGER = logging.getLogger(__name__)
if DEBUG:
LOGGER.setLevel(logging.DEBUG)
else:
LOGGER.setLevel(logging.INFO)
###################
## LOCAL IMPORTS ##
###################
from ..utils import ProcExecutor
##############
## HANDLERS ##
##############
from .handlers import AuthHandler, EchoHandler
from . import authdb
from . import cache
from . import actions
###################
## SET UP CONFIG ##
###################
MAXWORKERS = options.backgroundworkers
FERNETSECRET = authdb.get_secret_token('LCC_FERNETSECRET',
os.path.join(
options.basedir,
'.lccserver.secret-fernet'
),
LOGGER)
# use the local sqlite DB as the default auth DB
AUTHDB_SQLITE = os.path.join(options.basedir, '.authdb.sqlite')
# pass the DSN to the SQLAlchemy engine
if os.path.exists(AUTHDB_SQLITE):
AUTHDB_PATH = 'sqlite:///%s' % os.path.abspath(AUTHDB_SQLITE)
elif options.authdb:
# if the local authdb doesn't exist, we'll use the DSN provided by the
# user
AUTHDB_PATH = options.authdb
else:
raise ConnectionError(
"No auth DB connection available. "
"The local auth DB is missing or "
"no SQLAlchemy database URL was provided to override it"
)
#
# this is the background executor we'll pass over to the handler
#
executor = ProcExecutor(max_workers=MAXWORKERS,
initializer=setup_auth_worker,
initargs=(AUTHDB_PATH,
FERNETSECRET),
finalizer=close_authentication_database)
# we only have one actual endpoint, the other one is for testing
handlers = [
(r'/', AuthHandler,
{'authdb':AUTHDB_PATH,
'fernet_secret':FERNETSECRET,
'executor':executor}),
]
if DEBUG:
# put in the echo handler for debugging
handlers.append(
(r'/echo', EchoHandler,
{'authdb':AUTHDB_PATH,
'fernet_secret':FERNETSECRET,
'executor':executor})
)
########################
## APPLICATION SET UP ##
########################
app = tornado.web.Application(
debug=DEBUG,
autoreload=False, # this sometimes breaks Executors so disable it
)
# try to guard against the DNS rebinding attack
# http://www.tornadoweb.org/en/stable/guide/security.html#dns-rebinding
app.add_handlers(r'(localhost|127\.0\.0\.1)',
handlers)
# start up the HTTP server and our application
http_server = tornado.httpserver.HTTPServer(app)
######################################################
## CLEAR THE CACHE AND REAP OLD SESSIONS ON STARTUP ##
######################################################
removed_items = cache.cache_flush(
cache_dirname=options.cachedir
)
LOGGER.info('removed %s stale items from authdb cache' % removed_items)
session_killer = partial(actions.auth_kill_old_sessions,
session_expiry_days=options.sessionexpiry,
override_authdb_path=AUTHDB_PATH)
# run once at start up
session_killer()
######################
## start the server ##
######################
# register the signal callbacks
signal.signal(signal.SIGINT, recv_sigint)
signal.signal(signal.SIGTERM, recv_sigint)
# make sure the port we're going to listen on is ok
# inspired by how Jupyter notebook does this
portok = False
serverport = options.port
maxtries = 10
thistry = 0
while not portok and thistry < maxtries:
try:
http_server.listen(serverport, options.serve)
portok = True
except socket.error as e:
LOGGER.warning('%s:%s is already in use, trying port %s' %
(options.serve, serverport, serverport + 1))
serverport = serverport + 1
thistry = thistry + 1
if not portok:
LOGGER.error('Could not find a free port after %s tries, giving up' %
maxtries)
sys.exit(1)
LOGGER.info('Started authnzerver. listening on http://%s:%s' %
(options.serve, serverport))
LOGGER.info('Background worker processes: %s. IOLoop in use: %s' %
(MAXWORKERS, IOLOOP_SPEC))
LOGGER.info('Base directory is: %s' % os.path.abspath(options.basedir))
# start the IOLoop and begin serving requests
try:
loop = tornado.ioloop.IOLoop.current()
# add our periodic callback for the session-killer
# runs daily
periodic_session_kill = tornado.ioloop.PeriodicCallback(
session_killer,
86400000.0,
jitter=0.1,
)
periodic_session_kill.start()
# start the IOLoop
loop.start()
except KeyboardInterrupt:
LOGGER.info('Received Ctrl-C: shutting down...')
# close down the processpool
executor.shutdown()
time.sleep(2)
tornado.ioloop.IOLoop.instance().stop()
currproc = mp.current_process()
if getattr(currproc, 'table_meta', None):
del currproc.table_meta
if getattr(currproc, 'connection', None):
currproc.connection.close()
del currproc.connection
if getattr(currproc, 'engine', None):
currproc.engine.dispose()
del currproc.engine
print('Shutting down database engine in process: %s' % currproc.name,
file=sys.stdout)
# run the server
if __name__ == '__main__':
main()
|
from .elements.element_base import ElementBase
from .elements.list import List
from .elements.value_element_base import ValueElementBase
from .elements.missing.missing_element_base import MissingElementBase
from .elements.missing.missing_list import MissingList
from .elements.missing.missing_value_element_base import MissingValueElementBase
def _register(name: str, function):
if name == 'string':
raise ValueError("You cannot register 'string' as a type/loader with enolib as this conflicts with the native string type accessors.")
setattr(ElementBase, f"{name}_key", lambda self: self.key(function))
setattr(ElementBase, f"optional_{name}_comment", lambda self: self.optional_comment(function))
setattr(ElementBase, f"required_{name}_comment", lambda self: self.required_comment(function))
setattr(ValueElementBase, f"optional_{name}_value", lambda self: self.optional_value(function))
setattr(ValueElementBase, f"required_{name}_value", lambda self: self.required_value(function))
setattr(List, f"optional_{name}_values", lambda self: self.optional_values(function))
setattr(List, f"required_{name}_values", lambda self: self.required_values(function))
setattr(MissingElementBase, f"{name}_key", MissingElementBase.string_key)
setattr(MissingElementBase, f"optional_{name}_comment", MissingElementBase.optional_string_comment)
setattr(MissingElementBase, f"required_{name}_comment", MissingElementBase.required_string_comment)
setattr(MissingValueElementBase, f"optional_{name}_value", MissingValueElementBase.optional_string_value)
setattr(MissingValueElementBase, f"required_{name}_value", MissingValueElementBase.required_string_value)
setattr(MissingList, f"optional_{name}_values", MissingList.optional_string_values)
setattr(MissingList, f"required_{name}_values", MissingList.required_string_values)
# TODO: Specs for different register signatures (see below)
def register(*definitions_list, **definitions_dict):
for definition in definitions_list:
if callable(definition):
_register(definition.__name__, definition)
else:
for name, function in definition.items():
_register(name, function)
for name, function in definitions_dict.items():
_register(name, function)
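# Hedged usage sketch (not part of the original module): registering a 'color'
# loader adds accessors such as optional_color_value()/required_color_value()
# to the element classes patched by _register(). The parse/field calls in the
# trailing comment assume the usual enolib document API.
def _example_register_color():
    import re

    def color(value):
        if not re.match(r'#[0-9a-fA-F]{6}$', value):
            raise ValueError('A color is required, for instance #B6D918.')
        return value

    register(color)
    # e.g. enolib.parse('fill: #42bfa9').field('fill').required_color_value()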
|
import pickle as pkl
plotname = 'DentateGyrus'
from scvi.dataset.MouseBrain import DentateGyrus10X, DentateGyrusC1
from scvi.dataset.dataset import GeneExpressionDataset
dataset1= DentateGyrus10X()
dataset1.subsample_genes(dataset1.nb_genes)
dataset2 = DentateGyrusC1()
dataset2.subsample_genes(dataset2.nb_genes)
gene_dataset = GeneExpressionDataset.concat_datasets(dataset1,dataset2)
from scvi.dataset.dataset import SubsetGenes
dataset1, dataset2, gene_dataset = SubsetGenes(dataset1, dataset2, gene_dataset, plotname)
# NOTE: this construction is repeated below once MAGAN and correspondence_loss are
# defined; calling it here would fail, so it is kept only as a commented reminder:
# magan = MAGAN(dim_b1=10, dim_b2=10, correspondence_loss=correspondence_loss)
from scvi.harmonization.clustering.Magan import Magan
from utils import now
from model import MAGAN
from loader import Loader
import tensorflow as tf
from sklearn.decomposition import PCA
magan = Magan()
out1, out2 = magan.fit_transform(gene_dataset.X, gene_dataset.batch_indices.ravel(), [0, 1])
import numpy as np
batch = gene_dataset.batch_indices.ravel()
index_0 = np.where(batch == 0)[0]
index_1 = np.where(batch == 1)[0]
X = gene_dataset.X
X1 = np.log(1 + X[index_0])
X2 = np.log(1 + X[index_1])
loadb1 = Loader(X1, shuffle=True)
loadb2 = Loader(X2, shuffle=True)
# Build the tf graph
def correspondence_loss(b1, b2):
"""
The correspondence loss.
:param b1: a tensor representing the object in the graph of the current minibatch from domain one
:param b2: a tensor representing the object in the graph of the current minibatch from domain two
:returns a scalar tensor of the correspondence loss
"""
domain1cols = [0]
domain2cols = [0]
loss = tf.constant(0.)
for c1, c2 in zip(domain1cols, domain2cols):
loss += tf.reduce_mean((b1[:, c1] - b2[:, c2])**2)
return loss
magan = MAGAN(dim_b1=X1.shape[1], dim_b2=X2.shape[1], correspondence_loss=correspondence_loss)
# Train
# Assumed training hyperparameters (the original snippet referenced
# self.n_epochs / self.batch_size from a surrounding class that is not shown here)
n_epochs = 500
batch_size = 128
for i in range(1, n_epochs):
if i % 100 == 0: print("Iter {} ({})".format(i, now()))
xb1_ = loadb1.next_batch(batch_size)
xb2_ = loadb2.next_batch(batch_size)
magan.train(xb1_, xb2_)
latent = PCA(n_components=10).fit_transform(out1)
# np.save('../' + filename + '/' + 'MNN' + '.npy', latent) |
import requests
import random
def handle(st):
r = requests.get("http://api.open-notify.org/astros.json")
result = r.json()
index = random.randint(0, len(result["people"])-1)
name = result["people"][index]["name"]
print (name + " is in space")
|
# from django.forms import ModelForm
from .models import ClockIn, SetUp
from django import forms
# from django_bootstrap5.widgets import RadioSelectButtonGroup
from django.forms import ModelForm
class ClockInForm(forms.Form):
def __init__(self, user, *args, **kwargs):
# Dynamic choices
self._user = user
super(ClockInForm, self).__init__(*args, **kwargs)
self.fields['setup_id'] = forms.ChoiceField(
label="选择打卡",
widget=forms.RadioSelect,
# choices=(("1", "2"), ("2", "4"),),
choices=[(su.id, su.name)
for su in SetUp.objects.filter(user_id=user.id)],
initial=1,
)
# for visible in self.visible_fields():
# visible.field.widget.attrs['class'] = 'form-control'
setup_id = forms.ChoiceField(label="选择打卡类型")
image_0 = forms.ImageField(label="图片")
def save(self):
user_id = self._user.id
setup_id = self.cleaned_data['setup_id']
image_0 = self.cleaned_data['image_0']
clock_in = ClockIn(
user_id=user_id,
setup_id=setup_id,
image_0=image_0,
)
clock_in.save()
class SetUpForm(ModelForm):
class Meta:
model = SetUp
fields = ['name', 'times']
def save(self, user_id):
# name = self.cleaned_data["name"]
# times = self.cleaned_data["times"]
data = {
"name": self.cleaned_data["name"],
"times": self.cleaned_data["times"],
"user_id": user_id
}
setup = SetUp.objects.create(**data)
setup.save()
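# Hedged usage sketch (not part of the original module): how a view might bind
# and persist the clock-in form; `request` is assumed to be an authenticated
# Django request carrying POST data and an uploaded image.
def _example_clock_in_view(request):
    form = ClockInForm(request.user, request.POST, request.FILES)
    if form.is_valid():
        form.save()
    return form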
|
import os
import sys
import argparse
from azureml.core.workspace import Workspace
from azureml.core import Experiment, Datastore, RunConfiguration
from azureml.train.estimator import Estimator
parser = argparse.ArgumentParser()
parser.add_argument('experiment', help='Azure ML experiment name')
parser.add_argument('--workspace-config', default="azureml_config.json", help='Download from the Azure ML portal')
parser.add_argument('--compute', default="nc6v3", help='Azure ML training cluster')
parser.add_argument('--max_epochs', type=int, default=300)
args = parser.parse_args()
print(args)
# load workspace configuration from the config.json file
ws = Workspace.from_config(path=args.workspace_config)
print('=' * 40)
print(ws)
# create an experiment
exp = Experiment(workspace=ws, name=args.experiment)
print('=' * 40)
print(exp)
# specify a cluster
compute_target = ws.compute_targets[args.compute]
print('=' * 40)
print(compute_target)
# Mount the blob to the training container
# NOTE: (prerequisite) unzip and upload the ePillID data to the blob
data_ds = Datastore.get(ws, datastore_name='data')
rc = RunConfiguration()
rc.environment.docker.enabled = True
# Using an image from https://hub.docker.com/r/naotous/pytorch-image
# TODO: clean up the Dockerfile
rc.environment.docker.base_image = "naotous/pytorch-image:py36torch041-legacy"
rc.environment.docker.gpu_support = True
# don't let the system build a new conda environment
rc.environment.python.user_managed_dependencies = True
# point to an existing python environment in the Docker image
rc.environment.python.interpreter_path = '/app/miniconda/envs/py36/bin/python'
# flag for the user input
SUBMIT_ALL = False
def submit(script_params):
global SUBMIT_ALL
input_data = None
# the files in source_directory will be uploaded to the cluster
est = Estimator(source_directory='src',
script_params=script_params,
compute_target=compute_target,
entry_script='train_cv.py',
environment_definition=rc.environment)
print('script_params', script_params)
while not SUBMIT_ALL:
input_data = input("Submit? [Y/y/n/s]")
if input_data in ['Y', 'y', 'n', 's']:
break
if input_data == 'Y':
SUBMIT_ALL = True
if SUBMIT_ALL or input_data == 'y':
run = exp.submit(est)
print('=' * 40)
print(run)
print('Monitor the training progress on the portal: URL=', run.get_portal_url())
elif input_data == 'n':
print("aborting!")
sys.exit(0)
else:
print("skip!")
# define the entry script
base_script_params = {
'--data_root_dir': data_ds.path('ePillID_data').as_mount(),
'--max_epochs': args.max_epochs
}
loss_params = [
{'--contrastive_w': 0.0, '--triplet_w': 0.0, '--arcface_w': 0.0, '--ce_w': 1.0, '--focal_w': 0.0, '--dropout': 0.5}, # plain classification (logits)
{'--contrastive_w': 1.0, '--triplet_w': 1.0, '--arcface_w': 0.1, '--ce_w': 1.0, '--focal_w': 0.0}, # multihead metric learning
]
networks = [
#{'--appearance_network': 'resnet18'},
# {'--appearance_network': 'resnet34'},
#{'--appearance_network': 'resnet50', '--train_with_side_labels': '0', '--metric_simul_sidepairs_eval': '0'},
#{'--appearance_network': 'resnet50', '--train_with_side_labels': '0', '--metric_simul_sidepairs_eval': '1'},
#{'--appearance_network': 'resnet50', '--train_with_side_labels': '1', '--metric_simul_sidepairs_eval': '0'},
#{'--appearance_network': 'resnet50', '--train_with_side_labels': '1', '--metric_simul_sidepairs_eval': '1'},
{'--appearance_network': 'resnet50'},
{'--appearance_network': 'resnet50', '--pooling': 'CBP'},
{'--appearance_network': 'resnet50', '--pooling': 'BCNN'},
# {'--appearance_network': 'resnet101'},
# {'--appearance_network': 'resnet152'},
# {'--appearance_network': 'densenet121'},
# {'--appearance_network': 'densenet161'},
# {'--appearance_network': 'densenet201'},
]
for l in loss_params:
for n in networks:
submit({**l, **n, **base_script_params})
|
#!/usr/bin/env python
import sys
import os
import re
import numpy
from PIL import Image
import StringIO
# you need to install this library yourself
# recent versions handle bigtiff too...
import tifffile
"""
Extract a pyramidal TIFF with JPEG tiled storage into a tree of
separate JPEG files as expected by Zoomify.
usage: extract.py pyramid-file dest-dir
The pyramid-file must be a multi-page TIFF with each page having an
image scaled by 1/2 from the previous page. All pages must be tiled
with the same tile size, and tiles must be stored using the new-style
JPEG compression format, i.e. TIFF compression == 7.
The lowest resolution page must have 4 or fewer tiles. If it has
more than 1, this script will leave space for the user to manually
generate the final lowest zoom tile 0-0-0.jpg that is 1/2 scaled
version of the image represented by that last page.
"""
tiff_files = []
tiff_outpages = []
tiff_tifffile = []
tiff_infile = []
tiff_maxval = []
redColors = ['Rhodamine', 'RFP', 'Alexa Fluor 555', 'Alexa Fluor 594', 'tdTomato', 'Alexa Fluor 633', 'Alexa Fluor 647']
greenColors = ['FITC', 'Alexa 488', 'EGFP', 'Alexa Fluor 488']
blueColors = ['DAPI']
tiff_colors = [redColors, greenColors, blueColors]
def checkFileColors(files):
for file in files:
colorMatched = None
for colors in tiff_colors:
for color in colors:
if re.match('.*[-]%s[-]Z1[.]tif' % color, file):
colorMatched = True
break
if colorMatched:
break
if not colorMatched:
sys.stderr.write('Unknown color for file "%s" \n' % file)
sys.exit(1)
def colorFile(files, colors, pattern):
tifFiles = []
for color in colors:
colorFiles = [ f for f in files if re.match('.*[-]%s%s' % (color, pattern), f) ]
if len(colorFiles) == 1:
tifFiles.append(colorFiles[0])
sys.stdout.write('colorFile %s\n'%colorFiles[0]);
if len(tifFiles) > 0:
return tifFiles
else:
return None
def getTiffFiles(dname):
global tiff_files
files = os.listdir(dname)
z1 = [f for f in files if re.match('.*[-]Z1[.]tif', f)]
if len(z1) > 0:
checkFileColors(z1)
stacks = len(files) / len(z1)
stackNo = stacks / 2
if stackNo * 2 < stacks:
stackNo += 1
stackPattern = '[-]Z%d[.]tif' % stackNo
else:
stackPattern = '[.]tif'
for colors in tiff_colors:
colorFiles = colorFile(files, colors, stackPattern)
if colorFiles:
for file in colorFiles:
tiff_files.append('%s%s%s' % (dname, os.sep, file))
sys.stdout.write('adding, %s\n'%file);
if len(tiff_files) == 0:
tiff_files = [ '%s%s%s' % (dname, os.sep, f) for f in files if re.match('.*%s' % stackPattern, f) ]
try:
dname = sys.argv[1]
outdir = sys.argv[2]
if not os.path.exists(dname) or not os.path.isdir(dname):
sys.stderr.write('Pyramid directory must be given and exist')
sys.exit(1)
getTiffFiles(dname)
if not os.path.exists(outdir):
os.makedirs(outdir)
except SystemExit:
raise
except:
sys.stderr.write('\nusage: extract_rgb.py pyramid-directory dest-dir\n\n')
raise
if len(tiff_files) == 0:
print 'Nothing to do'
sys.exit()
dir_template = '%(outdir)s/TileGroup%(groupno)d'
tile_template = dir_template + '/%(zoomno)d-%(tcolno)d-%(trowno)d.jpg'
for file in range(0, len(tiff_files)):
tiff = tifffile.TiffFile(tiff_files[file])
tiff_tifffile.append(tiff)
pages = list(tiff)
pages.reverse()
outpages = [ page for page in pages if hasattr(page.tags, 'tile_offsets') ]
if type(outpages[0].tags.tile_offsets.value) is int:
outpages[0].tags.tile_offsets.value=[outpages[0].tags.tile_offsets.value]
outpages[0].tags.tile_byte_counts.value=[outpages[0].tags.tile_byte_counts.value]
tiff_outpages.append(outpages)
infile = open(tiff_files[file], 'rb')
tiff_infile.append(infile)
tile_group = 0
tiles_per_group = 256
# skip pages that aren't tiled... thumbnails?!
outpages = tiff_outpages[0]
if hasattr(outpages[0].tags, 'tile_offsets') and len(outpages[0].tags.tile_offsets.value) > 1:
# first input zoom level is multi-tile
assert len(outpages[0].tags.tile_offsets.value) <= 4
# so leave space for tile 0-0-0
zoomno = 1
group_file_count = 1
total_tiles = 1
need_to_build_0 = True
else:
# input includes first zoom level already
zoomno = 0
group_file_count = 0
total_tiles = 0
need_to_build_0 = False
# remember values for debugging sanity checks
prev_page = None
tile_width = None
tile_length = None
def jpeg_assemble(jpeg_tables_bytes, jpeg_bytes):
# start-image + tables + rest of image to end-image
return jpeg_bytes[0:2] + jpeg_tables_bytes + jpeg_bytes[2:]
def load_tile(infile, tile_offset, tile_length):
infile.seek(tile_offset)
return infile.read(tile_length)
def write_tile(tileno, trow, tcol, rgb_image):
"""Output one tile. Note this manages global state for tile grouping in subdirs."""
global group_file_count
global zoomno
global tile_group
global total_tiles
if group_file_count >= tiles_per_group:
# last group is full already
tile_group += 1
group_file_count = 0
group_file_count += 1
total_tiles += 1
dirname = dir_template % dict(
outdir = outdir,
groupno = tile_group
)
if not os.path.exists(dirname):
# create tile group dir on demand
os.makedirs(dirname, mode=0755)
outname = tile_template % dict(
outdir = outdir,
groupno = tile_group,
zoomno = zoomno,
tcolno = tcol,
trowno = trow
)
rgb_image.save(outname, 'JPEG')
outinfo = []
def get_page_info(page):
pxsize = page.tags.image_width.value
pysize = page.tags.image_length.value
# get common JPEG tables to insert into all tiles
if hasattr(page.tags, 'jpeg_tables'):
# trim off start-image/end-image byte markers at prefix and suffix
jpeg_tables_bytes = bytes(bytearray(page.tags.jpeg_tables.value))[2:-2]
else:
# no common tables to insert?
jpeg_tables_bytes = bytes(bytearray([]))
# this page has multiple JPEG tiles
txsize = page.tags.tile_width.value
tysize = page.tags.tile_length.value
tcols = pxsize / txsize + (pxsize % txsize > 0)
trows = pysize / tysize + (pysize % tysize > 0)
return pxsize, pysize, txsize, tysize, tcols, trows, jpeg_tables_bytes
def getTile(page, infile, jpeg_tables_bytes, tileno):
jpeg = jpeg_assemble(jpeg_tables_bytes, load_tile(infile, page.tags.tile_offsets.value[tileno], page.tags.tile_byte_counts.value[tileno]))
outfile = StringIO.StringIO()
outfile.write( jpeg )
outfile.seek(0)
image = Image.open(outfile)
ret = numpy.asarray(image)
outfile.close()
return ret
def maxTile(page, infile):
pxsize, pysize, txsize, tysize, tcols, trows, jpeg_tables_bytes = get_page_info(page)
maxval = 0
for tileno in range(0, len(page.tags.tile_offsets.value)):
tile = getTile(page, infile, jpeg_tables_bytes, tileno)
maxval = max(maxval, tile.max())
return maxval
for channelno in range(0, len(tiff_outpages)):
tiff_maxval.append([])
for pageno in range(0, len(tiff_outpages[0])):
tiff_maxval[channelno].append(max(0, maxTile(tiff_outpages[channelno][pageno], tiff_infile[channelno])))
for pageno in range(0, len(tiff_outpages[0])):
page = tiff_outpages[0][pageno]
# panic if these change from reverse-engineered samples
assert page.tags.fill_order.value == 1
assert page.tags.orientation.value == 1
assert page.tags.compression.value == 7 # new-style JPEG
if prev_page is not None:
assert prev_page.tags.image_width.value == (page.tags.image_width.value / 2)
assert prev_page.tags.image_length.value == (page.tags.image_length.value / 2)
tiff_page_info = []
for channelno in range(0, len(tiff_outpages)):
tiff_page_info.append(tiff_outpages[channelno][pageno])
for tileno in range(0, len(page.tags.tile_offsets.value)):
tile_array = []
for channelno in range(0, len(tiff_outpages)):
tiffPage = tiff_outpages[channelno][pageno]
pxsize, pysize, txsize, tysize, tcols, trows, jpeg_tables_bytes = get_page_info(tiffPage)
# figure position of tile within tile array
trow = tileno / tcols
tcol = tileno % tcols
assert trow >= 0 and trow < trows
assert tcol >= 0 and tcol < tcols
if tile_width is not None:
assert tile_width == txsize
assert tile_length == tysize
else:
tile_width = txsize
tile_length = tysize
tile = getTile(tiffPage, tiff_infile[channelno], jpeg_tables_bytes, tileno)
tile_norm = (255 * (tile.astype('float') / tiff_maxval[channelno][pageno])).astype('uint8')
tile_array.append(tile_norm)
rgb_array = numpy.dstack( tuple(tile_array) )
rgb_image = Image.fromarray(rgb_array)
write_tile(tileno, trow, tcol, rgb_image)
# each page is next higher zoom level
zoomno += 1
prev_page = page
outinfo.append(
dict(
tile_width= txsize,
tile_length= tysize,
image_width_orig= pxsize,
image_length_orig= pysize,
image_width_padded= tcols * txsize,
image_length_padded= trows * tysize,
total_tile_count= total_tiles
)
)
for infile in tiff_infile:
infile.close()
if need_to_build_0:
# tier 0 was missing from input image, so built it from tier 1 data
page = outpages[0]
pxsize, pysize, txsize, tysize, tcols, trows, jpeg_tables_bytes = get_page_info(page)
tier1 = None
for tileno in range(0, len(page.tags.tile_offsets.value)):
trow = tileno / tcols
tcol = tileno % tcols
image = Image.open(tile_template % dict(zoomno=1, tcolno=tcol, trowno=trow, outdir=outdir, groupno=0))
if tier1 is None:
# lazily create with proper pixel data type
tier1 = Image.new(image.mode, (tcols * txsize, trows * tysize))
# accumulate tile into tier1 image
tier1.paste(image, (tcol * txsize, trow * tysize))
# generate reduced resolution tier and crop to real page size
tier0 = tier1.resize( (txsize * tcols / 2, tysize * trows / 2), Image.ANTIALIAS ).crop((0, 0, pxsize / 2, pysize / 2))
assert tier0.size[0] <= txsize
assert tier0.size[1] <= tysize
# write final tile
tier0.save(tile_template % dict(zoomno=0, tcolno=0, trowno=0, outdir=outdir, groupno=0), 'JPEG')
else:
# tier 0 must be cropped down to the page size...
page = outpages[0]
pxsize, pysize, txsize, tysize, tcols, trows, jpeg_tables_bytes = get_page_info(page)
image = Image.open(tile_template % dict(zoomno=0, tcolno=0, trowno=0, outdir=outdir, groupno=0))
image = image.crop((0,0, pxsize,pysize))
image.save(tile_template % dict(zoomno=0, tcolno=0, trowno=0, outdir=outdir, groupno=0), 'JPEG')
zoomify_descriptor = """
<IMAGE_PROPERTIES width="%(image_width_padded)d" height="%(image_length_padded)d" numTiles="%(total_tile_count)d" numImages="1" version="1.8" tileSize="%(tile_width)d" />
""" % outinfo[-1]
f = open('%s/ImageProperties.xml' % outdir, 'w')
f.write(zoomify_descriptor)
f.close()
|
from django.contrib import admin
from . models import Videos, Comment
# Register your models here.
admin.site.register(Videos)
admin.site.register(Comment)
|
# myproject/apps/core/admin.py
from django.conf import settings
def get_multilingual_field_names(field_name):
lang_code_underscored = settings.LANGUAGE_CODE.replace("-",
"_")
field_names = [f"{field_name}_{lang_code_underscored}"]
for lang_code, lang_name in settings.LANGUAGES:
if lang_code != settings.LANGUAGE_CODE:
lang_code_underscored = lang_code.replace("-", "_")
field_names.append(
f"{field_name}_{lang_code_underscored}"
)
return field_names
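# Hedged usage sketch (illustration only): with LANGUAGE_CODE = "en" and
# LANGUAGES = [("en", "English"), ("de", "German")], the helper expands a base
# field name into its per-language columns, which can then be spliced into
# ModelAdmin definitions:
#
#     get_multilingual_field_names("title")  # -> ["title_en", "title_de"]
#     # fieldsets = [(None, {"fields": get_multilingual_field_names("title")})]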
|
# -*- coding: utf-8 -*-
# Python Libraries
import MySQLdb
import json
import io
import datetime
import re
# Importing other code
from classes import *
def saveJson(json_name, objects_list, encoder_class):
    with io.open(json_name + '.json', 'w', encoding='utf-8') as jsonTemp:
        # Pass only the object to json.dumps -- the file handle was previously being passed as the
        # "skipkeys" positional argument by mistake -- and let the "with" block close the file.
        jsonTemp.write(unicode(json.dumps(objects_list, cls=encoder_class, indent=2)))
'''
Get users and data from database
'''
print "\nGet data from database\n"
# Set the proper params
database_host = ""
database_user = ""
database_passwd = ""
database_name = ""
db = MySQLdb.connect(host=database_host,user=database_user,passwd=database_passwd, db=database_name)
users_list = []
groups_list = []
membership_list = []
friendship_list = []
cur = db.cursor()
# SQL Query
cur.execute("""SELECT PrincipalID, FirstName, LastName, Email from UserAccounts """)
for row in cur.fetchall():
p = User(user_id=row[0], user_name=row[1].replace("*pending* ", ""), user_surname=row[2], email=row[3])
users_list.append(p)
saveJson("users", users_list, UserEncoder)
'''
Other queries that could be used
- These queries produce json files with the info.
- You can format these data to ARFF using utilities available in this code
# Interesting fields in the "UserAccounts" table
# PrincipalID
# FirstName
# LastName
# Email
# UserLevel -> superadmins have a user level of 100. Common users have a user level of 0
# UserTitle -> the label for an user, not all have one.
# "friends" Table
# PrincipalID
# Friend
# Flags ?
# Offered ?
# The "griduser" table keeps the users' positions within the virtual world
# "osgroup" table
# GroupID
# Name (of the group)
# OwnerRoleID
# "osgroupmembership" Table
# GroupID
# AgentID
# Get groups
cur.execute("""SELECT GroupID, Name, OwnerRoleID from osgroup """)
for row in cur.fetchall():
print row[0]
print row[1]
print row[2]
g = Group(group_id=row[0], group_name=row[1], group_owner=row[2])
groups_list.append(g)
with io.open('path/groups.json', 'w', encoding='utf-8') as groups_json:
groups_json.write(unicode(json.dumps(groups_list, groups_json, cls=GroupEncoder, indent=2)))
groups_json.close()
# Get members of groups
cur.execute("""SELECT GroupID, AgentID from osgroupmembership """)
for row in cur.fetchall():
print row[0]
print row[1]
g = Member(group_id=row[0], user_id=row[1], )
membership_list.append(g)
with io.open('path/groupsMembers.json', 'w', encoding='utf-8') as members_json:
members_json.write(unicode(json.dumps(membership_list, members_json, cls=MembersEncoder, indent=2)))
members_json.close()
# Get friends
cur.execute("""SELECT PrincipalID, Friend from friends """)
for row in cur.fetchall():
print row[0]
print row[1]
g = Friend(user_id1=row[0], user_id2=row[1], )
friendship_list.append(g)
with io.open('path/friends.json', 'w', encoding='utf-8') as json_friends:
json_friends.write(unicode(json.dumps(friendship_list, json_friends, cls=FriendsEncoder, indent=2)))
json_friends.close()
'''
'''
Get sessions from Opensim log
'''
session_list = []
date_temp = []
hour_temp = []
h2_temp = []
begin_session = 0
fin_session = 0
print "\nReal Sessions\n"
# path to Opensim Robust.log
log_path = ""
f = open(log_path)
line = f.readline()
for line in f:
if line.find("OpenSim.Services.PresenceService.PresenceService [PRESENCE SERVICE]: LoginAgent") >= 0:
begin_session = begin_session+1
session_init = re.search("(.+).LoginAgent (.+).with session (.+).and ssession (.+)", line)
if session_init is not None:
date_beg_session = re.split(" ", session_init.group(1))
date_temp = date_beg_session[0].split("-")
hour_temp = date_beg_session[1].split(":")
h2_temp=hour_temp[2].split(",",1)
p = Session(session_id=str(session_init.group(3)), user_id=str(session_init.group(2)),
date_init=str(date_beg_session[0]), year_init=str(date_temp[0]),
month_init=str(date_temp[1]), day_init=str(date_temp[2]),
hour_init=str(date_beg_session[1]), hours_init=str(hour_temp[0]),
minutes_init=str(hour_temp[1]), seconds_init=str(h2_temp[0]), date_fin=0,
year_fin=0, month_fin=0, day_fin=0, hour_fin=0, hours_fin=0,
minutes_fin=0, seconds_fin=0, session_t=0)
session_list.append(p)
elif line.find("OpenSim.Services.PresenceService.PresenceService [PRESENCE SERVICE]: Session") >= 0:
fin_session = fin_session+1
endSession = re.search("(.+).Session (.+).logout", line)
if endSession is not None:
date_e_session = re.split(" ", endSession.group(1))
for x in session_list:
if x.session_id == endSession.group(2):
x.date_fin = str(date_e_session[0])
x.hour_fin = str(date_e_session[1])
date_temp = date_e_session[0].split("-")
x.year_fin = str(date_temp[0])
x.month_fin = str(date_temp[1])
x.day_fin = str(date_temp[2])
hour_temp = x.hour_fin.split(":")
h2_temp = hour_temp[2].split(",",1)
x.hours_fin = str(hour_temp[0])
x.minutes_fin = str(hour_temp[1])
x.seconds_fin = str(h2_temp[0])
d1 = datetime.datetime.strptime(x.date_init+" "+x.hours_init+":"+x.minutes_init
+ ":"+x.seconds_init, "%Y-%m-%d %H:%M:%S")
d2 = datetime.datetime.strptime(x.date_fin+" "+x.hours_fin+":"+x.minutes_fin
+ ":"+x.seconds_fin, "%Y-%m-%d %H:%M:%S")
result = d2 - d1
x.session_t = str(result.total_seconds())
time_total_sec = 0
time_total_hours = 0.0
time_average_min = 0.0
for x in session_list:
time_total_sec += float(x.session_t)
tMedioSeg = time_total_sec/len(session_list)
time_total_hours = time_total_sec/3600
time_average_min = tMedioSeg/60
print "With real users"
print "How many sessions started %d, complete sessions: %d" % (begin_session, fin_session)
print "Users have employed %d seconds or %.2f hours in the world" % (time_total_sec, time_total_hours)
print "Average time of %d seconds or %.2f minutes per user" % (tMedioSeg, time_average_min)
saveJson("sessions", session_list, SessionEncoder)
print "\n\nReal sessions for Weka\n\n"
arff = open ("sessions.arff", "w")
arff.write("% Title: Info about sessions\n% Creator: Juan Cruz-Benito\n% Date: June, 2013\n\n")
arff.write("@relation 'Sessions'\n")
arff.write("\n@attribute CLASE {Login, Logout}\n")
arff.write("@attribute user_id string\n@attribute fecha DATE yyyy-MM-dd HH:mm:ss\n")
arff.write("\n\n@data\n")
for x in session_list:
if x.date_fin != 0:
arff.write("Login,"+str(x.user_id)+","+str(x.date_init)+" "+str(x.hours_init)+":"
+str(x.minutes_init)+":"+str(x.seconds_init)+"\n")
arff.write("Logout,"+str(x.user_id)+","+str(x.date_fin)+" "+str(x.hours_fin)+":"
+str(x.minutes_fin)+":"+str(x.seconds_fin)+"\n")
arff.close()
'''
Get movements from Virtual World's log
'''
teleports_requests_counter = 0
teleports_incomplete_counter = 0
teleports_complete_counter = 0
teleports_list = []
out_terrain_counter = 0
close_terrain_counter = 0
close_connection_counter = 0
arrival_terrain_counter = 0
print "\nTeleports Reales\n"
# path to Opensim Opensim.log
log_path = ""
f = open(log_path)
line = f.readline()
for line in f:
if line.find("Request Teleport to") >= 0:
# Example
# 2012-07-05 09:43:34,697 DEBUG - OpenSim.Region.CoreModules.Framework.EntityTransfer.EntityTransferModule
# [ENTITY TRANSFER MODULE]: Request Teleport to http://212.128.146.39:1935/ (http://212.128.146.39:1935/)
# USALPHARMA/<128, 128, 1.5>
teleport_request = re.search("(.+).Request Teleport to http://(.+)/ \(http://(.+)/\) (.+)/.", line)
if teleport_request is not None:
teleport_date = re.split(" ", teleport_request.group(1))
teleport_region_dest_o = teleport_request.group(4)
teleports_requests_counter = teleports_requests_counter+1
for line in f:
if line.find("Closing child agents. Checking") >= 0:
# Example
# 2012-07-05 09:35:02,498 DEBUG - OpenSim.Region.Framework.Scenes.ScenePresence [SCENE PRESENCE]:
# Closing child agents. Checking 1 regions in USAL SIPPE
teleport_o = re.search("(.+).Closing child agents. Checking (.+) regions in (.+)", line)
if teleport_o is not None:
teleport_date = re.split(" ", teleport_request.group(1))
source_region_teleport = teleport_o.group(3)
horaTemp = teleport_date[1].split(",",1)
hour_initTemp = horaTemp[0]
p = Teleport(user_name="", date_init=str(teleport_date[0]), hour_init=str(hour_initTemp),
teleport_source=str(source_region_teleport.replace(" ", "_")), teleport_dest="")
teleports_list.append(p)
posLisAct = len(teleports_list)
out_terrain_counter = out_terrain_counter+1
elif line.find("Upgrading child to root agent") >= 0:
# Example
# 2012-07-05 09:35:04,490 DEBUG - OpenSim.Region.Framework.Scenes.ScenePresence [SCENE]:
# Upgrading child to root agent for Admin SIPPE in USALBIO
teleport_d = re.search("Upgrading child to root agent for (.+) in (.+)", line)
if teleport_d is not None:
                        arrival_terrain_counter = arrival_terrain_counter+1
if teleport_d.group(2) == teleport_region_dest_o:
regionteleport_d = teleport_d.group(2)
teleports_complete_counter = teleports_complete_counter+1
teleports_list[posLisAct-1].teleport_dest = str(regionteleport_d.replace (" ", "_"))
teleports_list[posLisAct-1].user_name = str(teleport_d.group(1).replace (" ", "_"))
else:
teleports_incomplete_counter = teleports_incomplete_counter+1
break
elif line.find("Closing child agents. Checking") >= 0:
# Example
# 2012-07-05 09:35:02,498 DEBUG - OpenSim.Region.Framework.Scenes.ScenePresence [SCENE PRESENCE]: Closing
# child agents. Checking 1 regions in USAL SIPPE
teleport_source = re.search("(.+).Closing child agents. Checking (.+) regions in (.+)", line)
if teleport_source is not None:
out_terrain_counter = out_terrain_counter+1
elif line.find("Removing root agent") >= 0:
# Example
# 2012-12-03 14:49:16,846 DEBUG - OpenSim.Region.Framework.Scenes.Scene [SCENE]: Removing root agent
# Patricia Gonzalez f09f6a7e-2baf-4cb4-a9af-db3ca7714ad5 from USALBIO
terrain_close = re.search(".Removing root agent (.+) (.+)from (.+)", line)
if terrain_close is not None:
close_terrain_counter = close_terrain_counter+1
elif line.find("Removing child agent") >= 0:
# Example
# 2012-12-03 14:49:16,863 DEBUG - OpenSim.Region.Framework.Scenes.Scene [SCENE]: Removing child agent
# Patricia Gonzalez f09f6a7e-2baf-4cb4-a9af-db3ca7714ad5 from Animal Recovery Center
connection_close = re.search(".Removing child agent (.+) (.+)from (.+)", line)
if connection_close is not None:
close_connection_counter = close_connection_counter+1
elif line.find("Upgrading child to root agent") >= 0:
# Example
# 2012-07-05 09:35:04,490 DEBUG - OpenSim.Region.Framework.Scenes.ScenePresence [SCENE]: Upgrading child
# to root agent for Admin SIPPE in USALBIO
teleport_dest = re.search(".Upgrading child to root agent for (.+) in (.+)", line)
if teleport_dest is not None:
arrival_terrain_counter = arrival_terrain_counter+1
teleport_source_list = []
teleport_dest_list = []
for x in teleports_list:
if x.teleport_source not in teleport_source_list:
teleport_source_list.append(x.teleport_source)
if x.teleport_dest not in teleport_dest_list:
teleport_dest_list.append(x.teleport_dest)
teleport_source_list_str = str(teleport_source_list).replace("[", "")
teleport_source_list_str = teleport_source_list_str.replace("]", "")
teleport_dest_list_str = str(teleport_dest_list).replace("[", "")
teleport_dest_list_str = teleport_dest_list_str.replace("]", "")
print "\n\nWeka Teleports\n\n"
arff = open ("teleports.arff", "w")
arff.write("% Title: Info about teleports\n% Creator: Juan Cruz-Benito\n% Date: June, 2013\n\n")
arff.write("@relation 'Teleports'\n")
arff.write("\n@attribute CLASE {CompleteTeleport, IncompleteTeleport}\n")
arff.write("@attribute user_name string\n@attribute teleport_source {"+teleport_source_list_str+"}"
"\n@attribute teleport_dest {"+teleport_dest_list_str+"}\n@attribute date "
"DATE yyyy-MM-dd HH:mm:ss")
arff.write("\n\n@data\n")
for x in teleports_list:
if (x.user_name != "") or (x.teleport_dest != ""):
arff.write("CompleteTeleport," + str(x.user_name) + "," + str(x.teleport_source) + "," + str(x.teleport_dest)
+ "," + str(x.date_init) + " " + str(x.hour_init)+"\n")
elif x.user_name != "":
arff.write("IncompleteTeleport,?" + ","+str(x.teleport_source) + "," + str(x.teleport_dest)
+ "," + str(x.date_init) + " " + str(x.hour_init)+"\n")
elif x.teleport_dest != "":
arff.write("IncompleteTeleport," + str(x.user_name)+"," + str(x.teleport_source)
+ ",?," + str(x.date_init)+" " + str(x.hour_init)+"\n")
else:
arff.write("IncompleteTeleport,?" + "," + str(x.teleport_source) + ",?," + str(x.date_init)
+ " " + str(x.hour_init) + "\n")
arff.close()
print "Number of teleport requests %d" % teleports_requests_counter
print "Complete teleports: %d" % teleports_complete_counter
print "Incomplete teleports: %d" % teleports_incomplete_counter
print "Departures from terrain/island: %d" % out_terrain_counter
print "Clossing connections in a terrain: %d" % close_terrain_counter
print "Clossing connections: %d" % close_connection_counter
print "Arrivals to terrain/island: %d" % arrival_terrain_counter
saveJson("real_movements", teleports_list, TeleportEncoder)
|
import random
import numpy as np
from kerastuner.tuners import Hyperband
from sklearn.model_selection import RandomizedSearchCV
class Model:
def __init__(
self, X_train, X_test, y_train, y_test,
metric, metric_sign, cv
):
self.metric = metric
self.metric_sign = metric_sign
self.results = []
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
self.cv = cv
def param_tunning_method(self, estimator, desc, params=None, sqrt=False):
if params is None:
params = {}
if not bool(params):
if desc == "Neural Networks":
trained_model = self.__train_neural_networks(estimator)
else:
trained_model = self.__train_without_optimizer(estimator)
else:
trained_model = self.__randomized_search(estimator, params, sqrt)
y_pred = trained_model.predict(self.X_test)
if desc == "Neural Networks":
if estimator.get_output_units() == 2:
y_pred[y_pred > .5] = 1
y_pred[y_pred <= .5] = 0
else:
y_pred = np.argmax(y_pred, axis=1)
metric = self.metric(self.y_test, y_pred)
if hasattr(trained_model, "best_params_"):
print("The best params found: " + str(trained_model.best_params_))
print("[%s] Score: %f\n" % (desc, metric))
self.results.append(
{
"name": desc,
"model": trained_model,
"score": metric
}
)
def __randomized_search(self, estimator, params, sqrt=False):
n_space = np.prod([len(params[x]) for x in params.keys()])
        if sqrt:
            n_space = int(np.sqrt(n_space))  # RandomizedSearchCV expects an integer n_iter
try:
randomized = RandomizedSearchCV(
estimator=estimator,
param_distributions=params,
random_state=0,
cv=self.cv,
n_jobs=-1, # uses all available processors
n_iter=n_space
)
        except Exception:  # fall back to a single-process search if the parallel variant fails
randomized = RandomizedSearchCV(
estimator=estimator,
param_distributions=params,
random_state=0,
cv=self.cv,
n_iter=n_space
)
return randomized.fit(self.X_train, self.y_train)
def __train_without_optimizer(self, estimator):
return estimator.fit(self.X_train, self.y_train)
def __train_neural_networks(self, estimator):
if estimator.get_metric() == "mse":
tuner = Hyperband(
estimator,
max_epochs=20,
objective="val_mse",
executions_per_trial=1,
directory="regression_nn" + str(random.randint(0, 1000))
)
else:
tuner = Hyperband(
estimator,
max_epochs=20,
objective="val_accuracy",
executions_per_trial=1,
directory="classification_nn" + str(random.randint(0, 1000))
)
tuner.search(self.X_train, self.y_train, epochs=1, validation_split=0.1, verbose=0)
return tuner.get_best_models(num_models=1)[0]
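# Hypothetical usage sketch (the estimator, metric and data names below are assumptions,
# not part of this module):
#   from sklearn.ensemble import RandomForestClassifier
#   from sklearn.metrics import accuracy_score
#   m = Model(X_train, X_test, y_train, y_test, metric=accuracy_score, metric_sign=1, cv=5)
#   m.param_tunning_method(RandomForestClassifier(), "Random Forest",
#                          params={"n_estimators": [50, 100, 200], "max_depth": [None, 5, 10]})
#   best = max(m.results, key=lambda r: r["score"])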
|
"""
Spatial helper files
"""
__version__ = '0.2'
import math
import numpy as np
import pysal.lib.cg.sphere as sphere
def geointerpolate(point0, point1, proportion):
"""Interpolate on great sphere (world coords)
Arguments:
point0 {list} -- (lng,lat) of first point
        point1 {list} -- (lng,lat) of second point
        proportion {float} -- fraction of the way from point0 to point1 at which to interpolate
Returns:
        list -- the interpolated (lng,lat) point
"""
return sphere.geointerpolate(point0, point1, proportion)
def geo_distance_in_km(pt1, pt2):
"""
    Get the distance in km between two (lng,lat) points
Arguments:
pt1 {list} -- (lng,lat) of first point
pt2 {list} -- (lng,lat) of second point
Returns:
        float -- Distance between pt1 and pt2
"""
return sphere.harcdist(pt1, pt2)
def get_nearest_point(kdtree, the_pt, dist_thresh_in_km=0.005):
"""
    Get the nearest point to "the_pt" using the kd-tree index
Arguments:
        kdtree {KDTree} -- The KD-tree of all the points
the_pt {list} -- [x,y] of the point
Keyword Arguments:
dist_thresh_in_km {float} -- The maximum distance (in km) within
which the nearest points have to be searched (default: {0.005})
Returns:
        tuple -- (distance, point index) of the nearest point, or
        (None, None) if nothing lies within the distance threshold
"""
dist_in_km, ind = kdtree.query(the_pt, k=1)
if dist_in_km is not None and dist_in_km <= dist_thresh_in_km:
return dist_in_km, ind
return None, None
def add_graph_attr_to_entry_exit(entry_points, kd_points,
points_arr, rev_point_dict):
"""
Enhance the graph attributes for entry and exit points
Arguments:
entry_points {list} -- List of entry points
kd_points {KDTree} -- KDTree of points
points_arr {list} -- list of points
rev_point_dict {dictionary} -- A dictionary mapping points to ids
"""
for ept in entry_points:
_, ind = get_nearest_point(
kd_points, ept["approx_coordinate"], dist_thresh_in_km=0.005)
if ind is not None:
ept['node_id'] = rev_point_dict[points_arr[ind]]
def get_lng_lat_coord(origin_lnglat, xy_pt):
"""
    Get the (lng, lat) from a given euclidean point (x,y). The origin point is
given by origin_lnglat in (longitude, latitude) format. We assume (x,y)
is "x" kilometers along longitude from the origin_lnglat, and "y"
kilometers along the latitude.
NOTE: (x,y) is in km from origin (and NOT in meters)
Arguments:
origin_lnglat {list} -- The (longitude,latitude) of the origin point
xy_pt {tuple} -- The (x,y) point (see above for details)
Returns:
list -- The (longitude, latitude) of the (x,y) point
"""
ret_pt = list((0, 0))
(var_dx, var_dy) = xy_pt
ret_pt[1] = origin_lnglat[1] - (var_dy * 360.0 / 40000.0)
ret_pt[0] = origin_lnglat[0] - \
(var_dx * 360.0 /
(40000.0 * math.cos((origin_lnglat[1] + ret_pt[1]) * math.pi /
360.0)))
ret_pt = tuple(ret_pt)
return ret_pt
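# Rough sanity check (using the ~40000 km circumference assumed above): an offset of
# (0, +111) km from origin (0.0, 0.0) maps to roughly (0.0, -1.0), i.e. about one degree of
# latitude per ~111 km; the sign convention mirrors get_flat_earth_coord() below.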
def get_flat_earth_coord(origin, the_pt):
"""
Get the flat earth coordinates (x,y) from a given (lng,lat) point "pt".
The (x,y) is the offset in lng and lat dimensions (in meters or km)
from a given origin point (olng, olat)
Arguments:
origin {list} -- (lng,lat) of the origin point
        the_pt {list} -- (lng,lat) of the point
    Returns:
        tuple -- (x, y) offset of the point from the origin, in km
"""
vardx = ((origin[0] - the_pt[0]) * 40000.0 *
math.cos((origin[1] + the_pt[1]) * math.pi / 360) / 360)
vardy = (origin[1] - the_pt[1]) * 40000.0 / 360
return vardx, vardy
def get_euc_dist(pt1, pt2):
"""
    Get Euclidean distance between two points pt1 and pt2.
    Note that we assume we pass flat earth coords (x,y)
    and not (lng,lat), since Euclidean distance of (lng,lat)
    does not yield meaningful distances
    Arguments:
        pt1 {tuple} -- (x,y) of the first point
        pt2 {tuple} -- (x,y) of the second point
    Returns:
        float -- Euclidean distance between pt1 and pt2
"""
return math.hypot(pt2[0] - pt1[0], pt2[1] - pt1[1])
def get_radangle_flat_earth_old(pt1, pt2):
"""Get the angle for two given points (in flat earth coords)
Arguments:
pt1 {list} -- (x,y) for the first point
        pt2 {list} -- (x,y) for the second point
    Returns:
        float -- Angle (in radians) between pt1 and pt2
"""
ydiff = pt1[1] - pt2[1]
xdiff = pt1[0] - pt2[0]
if abs(xdiff) < 1E-6:
return math.pi / 2.0
return math.atan(ydiff / float(xdiff))
def angle_trunc(the_angle):
"""
    Wrap a negative angle up into the range [0, 2*pi) radians
    (non-negative inputs are returned unchanged)
    Arguments:
        the_angle {float} -- angle in radians
    Returns:
        float -- Angle in [0, 2*pi) radians
"""
while the_angle < 0.0:
the_angle += math.pi * 2
return the_angle
def get_radangle_flat_earth(the_pt1, the_pt2):
"""Get the angle for two given points (in flat earth coords)
Arguments:
pt1 {list} -- (x,y) for the first point
        pt2 {list} -- (x,y) for the second point
    Returns:
        float -- Angle (in radians) between pt1 and pt2
"""
delta_y = the_pt2[1] - the_pt1[1]
delta_x = the_pt2[0] - the_pt1[0]
return angle_trunc(math.atan2(delta_y, delta_x))
def get_all_rects(rect, the_pt):
"""
Get all rectangles for the point the_pt
Arguments:
        rect {index} -- spatial index of rectangles (supports query_point / is_leaf / leaf_obj)
        the_pt {list} -- (x,y) point to query
    Returns:
        list -- leaf objects of all index rectangles that contain the_pt
"""
res = [r.leaf_obj() for r in rect.query_point(the_pt) if r.is_leaf()]
return res
def get_angle_between_pts_on_sphere(the_pt1, the_pt2):
"""
Get the angle between two points on a sphere
Arguments:
the_pt1 {list} -- (lng,lat) of first point
the_pt2 {list} -- (lng,lat) of second point
Returns:
float -- angle in radians between first and second point
"""
return sphere.radangle(the_pt1, the_pt2)
def get_origin(pt_dict):
"""
Get the origin point of all points (mean lng and lat)
Arguments:
pt_dict {[list]} -- list of all points
Returns:
list -- the origin point
"""
# Compute origin
lngs = [v[0] for v in pt_dict.values()]
lats = [v[1] for v in pt_dict.values()]
origin = (np.mean(lngs), np.mean(lats))
#origin_angle_rad = get_angle_between_pts_on_sphere((0,0), origin)
return origin
def add_camera_attr_with_entry_exit(cameras_dict, cam_rtree,
entry_point_attributes,
exit_point_attributes):
"""Based on entry and exit points, we assign entry and exit camearas
Arguments:
cameras_dict {dict} -- Dict of cameras
cam_rtree {RTree} -- R-tree of camera points
entry_point_attributes {list} -- List of entry points
exit_point_attributes {list} -- List of exit points
"""
for ept in entry_point_attributes.values():
the_pt = ept['approx_coordinate']
cams = get_all_rects(cam_rtree, the_pt)
cameras_detected = [cameras_dict[i] for i in cams]
for cam in cameras_detected:
cam['isEntryCamera'] = 1
for ept in exit_point_attributes.values():
the_pt = ept['approx_coordinate']
cams = get_all_rects(cam_rtree, the_pt)
cameras_detected = [cameras_dict[i] for i in cams]
for cam in cameras_detected:
cam['isExitCamera'] = 1
|
# -*- coding: utf-8 -*-
from symbol_table import SymbolTable
TYPES = ['inteiro', 'flutuante']
OPERATIONS = ['=', '<>', '>', '<', '>=', '<=', '&&', '||']
success = True
class Analyzer():
    def __init__(self):
        # "success" is the module-level flag returned by analyzer(); without the global
        # declaration the assignments in these methods would only create unused locals.
        global success
        self.symboltable = SymbolTable()
        success = True
    def scan_tree(self, node):
        global success
currentStatus = self.verify_node(node)
if(not currentStatus['goNextNode']):
return
for child in node.children:
self.scan_tree(child)
if(currentStatus['isNewContext']):
self.symboltable.removeCurrentContext()
if(currentStatus['isFunction']):
line = self.symboltable.getGlobal()
if(line['type'] != '' and not self.symboltable.hasReturn()):
success = False
print('[ERRO] Função ' + line['name'] + ' deveria retornar ' + line['type'] + ' em ' + str(line['line']) + ':' + str(line['column']))
    def verify_node(self, node):
        global success
if(node.value == 'declaracao_variaveis'):
for var in node.children[1:]:
dimension = self.verify_variable(var)
status = self.symboltable.insert({
'name': var.children[0].value,
'type': node.children[0].value,
'used': False,
'symbol_type': 'var',
'initialized': False,
'dimension': dimension,
'line': var.children[0].line,
'column': var.children[0].column,
'value': None
})
var.children[0].table_pointer = self.symboltable.searchFor(var.children[0].value)
if(not status):
success = False
return {
'goNextNode': False,
'isNewContext': False,
'isFunction': False,
}
elif(node.value == 'lista_parametros'):
for param in node.children:
self.symboltable.insert({
'name': param.children[1].value,
'type': param.children[0].value,
'used': False,
'symbol_type': 'params',
'initialized': True,
'dimension': int(len(param.children[2:]) / 2),
'line': param.children[0].line,
'column': param.children[0].column
})
line = self.symboltable.searchFor(param.children[1].value)
param.children[1].table_pointer = line
return {
'goNextNode': False,
'isNewContext': False,
'isFunction': False,
}
elif(node.value == 'atribuicao'):
var = node.children[0]
expression = node.children[1]
self.verify_variable(var)
line = self.verify_tableline(var, initialized = True, used = False)
var_type = 'inteiro'
if(line):
var_type = line['type']
expression_type = self.verify_expression(expression)
if(expression_type == 'wrong_type'):
print('[AVISO] Atribuição de tipos distintos \'' + var.table_pointer['name'] + '\' ' + var_type + ' em ' + str(var.table_pointer['line']) + ':' + str(var.table_pointer['column']))
success = False
elif(var_type != expression_type and expression_type != None):
print('[AVISO] Atribuição de tipos distintos \'' + var.table_pointer['name'] + '\' ' + var_type + ' e ' + expression_type + ' em ' + str(var.table_pointer['line']) + ':' + str(var.table_pointer['column']))
success = False
return {
'goNextNode': True,
'isNewContext': False,
'isFunction': False,
}
elif(node.value == 'corpo'):
for child in node.children:
if(child.value == 'expressao'):
self.verify_expression(child)
return {
'goNextNode': True,
'isNewContext': False,
'isFunction': False,
}
elif(node.value == 'retorna'):
self.symboltable.setReturn()
expression_type = self.verify_expression(node.children[0])
line = self.symboltable.getGlobal()
if(line['type'] not in TYPES or expression_type not in TYPES):
success = False
print('[ERRO] Tipo de Retorno inválido em ' + str(node.line) + ':' + str(node.column))
elif(line['type'] != expression_type):
print('[AVISO] Conversão Implícita de tipos em ' + str(node.line) + ':' + str(node.column))
return {
'goNextNode': False,
'isNewContext': False,
'isFunction': False,
}
elif(node.value == 'declaracao_funcao'):
params = []
function_type = None
if(len(node.children) == 4):
function_type = node.children[0].value
function_name = node.children[1].value
params_list = node.children[2]
else:
function_name = node.children[0].value
params_list = node.children[1]
for param in params_list.children:
params.append({
'type': param.children[0].value,
'vet': 0 if len(param.children) == 2 else int((len(param.children) - 2)/2)
})
status = self.symboltable.insert({
'name': function_name,
'type': function_type if function_type else '',
'used': False,
'symbol_type': 'function',
'initialized': True,
'dimension': 0,
'params': params,
'line': node.children[0].line,
'column': node.children[0].column
})
line = self.symboltable.searchFor(function_name, used = False)
if(len(node.children) == 4):
node.children[1].table_pointer = line
else:
node.children[0].table_pointer = line
if(not status):
success = False
self.symboltable.insertContex(function_name)
return {
'goNextNode': True,
'isNewContext': True,
'isFunction': True,
}
elif(node.value == 'repita' or node.value == 'se' or node.value == 'senão'):
self.symboltable.insertContex(node.value)
if(node.value == 'repita'):
for child in node.children:
                    if(child.value == 'expressao'):
self.verify_expression(child)
return {
'goNextNode': True,
'isNewContext': True,
'isFunction': False,
}
elif(node.value == 'condicional'):
for child in node.children:
                if(child.value == 'expressao'):
self.verify_expression(child)
return {
'goNextNode': True,
'isNewContext': False,
'isFunction': False,
}
elif(node.value == 'leia'):
var = node.children[0]
var.children[0].table_pointer = self.verify_tableline(var, initialized = True)
return {
'goNextNode': True,
'isNewContext': False,
'isFunction': False,
}
elif(node.value == 'chamada_funcao'):
self.verify_function(node)
return {
'goNextNode': True,
'isNewContext': False,
'isFunction': False,
}
elif(node.value == 'escreva'):
self.verify_expression(node.children[0])
return {
'goNextNode': False,
'isNewContext': False,
'isFunction': False,
}
else:
return {
'goNextNode': True,
'isNewContext': False,
'isFunction': False,
}
    def verify_variable(self, node):
        global success
dimension = 0
if(len(node.children) > 1):
for child in node.children[1].children:
if(child.value != '[' and child.value != ']'):
var_type = self.verify_expression(child)
var = self.verify_tableline(node, False)
if(var_type and var_type != 'inteiro'):
success = False
print('[ERRO] Índice de array \'' + node.children[0].value + '\' não é inteiro, em ' + str(node.children[0].line) + ':' + str(node.children[0].column))
dimension += 1
return dimension
    def verify_function(self, node):
        global success
function = self.verify_tableline(node, False)
node.table_pointer = function
if(function):
params = function['params']
args = []
for expression in node.children[-1].children:
arg = {}
expression_type = self.verify_expression(expression).split(' ')
arg['type'] = expression_type[0]
arg['vet'] = int(expression_type[1]) if len(expression_type) == 2 else 0
args.append(arg)
if(function['name'] == 'principal'):
if(self.symboltable.getCurrentContex().scope == 'principal'):
print('[AVISO] Chamada recursiva para principal.')
else:
success = False
print('[ERRO] Chamada para a função principal não permitida.')
if(len(params) != len(args)):
success = False
print('[ERRO] Chamada à função \'' + function['name'] + '\' com número de parâmetros diferente que o declarado. Esperado ' + str(len(params)) + ', mas recebido ' + str(len(args)) + ', em ' + str(function['line']) + ':' + str(function['column']))
elif(params != args):
success = False
print('[ERRO] Conversão Implícita em função \'' + function['name'] + '\' em ' + str(function['line']) + ':' + str(function['column']))
    def verify_tableline(self, node_type, isError = True, used = True, initialized = False):
        global success
aux = node_type.children[0].value
line = self.symboltable.searchFor(aux, used = used, initialized = initialized)
node_type.table_pointer = line
if(not line):
success = False
if(isError):
success = False
print('[ERRO] Chamada à ' + ('variável ' if(node_type.value == 'var') else 'função ') + aux + ' que não foi declarada em ' + str(node_type.children[0].line) + ':' + str(node_type.children[0].column))
return line if line else None
def verify_expression(self, node):
if(node.value == 'expressao'):
return self.verify_expression(node.children[0])
if(node.value == 'expressao_unaria'):
children = node.children
if(len(children) == 1):
expression_type = children[0].children[0]
else:
operation = children[0].value
expression_type = children[1].children[0]
if(operation == '!'):
if(expression_type.value == 'expressao'):
self.verify_expression(expression_type)
return 'wrong_type'
if(expression_type.value == 'numero'):
number = expression_type.children[0].value
return 'inteiro' if(type(number) is int) else 'flutuante'
elif(expression_type.value == 'expressao'):
return self.verify_expression(expression_type)
else:
line = self.verify_tableline(expression_type)
if(line and (line['symbol_type'] == 'var' or line['symbol_type'] == 'params')):
dimension = line['dimension']
if(dimension != 0):
real_dimension = len(expression_type.children) - 1
if(dimension - real_dimension != 0):
return line['type']
if(expression_type.value == 'chamada_funcao'):
self.verify_function(expression_type)
return line['type'] if line else None
elif(len(node.children) >= 2):
type1 = self.verify_expression(node.children[0])
type2 = self.verify_expression(node.children[1])
if(node.value in OPERATIONS):
if(not type1 or not type2 or (len(type1.split(' ')) == 2 or len(type2.split(' ')) == 2)):
print('[AVISO] Tipo Inválido em ' + str(node.line) + ':' + str(node.column))
return 'wrong_type'
if(type1 == type2):
return type1
elif(type1 in TYPES and type2 in TYPES):
return 'flutuante'
else:
return self.verify_expression(node.children[0])
return None
    def verify_principal(self):
        global success
line = self.symboltable.hasPrincipal()
if(line and line['used']):
print('[ERRO] Chamada para a função principal não permitida.')
success = False
elif(not line):
print('[ERRO] Função principal não declarada.')
success = False
def verify_other_points(self):
for line in self.symboltable.getUninitializedLines():
print('[AVISO] Variável \'' + line['name'] + '\' declarada, e não utilizada em ' + str(line['line']) + ':' + str(line['column']))
for line in self.symboltable.getUnusedLines():
if(line['name'] == 'principal'):
continue
print('[AVISO] Função \'' + line['name'] + '\' declarada, mas não utilizada em ' + str(line['line']) + ':' + str(line['column']))
def analyzer(tree):
analyzer = Analyzer()
analyzer.scan_tree(tree)
analyzer.verify_principal()
analyzer.verify_other_points()
    return success
|
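# NOTE (interpretation, not stated in the source): the records below look like per-interval
# audio loudness measurements in EBU R128 terms -- 'momentary', 'shortterm', 'global'
# (integrated) and 'window' loudness in LUFS, 'range' in LU, with 'timestamp' in nanoseconds.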
DATA = [
{'timestamp': 100000000, 'momentary': -79.89229196784386, 'shortterm': -88.642904601760861, 'global': float('-inf'),
'window': -90.861392097924423, 'range': 0},
{'timestamp': 200000000, 'momentary': -66.953377330406298, 'shortterm': -75.703989964323284,
'global': float('-inf'), 'window': -77.92247746048686, 'range': 0},
{'timestamp': 300000000, 'momentary': -36.757750473281597, 'shortterm': -45.508363107198605,
'global': float('-inf'), 'window': -47.726850603362166, 'range': 0},
{'timestamp': 400000000, 'momentary': -32.821624718837185, 'shortterm': -41.572237352754179,
'global': -32.821624718837185, 'window': -43.790724848917741, 'range': 0},
{'timestamp': 500000000, 'momentary': -30.739924072566936, 'shortterm': -39.490483917308033,
'global': -31.657222948761262, 'window': -41.708971413471595, 'range': 0},
{'timestamp': 600000000, 'momentary': -29.191061788402944, 'shortterm': -37.940947452455575,
'global': -30.671388479460788, 'window': -40.159434948619136, 'range': 0},
{'timestamp': 700000000, 'momentary': -28.173243287136938, 'shortterm': -36.360404506607537,
'global': -29.89996316806052, 'window': -38.578892002771099, 'range': 0},
{'timestamp': 800000000, 'momentary': -27.059463442954048, 'shortterm': -34.788043237402839,
'global': -29.164016371220104, 'window': -37.006530733566393, 'range': 0},
{'timestamp': 900000000, 'momentary': -24.934545614523024, 'shortterm': -32.672141130775842,
'global': -28.109949343636824, 'window': -34.89062862693941, 'range': 0},
{'timestamp': 1000000000, 'momentary': -23.058202928981988, 'shortterm': -30.861793862582122,
'global': -26.922965683881078, 'window': -33.080281358745687, 'range': 0},
{'timestamp': 1100000000, 'momentary': -21.394062127219279, 'shortterm': -29.213905515414311,
'global': -25.712365280980293, 'window': -31.432393011577872, 'range': 0},
{'timestamp': 1200000000, 'momentary': -19.98117393839529, 'shortterm': -27.769794410919399,
'global': -24.557320692722932, 'window': -29.98828190708296, 'range': 0},
{'timestamp': 1300000000, 'momentary': -19.804646360860072, 'shortterm': -27.132817139207223,
'global': -23.770135372744758, 'window': -29.351304635370784, 'range': 0},
{'timestamp': 1400000000, 'momentary': -19.934685931767618, 'shortterm': -26.628304045141157,
'global': -23.243373042839192, 'window': -28.846791541304722, 'range': 0},
{'timestamp': 1500000000, 'momentary': -20.88727028393912, 'shortterm': -26.41042249555279,
'global': -22.990203731923454, 'window': -28.628909991716355, 'range': 0},
{'timestamp': 1600000000, 'momentary': -23.066111209687978, 'shortterm': -26.327701245780499,
'global': -22.995995897109623, 'window': -28.546188741944061, 'range': 0},
{'timestamp': 1700000000, 'momentary': -25.07298856065384, 'shortterm': -26.289734669937484,
'global': -23.115546578265732, 'window': -28.508222166101046, 'range': 0},
{'timestamp': 1800000000, 'momentary': -28.481812628835826, 'shortterm': -26.265953674536416,
'global': -23.325938716764206, 'window': -28.484441170699977, 'range': 0},
{'timestamp': 1900000000, 'momentary': -32.08704529327035, 'shortterm': -26.256486134563954,
'global': -23.567884908961119, 'window': -28.474973630727519, 'range': 0},
{'timestamp': 2000000000, 'momentary': -35.012235630574096, 'shortterm': -26.250008728606399,
'global': -23.567884908961119, 'window': -28.468496224769964, 'range': 0},
{'timestamp': 2100000000, 'momentary': -36.514574009813032, 'shortterm': -26.235095475230427,
'global': -23.567884908961119, 'window': -28.453582971393992, 'range': 0},
{'timestamp': 2200000000, 'momentary': -32.341418349672971, 'shortterm': -26.125308338332044,
'global': -23.79532198676598, 'window': -28.343795834495605, 'range': 0},
{'timestamp': 2300000000, 'momentary': -28.335550032218929, 'shortterm': -25.91176600140825,
'global': -23.95466628457714, 'window': -28.130253497571811, 'range': 0},
{'timestamp': 2400000000, 'momentary': -26.168839070238189, 'shortterm': -25.69680361761219,
'global': -24.046933600786414, 'window': -27.915291113775755, 'range': 0},
{'timestamp': 2500000000, 'momentary': -24.907195955920113, 'shortterm': -25.512519691157657,
'global': -24.086130884859191, 'window': -27.731007187321218, 'range': 0},
{'timestamp': 2600000000, 'momentary': -23.849495596113446, 'shortterm': -25.243327370090057,
'global': -24.07456530811297, 'window': -27.461814866253622, 'range': 0},
{'timestamp': 2700000000, 'momentary': -22.842689283230918, 'shortterm': -24.872702248693745,
'global': -24.010300110202007, 'window': -27.091189744857306, 'range': 0},
{'timestamp': 2800000000, 'momentary': -20.464404718814905, 'shortterm': -24.098683654888152,
'global': -23.778222099352966, 'window': -26.317171151051721, 'range': 0},
{'timestamp': 2900000000, 'momentary': -18.890359166023554, 'shortterm': -23.43733748202774,
'global': -23.416980367412787, 'window': -25.655824978191301, 'range': 0},
{'timestamp': 3000000000, 'momentary': -18.327172255623648, 'shortterm': -23.054108563545935,
'global': -23.046166973502832, 'window': -25.2725960597095, 'range': 0},
{'timestamp': 3100000000, 'momentary': -18.137408944334162, 'shortterm': -22.754197417370442,
'global': -22.709373023701726, 'window': -24.972683794318968, 'range': 0},
{'timestamp': 3200000000, 'momentary': -19.601679596346695, 'shortterm': -22.71401980561691,
'global': -22.54440077709743, 'window': -24.932485485223904, 'range': 0},
{'timestamp': 3300000000, 'momentary': -21.826648728806532, 'shortterm': -22.692855927138144,
'global': -22.373155941093014, 'window': -24.888691738136099, 'range': 0},
{'timestamp': 3400000000, 'momentary': -23.591080698627398, 'shortterm': -22.625363739466192,
'global': -22.549563561616036, 'window': -24.788853581523924, 'range': 0},
{'timestamp': 3500000000, 'momentary': -23.595920223393183, 'shortterm': -22.385354439689934,
'global': -22.580669617289157, 'window': -24.520074301737857, 'range': 0},
{'timestamp': 3600000000, 'momentary': -22.236999497677242, 'shortterm': -22.226475284807375,
'global': -22.569148338011232, 'window': -24.329995032531297, 'range': 0},
{'timestamp': 3700000000, 'momentary': -21.461064124489322, 'shortterm': -22.126435168279048,
'global': -22.404705954365912, 'window': -24.184109387174491, 'range': 0},
{'timestamp': 3800000000, 'momentary': -20.863706586362337, 'shortterm': -22.010829516561099,
'global': -22.347280568264228, 'window': -24.006036368081546, 'range': 0},
{'timestamp': 3900000000, 'momentary': -20.352576660858315, 'shortterm': -21.820934562021158,
'global': -22.271230033166379, 'window': -23.696341046011376, 'range': 0},
{'timestamp': 4000000000, 'momentary': -19.242493397547545, 'shortterm': -21.600802181589483,
'global': -22.144283124499612, 'window': -23.332736426181537, 'range': 1.4533063819564518},
{'timestamp': 4100000000, 'momentary': -18.247406214762872, 'shortterm': -21.452405297657531,
'global': -21.853146256266303, 'window': -22.998560347392417, 'range': 1.4533063819564518},
{'timestamp': 4200000000, 'momentary': -17.805365816883981, 'shortterm': -21.447968884757039,
'global': -21.666179966970649, 'window': -22.75595309271489, 'range': 1.4533063819564518},
{'timestamp': 4300000000, 'momentary': -18.285664395879323, 'shortterm': -21.451385417587321,
'global': -21.41461943830544, 'window': -22.63078698993106, 'range': 1.4533063819564518},
{'timestamp': 4400000000, 'momentary': -19.452371324220096, 'shortterm': -21.502111580306757,
'global': -21.346255197376848, 'window': -22.557273759605899, 'range': 1.4533063819564518},
{'timestamp': 4500000000, 'momentary': -21.104973891037634, 'shortterm': -21.484771777739535,
'global': -21.339554703706359, 'window': -22.491969640101026, 'range': 1.4533063819564518},
{'timestamp': 4600000000, 'momentary': -22.578071433877778, 'shortterm': -21.400779037238632,
'global': -21.36800487218505, 'window': -22.408286335758948, 'range': 1.4533063819564518},
{'timestamp': 4700000000, 'momentary': -22.987180325982713, 'shortterm': -21.299026110552948,
'global': -21.402800249171086, 'window': -22.321969014000732, 'range': 1.4533063819564518},
{'timestamp': 4800000000, 'momentary': -22.71060453118913, 'shortterm': -21.191187559821767,
'global': -21.431123796752725, 'window': -22.234210242942954, 'range': 1.4533063819564518},
{'timestamp': 4900000000, 'momentary': -22.427085332231954, 'shortterm': -21.087776554549325,
'global': -21.452885833551516, 'window': -22.152892455252211, 'range': 1.4533063819564518},
{'timestamp': 5000000000, 'momentary': -22.435387303803285, 'shortterm': -20.98975533530124,
'global': -21.473872016989777, 'window': -22.076050470672687, 'range': 2.0643532282446948},
{'timestamp': 5100000000, 'momentary': -22.282309698796492, 'shortterm': -20.876001782612779,
'global': -21.491060684888208, 'window': -21.984781842410175, 'range': 2.0643532282446948},
{'timestamp': 5200000000, 'momentary': -21.91617501046991, 'shortterm': -20.766995629311793,
'global': -21.500274326620794, 'window': -21.875609805253571, 'range': 2.0643532282446948},
{'timestamp': 5300000000, 'momentary': -21.959668345356722, 'shortterm': -20.737672391406374,
'global': -21.509972510150575, 'window': -21.815162272702775, 'range': 2.0643532282446948},
{'timestamp': 5400000000, 'momentary': -22.308936927868768, 'shortterm': -20.745122973371871,
'global': -21.525866323017439, 'window': -21.785927939510255, 'range': 2.0643532282446948},
{'timestamp': 5500000000, 'momentary': -23.024355301223505, 'shortterm': -20.753530894162271,
'global': -21.552914321755157, 'window': -21.763327121033438, 'range': 2.0643532282446948},
{'timestamp': 5600000000, 'momentary': -24.057448712627039, 'shortterm': -20.78032975491076,
'global': -21.592748026606582, 'window': -21.732240353025595, 'range': 2.0643532282446948},
{'timestamp': 5700000000, 'momentary': -24.4720430518996, 'shortterm': -20.850695620278433,
'global': -21.63592051467781, 'window': -21.70840441257452, 'range': 2.0643532282446948},
{'timestamp': 5800000000, 'momentary': -24.250234162057552, 'shortterm': -21.120239034496795,
'global': -21.67538270183093, 'window': -21.693099591382651, 'range': 2.0643532282446948},
{'timestamp': 5900000000, 'momentary': -23.466029215285019, 'shortterm': -21.375176751106132,
'global': -21.704251047645396, 'window': -21.696493677186332, 'range': 2.0643532282446948},
{'timestamp': 6000000000, 'momentary': -22.795713067291757, 'shortterm': -21.489872470117387,
'global': -21.797902708964006, 'window': -21.71631824704809, 'range': 2.0643532282446948},
{'timestamp': 6100000000, 'momentary': -22.597672190459811, 'shortterm': -21.607201956549222,
'global': -21.811450650393834, 'window': -21.799769177175254, 'range': 2.0643532282446948},
{'timestamp': 6200000000, 'momentary': -22.240378627581336, 'shortterm': -21.511367039569397,
'global': -21.818882952344897, 'window': -21.907304776010122, 'range': 2.0643532282446948},
{'timestamp': 6300000000, 'momentary': -22.122500481564739, 'shortterm': -21.409681682988662,
'global': -21.824122675440901, 'window': -21.924507473487143, 'range': 2.0643532282446948},
{'timestamp': 6400000000, 'momentary': -22.870123822818396, 'shortterm': -21.425888602059608,
'global': -21.840461587640906, 'window': -21.981523928282694, 'range': 2.0643532282446948},
{'timestamp': 6500000000, 'momentary': -23.000295124900415, 'shortterm': -21.553686044987888,
'global': -21.858046685776085, 'window': -21.968137222698985, 'range': 2.0643532282446948},
{'timestamp': 6600000000, 'momentary': -22.358390197117949, 'shortterm': -21.524893421645881,
'global': -21.866064205830387, 'window': -21.860467251194958, 'range': 2.0643532282446948},
{'timestamp': 6700000000, 'momentary': -22.212084321150869, 'shortterm': -21.501520429411759,
'global': -21.871610989958082, 'window': -21.770378067714919, 'range': 2.0643532282446948},
{'timestamp': 6800000000, 'momentary': -20.214548728289067, 'shortterm': -21.320907829727783,
'global': -21.838661794313175, 'window': -21.558687178120316, 'range': 2.0643532282446948},
{'timestamp': 6900000000, 'momentary': -18.682709109350704, 'shortterm': -21.209666933087743,
'global': -21.700363084595288, 'window': -21.313534778320342, 'range': 2.0643532282446948},
{'timestamp': 7000000000, 'momentary': -19.080681723874889, 'shortterm': -21.487872100612876,
'global': -21.642749972623786, 'window': -21.261588705785577, 'range': 2.0643532282446948},
{'timestamp': 7100000000, 'momentary': -18.898707724643284, 'shortterm': -21.675559584030132,
'global': -21.582433863762599, 'window': -21.154759976759632, 'range': 2.0643532282446948},
{'timestamp': 7200000000, 'momentary': -19.180833586996506, 'shortterm': -21.689266886791945,
'global': -21.532611217133766, 'window': -21.021618438820536, 'range': 2.0643532282446948},
{'timestamp': 7300000000, 'momentary': -18.714943875693486, 'shortterm': -21.317857710334366,
'global': -21.472018971094204, 'window': -20.784105495921924, 'range': 2.0643532282446948},
{'timestamp': 7400000000, 'momentary': -16.368607250946845, 'shortterm': -20.623124863975846,
'global': -21.327164904609322, 'window': -20.39450280024527, 'range': 2.0643532282446948},
{'timestamp': 7500000000, 'momentary': -15.553761132291642, 'shortterm': -20.233389568391377,
'global': -21.150685292975911, 'window': -20.162055225920732, 'range': 2.0643532282446948},
{'timestamp': 7600000000, 'momentary': -15.681683172073445, 'shortterm': -20.156973323796286,
'global': -20.992472843853591, 'window': -20.115992518507699, 'range': 2.0643532282446948},
{'timestamp': 7700000000, 'momentary': -16.359207540510681, 'shortterm': -20.089612595903152,
'global': -20.874121215092892, 'window': -20.095552560689693, 'range': 2.0643532282446948},
{'timestamp': 7800000000, 'momentary': -17.766080962368861, 'shortterm': -19.922855318062183,
'global': -20.809734557120404, 'window': -20.109566093962322, 'range': 2.0643532282446948},
{'timestamp': 7900000000, 'momentary': -18.726481329114797, 'shortterm': -19.787285794283289,
'global': -20.77224361851702, 'window': -20.144186001396744, 'range': 2.0643532282446948},
{'timestamp': 8000000000, 'momentary': -19.01812324973957, 'shortterm': -19.765187264965245,
'global': -20.742328938282469, 'window': -20.193834871015106, 'range': 1.8356149166242375},
{'timestamp': 8100000000, 'momentary': -19.713077265658853, 'shortterm': -19.816390402494601,
'global': -20.726447753878659, 'window': -20.264751472760896, 'range': 1.8356149166242375},
{'timestamp': 8200000000, 'momentary': -21.064178200393332, 'shortterm': -19.844258489924862,
'global': -20.730840954539698, 'window': -20.222687055309951, 'range': 1.8356149166242375},
{'timestamp': 8300000000, 'momentary': -21.422899776135186, 'shortterm': -19.741333969625206,
'global': -20.73937912952707, 'window': -20.121269273101365, 'range': 1.8356149166242375},
{'timestamp': 8400000000, 'momentary': -19.080348195461326, 'shortterm': -19.423397235420179,
'global': -20.712875579551334, 'window': -19.913044477269203, 'range': 1.8356149166242375},
{'timestamp': 8500000000, 'momentary': -17.636880650280727, 'shortterm': -19.184797726493517,
'global': -20.655139844519457, 'window': -19.814018290189455, 'range': 1.8356149166242375},
{'timestamp': 8600000000, 'momentary': -17.6948112888787, 'shortterm': -19.169163545814033,
'global': -20.601072988977062, 'window': -19.837010329308363, 'range': 1.8356149166242375},
{'timestamp': 8700000000, 'momentary': -18.149430044017041, 'shortterm': -19.143786299770372,
'global': -20.55956929144968, 'window': -19.838778219918225, 'range': 1.8356149166242375},
{'timestamp': 8800000000, 'momentary': -19.937409399567805, 'shortterm': -19.111042685496059,
'global': -20.551215693310212, 'window': -19.847185533742724, 'range': 1.8356149166242375},
{'timestamp': 8900000000, 'momentary': -21.72269024576946, 'shortterm': -19.079360538781884,
'global': -20.563910454866711, 'window': -19.897861906961371, 'range': 1.8356149166242375},
{'timestamp': 9000000000, 'momentary': -20.745127099070753, 'shortterm': -19.020143222364769,
'global': -20.566075481045708, 'window': -19.955129336628428, 'range': 2.5806589592247136},
{'timestamp': 9100000000, 'momentary': -20.887237646023106, 'shortterm': -19.019407102176121,
'global': -20.569806897202781, 'window': -20.073281878947196, 'range': 2.5806589592247136},
{'timestamp': 9200000000, 'momentary': -21.114701884661887, 'shortterm': -19.02848014625004,
'global': -20.575907582149796, 'window': -20.154276400763127, 'range': 2.5806589592247136},
{'timestamp': 9300000000, 'momentary': -21.346538522834646, 'shortterm': -19.023507711723564,
'global': -20.584222946603198, 'window': -20.160357444993309, 'range': 2.5806589592247136},
{'timestamp': 9400000000, 'momentary': -21.668616125193548, 'shortterm': -18.944746671293498,
'global': -20.595395559996934, 'window': -20.113906537981599, 'range': 2.5806589592247136},
{'timestamp': 9500000000, 'momentary': -20.861605957424405, 'shortterm': -18.874514309900832,
'global': -20.598364570081262, 'window': -20.057519527609003, 'range': 2.5806589592247136},
{'timestamp': 9600000000, 'momentary': -20.242250285752885, 'shortterm': -18.862797712515469,
'global': -20.594149320247627, 'window': -20.014904274614732, 'range': 2.5806589592247136},
{'timestamp': 9700000000, 'momentary': -20.122310881768325, 'shortterm': -18.855084321202341,
'global': -20.58855265998918, 'window': -19.994294632488671, 'range': 2.5806589592247136},
{'timestamp': 9800000000, 'momentary': -20.509458290430754, 'shortterm': -18.973218026229524,
'global': -20.587665872740541, 'window': -19.989588500954479, 'range': 2.5806589592247136},
{'timestamp': 9900000000, 'momentary': -21.308681203990677, 'shortterm': -19.158183935148344,
'global': -20.594972488473477, 'window': -19.998782548019808, 'range': 2.5806589592247136},
{'timestamp': 10000000000, 'momentary': -22.443328364609854, 'shortterm': -19.170248663647861,
'global': -20.611366008085945, 'window': -20.015267805563155, 'range': 2.5806589592247136},
{'timestamp': 10100000000, 'momentary': -23.843215126664706, 'shortterm': -19.263342217457545,
'global': -20.635945853110254, 'window': -20.056678208481507, 'range': 2.5806589592247136},
{'timestamp': 10187755102, 'momentary': -25.429005280781066, 'shortterm': -19.405104198349111,
'global': -20.635945853110254, 'window': -20.115126912051082, 'range': 2.5806589592247136},
]
|
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from tuiuiu.tuiuiuembeds.views import chooser
urlpatterns = [
url(r'^chooser/$', chooser.chooser, name='chooser'),
url(r'^chooser/upload/$', chooser.chooser_upload, name='chooser_upload'),
]
|
""" API for Pioneer Network Receivers. """
# pylint: disable=logging-fstring-interpolation,broad-except
import logging
import traceback
import socket
import telnetlib
import time
import random
from threading import Thread, Event, Lock
_LOGGER = logging.getLogger(__name__)
MAX_VOLUME = 185
MAX_VOLUME_ZONEX = 81
MAX_SOURCE_NUMBERS = 60
RECONNECT_DELAY_MAX = 64
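## PIONEER_COMMANDS maps each action to its per-zone telnet codes. The zone keys appear to be
## "1" (main zone), "2" and "3" (sub zones) and "Z" (HDZone). The power on/off entries are plain
## command strings; every other entry looks like a [command, expected-response-prefix] pair,
## the prefix being what telnet_send_request() waits for.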
PIONEER_COMMANDS = {
"turn_on": {"1": "PO", "2": "APO", "3": "BPO", "Z": "ZEO",},
"turn_off": {"1": "PF", "2": "APF", "3": "BPF", "Z": "ZEF",},
"select_source": {
"1": ["FN", "FN"],
"2": ["ZS", "Z2F"],
"3": ["ZT", "Z3F"],
"Z": ["ZEA", "ZEA"],
},
"volume_up": {
"1": ["VU", "VOL"],
"2": ["ZU", "ZV"],
"3": ["YU", "YV"],
"Z": ["HZU", "XV"],
},
"volume_down": {
"1": ["VD", "VOL"],
"2": ["ZD", "ZV"],
"3": ["YD", "YV"],
"Z": ["HZD", "XV"],
},
"set_volume_level": {
"1": ["VL", "VOL"],
"2": ["ZV", "ZV"],
"3": ["YV", "YV"],
"Z": ["HZV", "XV"],
},
"mute_on": {
"1": ["MO", "MUT"],
"2": ["Z2MO", "Z2MUT"],
"3": ["Z3MO", "Z3MUT"],
"Z": ["HZMO", "HZMUT"],
},
"mute_off": {
"1": ["MF", "MUT"],
"2": ["Z2MF", "Z2MUT"],
"3": ["Z3MF", "Z3MUT"],
"Z": ["HZMF", "HZMUT"],
},
"query_power": {
"1": ["?P", "PWR"],
"2": ["?AP", "APR"],
"3": ["?BP", "BPR"],
"Z": ["?ZEP", "ZEP"],
},
"query_volume": {
"1": ["?V", "VOL"],
"2": ["?ZV", "ZV"],
"3": ["?YV", "YV"],
"Z": ["?HZV", "XV"],
},
"query_mute": {
"1": ["?M", "MUT"],
"2": ["?Z2M", "Z2MUT"],
"3": ["?Z3M", "Z3MUT"],
"Z": ["?HZM", "HZMUT"],
},
"query_source_id": {
"1": ["?F", "FN"],
"2": ["?ZS", "Z2F"],
"3": ["?ZT", "Z3F"],
"Z": ["?ZEA", "ZEA"],
},
}
## https://stackoverflow.com/questions/12248132/how-to-change-tcp-keepalive-timer-using-python-script
def sock_set_keepalive(sock, after_idle_sec=1, interval_sec=3, max_fails=5):
"""Set TCP keepalive on an open socket.
It activates after 1 second (after_idle_sec) of idleness,
then sends a keepalive ping once every 3 seconds (interval_sec),
    and closes the connection after 5 failed pings (max_fails), i.e. about 15 seconds later
"""
if sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle_sec)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval_sec)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails)
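    ## NOTE: TCP_KEEPIDLE/TCP_KEEPINTVL/TCP_KEEPCNT are Linux-specific socket options and may be
    ## missing from the socket module on other platforms.
    ## Usage sketch (hypothetical host/port):
    ##   sock = socket.create_connection(("192.168.1.2", 8102))
    ##   sock_set_keepalive(sock, after_idle_sec=1, interval_sec=3, max_fails=5)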
def get_backoff_delay(retry_count):
""" Calculate exponential backoff with random jitter delay. """
delay = round(
min(RECONNECT_DELAY_MAX, (2 ** retry_count)) + (random.randint(0, 1000) / 1000),
4,
)
return delay
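## For example, retry_count values 0..6 give base delays of 1, 2, 4, 8, 16, 32 and 64 seconds
## (capped at RECONNECT_DELAY_MAX), each with up to one extra second of random jitter.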
class PioneerAVR:
""" Pioneer AVR interface. """
def __init__(
self,
host,
port=8102,
timeout=2,
scan_interval=60,
volume_workaround=True,
command_delay=0.1,
):
""" Initialize the Pioneer AVR interface. """
_LOGGER.debug("PioneerAVR.__init__()")
self._host = host
self._port = port
self._timeout = timeout
self._scan_interval = scan_interval
self._volume_workaround = volume_workaround
self._command_delay = command_delay
## Public properties
self.available = False
self.zones = []
self.power = {}
self.volume = {}
self.max_volume = {}
self.mute = {}
self.source = {}
## Internal state
self._full_update = True
self._last_updated = 0.0
self._last_command = 0.0
self._source_name_to_id = {}
self._source_id_to_name = {}
self._zone_callback = {}
self._update_callback = None
self._telnet_obj = None
self._telnet_lock = Lock()
self._telnet_thread = None
self._telnet_reconnect = True
self._request_lock = Lock()
self._response_lock = Lock()
self._response_event = Event()
self._response_prefix = None
self._response_value = None
self._response_commands = []
## Connect to AVR and determine zones and sources
self.telnet_connect()
self.query_zones()
self.build_source_dict()
def __del__(self):
_LOGGER.debug("PioneerAVR.__del__()")
self.telnet_disconnect()
def telnet_connect(self):
""" Open telnet connection to AVR and start listener thread. """
## Open telnet connection
_LOGGER.info(">> telnet_connect()")
with self._telnet_lock:
if not self._telnet_obj:
try:
self._telnet_obj = telnetlib.Telnet(
self._host, self._port, self._timeout
)
sock_set_keepalive(self._telnet_obj.get_socket())
_LOGGER.info("AVR connection opened")
except Exception as e: # pylint: disable=invalid-name
raise Exception(f"Could not open AVR connection: {e}")
self.available = True
self._last_updated = 0.0
else:
_LOGGER.debug("AVR connection already open")
## Create child thread to listen to telnet socket
if not self._telnet_thread or not self._telnet_thread.is_alive():
try:
_LOGGER.debug("Creating new AVR listener thread")
self._telnet_thread = Thread(target=self.telnet_listener)
if self._telnet_thread:
self._telnet_thread.start()
_LOGGER.debug("AVR listener started")
else:
raise Exception("Could not create thread")
except Exception as e: # pylint: disable=invalid-name
raise Exception(f"Could not start AVR listener: {e}")
else:
_LOGGER.debug("AVR listener already started")
return True
def telnet_disconnect(self, reconnect=True):
""" Shutdown and close telnet connection to AVR. """
_LOGGER.info(f">> telnet_disconnect(reconnect={reconnect})")
with self._telnet_lock:
if self._telnet_obj:
_LOGGER.debug("Closing AVR connection")
self._telnet_reconnect = reconnect
self.available = False
self.call_zone_callbacks()
sock = self._telnet_obj.get_socket()
if sock:
try:
sock.shutdown(socket.SHUT_RDWR)
except OSError:
pass
self._telnet_obj.close()
self._telnet_obj = None
_LOGGER.info("AVR connection closed")
def telnet_listener(self):
""" Telnet listener thread. """
_LOGGER.info("AVR listener running")
while True:
self.telnet_listener_main_loop() # rc not checked
## Main loop exited, reconnect after delay
_LOGGER.info(">> Calling telnet_disconnect()")
self.telnet_disconnect()
_LOGGER.info(">> Telnet disconnected")
if not self._telnet_reconnect:
_LOGGER.info("AVR reconnection disabled, not reconnecting")
break
_LOGGER.info("Reconnecting to AVR")
retry = 0
while True:
delay = get_backoff_delay(retry)
_LOGGER.debug(f"Waiting {delay}s before retrying connection")
time.sleep(delay)
retry += 1
try:
self.telnet_connect()
## Schedule update to be run in HA event loop
## NOTE: Cannot run self.update() in listener thread as it
## depends on the listener to process AVR responses
_LOGGER.debug("Scheduling full AVR status update")
self._full_update = True
self.call_update_callback()
break
except Exception as e: # pylint: disable=invalid-name
_LOGGER.debug(f"Could not reconnect to AVR: {e}")
self.telnet_disconnect()
_LOGGER.info("AVR listener thread terminating")
self.telnet_disconnect()
def telnet_listener_main_loop(self):
""" Main loop of telnet listener. """
_LOGGER.debug("Running AVR listener main loop")
while self._telnet_obj:
try:
## Check for telnet responses
raw_response = self._telnet_obj.read_until(b"\r\n")
response = raw_response.decode().strip()
self._last_updated = time.time() ## include empty responses
if not response:
_LOGGER.debug("Ignoring empty response")
## Skip processing empty responses (keepalives?)
continue
_LOGGER.debug(f"Received response: {response}")
## Parse response, update cached properties
updated_zones = self.parse_response(response)
if self._response_commands:
## Send post-response commands
for cmd in self._response_commands:
self.telnet_send_command(cmd)
self._response_commands = []
if updated_zones:
## Call zone callbacks for updated zones
self.call_zone_callbacks(updated_zones)
## NOTE: this does not seem to reset the scan interval
# if "1" not in updated_zones:
# self.call_update_callback() ## Reset scan interval timer
# ## update will be skipped within timeout period
## Check whether a request is waiting for a response
if self._response_lock.locked() and self._response_prefix:
if response.startswith("E"):
_LOGGER.debug(f"Signalling error {response} to waiting request")
self._response_value = response
self._response_event.set()
elif response.startswith(self._response_prefix):
_LOGGER.debug(
f"Signalling response {response} to waiting request"
)
self._response_value = response
self._response_event.set()
except EOFError:
_LOGGER.debug("AVR listener: EOFError")
return False
except TimeoutError:
_LOGGER.debug("AVR listener: TimeoutError")
return False
except Exception as e: # pylint: disable=invalid-name
if not self.available:
## Connection closed outside of listener
_LOGGER.debug(f"AVR listener exception: {e}")
return None
else:
_LOGGER.error(f"AVR listener fatal exception: {e}")
traceback.print_exc()
_LOGGER.info(">> Exiting telnet_listener_main_loop()")
return None
## Telnet connection closed
return False
def telnet_send_command(self, command, rate_limit=True):
""" Send a command to the AVR via telnet."""
# _LOGGER.info(f">> telnet_send_command({command})")
_LOGGER.debug(f"Sending AVR command {command}")
## Check if connection available
if not self.available:
raise Exception("AVR connection not available")
now = time.time()
if rate_limit:
## Rate limit commands
since_command = now - self._last_command
if since_command < self._command_delay:
delay = self._command_delay - since_command
                _LOGGER.debug(f"Delaying command for {delay:.3f}s")
                time.sleep(delay)
self._last_command = now
try:
self._telnet_obj.write(command.encode("UTF-8") + b"\r")
return True
except Exception as e: # pylint: disable=invalid-name
_LOGGER.error(f"Could not send AVR command: {str(e)}")
self.telnet_disconnect()
return False
def telnet_send_request(
self, command, response_prefix, ignore_error=None, rate_limit=True
):
""" Execute a request synchronously on the AVR via telnet. """
# _LOGGER.info(
# f">> telnet_send_request({command}, {response_prefix}, ignore_error={ignore_error})"
# )
with self._request_lock: ## single request
if not self.telnet_send_command(command, rate_limit=rate_limit):
return False
self._response_event.clear()
self._response_prefix = response_prefix
with self._response_lock:
self._response_event.wait(timeout=self._timeout)
self._response_prefix = None
if self._response_event.is_set():
response = self._response_value
if response.startswith("E"):
## Error value returned
err = f"AVR command {command} returned error {response}"
if ignore_error is None:
raise Exception(err)
elif not ignore_error:
_LOGGER.error(err)
return False
elif ignore_error:
_LOGGER.debug(err)
return False
return response
## Request timed out
_LOGGER.debug(f"AVR command {command} returned no response")
return None
## Raw functions. Hook for re-implementing HTTP command/requests.
def send_raw_command(self, raw_command, rate_limit=True):
""" Send a raw command to the device. """
return self.telnet_send_command(raw_command, rate_limit)
def send_raw_request(
self, raw_command, response_prefix, ignore_error=None, rate_limit=True
):
""" Execute a raw command and return the response. """
return self.telnet_send_request(
raw_command, response_prefix, ignore_error, rate_limit
)
def send_command(
self, command, zone="1", prefix="", ignore_error=None, rate_limit=True
):
""" Send a command or request to the device. """
# pylint: disable=unidiomatic-typecheck
raw_command = PIONEER_COMMANDS.get(command, {}).get(zone)
if type(raw_command) is list:
if len(raw_command) == 2:
## Handle command as request
expected_response = raw_command[1]
raw_command = raw_command[0]
return self.send_raw_request(
prefix + raw_command, expected_response, ignore_error, rate_limit
)
else:
_LOGGER.error(f"Invalid request {raw_command} for zone {zone}")
return None
elif type(raw_command) is str:
return self.send_raw_command(prefix + raw_command, rate_limit)
else:
_LOGGER.warning(f"Invalid command {command} for zone {zone}")
return None
## Initialisation functions
def query_zones(self):
""" Query zones on Pioneer AVR by querying power status. """
if not self.zones:
_LOGGER.info("Querying available zones on AVR")
if self.send_command("query_power", "1", ignore_error=True):
_LOGGER.info("Zone 1 discovered")
if "1" not in self.zones:
self.zones.append("1")
self.max_volume["1"] = MAX_VOLUME
else:
raise RuntimeError("Main Zone not found on AVR")
if self.send_command("query_power", "2", ignore_error=True):
_LOGGER.info("Zone 2 discovered")
if "2" not in self.zones:
self.zones.append("2")
self.max_volume["2"] = MAX_VOLUME_ZONEX
if self.send_command("query_power", "3", ignore_error=True):
_LOGGER.info("Zone 3 discovered")
if "3" not in self.zones:
self.zones.append("3")
self.max_volume["3"] = MAX_VOLUME_ZONEX
if self.send_command("query_power", "Z", ignore_error=True):
_LOGGER.info("HDZone discovered")
if "Z" not in self.zones:
self.zones.append("Z")
self.max_volume["Z"] = MAX_VOLUME_ZONEX
def build_source_dict(self):
""" Generate source id<->name translation tables. """
timeouts = 0
if not self._source_name_to_id:
_LOGGER.info("Querying source names on AVR")
for src in range(MAX_SOURCE_NUMBERS):
response = self.send_raw_request(
"?RGB" + str(src).zfill(2),
"RGB",
ignore_error=True,
rate_limit=False,
)
if response is None:
timeouts += 1
_LOGGER.debug(f"Timeout {timeouts} retrieving source {src}")
elif response is not False:
timeouts = 0
source_name = response[6:]
source_active = response[5] == "1"
source_number = str(src).zfill(2)
if source_active:
self._source_name_to_id[source_name] = source_number
self._source_id_to_name[source_number] = source_name
_LOGGER.debug(f"Source name->id: {self._source_name_to_id}")
_LOGGER.debug(f"Source id->name: {self._source_id_to_name}")
if not self._source_name_to_id:
raise RuntimeError("No input sources found on AVR")
def get_source_list(self):
""" Return list of available input sources. """
return list(self._source_name_to_id.keys())
## Callback functions
def set_zone_callback(self, zone, callback):
""" Register a callback for a zone. """
if zone in self.zones:
if callback:
self._zone_callback[zone] = callback
else:
self._zone_callback.pop(zone)
def call_zone_callbacks(self, zones=None):
""" Call callbacks to signal updated zone(s). """
if zones is None:
zones = self.zones
for zone in zones:
if zone in self._zone_callback:
callback = self._zone_callback[zone]
if callback:
_LOGGER.debug(f"Calling callback for zone {zone}")
callback()
def set_update_callback(self, callback):
""" Register a callback to trigger update. """
if callback:
self._update_callback = callback
else:
self._update_callback = None
def call_update_callback(self):
""" Trigger update. """
if self._update_callback:
_LOGGER.debug("Calling update callback")
self._update_callback()
## Update functions
def parse_response(self, response):
""" Parse response and update cached parameters. """
updated_zones = set()
if response.startswith("PWR"):
value = response == "PWR0"
if self.power.get("1") != value:
self.power["1"] = value
updated_zones.add("1")
_LOGGER.info(f"Zone 1: Power: {value}")
if value and self._volume_workaround:
self._response_commands.extend(["VU", "VD"])
elif response.startswith("APR"):
value = response == "APR0"
if self.power.get("2") != value:
self.power["2"] = value
updated_zones.add("2")
_LOGGER.info(f"Zone 2: Power: {value}")
elif response.startswith("BPR"):
value = response == "BPR0"
if self.power.get("3") != value:
self.power["3"] = value
updated_zones.add("3")
_LOGGER.info(f"Zone 3: Power: {value}")
elif response.startswith("ZEP"):
value = response == "ZEP0"
if self.power.get("Z") != value:
self.power["Z"] = value
updated_zones.add("Z")
_LOGGER.info(f"HDZone: Power: {value}")
elif response.startswith("VOL"):
value = int(response[3:])
if self.volume.get("1") != value:
self.volume["1"] = value
updated_zones.add("1")
_LOGGER.info(f"Zone 1: Volume: {value}")
elif response.startswith("ZV"):
value = int(response[2:])
if self.volume.get("2") != value:
self.volume["2"] = value
updated_zones.add("2")
_LOGGER.info(f"Zone 2: Volume: {value}")
elif response.startswith("YV"):
value = int(response[2:])
if self.volume.get("3") != value:
self.volume["3"] = value
updated_zones.add("3")
_LOGGER.info(f"Zone 3: Volume: {value}")
elif response.startswith("XV"):
value = int(response[2:])
if self.volume.get("Z") != value:
self.volume["Z"] = value
updated_zones.add("Z")
_LOGGER.info(f"HDZone: Volume: {value}")
elif response.startswith("MUT"):
value = response == "MUT0"
if self.mute.get("1") != value:
self.mute["1"] = value
updated_zones.add("1")
_LOGGER.info(f"Zone 1: Mute: {value}")
elif response.startswith("Z2MUT"):
value = response == "Z2MUT0"
if self.mute.get("2") != value:
self.mute["2"] = value
updated_zones.add("2")
_LOGGER.info(f"Zone 2: Mute: {value}")
elif response.startswith("Z3MUT"):
value = response == "Z3MUT0"
if self.mute.get("3") != value:
self.mute["3"] = value
updated_zones.add("3")
_LOGGER.info(f"Zone 3: Mute: {value}")
elif response.startswith("HZMUT"):
value = response == "HZMUT0"
if self.mute.get("Z") != value:
self.mute["Z"] = value
updated_zones.add("Z")
_LOGGER.info(f"HDZone: Mute: {value}")
elif response.startswith("FN"):
raw_id = response[2:]
value = self._source_id_to_name.get(raw_id, raw_id)
if self.source.get("1") != value:
self.source["1"] = value
updated_zones.add("1")
_LOGGER.info(f"Zone 1: Source: {value}")
elif response.startswith("Z2F"):
raw_id = response[3:]
value = self._source_id_to_name.get(raw_id, raw_id)
if self.source.get("2") != value:
self.source["2"] = value
updated_zones.add("2")
_LOGGER.info(f"Zone 2: Source: {value}")
elif response.startswith("Z3F"):
raw_id = response[3:]
value = self._source_id_to_name.get(raw_id, raw_id)
if self.source.get("3") != value:
                self.source["3"] = value
updated_zones.add("3")
_LOGGER.info(f"Zone 3: Source: {value}")
elif response.startswith("ZEA"):
raw_id = response[3:]
value = self._source_id_to_name.get(raw_id, raw_id)
if self.source.get("Z") != value:
self.source["Z"] = value
updated_zones.add("Z")
_LOGGER.info(f"HDZone: Source: {value}")
return updated_zones
def update_zone(self, zone):
""" Update an AVR zone. """
## Check for timeouts, but ignore errors (eg. ?V will
## return E02 immediately after power on)
if (
self.send_command("query_power", zone, ignore_error=True) is None
or self.send_command("query_volume", zone, ignore_error=True) is None
or self.send_command("query_mute", zone, ignore_error=True) is None
or self.send_command("query_source_id", zone, ignore_error=True) is None
):
## Timeout occurred, indicates AVR disconnected
raise TimeoutError("Timeout waiting for data")
def update(self):
""" Update AVR cached status. """
if self.available:
now = time.time()
since_updated = now - self._last_updated
full_update = self._full_update
if full_update or since_updated > self._scan_interval:
_LOGGER.debug(
f"Updating AVR status (full={full_update}, last updated {since_updated:.3f}s ago)"
)
self._last_updated = now
self._full_update = False
try:
for zone in self.zones:
self.update_zone(zone)
if full_update:
## Trigger updates to all zones on full update
self.call_zone_callbacks()
return True
except Exception as e: # pylint: disable=invalid-name
_LOGGER.error(f"Could not update AVR status: {e}")
self.telnet_disconnect()
return False
else:
## NOTE: any response from the AVR received within
## scan_interval, including keepalives and responses triggered
## via the remote and by other clients, will cause an update to
## be skipped on the basis that the AVR is alive.
##
## Keepalives may be sent by the AVR (every 30 seconds on the
## VSX-930) when connected to port 8102, but are not sent when
## connected to port 23.
_LOGGER.debug(f"Skipping update: last updated {since_updated:.3f}s ago")
return True
else:
_LOGGER.debug("Skipping update: AVR is unavailable")
return True
## State change functions
def turn_on(self, zone="1"):
""" Turn on the Pioneer AVR. """
return self.send_command("turn_on", zone)
def turn_off(self, zone="1"):
""" Turn off the Pioneer AVR. """
return self.send_command("turn_off", zone)
def select_source(self, source, zone="1"):
""" Select input source. """
source_id = self._source_name_to_id.get(source)
if source_id:
return self.send_command(
"select_source", zone, prefix=source_id, ignore_error=False
)
else:
_LOGGER.error(f"Invalid source {source} for zone {zone}")
return False
def volume_up(self, zone="1"):
""" Volume up media player. """
return self.send_command("volume_up", zone, ignore_error=False)
def volume_down(self, zone="1"):
""" Volume down media player. """
return self.send_command("volume_down", zone, ignore_error=False)
def bounce_volume(self):
"""
        Send volume up then down to work around a Main Zone reporting bug:
        the volume set at power-on is not reported by the AVR until the
        volume is next changed.
"""
if self.volume_up():
return self.volume_down()
else:
return False
def set_volume_level(self, volume, zone="1"):
""" Set volume level (0..185 for Zone 1, 0..81 for other Zones). """
if (
volume < 0
or (zone == "1" and volume > MAX_VOLUME)
or (zone != "1" and volume > MAX_VOLUME_ZONEX)
):
raise ValueError(f"volume {volume} out of range for zone {zone}")
vol_len = 3 if zone == "1" else 2
vol_prefix = str(volume).zfill(vol_len)
return self.send_command(
"set_volume_level", zone, prefix=vol_prefix, ignore_error=False
)
def mute_on(self, zone="1"):
""" Mute AVR. """
return self.send_command("mute_on", zone, ignore_error=False)
def mute_off(self, zone="1"):
""" Unmute AVR. """
return self.send_command("mute_off", zone, ignore_error=False)
logging.basicConfig(level=logging.DEBUG)
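## Hedged usage sketch, not part of the original module: assumes the PioneerAVR
## constructor accepts a host and port (matching the _host/_port/_timeout
## attributes set in __init__); the hostname below is a placeholder.
if __name__ == "__main__":
    avr = PioneerAVR("avr.local", 8102)
    try:
        avr.update()  ## refresh cached power/volume/source state for all zones
        print("Zones discovered:", avr.zones)
        print("Available sources:", avr.get_source_list())
    finally:
        avr.telnet_disconnect(reconnect=False)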
|
#
# ______ ____ ______ ____ _____ ____ _____ __ __
# | ___ \ / ___ \ | ___ \ / ___ \ | ___ \ / ___ \ / _____\ | | / /
# | |__| / | | | | | |__| / | | | | | | | | | | | | | | | |/ /
# | \ | | | | | ___ \ | | | | | | | | | | | | | | | /
# | |\ \ | |__| | | |__| | | |__| | | |__| | | |__| | | |_____ | |\ \
# |__| \__\ \_____/ |_______/ \_____/ |______/ \_____/ \______/ |__| \__\
#
# Written by Mutlu Polatcan
# 21.06.2018
import yaml
class Config:
    def __init__(self, config_filename):
        try:
            with open(config_filename, "r") as config_file:
                self.config = yaml.safe_load(config_file)
        except yaml.YAMLError as err:
            print(err)
    def __check_attr_defined(self, attr_name):
        return attr_name in self.config
def get_int(self, attr_name):
if self.__check_attr_defined(attr_name=attr_name):
return int(self.config[attr_name])
else:
return None
def get_str(self, attr_name):
if self.__check_attr_defined(attr_name=attr_name):
return str(self.config[attr_name])
else:
return None
def get(self, attr_name):
if self.__check_attr_defined(attr_name=attr_name):
return self.config[attr_name]
else:
return None
    def len(self, attr_name):
        if self.__check_attr_defined(attr_name=attr_name):
            return len(self.config[attr_name])
        else:
            return 0
    # Returns the number of nodes for the service defined by attr_name
    def count(self, attr_name):
        if self.__check_attr_defined(attr_name=attr_name):
            return len(self.nodes(attr_name))
        else:
            return 0
    # Returns the configs for the service defined by attr_name
    def configs(self, attr_name):
        service = self.get(attr_name=attr_name)
        if service is not None and service.get("configs") is not None:
            return service["configs"]
        else:
            return []
    def nodes(self, attr_name):
        service = self.get(attr_name=attr_name)
        if service is not None and service.get("nodes") is not None:
            return service["nodes"].split(',')
        else:
            return []
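# Hedged usage sketch, not part of the original module: assumes a hypothetical
# YAML file "cluster.yml" containing, for example:
#   heartbeat_interval: 10
#   zookeeper:
#     nodes: "zk-1,zk-2,zk-3"
#     configs:
#       tickTime: 2000
if __name__ == "__main__":
    config = Config("cluster.yml")
    print(config.get_int("heartbeat_interval"))  # -> 10
    print(config.nodes("zookeeper"))             # -> ['zk-1', 'zk-2', 'zk-3']
    print(config.count("zookeeper"))             # -> 3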
|
import datetime
import io
import os
import re
import subprocess
import warnings
from collections import namedtuple
from decimal import Decimal
from pathlib import Path
import six
from PyPDF2 import PdfFileMerger
from reportlab.pdfgen import canvas
class DoctorUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
UnicodeDecodeError.__init__(self, *args)
def __str__(self):
original = UnicodeDecodeError.__str__(self)
return f"{original}. You passed in {self.obj!r} ({type(self.obj)})"
def force_bytes(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Similar to smart_bytes, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if isinstance(s, bytes):
if encoding == "utf-8":
return s
else:
return s.decode("utf-8", errors).encode(encoding, errors)
if strings_only and is_protected_type(s):
return s
if isinstance(s, six.memoryview):
return bytes(s)
if isinstance(s, Promise):
return six.text_type(s).encode(encoding, errors)
if not isinstance(s, six.string_types):
try:
if six.PY3:
return six.text_type(s).encode(encoding)
else:
return bytes(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return b" ".join(
force_bytes(arg, encoding, strings_only, errors) for arg in s
)
return six.text_type(s).encode(encoding, errors)
else:
return s.encode(encoding, errors)
def force_text(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Similar to smart_text, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
# Handle the common case first for performance reasons.
if issubclass(type(s), six.text_type):
return s
if strings_only and is_protected_type(s):
return s
try:
if not issubclass(type(s), six.string_types):
if six.PY3:
if isinstance(s, bytes):
s = six.text_type(s, encoding, errors)
else:
s = six.text_type(s)
elif hasattr(s, "__unicode__"):
s = six.text_type(s)
else:
s = six.text_type(bytes(s), encoding, errors)
else:
# Note: We use .decode() here, instead of six.text_type(s, encoding,
# errors), so that if s is a SafeBytes, it ends up being a
# SafeText at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError as e:
if not isinstance(s, Exception):
raise DoctorUnicodeDecodeError(s, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = " ".join(force_text(arg, encoding, strings_only, errors) for arg in s)
return s
def smart_text(s, encoding="utf-8", strings_only=False, errors="strict"):
"""
Returns a text object representing 's' -- unicode on Python 2 and str on
Python 3. Treats bytestrings using the 'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_text(s, encoding, strings_only, errors)
class Promise(object):
"""
This is just a base class for the proxy class created in
the closure of the lazy function. It can be used to recognize
promises in code.
"""
pass
_PROTECTED_TYPES = six.integer_types + (
type(None),
float,
Decimal,
datetime.datetime,
datetime.date,
datetime.time,
)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_text(strings_only=True).
"""
return isinstance(obj, _PROTECTED_TYPES)
def audio_encoder(data):
return namedtuple("AudioFile", data.keys())(*data.values())
def ignore_warnings(test_func):
def do_test(self, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore", ResourceWarning)
warnings.simplefilter("ignore", DeprecationWarning)
test_func(self, *args, **kwargs)
return do_test
def make_png_thumbnail_for_instance(filepath, max_dimension):
    """Make a PNG thumbnail of the first page of a PDF
    :param filepath: The path to the PDF on disk
    :param max_dimension: The longest you want any edge to be
    :return: A tuple of (PNG bytes, stderr text, return code as a string)
    """
command = [
"pdftoppm",
"-singlefile",
"-f",
"1",
"-scale-to",
str(max_dimension),
filepath,
"-png",
]
p = subprocess.Popen(
command, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
stdout, stderr = p.communicate()
return stdout, stderr.decode("utf-8"), str(p.returncode)
def make_png_thumbnails(filepath, max_dimension, pages, directory):
    """Make PNG thumbnails of the requested pages of a PDF
    :param filepath: The path to the PDF on disk
    :param max_dimension: The longest you want any edge to be
    :param pages: The page numbers to generate thumbnails for
    :param directory: The directory object whose name is used for the output files
    """
for page in pages:
command = [
"pdftoppm",
"-singlefile",
"-f",
str(page),
"-scale-to",
str(max_dimension),
filepath,
"-png",
f"{directory.name}/thumb-{page}",
]
p = subprocess.Popen(
command, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
p.communicate()
def pdf_bytes_from_image_array(image_list, output_path) -> None:
    """Make a PDF from a list of images and write it to output_path
    :param image_list: List of images
    :type image_list: list
    :param output_path: Path where the generated PDF is written
    :return: None
    """
image_list[0].save(
output_path,
"PDF",
resolution=100.0,
save_all=True,
append_images=image_list[1:],
)
del image_list
def strip_metadata_from_path(file_path):
    """Read a PDF file and remove the metadata from it
    Stripping the metadata allows us to hash the PDFs
    :param file_path: Path to the PDF file on disk
    :return: PDF bytes with metadata removed.
    """
with open(file_path, "rb") as f:
pdf_merger = PdfFileMerger()
pdf_merger.append(io.BytesIO(f.read()))
pdf_merger.addMetadata({"/CreationDate": "", "/ModDate": ""})
byte_writer = io.BytesIO()
pdf_merger.write(byte_writer)
return force_bytes(byte_writer.getvalue())
def strip_metadata_from_bytes(pdf_bytes):
"""Convert PDF bytes into PDF and remove metadata from it
Stripping the metadata allows us to hash the PDFs
:param pdf_bytes: PDF as binary content
:return: PDF bytes with metadata removed.
"""
pdf_merger = PdfFileMerger()
pdf_merger.append(io.BytesIO(pdf_bytes))
pdf_merger.addMetadata({"/CreationDate": "", "/ModDate": ""})
byte_writer = io.BytesIO()
pdf_merger.write(byte_writer)
return force_bytes(byte_writer.getvalue())
def cleanup_form(form):
"""Clean up a form object"""
os.remove(form.cleaned_data["fp"])
def make_file(filename, dir=None):
filepath = f"{Path.cwd()}/doctor/test_assets/{filename}"
with open(filepath, "rb") as f:
return {"file": (filename, f.read())}
def make_buffer(filename, dir=None):
filepath = f"{Path.cwd()}/doctor/test_assets/{filename}"
with open(filepath, "rb") as f:
return {"file": ("filename", f.read())}
def pdf_has_images(path: str) -> bool:
"""Check raw PDF for embedded images.
We need to check if a PDF contains any images. If a PDF contains images it
likely has content that needs to be scanned.
:param path: Location of PDF to process.
:return: Does the PDF contain images?
:type: bool
"""
with open(path, "rb") as pdf_file:
pdf_bytes = pdf_file.read()
        return bool(re.search(rb"/Image ?/", pdf_bytes))
def ocr_needed(path: str, content: str) -> bool:
"""Check if OCR is needed on a PDF
Check if images are in PDF or content is empty.
:param path: The path to the PDF
:param content: The content extracted from the PDF.
:return: Whether OCR should be run on the document.
"""
if content.strip() == "" or pdf_has_images(path):
return True
return False
def make_page_with_text(page, data, h, w):
"""Make a page with text
:param page:
:param data:
:param h:
:param w:
:return:
"""
packet = io.BytesIO()
can = canvas.Canvas(packet, pagesize=(w, h))
# Set to a standard size and font for now.
can.setFont("Helvetica", 9)
# Make the text transparent
can.setFillAlpha(0)
for i in range(len(data["level"])):
try:
letter, (x, y, ww, hh), pg = (
data["text"][i],
(data["left"][i], data["top"][i], data["width"][i], data["height"][i]),
data["page_num"][i],
)
        except Exception:  # skip malformed OCR rows rather than failing the page
            continue
# Adjust the text to an 8.5 by 11 inch page
sub = ((11 * 72) / h) * int(hh)
x = ((8.5 * 72) / w) * int(x)
y = ((11 * 72) / h) * int(y)
yy = (11 * 72) - y
if int(page) == int(pg):
can.drawString(x, yy - sub, letter)
can.showPage()
can.save()
packet.seek(0)
return packet
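# Hedged usage sketch, not part of the original module: round-trips text through
# the encoding helpers defined above.
if __name__ == "__main__":
    raw = force_bytes("café")  # -> b'caf\xc3\xa9'
    print(force_text(raw))  # -> café
    print(smart_text(b"r\xc3\xa9sum\xc3\xa9"))  # -> résumé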
|
if not (1121 + 100 < 1920 and 1010 + 80 < 1080):
    print("aa")
|
from django.db import models
from django.utils.encoding import smart_text
from django.utils.encoding import python_2_unicode_compatible
from django.urls import reverse
from django.utils.text import slugify
from redactor.fields import RedactorField
@python_2_unicode_compatible
class Topic(models.Model):
title = models.CharField(max_length=255)
content = RedactorField()
summary = models.TextField(null=True, blank=True)
class Meta:
ordering = ('id',)
def __str__(self):
return smart_text(self.title)
def get_absolute_url(self):
return reverse('topic-detail', args=[self.id, slugify(self.title)])
|
import re
import requests
class InvalidIDError(Exception):
pass
class OrgIDGuide():
def __init__(self):
self._request_cache = {}
self._org_id_re = re.compile(r'([^-]+-[^-]+)-(.+)')
self._dac_channel_code_re = re.compile(r'\d{5}')
self._dac_donor_code_re = re.compile(r'([A-Z]{2})-(\d+)')
def _cache(var_name):
def __cache(fn):
def ___cache(self):
cached = self._request_cache.get(var_name)
if cached:
return cached
data = fn(self)
self._request_cache[var_name] = data
return data
return ___cache
return __cache
@property
@_cache('org_id_guide')
def _org_id_guide(self):
org_id_guide_url = 'http://org-id.guide/download.json'
org_id_guide_data = requests.get(org_id_guide_url).json()['lists']
return {x['code']: x for x in org_id_guide_data}
@property
@_cache('dac_channel_codes')
def _dac_channel_codes(self):
dac_channels_url = 'https://datahub.io/core/' + \
'dac-and-crs-code-lists/r/channel-codes.json'
dac_channels_data = requests.get(dac_channels_url).json()
return {x['code']: x for x in dac_channels_data}
@property
@_cache('dac_donor_codes')
def _dac_donor_codes(self):
dac_donors_url = 'https://datahub.io/core/' + \
'dac-and-crs-code-lists/r/dac-members.json'
dac_donors_data = requests.get(dac_donors_url).json()
return {x['name_en'].upper(): x for x in dac_donors_data}
@property
@_cache('country_codes')
def _country_codes(self):
country_codes_url = 'https://datahub.io/core/' + \
'country-codes/r/country-codes.json'
country_codes_url_data = requests.get(country_codes_url).json()
return {x['ISO3166-1-Alpha-2']: x for x in country_codes_url_data}
@property
@_cache('xi_iati_codes')
def _xi_iati_codes(self):
xi_iati_url = 'http://iatistandard.org/202/codelists/downloads/' + \
'clv2/json/en/IATIOrganisationIdentifier.json'
xi_iati_data = requests.get(xi_iati_url).json()['data']
return {x['code']: x for x in xi_iati_data}
@property
@_cache('org_types')
def _org_types(self):
org_type_url = 'http://iatistandard.org/202/codelists/downloads/' + \
'clv2/json/en/OrganisationType.json'
org_type_data = requests.get(org_type_url).json()['data']
return {x['code']: x['name'] for x in org_type_data}
def lookup_prefix(self, prefix):
return self._org_id_guide.get(prefix)
def is_valid_prefix(self, prefix):
return self.lookup_prefix(prefix) is not None
def split_id(self, org_id):
match = self._org_id_re.match(org_id)
if not match:
raise InvalidIDError()
return match.groups()
def is_valid_id(self, org_id):
try:
pref, suf = self.split_id(org_id)
except InvalidIDError:
return False
return self.is_valid_prefix(pref)
def get_suggested_id(self, org_id):
if self.is_valid_id(org_id):
return org_id
try:
# looks a bit like an org ID.
# Try uppercasing
pref, suf = self.split_id(org_id)
if self.is_valid_prefix(pref.upper()):
return '{pref}-{suf}'.format(pref=pref.upper(), suf=suf)
except InvalidIDError:
pass
if self._dac_channel_code_re.match(org_id):
# looks like a channel code
if self._dac_channel_codes.get(org_id):
return 'XM-DAC-{}'.format(org_id)
dac_donor_code_match = self._dac_donor_code_re.match(org_id)
if dac_donor_code_match:
# looks like a donor code
country_code, agency_code = dac_donor_code_match.groups()
country = self._country_codes.get(country_code)
if country:
country_name = country['official_name_en'].upper()
dac_donor = self._dac_donor_codes.get(country_name)
if dac_donor:
return 'XM-DAC-{country}-{agency}'.format(
country=dac_donor['code'],
agency=agency_code,
)
xi_iati_org_id = 'XI-IATI-{}'.format(org_id)
if self._xi_iati_codes.get(xi_iati_org_id):
# I mean this is pretty rare
return xi_iati_org_id
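# Hedged usage sketch, not part of the original module: these calls download the
# public code lists over HTTP, so network access is required, and the example
# identifiers below are illustrative only.
if __name__ == "__main__":
    guide = OrgIDGuide()
    print(guide.is_valid_id("GB-COH-12345678"))  # True if the GB-COH prefix is listed
    print(guide.get_suggested_id("gb-coh-12345678"))  # suggests the uppercased prefix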
|
from pytest import fixture
from app import factory
@fixture(scope="function")
def test_app():
app = factory.create_app("testing")
with app.app_context():
yield app
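# Hedged usage sketch, not part of the original module: a minimal test that
# consumes the function-scoped fixture; it assumes factory.create_app returns a
# Flask app, which is implied by the app_context() call above.
def test_fixture_pushes_app_context(test_app):
    from flask import current_app
    assert current_app.name == test_app.name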
|
# Generated by Django 3.2a1 on 2021-03-08 19:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('geodata', '0004_auto_20210302_2017'),
]
operations = [
migrations.AlterField(
model_name='checksumfile',
name='name',
field=models.CharField(blank=True, max_length=1000),
),
migrations.AlterField(
model_name='fmventry',
name='name',
field=models.CharField(max_length=1000),
),
migrations.AlterField(
model_name='geometryentry',
name='name',
field=models.CharField(blank=True, max_length=1000),
),
migrations.AlterField(
model_name='imageentry',
name='name',
field=models.CharField(blank=True, max_length=1000),
),
migrations.AlterField(
model_name='imageset',
name='name',
field=models.CharField(blank=True, max_length=1000),
),
migrations.AlterField(
model_name='kwcocoarchive',
name='name',
field=models.CharField(blank=True, max_length=1000),
),
migrations.AlterField(
model_name='rasterentry',
name='name',
field=models.CharField(blank=True, max_length=1000),
),
]
|
import math
class Solution:
def isValidBST(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
def traverse(node, low, high):
if not node:
return True
if node.val <= low or node.val >= high:
return False
return traverse(node.left, low, node.val) and traverse(node.right, node.val, high)
return traverse(root, -math.inf, math.inf)
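# Hedged test sketch, not part of the original solution: assumes the usual
# LeetCode-style TreeNode with val/left/right attributes.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
if __name__ == "__main__":
    print(Solution().isValidBST(TreeNode(2, TreeNode(1), TreeNode(3))))  # -> True
    print(Solution().isValidBST(TreeNode(5, TreeNode(1), TreeNode(4, TreeNode(3), TreeNode(6)))))  # -> False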
|
class ShapesList:
def __init__(self):
self.shapes = []
print("In init")
def add(self, s):
print(s)
self.shapes.append(s)
    def areas(self):
        return [shape.area() for shape in self.shapes]
class Triangle:
def __init__(self, b, h):
self.base = b
self.height = h
def area(self):
return self.base * self.height / 2.0
class Rectangle:
    def __init__(self, l, w):
        self.length = l
        self.width = w
    def area(self):
        return self.length * self.width
class Circle:
def __init__(self, r):
self.radius = r
def area(self):
return 3.14 * self.radius * self.radius
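# Hedged usage sketch, not part of the original module.
if __name__ == "__main__":
    shapes = ShapesList()
    shapes.add(Triangle(3, 4))   # area 6.0
    shapes.add(Rectangle(2, 5))  # area 10 with the corrected formula above
    shapes.add(Circle(1))        # area 3.14
    print(shapes.areas())        # -> [6.0, 10, 3.14]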
|
import os
import numpy as np
import tensorflow as tf
import math
from PIL import Image
#import pdb
F = tf.app.flags.FLAGS
"""
Save tensorflow model
Parameters:
* checkpoint_dir - name of the directory where model is to be saved
* sess - current tensorflow session
* saver - tensorflow saver
"""
def save_model(checkpoint_dir, sess, saver):
model_name = "model.ckpt"
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver.save(sess, os.path.join(checkpoint_dir, model_name))
"""
Load tensorflow model
Parameters:
* checkpoint_dir - name of the directory where model is to be loaded from
* sess - current tensorflow session
* saver - tensorflow saver
Returns: True if the model loaded successfully, else False
"""
def load_model(checkpoint_dir, sess, saver):
print(" [*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
return True
else:
return False
"""
To recompose an array of 3D images from patches
"""
def recompose3D_overlap(preds, img_h, img_w, img_d, stride_h, stride_w, stride_d):
patch_h = preds.shape[1]
patch_w = preds.shape[2]
patch_d = preds.shape[3]
N_patches_h = (img_h-patch_h)//stride_h+1
N_patches_w = (img_w-patch_w)//stride_w+1
N_patches_d = (img_d-patch_d)//stride_d+1
N_patches_img = N_patches_h * N_patches_w * N_patches_d
    print("N_patches_h:", N_patches_h)
    print("N_patches_w:", N_patches_w)
    print("N_patches_d:", N_patches_d)
    print("N_patches_img:", N_patches_img)
assert(preds.shape[0]%N_patches_img==0)
N_full_imgs = preds.shape[0]//N_patches_img
print("According to the dimension inserted, there are " \
+str(N_full_imgs) +" full images (of " +str(img_h)+"x" +str(img_w)+"x" +str(img_d) +" each)")
    # initialize to zero the mega arrays holding the summed probabilities
    raw_pred_matrix = np.zeros((N_full_imgs,img_h,img_w,img_d))
raw_sum = np.zeros((N_full_imgs,img_h,img_w,img_d))
final_matrix = np.zeros((N_full_imgs,img_h,img_w,img_d),dtype='uint16')
k = 0
# iterator over all the patches
for i in range(N_full_imgs):
for h in range((img_h-patch_h)//stride_h+1):
for w in range((img_w-patch_w)//stride_w+1):
for d in range((img_d-patch_d)//stride_d+1):
                    raw_pred_matrix[i,h*stride_h:(h*stride_h)+patch_h,\
w*stride_w:(w*stride_w)+patch_w,\
d*stride_d:(d*stride_d)+patch_d]+=preds[k]
raw_sum[i,h*stride_h:(h*stride_h)+patch_h,\
w*stride_w:(w*stride_w)+patch_w,\
d*stride_d:(d*stride_d)+patch_d]+=1.0
k+=1
assert(k==preds.shape[0])
#To check for non zero sum matrix
assert(np.min(raw_sum)>=1.0)
    final_matrix = np.around(raw_pred_matrix/raw_sum)
return final_matrix
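# Hedged usage sketch, not part of the original module: recomposes dummy patch
# predictions for a single 64x64x64 volume cut into eight 32x32x32 patches
# (stride 32, no overlap).
if __name__ == "__main__":
    dummy_preds = np.ones((8, 32, 32, 32))
    volume = recompose3D_overlap(dummy_preds, 64, 64, 64, 32, 32, 32)
    print(volume.shape)  # -> (1, 64, 64, 64)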
# Functions below were added by liuhuaqing 2019-07-15
def make_grid(tensor, nrow=8, padding=2,
normalize=False, scale_each=False):
"""Code based on https://github.com/pytorch/vision/blob/master/torchvision/utils.py"""
nmaps = tensor.shape[0]
xmaps = min(nrow, nmaps)
ymaps = int(math.ceil(float(nmaps) / xmaps))
height, width = int(tensor.shape[1] + padding), int(tensor.shape[2] + padding)
grid = np.zeros([height * ymaps + 1 + padding // 2, width * xmaps + 1 + padding // 2], dtype=np.uint8)
k = 0
for y in range(ymaps):
for x in range(xmaps):
if k >= nmaps:
break
h, h_width = y * height + 1 + padding // 2, height - padding
w, w_width = x * width + 1 + padding // 2, width - padding
grid[h:h+h_width, w:w+w_width] = tensor[k]
k = k + 1
return grid
def save_image(tensor, filename, nrow=8, padding=2,
normalize=False, scale_each=False):
ndarr = make_grid(tensor, nrow=nrow, padding=padding,
normalize=normalize, scale_each=scale_each)
im = Image.fromarray(ndarr)
im.save(filename)
# Definition and computation of semantic segmentation accuracy metrics, see: https://blog.csdn.net/majinlei121/article/details/78965435
def fast_hist(a, b, n):
    k = (a >= 0) & (a < n)  # normally all entries are True
    return np.bincount(n * a[k].astype(int) + b[k], minlength=n**2).reshape(n, n)  # np.bincount counts how many times each value occurs in the flattened index array
def Hist(a,b,n):
hist = fast_hist(a,b,n)
return hist
def pixelAccuracy(trueMask,predMask,n_cls):
hist = Hist(trueMask,predMask,n_cls)
PA = np.diag(hist).sum() / hist.sum()
return PA
def MeanPixelAccuracy(trueMask,predMask,n_cls):
#epsilon = 1
hist = Hist(trueMask,predMask,n_cls)
PAs = np.diag(hist) / hist.sum(1)
return PAs
def IntersectionoverUnion(trueMask,predMask,n_cls):
#epsilon = 1
hist = Hist(trueMask,predMask,n_cls)
IoUs = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
return IoUs
def DiceScore(trueMask,predMask,n_cls):
# epsilon = 1
hist = Hist(trueMask,predMask,n_cls)
    correct_pred = np.diag(hist)  # number of correctly predicted pixels per class
    pred_classes = np.sum(hist,0)  # number of pixels predicted as each class
    true_classes = np.sum(hist,1)  # true number of pixels in each class
DSs = 2*correct_pred/(pred_classes+true_classes)
    return DSs
|
from unittest import TestCase
import numpy.testing as npt
from itertools import zip_longest
from distancematrix.consumer.contextmanager import GeneralStaticManager
class TestGeneralStaticManager(TestCase):
def test_does_not_return_empty_contexts(self):
r = [range(1, 5), range(0, 0), range(5, 10)]
m = GeneralStaticManager(r)
_assert_equal_iteration(m.series_contexts(0, 1), [])
_assert_equal_iteration(m.series_contexts(0, 4), [(1, 5, 0)])
_assert_equal_iteration(m.series_contexts(0, 8), [(1, 5, 0), (5, 10, 2)])
_assert_equal_iteration(m.series_contexts(0, 12), [(1, 5, 0), (5, 10, 2)])
_assert_equal_iteration(m.series_contexts(5, 12), [(5, 10, 2)])
_assert_equal_iteration(m.query_contexts(0, 1), [])
_assert_equal_iteration(m.query_contexts(0, 4), [(1, 5, 0)])
_assert_equal_iteration(m.query_contexts(0, 8), [(1, 5, 0), (5, 10, 2)])
_assert_equal_iteration(m.query_contexts(0, 12), [(1, 5, 0), (5, 10, 2)])
_assert_equal_iteration(m.query_contexts(5, 12), [(5, 10, 2)])
def _assert_equal_iteration(actual, expected, msg=''):
"""
    Assert helper similar to TestCase.assertSequenceEqual, except that it also handles elements that are
    2D numpy arrays by comparing them with numpy.testing.
"""
sentinel = object()
for actual_value, expected_value in zip_longest(actual, expected, fillvalue=sentinel):
if sentinel is actual_value:
raise AssertionError("Actual iterator is shorter, does not include " + str(expected_value))
if sentinel is expected_value:
raise AssertionError("Actual iterator is longer, contained " + str(actual_value))
npt.assert_equal(actual_value, expected_value, msg)
|