max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
tests/test_langs_fr.py | honzajavorek/tipi | 3 | 5600 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from tipi import tipi as _tipi
tipi = lambda s: _tipi(s, lang='fr')
def test_double_quotes():
assert tipi('''"brutal" "quote's"''') == (
'''«brutal» «quote's»'''
)
def test_single_quotes():
assert tipi("""'brutal' 'quote's'""") == (
"""‹brutal› ‹quote's›"""
)
| 2.546875 | 3 |
vendor/models.py | brethauer/mirage | 8 | 5601 | <gh_stars>1-10
from django.db import models
VEHICLE_CHOICES = (
('OASISSB', 'OASIS Small Business'),
('OASIS', 'OASIS Unrestricted')
)
STATUS_CHOICES = (
('P', 'In Progress'),
('C', 'Completed'),
('F', 'Cancelled')
)
class Vendor(models.Model):
name = models.CharField(max_length=128)
duns = models.CharField(max_length=9, unique=True)
duns_4 = models.CharField(max_length=13, unique=True)
cage = models.CharField(max_length=15, null=True)
sam_address = models.CharField(null=True, max_length=128)
sam_citystate = models.CharField(null=True, max_length=128)
cm_name = models.CharField(null=True, max_length=128)
cm_email = models.CharField(null=True, max_length=128)
cm_phone = models.CharField(null=True, max_length=128)
pm_name = models.CharField(null=True, max_length=128)
pm_email = models.CharField(null=True, max_length=128)
pm_phone = models.CharField(null=True, max_length=128)
pools = models.ManyToManyField('Pool', through='PoolPIID')
setasides = models.ManyToManyField('SetAside', null=True, blank=True)
sam_status = models.CharField(null=True, max_length=128)
sam_activation_date = models.DateTimeField(null=True)
sam_expiration_date = models.DateTimeField(null=True)
sam_exclusion = models.NullBooleanField(null=True)
sam_url = models.URLField(null=True)
annual_revenue = models.BigIntegerField(null=True)
number_of_employees = models.IntegerField(null=True)
def __str__(self):
return self.name
class Pool(models.Model):
id = models.CharField(primary_key=True, max_length=128)
name = models.CharField(max_length=128, default='Pool')
number = models.CharField(max_length=128)
vehicle = models.CharField(choices=VEHICLE_CHOICES, max_length=7)
naics = models.ManyToManyField('Naics')
threshold = models.CharField(null=True, max_length=128)
def __str__(self):
return "Pool {0} - {1}".format(self.number, self.get_vehicle_display())
class PoolPIID(models.Model):
vendor = models.ForeignKey('Vendor')
pool = models.ForeignKey('Pool')
piid = models.CharField(max_length=128)
def __str__(self):
return "{0} - {1} - {2}".format(self.vendor.name, self.pool.id, self.piid)
class SetAside(models.Model):
code = models.CharField(unique=True, max_length=128)
short_name = models.CharField(max_length=128)
abbreviation = models.CharField(max_length=10, null=True)
far_order = models.IntegerField(null=True)
def __str__(self):
return self.short_name
class Naics(models.Model):
code = models.CharField(max_length=128)
description = models.TextField()
short_code = models.CharField(unique=True, max_length=25)
def __str__(self):
return "{0} - {1}".format(self.code, self.description)
class SamLoad(models.Model):
sam_load = models.DateField()
| 2.125 | 2 |
two_qubit_simulator/circuits.py | L-McCormack/two-qubit-simulator | 0 | 5602 | """
Contains the QuantumCircuit class.
"""
class QuantumCircuit(object): # pylint: disable=useless-object-inheritance
""" Implements a quantum circuit.
- - - WRITE DOCUMENTATION HERE - - -
"""
def __init__(self):
""" Initialise a QuantumCircuit object """
pass
def add_gate(self, gate):
""" Add a gate to the circuit """
pass
def run_circuit(self, register):
""" Run the circuit on a given quantum register """
pass
def __call__(self, register):
""" Run the circuit on a given quantum register """
pass
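# Hedged usage sketch of the intended API (illustrative only -- the methods above are
# still unimplemented stubs, and `some_gate` / `register` are placeholder objects):
#
#   circuit = QuantumCircuit()
#   circuit.add_gate(some_gate)
#   circuit.run_circuit(register)   # or equivalently: circuit(register)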
| 2.53125 | 3 |
examples/bathymetricGradient.py | usgs/water-datapreptools | 2 | 5603 | import sys
sys.path.append("..") # change environment to see tools
from make_hydrodem import bathymetricGradient
workspace = r"" # path to geodatabase to use as a workspace
snapGrid = r"" # path to snapping grid
hucPoly = r"" # path to local folder polygon
hydrographyArea = r"" # path to NHD area feature class
hydrographyFlowline = r"" # path to NHD flowline feature class
hydrographyWaterbody = r"" # path to NHD water body feature class
cellsize = '' # cell size
bathymetricGradient(workspace, snapGrid, hucPoly, hydrographyArea,
hydrographyFlowline, hydrographyWaterbody,cellsize) | 2.15625 | 2 |
out/flowContext.py | hxb1997/Menge | 0 | 5604 | <filename>out/flowContext.py
# This is the OpenGL context for drawing flow calculation lines
from Context import *
from primitives import Vector2, Segment
from OpenGL.GL import *
from copy import deepcopy
class GLFlowSegment( Segment ):
'''The OpenGL representation of a flow line. Basically a segment
with a direction indicator. The direction indicator shows which
way flow is expected to cross the line. The flow direction is to
the RIGHT of the segment. The forward direction is the direction
from p1 to p2.'''
def __init__( self, p1, p2 ):
'''Constructor.
@param p1 An instance of Vector2. The start point of the segment.
@param p2 An instance of Vector2. The end point of the segment.
'''
Segment.__init__( self, p1, p2 )
def __str__( self ):
return "GLFlowSegment (%s, %s)" % ( self.p1, self.p2 )
def __repr__( self ):
return str( self )
def drawGL( self, color=(0.1, 1.0, 0.1) ):
'''Draw the flow segment into a GL context.
@param color A 3-tuple of floats. The color of the line.
All values should lie in the range [0, 1], to be
interpreted as r, g, b color values.
'''
glPushAttrib( GL_COLOR_BUFFER_BIT )
glBegin( GL_LINES )
glColor3fv( color )
glVertex2f( self.p1.x, self.p1.y )
glVertex2f( self.p2.x, self.p2.y )
mp = self.midPoint()
l = self.magnitude()
n = self.normal() * (0.25 * l )
end = mp + n
glVertex2f( mp.x, mp.y )
glVertex2f( end.x, end.y )
glEnd()
glPopAttrib()
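# Hedged usage sketch (not part of the original file): constructing and drawing a
# GLFlowSegment. Coordinates are arbitrary and an active OpenGL context is assumed.
#
#   seg = GLFlowSegment(Vector2(0.0, 0.0), Vector2(2.0, 0.0))
#   seg.drawGL()                   # default green, as used for plain lines below
#   seg.drawGL((1.0, 1.0, 0.1))    # yellow, the color FlowLineContext uses for the active line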
class FlowLineContext( BaseContext ):
'''Context for drawing, creating and editing lines'''
MIN_LINE_LENGTH = 2 # the minimum drag required to draw a line
# edit state - used for knowing what to do with the active line and cancellation
NO_EDIT = 0
EDIT = 1
ADD = 2
def __init__( self, cancelCB=None, editCB=None ):
'''Constructor.
@param cancelCB A callable. An optional callback object
for when flow line drawing is canceled.
@param editCB A callable. An optional callback object
for when a flow line values are edited.
'''
BaseContext.__init__( self )
self.lines = []
self.names = []
self.activeID = -1 # the line currently affected by modifications
self.editState = self.NO_EDIT
self.cancelCB = cancelCB
self.editCB = editCB
self.activeLine = None
self.canDraw = False
self.dragging = False
self.downPos = None
def copy( self, context ):
'''Copy the state of the given FlowLineContext into this'''
assert( isinstance( context, FlowLineContext ) )
self.clear()
self.names = [ a for a in context.names ]
self.lines = deepcopy( context.lines )
def clear( self ):
'''Clears out all of the lines'''
self.lines = []
self.names = []
self.activeID = -1
self.editState = self.NO_EDIT
self.activeLine = None
self.canDraw = False
self.dragging = False
self.downPos = None
def lineCount( self ):
return len( self.lines )
def getName( self, id ):
'''Returns the name associated with the line index, id.
@param id An integer. The index into the stored set of lines.
@return A string. The stored name.
'''
return self.names[ id ]
def getLine( self, id ):
'''Returns the line associated with the line index, id.
@param id An integer. The index into the stored set of lines.
@return An instance of a FlowLine.
'''
return self.lines[ id ]
def addLine( self ):
'''Causes the context to go into new line mode. Returns the new name.'''
self.canDraw = True
self.editState = self.ADD
self.activeID = -1
self.names.append( 'Line %d' % len( self.names ) )
self.lines.append( GLFlowSegment( Vector2(0, 0), Vector2(0, 0) ) )
self.activeLine = self.lines[-1]
return self.names[-1]
def editLine( self, idx ):
'''Edits the indicated line'''
if ( self.editState == self.ADD): return
if ( idx < 0 ):
self.editState = self.NO_EDIT
self.canDraw = False
self.activeID = -1
else:
self.editState = self.EDIT
self.canDraw = True
self.activeID = idx
def setLineName( self, idx, name ):
'''Sets the name for the line with the given index'''
self.names[ idx ] = name
def deleteLine( self, idx ):
'''Removes a line from the set'''
assert( idx >= 0 and idx < len( self.lines ) )
self.lines.pop( idx )
self.names.pop( idx )
self.activeID = -1
def flipLine( self, idx ):
'''Flips the direction of the line in the set'''
assert( idx >= 0 and idx < len( self.lines ) )
self.lines[ idx ].flip()
def setActive( self, idx ):
'''Sets the active line'''
self.activeID = idx
def stopEdit( self ):
'''Stops the ability to edit'''
self.editState = self.NO_EDIT
self.canDraw = False
def getLineCount( self ):
"""Returns the number of defined lines"""
return len( self.lines )
def setMultiLines( self, names, lines ):
'''Sets the lines in the context with the given names and lines.
It is asserted that len( names ) == len( lines ).
@param names A list of strings. One name per line.
@param lines A list of Segment instances. One line per name.
'''
self.lines = map( lambda x: GLFlowSegment( x.p1, x.p2 ), lines )
self.names = names
self.activeID = -1
self.editState = self.NO_EDIT
def handleMouse ( self, evt, view ):
"""Detects click, drag, release and creates a line"""
result = ContextResult()
try:
event = self.canonicalEvent( evt )
except ValueError as e:
return result
if ( not self.canDraw ):
return result
if ( event.noModifiers() ):
btn = event.button
eX = event.x
eY = event.y
if ( event.type == MouseEvent.DOWN ): #QtCore.QEvent.MouseButtonPress ):
if ( btn == MouseEvent.LEFT ):
self.downPos = Vector2( eX, eY )
x, y = view.screenToWorld( ( eX, eY ) )
p1 = Vector2( x, y )
self.activeLine = GLFlowSegment( p1, p1 )
result.set( True, True, False )
self.dragging = True
self.notifyEdit( self.activeLine )
elif ( btn == MouseEvent.RIGHT and self.dragging ):
# cancel the edit
if ( self.editState == self.ADD ):
self.editState = self.NO_EDIT
self.lines.pop(-1)
self.names.pop(-1)
if ( not self.cancelCB is None ):
self.cancelCB()
self.notifyEdit( None )
canceled = self.activeLine != None
self.activeLine = None
self.dragging = False
result.set( canceled, canceled, False )
elif ( event.type == MouseEvent.UP ):
if ( btn == MouseEvent.LEFT and self.dragging ):
endPos = Vector2( eX, eY )
if ( (endPos - self.downPos).magnitude() >= self.MIN_LINE_LENGTH ):
if ( self.editState == self.ADD ):
self.activeID = len( self.lines ) - 1
self.lines[self.activeID] = self.activeLine
self.editState = self.EDIT
self.notifyEdit( self.activeLine )
elif ( self.editState == self.EDIT ):
assert( self.activeID > -1 )
self.lines[ self.activeID ] = self.activeLine
self.notifyEdit( self.activeLine )
self.activeLine = None
self.activeLine = None
self.dragging = False
result.set( True, True, False )
elif ( event.type == MouseEvent.MOVE ):
if ( self.dragging ):
x, y = view.screenToWorld( ( eX, eY ) )
p2 = Vector2( x, y )
self.activeLine.p2 = p2
result.set( True, True, False )
self.notifyEdit( self.activeLine )
return result
def notifyEdit( self, line ):
'''Notifies call back of a line that has changed'''
if ( not self.editCB is None ):
self.editCB( line )
def drawGL( self ):
'''Basic lines are drawn in default (green), the active line is drawn in yellow,
and when it is being edited, the original disappears and the new line is drawn in
cyan.'''
if ( self.activeLine ):
self.activeLine.drawGL( ( 0.1, 1.0, 1.0 ) )
elif ( self.activeID > -1 and self.editState != self.ADD ):
self.lines[ self.activeID ].drawGL( ( 1.0, 1.0, 0.1 ) )
for i, line in enumerate( self.lines ):
if ( i == self.activeID ): continue
line.drawGL()
| 3.484375 | 3 |
instascrape/collectors/__init__.py | Paola351/instascrape | 1 | 5605 | <reponame>Paola351/instascrape
from .interval_collectors import *
| 1 | 1 |
Codes/gracekoo/test.py | ghoslation/algorithm | 256 | 5606 | # -*- coding: utf-8 -*-
# @Time: 2020/11/8 23:47
# @Author: GraceKoo
# @File: test.py
# @Desc:
from threading import Thread
import time
def print_numbers():
time.sleep(0.2)
print("子线程结束")
if __name__ == "__main__":
t1 = Thread(target=print_numbers)
t1.setDaemon(True)
t1.start()
# print("主线程结束")
| 2.828125 | 3 |
src/_main_/settings.py | gregory-chekler/api | 0 | 5607 | """
Django settings for massenergize_portal_backend project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import firebase_admin
from firebase_admin import credentials
from .utils.utils import load_json
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# ******** LOAD CONFIG DATA ***********#
IS_PROD = False
path_to_config = '/_main_/config/massenergizeProdConfig.json' if IS_PROD else '/_main_/config/massenergizeProjectConfig.json'
CONFIG_DATA = load_json(BASE_DIR + path_to_config)
os.environ.update(CONFIG_DATA)
# ******** END LOAD CONFIG DATA ***********#
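# Hedged sketch of the expected JSON config shape (keys inferred from the
# CONFIG_DATA / os.environ.get usages in this file; all values are placeholders):
#
#   {
#     "SECRET_KEY": "...",
#     "AWS_ACCESS_KEY_ID": "...", "AWS_SECRET_ACCESS_KEY": "...",
#     "AWS_STORAGE_BUCKET_NAME": "...",
#     "DATABASE_ENGINE": "django.db.backends.postgresql",
#     "DATABASE_NAME": "...", "DATABASE_USER": "...", "DATABASE_PASSWORD": "...",
#     "DATABASE_HOST": "...", "DATABASE_PORT": "5432",
#     "EMAIL": "...", "EMAIL_PASSWORD": "..."
#   }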
SECRET_KEY = CONFIG_DATA["SECRET_KEY"]
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'localhost',
'127.0.0.1',
'api.massenergize.org',
'apis.massenergize.org',
'api.massenergize.com',
'apis.massenergize.com',
'api-prod.massenergize.org',
'api.prod.massenergize.org',
'api-dev.massenergize.org',
'api.dev.massenergize.org',
'massenergize-api.wpdvzstek2.us-east-2.elasticbeanstalk.com'
]
INSTALLED_APPS = [
'authentication',
'carbon_calculator',
'database',
'api',
'website',
'corsheaders',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
#custom middlewares
'authentication.middleware.MassenergizeJWTAuthMiddleware'
]
#-------- FILE STORAGE CONFIGURATION ---------------------#
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
STATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
#-------- END FILE STORAGE CONFIGURATION ---------------------#
#-------- AWS CONFIGURATION ---------------------#
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = os.environ.get('AWS_STORAGE_BUCKET_NAME')
AWS_S3_SIGNATURE_VERSION = os.environ.get('AWS_S3_SIGNATURE_VERSION')
AWS_S3_REGION_NAME = os.environ.get('AWS_S3_REGION_NAME')
AWS_DEFAULT_ACL = None
#--------END AWS CONFIGURATION ---------------------#
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
DATA_UPLOAD_MAX_MEMORY_SIZE = 2621440*3
ROOT_URLCONF = '_main_.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '_main_.wsgi.application'
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'remote-default': {
'ENGINE' : os.environ.get('DATABASE_ENGINE'),
'NAME' : os.environ.get('DATABASE_NAME'),
'USER' : os.environ.get('DATABASE_USER'),
'PASSWORD' : os.environ.get('DATABASE_PASSWORD'),
'HOST' : os.environ.get('DATABASE_HOST'),
'PORT' : os.environ.get('DATABASE_PORT')
},
'default': {
'ENGINE' : os.environ.get('DATABASE_ENGINE'),
'NAME' : 'gchekler21',
'USER' : '',
'PASSWORD' : '',
'HOST' : 'localhost',
'PORT' : '5555'
},
}
firebase_service_account_path = '/_main_/config/massenergizeProdFirebaseServiceAccount.json' if IS_PROD else '/_main_/config/massenergizeFirebaseServiceAccount.json'
FIREBASE_CREDENTIALS = credentials.Certificate(BASE_DIR + firebase_service_account_path)
firebase_admin.initialize_app(FIREBASE_CREDENTIALS)
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = os.environ.get('EMAIL')
DEFAULT_FROM_EMAIL = os.environ.get('EMAIL')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASSWORD')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# Simplified static file serving.
STATICFILES_LOCATION = 'static'
MEDIAFILES_LOCATION = 'media'
| 1.625 | 2 |
aiorpcgrid/client.py | urands/aiorpcgrid | 0 | 5608 | import asyncio
# from aiorpcgrid.client import Client
from aiorpcgrid.task import AsyncTask, State
class AsyncClient:
_provider = None
_method = None
_requests: dict = {}
_running = True
_request_queue: asyncio.Queue = asyncio.Queue()
_loop = None
def __init__(self, provider, loop=None):
self._provider = provider
if loop is None:
loop = asyncio.get_event_loop()
self._loop = loop
async def open(self):
await self._provider.open()
asyncio.ensure_future(self.request_loop(), loop=self._loop)
asyncio.ensure_future(self.run(), loop=self._loop)
return self
async def close(self):
self._running = False
await self._provider.close()
await self._request_queue.put(None)
async def request_loop(self):
while self._running:
task = await self._request_queue.get()
if task is not None:
await self._provider.call_method(task)
task.status = State.RUNNING
if self._request_queue.empty():
self._request_queue.task_done()
async def run(self):
while self._running:
responses = await self._provider.recv()
if responses is not None:
for response in responses:
if response.id in self._requests:
task = self._requests[response.id]
task.result = response.result
task.error = response.error
if task.error is None:
self._requests[
response.id
].status = State.COMPLETED
else:
self._requests[response.id].status = State.FAILED
task.event.set()
del self._requests[response.id]
if task._callback is not None:
asyncio.ensure_future(
task.callback(task), loop=self._loop
)
def __call__(self, *args, **kwargs):
if not self._provider.is_connected():
raise ConnectionError(f'Connection lost. {self._provider}')
task = AsyncTask().create(self._method, *args, **kwargs)
if 'parallel' in kwargs:
task._parallel = kwargs['parallel']
self._method = None
task.status = State.PENDING
self._requests[task.id] = task
self._request_queue.put_nowait(self._requests[task.id])
return self._requests[task.id]
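# Hedged usage sketch (not part of the original module). It assumes a `provider`
# object implementing open/close/call_method/recv/is_connected as used by
# AsyncClient above; how `_method` is normally populated is not shown in this file,
# so setting it directly here is an assumption for illustration only.
async def _example_usage(provider):
    client = await AsyncClient(provider).open()
    client._method = 'echo'          # hypothetical remote method name
    task = client('hello')           # enqueues an AsyncTask and returns it
    await task.event.wait()          # run() sets the event once a response arrives
    await client.close()
    return task.result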
| 2.671875 | 3 |
python/src/otel/otel_sdk/opentelemetry/instrumentation/aws_lambda/__init__.py | matt-tyler/opentelemetry-lambda | 0 | 5609 | # Copyright 2020, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: usage
"""
The opentelemetry-instrumentation-aws-lambda package allows tracing AWS
Lambda function.
Usage
-----
.. code:: python
# Copy this snippet into AWS Lambda function
# Ref Doc: https://docs.aws.amazon.com/lambda/latest/dg/lambda-python.html
import boto3
from opentelemetry.instrumentation.aws_lambda import (
AwsLambdaInstrumentor
)
# Enable instrumentation
AwsLambdaInstrumentor().instrument()
# Lambda function
def lambda_handler(event, context):
s3 = boto3.resource('s3')
for bucket in s3.buckets.all():
print(bucket.name)
return "200 OK"
API
---
"""
import logging
import os
from importlib import import_module
from wrapt import wrap_function_wrapper
# TODO: aws propagator
from opentelemetry.sdk.extension.aws.trace.propagation.aws_xray_format import (
AwsXRayFormat,
)
from opentelemetry.instrumentation.aws_lambda.version import __version__
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.instrumentation.utils import unwrap
from opentelemetry.trace import SpanKind, get_tracer, get_tracer_provider
logger = logging.getLogger(__name__)
class AwsLambdaInstrumentor(BaseInstrumentor):
def _instrument(self, **kwargs):
self._tracer = get_tracer(__name__, __version__, kwargs.get("tracer_provider"))
self._tracer_provider = get_tracer_provider()
lambda_handler = os.environ.get("ORIG_HANDLER", os.environ.get("_HANDLER"))
wrapped_names = lambda_handler.rsplit(".", 1)
self._wrapped_module_name = wrapped_names[0]
self._wrapped_function_name = wrapped_names[1]
wrap_function_wrapper(
self._wrapped_module_name,
self._wrapped_function_name,
self._functionPatch,
)
def _uninstrument(self, **kwargs):
unwrap(
import_module(self._wrapped_module_name),
self._wrapped_function_name,
)
def _functionPatch(self, original_func, instance, args, kwargs):
lambda_context = args[1]
ctx_aws_request_id = lambda_context.aws_request_id
ctx_invoked_function_arn = lambda_context.invoked_function_arn
orig_handler = os.environ.get("ORIG_HANDLER", os.environ.get("_HANDLER"))
# TODO: enable propagate from AWS by env variable
xray_trace_id = os.environ.get("_X_AMZN_TRACE_ID", "")
lambda_name = os.environ.get("AWS_LAMBDA_FUNCTION_NAME")
function_version = os.environ.get("AWS_LAMBDA_FUNCTION_VERSION")
propagator = AwsXRayFormat()
parent_context = propagator.extract({"X-Amzn-Trace-Id": xray_trace_id})
with self._tracer.start_as_current_span(
name=orig_handler, context=parent_context, kind=SpanKind.SERVER
) as span:
# Refer: https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/faas.md#example
span.set_attribute("faas.execution", ctx_aws_request_id)
span.set_attribute("faas.id", ctx_invoked_function_arn)
# TODO: fix in Collector because these belong in resource attributes
span.set_attribute("faas.name", lambda_name)
span.set_attribute("faas.version", function_version)
result = original_func(*args, **kwargs)
# force_flush before function quit in case of Lambda freeze.
self._tracer_provider.force_flush()
return result
| 1.742188 | 2 |
instructors/migrations/0021_alter_user_avatar_url.py | bastoune57/gokiting_back_end | 0 | 5610 | # Generated by Django 4.0.2 on 2022-04-01 16:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('instructors', '0020_alter_user_description_alter_user_title'),
]
operations = [
migrations.AlterField(
model_name='user',
name='avatar_url',
field=models.ImageField(default='profile_pics/einstein_EqBibwO.jpeg', upload_to='profile_pics'),
),
]
| 1.554688 | 2 |
sopa/src/models/utils.py | SamplingAndEnsemblingSolvers/SamplingAndEnsemblingSolvers | 25 | 5611 | <filename>sopa/src/models/utils.py
import numpy as np
import torch
import random
from .odenet_mnist.layers import MetaNODE
def fix_seeds(seed=502):
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.set_printoptions(precision=10)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class RunningAverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, momentum=0.99):
self.momentum = momentum
self.reset()
def reset(self):
self.val = None
self.avg = 0
def update(self, val):
if self.val is None:
self.avg = val
else:
self.avg = self.avg * self.momentum + val * (1 - self.momentum)
self.val = val
def load_model(path):
(_, state_dict), (_, model_args), (_, slover_id) = torch.load(path, map_location='cpu').items()
is_odenet = model_args.network == 'odenet'
if not hasattr(model_args, 'in_channels'):
model_args.in_channels = 1
model = MetaNODE(downsampling_method=model_args.downsampling_method,
is_odenet=is_odenet,
in_channels=model_args.in_channels)
model.load_state_dict(state_dict)
return model, model_args | 2.234375 | 2 |
packages/micropython-official/v1.10/esp32/stubs/ubinascii.py | TheVinhLuong102/micropy-stubs | 18 | 5612 | <filename>packages/micropython-official/v1.10/esp32/stubs/ubinascii.py
"""
Module: 'ubinascii' on esp32 1.10.0
"""
# MCU: (sysname='esp32', nodename='esp32', release='1.10.0', version='v1.10 on 2019-01-25', machine='ESP32 module with ESP32')
# Stubber: 1.2.0
def a2b_base64():
pass
def b2a_base64():
pass
def crc32():
pass
def hexlify():
pass
def unhexlify():
pass
| 1.90625 | 2 |
jv/test_jv.py | chenwang/QuantEcon.lectures.code | 56 | 5613 | """
@author : <NAME>
"""
from __future__ import division
import sys
import unittest
from nose.plugins.skip import SkipTest
from jv import JvWorker
from quantecon import compute_fixed_point
from quantecon.tests import get_h5_data_file, write_array, max_abs_diff
# specify params -- use defaults
A = 1.4
alpha = 0.6
beta = 0.96
grid_size = 50
if sys.version_info[0] == 2:
v_nm = "V"
else: # python 3
raise SkipTest("Python 3 tests aren't ready.")
v_nm = "V_py3"
def _new_solution(jv, f, grp):
"gets new solution and updates data file"
V = _solve_via_vfi(jv)
write_array(f, grp, V, v_nm)
return V
def _solve_via_vfi(jv):
"compute policy rules via value function iteration"
v_init = jv.x_grid * 0.6
V = compute_fixed_point(jv.bellman_operator, v_init,
max_iter=3000,
error_tol=1e-5)
return V
def _get_vf_guess(jv, force_new=False):
with get_h5_data_file() as f:
# See if the jv group already exists
group_existed = True
try:
jv_group = f.getNode("/jv")
except:
# doesn't exist
group_existed = False
jv_group = f.create_group("/", "jv", "data for jv.py tests")
if force_new or not group_existed:
# group doesn't exist, or forced to create new data.
# This function updates f in place and returns v_vfi, c_vfi, c_pfi
V = _new_solution(jv, f, jv_group)
return V
# if we made it here, the group exists and we should try to read
# existing solutions
try:
# Try reading vfi
if sys.version_info[0] == 2:
V = jv_group.V[:]
else: # python 3
V = jv_group.V_py3[:]
except:
# doesn't exist. Let's create it
V = _new_solution(jv, f, jv_group)
return V
class TestJvWorkder(unittest.TestCase):
@classmethod
def setUpClass(cls):
jv = JvWorker(A=A, alpha=alpha, beta=beta, grid_size=grid_size)
cls.jv = jv
# compute solution
v_init = _get_vf_guess(jv)
cls.V = compute_fixed_point(jv.bellman_operator, v_init)
cls.s_pol, cls.phi_pol = jv.bellman_operator(cls.V * 0.999,
return_policies=True)
def test_low_x_prefer_s(self):
"jv: s preferred to phi with low x?"
# low x is an early index
self.assertGreaterEqual(self.s_pol[0], self.phi_pol[0])
def test_high_x_prefer_phi(self):
"jv: phi preferred to s with high x?"
# high x is a late index
self.assertGreaterEqual(self.phi_pol[-1], self.s_pol[-1])
def test_policy_sizes(self):
"jv: policies correct size"
n = self.jv.x_grid.size
self.assertEqual(self.s_pol.size, n)
self.assertEqual(self.phi_pol.size, n)
def test_bellman_sol_fixed_point(self):
"jv: solution to bellman is fixed point"
new_V = self.jv.bellman_operator(self.V)
self.assertLessEqual(max_abs_diff(new_V, self.V), 1e-4)
| 1.976563 | 2 |
excentury/command/config.py | LaudateCorpus1/excentury | 0 | 5614 | """Config
This module is in charge of providing all the necessary settings to
the rest of the modules in excentury.
"""
import os
import re
import sys
import textwrap
import argparse
from collections import OrderedDict
from excentury.command import error, trace, import_mod
DESC = """Edit a configuration file for excentury.
Some actions performed by excentury can be overridden by using
configuration files.
To see the values that the configuration file can override, use the
`defaults` command. This will print a list of the keys and values
excentury uses for the given command.
"""
RE = re.compile(r'\${(?P<key>.*?)}')
RE_IF = re.compile(
r'(?P<iftrue>.*?) IF\[\[(?P<cond>.*?)\]\]'
)
RE_IFELSE = re.compile(
r'(?P<iftrue>.*?) IF\[\[(?P<cond>.*?)\]\]ELSE (?P<iffalse>.*)'
)
def disp(msg):
"""Wrapper around sys.stdout.write which is meant to behave as
the print function but it does not add the newline character. """
sys.stdout.write(msg)
def _replacer(*key_val):
"""Helper function for replace.
Source: <http://stackoverflow.com/a/15221068/788553>
"""
replace_dict = dict(key_val)
replacement_function = lambda match: replace_dict[match.group(0)]
pattern = re.compile("|".join([re.escape(k) for k, _ in key_val]), re.M)
return lambda string: pattern.sub(replacement_function, string)
def replace(string, *key_val):
"""Replacement of strings done in one pass. Example:
>>> replace("a < b && b < c", ('<', '<'), ('&', '&'))
'a < b && b < c'
Source: <http://stackoverflow.com/a/15221068/788553>
"""
return _replacer(*key_val)(string)
class ConfigDispAction(argparse.Action): # pylint: disable=R0903
"""Derived argparse Action class to use when displaying the
configuration file and location."""
def __call__(self, parser, namespace, values, option_string=None):
try:
read_config(namespace)
except IOError:
disp('xcpp.config not found in %r\n' % namespace.cfg)
else:
disp('path to xcpp.config: "%s"\n' % namespace.cfg)
with open('%s/xcpp.config' % namespace.cfg, 'r') as _fp:
disp(_fp.read())
exit(0)
def add_parser(subp, raw):
"Add a parser to the main subparser. "
tmpp = subp.add_parser('config', help='configure excentury',
formatter_class=raw,
description=textwrap.dedent(DESC))
tmpp.add_argument('var', type=str, nargs='?', default=None,
help='Must be in the form of sec.key')
tmpp.add_argument('-v', action='store_true',
help='print config file location')
tmpp.add_argument('--print', action=ConfigDispAction,
nargs=0,
help='print config file and exit')
def _get_replacements(tokens, data, sec):
"""Helper function for _read_config. """
replacements = list()
for token in tokens:
if ':' in token:
tsec, tkey = token.split(':')
tval = ''
if tsec in data:
if tkey in data[tsec]:
tval = data[tsec][tkey]
else:
if token in data[sec]:
tval = data[sec][token]
else:
tval = ''
replacements.append(
('${%s}' % token, tval)
)
return replacements
# pylint: disable=invalid-name
# ARG and CFG are names that may be used in the configuration file.
# ARG gives us access to the command line arguments and CFG gives us
# access to the current configuration. Note that using CFG[key][sec]
# is equivalent to ${key:sec}. These names go against the convention
# so that they may be easy to spot in a configuration file.
def _eval_condition(cond, ARG, CFG, line_num, fname):
"""Evaluates a string using the eval function. It prints a
warning if there are any errors. Returns the result of the
evaluation and an error number: 0 if everything is fine, 1 if
there was an error. """
ARG.FILEPATH = '%s/%s/%s' % (ARG.cfg, CFG['xcpp']['path'], ARG.inputfile)
try:
# pylint: disable=eval-used
# To be able to evaluate a condition without creating a whole
# new parser we can use the eval function. We could have use
# a python file as a configuration but then there would be
# no simple structure to the files.
cond = eval(cond)
enum = 0
# pylint: disable=broad-except
# Anything can go wrong during the execution of the `eval`
# function. For this reason we must try to catch anything that
# may come our way so that we may give out a warning message
# and ignore it.
except Exception as exception:
cond = None
enum = 1
trace(
'WARNING: error in line %d of %r: %s\n' % (
line_num, fname, exception.message
)
)
return cond, enum
def _read_config(fname, arg):
"""Simple parser to read configuration files. """
data = OrderedDict()
sec = None
line_num = 0
with open(fname, 'r') as fhandle:
for line in fhandle:
line_num += 1
if line[0] == '[':
sec = line[1:-2]
data[sec] = OrderedDict()
elif '=' in line:
tmp = line.split('=', 1)
key = tmp[0].strip()
val = tmp[1].strip()
val = os.path.expandvars(val)
replacements = _get_replacements(
RE.findall(val), data, sec
)
# pylint: disable=star-args
if replacements:
val = replace(val, *replacements)
match = RE_IFELSE.match(val)
if match:
cond, enum = _eval_condition(
match.group('cond'), arg, data, line_num, fname
)
if enum == 1:
continue
groups = match.groups()
val = groups[0] if cond else groups[2]
else:
match = RE_IF.match(val)
if match:
cond, enum = _eval_condition(
match.group('cond'), arg, data, line_num, fname
)
if enum == 1:
continue
if cond:
val = match.group('iftrue')
else:
continue
data[sec][key] = val
return data
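# Hedged illustration of the file format parsed by _read_config (section and key
# names below are made up for illustration; only the syntax mirrors this parser):
#
#   [xcpp]
#   path = cpp
#   [to]
#   out = ${xcpp:path}/build
#   flag = -O2 IF[[ARG.parser_name == 'to']]ELSE -g
#
# ${key} / ${sec:key} entries are substituted from values already read, and the
# IF[[...]] / IF[[...]]ELSE suffixes are evaluated by _eval_condition via eval(),
# with ARG (command-line arguments) and CFG (the data read so far) in scope.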
def read_config(arg):
"""Read the configuration file xcpp.config"""
path = arg.cfg
if path == '.' and not os.path.exists('xcpp.config'):
if 'XCPP_CONFIG_PATH' in os.environ:
tmp_path = os.environ['XCPP_CONFIG_PATH']
if os.path.exists('%s/xcpp.config' % tmp_path):
trace("Configured with: '%s/xcpp.config'\n" % tmp_path)
path = tmp_path
elif not os.path.exists('%s/xcpp.config' % path):
error("ERROR: %s/xcpp.config does not exist\n" % path)
arg.cfg = path
try:
config = _read_config('%s/xcpp.config' % path, arg)
except IOError:
config = OrderedDict()
return config
def run(arg):
"""Run command. """
config = read_config(arg)
if arg.v:
disp('path to xcpp.config: "%s"\n' % arg.cfg)
if arg.var is None:
for sec in config:
disp('[%s]\n' % sec)
for key in config[sec]:
disp(' %s = %s\n' % (key, config[sec][key]))
disp('\n')
return
try:
command, var = arg.var.split('.', 1)
except ValueError:
error("ERROR: '%s' is not of the form sec.key\n" % arg.var)
try:
disp(config[command][var]+'\n')
except KeyError:
pass
return
def _update_single(cfg, name, defaults=None):
"Helper function for get_cfg."
if defaults:
for var, val in defaults.iteritems():
cfg[name][var] = os.path.expandvars(str(val))
else:
mod = import_mod('excentury.command.%s' % name)
if hasattr(mod, "DEFAULTS"):
for var, val in mod.DEFAULTS.iteritems():
cfg[name][var] = os.path.expandvars(val)
def _update_from_file(cfg, name, cfg_file):
"Helper function for get_cfg."
if name in cfg_file:
for var, val in cfg_file[name].iteritems():
cfg[name][var] = os.path.expandvars(val)
def _update_from_arg(cfg, argdict, key):
"Helper function for get_cfg."
for var in cfg[key]:
if var in argdict and argdict[var] is not None:
cfg[key][var] = argdict[var]
def get_cfg(arg, names, defaults=None):
"""Obtain the settings for a command. """
cfg = {
'xcpp': {
'root': '.',
'path': '.'
}
}
cfg_file = read_config(arg)
if 'xcpp' in cfg_file:
for var, val in cfg_file['xcpp'].iteritems():
cfg['xcpp'][var] = os.path.expandvars(val)
cfg['xcpp']['root'] = arg.cfg
if isinstance(names, list):
for name in names:
cfg[name] = dict()
_update_single(cfg, name)
_update_from_file(cfg, name, cfg_file)
else:
if names != 'xcpp':
cfg[names] = dict()
_update_single(cfg, names, defaults)
_update_from_file(cfg, names, cfg_file)
argdict = vars(arg)
if arg.parser_name in cfg:
_update_from_arg(cfg, argdict, arg.parser_name)
elif arg.parser_name == 'to' and arg.lang in cfg:
_update_from_arg(cfg, argdict, arg.lang)
_update_from_arg(cfg, argdict, 'xcpp')
return cfg
| 2.546875 | 3 |
tests/test_urls.py | pkjmesra/nseta | 8 | 5615 | # -*- coding: utf-8 -*-
'''
Created on Thu Nov 19 20:52:33 2015
@author: SW274998
'''
from nseta.common.commons import *
import datetime
import unittest
import time
from bs4 import BeautifulSoup
from tests import htmls
import json
import requests
import six
from nseta.common.urls import *
import nseta.common.urls as urls
from six.moves.urllib.parse import urlparse
from baseUnitTest import baseUnitTest
class TestUrls(baseUnitTest):
def setUp(self, redirect_logs=True):
super().setUp()
proxy_on = False
if proxy_on:
urls.session.proxies.update({'http': 'proxy1.wipro.com:8080'})
def runTest(self):
for key in TestUrls.__dict__.keys():
if key.find('test') == 0:
TestUrls.__dict__[key](self)
def test_get_symbol_count(self):
count = get_symbol_count(symbol='SBIN')
self.assertEqual(count, '1')
force_count = get_symbol_count(symbol='SBIN', force_refresh=True)
self.assertEqual(force_count, '1')
def test_equity_history_url(self):
sym_count = get_symbol_count(symbol='SBIN')
txt = 'Data for SBIN - EQ'
resp = equity_history_url(symbol='SBIN',
symbolCount=sym_count,
series='EQ',
fromDate='01-01-2000',
toDate='10-01-2000',
dateRange='')
self.assertGreaterEqual(resp.text.find(txt), 0, resp.text)
def test_nse_intraday_url(self):
txt = 'date|g1_o|g1_h|g1_l|g1_c|g2|g2_CUMVOL' #'<columns><column>date</column><column>pltp</column><column>nltp</column><column>previousclose</column><column>allltp</column>'
resp = nse_intraday_url(CDSymbol='SBIN', Periodicity='1')
self.assertIn(txt, resp.text)
def test_price_list_url(self):
resp = price_list_url('2019', 'DEC', '31DEC2019')
csv = unzip_str(resp.content)
self.assertGreaterEqual(csv.find('SBIN'), 0)
def tests_daily_volatility_url(self):
resp = daily_volatility_url('19112015')
self.assertGreaterEqual(resp.text.find('SBIN'), 0)
def test_pr_price_list_zipped_url(self):
resp = pr_price_list_zipped_url('191115')
csv = unzip_str(resp.content)
def test_index_history_url(self):
resp = index_history_url(indexType='NIFTY 50',
fromDate='01-01-2015',
toDate='10-01-2015')
self.assertGreaterEqual(resp.text.find('High'), 0)
self.assertGreaterEqual(resp.text.find('Low'), 0)
def test_index_daily_snapshot_url(self):
resp = index_daily_snapshot_url('06012020')
csv = str(resp.content)
self.assertGreaterEqual(csv.find('Nifty 50'), 0)
self.assertGreaterEqual(csv.find('Nifty IT'), 0)
self.assertGreaterEqual(csv.find('Nifty Bank'), 0)
self.assertGreaterEqual(csv.find('Nifty Next 50'), 0)
def test_index_pe_history_url(self):
resp = index_pe_history_url(fromDate='01-01-2015',
toDate='10-01-2015',
indexName='NIFTY 50')
self.assertGreaterEqual(resp.text.find('<th>P/E'), 0)
self.assertGreaterEqual(resp.text.find('<th>P/B'), 0)
def test_index_vix_history_url(self):
resp = index_vix_history_url(fromDate='01-Jan-2015',
toDate='10-Jan-2015',
)
self.assertGreaterEqual(resp.text.find('VIX'), 0)
self.assertGreaterEqual(resp.text.find('Change'), 0)
def test_derivative_derivative_expiry_dates_url(self):
resp = derivative_expiry_dates_url()
self.assertGreaterEqual(resp.text.find('vixExpryDt'), 0)
def test_derivative_history_url(self):
resp = derivative_history_url(instrumentType='FUTIDX',
symbol='NIFTY',
expiryDate='26-12-2019',
optionType='select',
strikePrice='',
dateRange='',
fromDate='25-Dec-2019',
toDate='26-Dec-2019')
self.assertGreaterEqual(resp.text.find('NIFTY'), 0)
self.assertGreaterEqual(resp.text.find('Expiry'), 0)
def test_derivative_price_list_url(self):
resp = derivative_price_list_url('2019', 'JUL', '19JUL2019')
csv = unzip_str(resp.content)
def tearDown(self):
super().tearDown()
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUrls)
result = unittest.TextTestRunner(verbosity=2).run(suite)
if six.PY2:
if result.wasSuccessful():
print('tests OK')
for (test, error) in result.errors:
print('=========Error in: %s===========' % test)
print(error)
print('======================================')
for (test, failures) in result.failures:
print('=========Error in: %s===========' % test)
print(failures)
print('======================================')
| 2.078125 | 2 |
accounts/forms.py | cheradenine/Django-CRM | 2 | 5616 | from django import forms
from .models import Account
from common.models import Comment, Attachments
from leads.models import Lead
from contacts.models import Contact
from django.db.models import Q
class AccountForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
account_view = kwargs.pop('account', False)
request_user = kwargs.pop('request_user', None)
super(AccountForm, self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs = {"class": "form-control"}
self.fields['description'].widget.attrs.update({'rows': '8'})
self.fields['status'].choices = [
(each[0], each[1]) for each in Account.ACCOUNT_STATUS_CHOICE]
self.fields['status'].required = False
for key, value in self.fields.items():
if key == 'phone':
value.widget.attrs['placeholder'] = "+91-123-456-7890"
else:
value.widget.attrs['placeholder'] = value.label
self.fields['billing_address_line'].widget.attrs.update({
'placeholder': 'Address Line'})
self.fields['billing_street'].widget.attrs.update({
'placeholder': 'Street'})
self.fields['billing_city'].widget.attrs.update({
'placeholder': 'City'})
self.fields['billing_state'].widget.attrs.update({
'placeholder': 'State'})
self.fields['billing_postcode'].widget.attrs.update({
'placeholder': 'Postcode'})
self.fields["billing_country"].choices = [
("", "--Country--"), ] + list(self.fields["billing_country"].choices)[1:]
self.fields["lead"].queryset = Lead.objects.all(
).exclude(status='closed')
if request_user:
self.fields["lead"].queryset = Lead.objects.filter(
Q(assigned_to__in=[request_user]) | Q(created_by=request_user)).exclude(status='closed')
self.fields["contacts"].queryset = Contact.objects.filter(
Q(assigned_to__in=[request_user]) | Q(created_by=request_user))
if account_view:
self.fields['billing_address_line'].required = True
self.fields['billing_street'].required = True
self.fields['billing_city'].required = True
self.fields['billing_state'].required = True
self.fields['billing_postcode'].required = True
self.fields['billing_country'].required = True
class Meta:
model = Account
fields = ('name', 'phone', 'email', 'website', 'industry',
'description', 'status',
'billing_address_line', 'billing_street',
'billing_city', 'billing_state',
'billing_postcode', 'billing_country', 'lead', 'contacts')
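# Hedged usage sketch (not part of the original module): the two extra keyword
# arguments popped in __init__ are passed like this from a view; `request` is a
# standard Django request object and is assumed here for illustration.
#
#   form = AccountForm(request.POST, account=True, request_user=request.user)
#   if form.is_valid():
#       account = form.save()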
class AccountCommentForm(forms.ModelForm):
comment = forms.CharField(max_length=64, required=True)
class Meta:
model = Comment
fields = ('comment', 'account', 'commented_by')
class AccountAttachmentForm(forms.ModelForm):
attachment = forms.FileField(max_length=1001, required=True)
class Meta:
model = Attachments
fields = ('attachment', 'account')
| 2.1875 | 2 |
pywren/pywren_ibm_cloud/invokers.py | thetolga/pywren-ibm-cloud | 0 | 5617 | #
# Copyright 2018 PyWren Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import time
import logging
import random
from pywren_ibm_cloud.cf_connector import CloudFunctions
logger = logging.getLogger(__name__)
class IBMCloudFunctionsInvoker:
def __init__(self, cf_config, retry_config):
self.namespace = cf_config['namespace']
self.endpoint = cf_config['endpoint']
self.cf_action_name = cf_config['action_name'] # Runtime
self.invocation_retry = retry_config['invocation_retry']
self.retry_sleeps = retry_config['retry_sleeps']
self.retries = retry_config['retries']
self.client = CloudFunctions(cf_config)
log_msg = 'IBM Cloud Functions init for {}'.format(self.cf_action_name)
logger.info(log_msg)
if(logger.getEffectiveLevel() == logging.WARNING):
print(log_msg)
def invoke(self, payload):
"""
Invoke -- return information about this invocation
"""
act_id = self.client.invoke(self.cf_action_name, payload)
attempts = 1
while not act_id and self.invocation_retry and attempts < self.retries:
attempts += 1
selected_sleep = random.choice(self.retry_sleeps)
exec_id = payload['executor_id']
call_id = payload['call_id']
log_msg = ('Executor ID {} Function {} - Invocation failed - retry {} in {} seconds'.format(exec_id, call_id, attempts, selected_sleep))
logger.debug(log_msg)
time.sleep(selected_sleep)
act_id = self.client.invoke(self.cf_action_name, payload)
return act_id
def config(self):
"""
Return config dict
"""
return {'cf_action_name': self.cf_action_name,
'cf_namespace': self.namespace,
'cf_endpoint': self.endpoint}
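# Hedged usage sketch (not part of the original module). The dictionary keys mirror
# those read in __init__ above; actual pywren config files may contain more keys,
# and all values here are placeholders.
#
#   cf_config = {'namespace': 'my_namespace',
#                'endpoint': 'https://us-south.functions.cloud.ibm.com',
#                'action_name': 'pywren_runtime'}
#   retry_config = {'invocation_retry': True, 'retry_sleeps': [1, 2, 5], 'retries': 5}
#   invoker = IBMCloudFunctionsInvoker(cf_config, retry_config)
#   activation_id = invoker.invoke({'executor_id': 'job0', 'call_id': '00000'})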
| 2.03125 | 2 |
Projet1/Dataset/addlinkRealExample.py | Arugakente/DataScienceP1 | 0 | 5618 | <filename>Projet1/Dataset/addlinkRealExample.py
import os
import random
inputDirectory = "./original"
outputDirectory = "./processed"
#probability parameters
TopLevel = 0.6
SecondLevel = 0.5
ThirdLevel = 0.4
FourAndAbove = 0.2
pickInside = 0.5
pickOutside = 0.25
topics = []
siteLevel = []
fileStructure = []
count = 0
def findPossibleIndex(toParse):
toReturn = []
for current in range(0,len(toParse)):
if toParse[current] == " ":
toReturn.append(current)
toReturn.append(len(toParse))
return toReturn
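# Hedged example of the helper above: findPossibleIndex("a b c") returns [1, 3, 5],
# i.e. the indices of the spaces plus the string length -- the candidate positions
# at which a " linkTo:..." token can be spliced into a line by manageFile below.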
def manageFile(inputPath,outputPath,topicIndex,currentLevel,filename):
count = 0
content = open(inputPath , 'r')
output = open(outputPath ,"w")
currentLine = content.readline()
outputFile = ""
while currentLine:
currentLine = content.readline()
randomPick = random.uniform(0.0,2.0)
if randomPick <= pickInside+pickOutside :
possibleIndexes = findPossibleIndex(currentLine)
insertPosition = possibleIndexes[random.randint(0,len(possibleIndexes)-1)]
selectedTopic = topicIndex
if(randomPick<=pickOutside):
while(selectedTopic == topicIndex):
selectedTopic = random.randint(0,len(topics)-1)
randomPick = random.uniform(0.0,4.0)
if(randomPick <= TopLevel + SecondLevel + ThirdLevel + FourAndAbove):
selectedLevel = 0
if(randomPick <= TopLevel):
selectedLevel = 1
if(randomPick <= TopLevel+ SecondLevel and randomPick > TopLevel):
selectedLevel = 2
if(randomPick <= TopLevel + SecondLevel + ThirdLevel and randomPick > TopLevel+ SecondLevel):
selectedLevel = 3
if(randomPick <= TopLevel + SecondLevel + ThirdLevel + FourAndAbove and randomPick > TopLevel + SecondLevel + ThirdLevel):
if(len(siteLevel[selectedTopic]) == 4):
selectedLevel = 4
else:
selectedLevel = random.randint(4,len(siteLevel[selectedTopic]))
i = 0
found = False
while i<len(siteLevel[selectedTopic]):
if siteLevel[selectedTopic][i] == str(selectedLevel)+"grade":
found = True
selectedLevel = i
i+=1
if(selectedLevel>=currentLevel):
fileLink = filename
while(fileLink == filename):
fileLink = fileStructure[selectedTopic][selectedLevel][random.randint(0,len(fileStructure[selectedTopic][selectedLevel])-1)]
fileLink = " linkTo:"+fileLink
count += 1
print(count)
if insertPosition == len(currentLine):
currentLine += fileLink
else:
currentLine = currentLine[0:insertPosition]+fileLink+currentLine[insertPosition:]
outputFile += currentLine
output.write(outputFile)
return count
topicIndex=0
for foldername in os.listdir(inputDirectory) :
if(foldername[0] != "."):
topics.append(foldername)
siteLevel.append([])
fileStructure.append([])
levelIndex=0
for categoryName in os.listdir(inputDirectory+"/"+foldername):
if(categoryName[0] != "."):
siteLevel[topicIndex].append(categoryName)
fileStructure[topicIndex].append([])
for filename in os.listdir(inputDirectory+"/"+foldername+"/"+categoryName):
if(filename[0] != "."):
fileStructure[topicIndex][levelIndex].append(filename)
levelIndex += 1
topicIndex += 1
for i in range(0,len(topics)):
for j in range(0,len(siteLevel[i])):
for k in range(0,len(fileStructure[i][j])):
count += manageFile(inputDirectory+"/"+topics[i]+"/"+siteLevel[i][j]+"/"+fileStructure[i][j][k],outputDirectory+"/"+fileStructure[i][j][k],i,j,fileStructure[i][j][k])
print(str(count)+" liens créés") | 2.390625 | 2 |
kkcalc/kk.py | benajamin/kkcalc | 0 | 5619 | <reponame>benajamin/kkcalc<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the Kramers-Kronig Calculator software package.
#
# Copyright (c) 2013 <NAME>, <NAME>
#
# The software is licensed under the terms of the zlib/libpng license.
# For details see LICENSE.txt
"""This module implements the Kramers-Kronig transformation."""
import logging, sys
logger = logging.getLogger(__name__)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
logging.StreamHandler(stream=sys.stdout)
import math
import numpy
import os
import data
def calc_relativistic_correction(stoichiometry):
"""Calculate the relativistic correction to the Kramers-Kronig transform.
Parameters:
-----------
stoichiometry : array of integer/float pairs
Each pair in the list consists of an atomic number and the relative proportion of that element.
Returns
-------
This function returns a ``float`` holding the relativistic
correction to the Kramers-Kronig transform.
"""
correction = 0
for z, n in stoichiometry:
correction += (z - (z/82.5)**2.37) * n
return correction
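# Hedged worked example (not part of the original module). The stoichiometry pairs
# below assume water, H2O, written directly as (atomic number, relative count)
# pairs rather than obtained from data.ParseChemicalFormula.
def _example_relativistic_correction():
    """Sketch: relativistic correction for H2O = sum of (z - (z/82.5)**2.37) * n."""
    return calc_relativistic_correction([(1, 2.0), (8, 1.0)])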
def KK_General_PP(Eval_Energy, Energy, imaginary_spectrum, orders, relativistic_correction):
"""Calculate Kramers-Kronig transform with "Piecewise Polynomial"
algorithm plus the Biggs and Lighthill extended data.
Parameters
----------
Eval_Energy : numpy vector of `float`
Set of photon energies describing points at which to evaluate the real spectrum
Energy : numpy vector of `float`
Set of photon energies describing intervals for which each row of `imaginary_spectrum` is valid
imaginary_spectrum : two-dimensional `numpy.array` of `float`
The array consists of columns of polynomial coefficients belonging to the power terms indicated by 'order'
orders : numpy vector of integers
The vector represents the polynomial indices corresponding to the columns of imaginary_spectrum
relativistic_correction : float
The relativistic correction to the Kramers-Kronig transform.
You can calculate the value using the `calc_relativistic_correction` function.
Returns
-------
This function returns the real part of the scattering factors evaluated at photon energies specified by Eval_Energy.
"""
logger = logging.getLogger(__name__)
logger.info("Calculate Kramers-Kronig transform using general piecewise-polynomial algorithm")
# Need to build x-E-n arrays
X = numpy.tile(Energy[:,numpy.newaxis,numpy.newaxis],(1,len(Eval_Energy),len(orders)))
E = numpy.tile(Eval_Energy[numpy.newaxis,:,numpy.newaxis],(len(Energy)-1,1,len(orders)))
C = numpy.tile(imaginary_spectrum[:,numpy.newaxis,:],(1,len(Eval_Energy),1))
N = numpy.tile(orders[numpy.newaxis,numpy.newaxis,:],(len(Energy)-1,len(Eval_Energy),1))
poles = numpy.equal(X,numpy.tile(Eval_Energy[numpy.newaxis,:,numpy.newaxis],(len(Energy),1,len(orders))))
# all N, ln(x+E) and ln(x-E) terms and poles
Integral = numpy.sum(-C*(-E)**N*numpy.log(numpy.absolute((X[1:,:,:]+E)/(X[:-1,:,:]+E)))-C*E**N*(1-poles[1:,:,:])*numpy.log(numpy.absolute((X[1:,:,:]-E+poles[1:,:,:])/((1-poles[:-1,:,:])*X[:-1,:,:]+poles[:-1,:,:]*X[[0]+list(range(len(Energy)-2)),:,:]-E))),axis=(0,2))
if numpy.any(orders<=-2): # N<=-2, ln(x) terms
i = [slice(None,None,None),slice(None,None,None),orders<=-2]
Integral += numpy.sum(C[i]*((-E[i])**N[i]+E[i]**N[i])*numpy.log(numpy.absolute((X[1:,:,orders<=-2])/(X[:-1,:,orders<=-2]))),axis=(0,2))
if numpy.any(orders>=0): # N>=0, x^k terms
for ni in numpy.where(orders>=0)[0]:
i = [slice(None,None,None),slice(None,None,None),ni]
n = orders[ni]
for k in range(n,0,-2):
Integral += numpy.sum(C[i]/float(-k)*2*E[i]**(n-k)*(X[1:,:,ni]**k-X[:-1,:,ni]**k),axis=0)
if numpy.any(orders <=-3): # N<=-3, x^k terms
for ni in numpy.where(orders<=-3)[0]:
i = [slice(None,None,None),slice(None,None,None),ni]
n = orders[ni]
for k in range(n+2,0,2):
Integral += numpy.sum(C[i]/float(k)*((-1)**(n-k)+1)*E[i]**(n-k)*(X[1:,:,ni]**k-X[:-1,:,ni]**k),axis=0)
logger.debug("Done!")
return Integral / math.pi + relativistic_correction
def KK_PP(Eval_Energy, Energy, imaginary_spectrum, relativistic_correction):
"""Calculate Kramers-Kronig transform with "Piecewise Polynomial"
algorithm plus the Biggs and Lighthill extended data.
Parameters
----------
Eval_Energy : numpy vector of `float`
Set of photon energies describing points at which to evaluate the real spectrum
Energy : numpy vector of `float`
Set of photon energies describing intervals for which each row of `imaginary_spectrum` is valid
imaginary_spectrum : two-dimensional `numpy.array` of `float`
The array consists of five columns of polynomial coefficients: A_1, A_0, A_-1, A_-2, A_-3
relativistic_correction : float
The relativistic correction to the Kramers-Kronig transform.
You can calculate the value using the `calc_relativistic_correction` function.
Returns
-------
This function returns the real part of the scattering factors evaluated at photon energies specified by Eval_Energy.
"""
logger = logging.getLogger(__name__)
logger.info("Calculate Kramers-Kronig transform using (n from 1 to -3) piecewise-polynomial algorithm")
X1 = Energy[0:-1]
X2 = Energy[1:]
E = numpy.tile(Eval_Energy, (len(Energy)-1, 1)).T
Full_coeffs = imaginary_spectrum.T
Symb_1 = (( Full_coeffs[0, :]*E+Full_coeffs[1, :])*(X2-X1)+0.5*Full_coeffs[0, :]*(X2**2-X1**2)-(Full_coeffs[3, :]/E+Full_coeffs[4, :]*E**-2)*numpy.log(numpy.absolute(X2/X1))+Full_coeffs[4, :]/E*(X2**-1-X1**-1))
Symb_2 = ((-Full_coeffs[0, :]*E+Full_coeffs[1, :])*(X2-X1)+0.5*Full_coeffs[0, :]*(X2**2-X1**2)+(Full_coeffs[3, :]/E-Full_coeffs[4, :]*E**-2)*numpy.log(numpy.absolute(X2/X1))-Full_coeffs[4, :]/E*(X2**-1-X1**-1))+(Full_coeffs[0, :]*E**2-Full_coeffs[1, :]*E+Full_coeffs[2, :]-Full_coeffs[3, :]*E**-1+Full_coeffs[4, :]*E**-2)*numpy.log(numpy.absolute((X2+E)/(X1+E)))
Symb_3 = (1-1*((X2==E)|(X1==E)))*(Full_coeffs[0, :]*E**2+Full_coeffs[1, :]*E+Full_coeffs[2, :]+Full_coeffs[3, :]*E**-1+Full_coeffs[4, :]*E**-2)*numpy.log(numpy.absolute((X2-E+1*(X2==E))/(X1-E+1*(X1==E))))
Symb_B = numpy.sum(Symb_2 - Symb_1 - Symb_3, axis=1) # Sum areas for approximate integral
# Patch singularities
hits = Energy[1:-1]==E[:,0:-1]
E_hits = numpy.append(numpy.insert(numpy.any(hits, axis=0),[0,0],False),[False,False])
Eval_hits = numpy.any(hits, axis=1)
X1 = Energy[E_hits[2:]]
XE = Energy[E_hits[1:-1]]
X2 = Energy[E_hits[:-2]]
C1 = Full_coeffs[:, E_hits[2:-1]]
C2 = Full_coeffs[:, E_hits[1:-2]]
Symb_singularities = numpy.zeros(len(Eval_Energy))
Symb_singularities[Eval_hits] = (C2[0, :]*XE**2+C2[1, :]*XE+C2[2, :]+C2[3, :]*XE**-1+C2[4, :]*XE**-2)*numpy.log(numpy.absolute((X2-XE)/(X1-XE)))
# Finish things off
KK_Re = (Symb_B-Symb_singularities) / (math.pi*Eval_Energy) + relativistic_correction
logger.debug("Done!")
return KK_Re
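# Hedged usage sketch (names are placeholders): KK_PP evaluates the real part on an
# arbitrary grid given the interval energies and the five-column coefficient array
# for the imaginary part; the shapes must satisfy len(Energy) == len(imaginary_spectrum) + 1.
#
#   f1 = KK_PP(Eval_Energy=Full_E, Energy=Full_E,
#              imaginary_spectrum=Imaginary_Spectrum,
#              relativistic_correction=calc_relativistic_correction(stoichiometry))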
def improve_accuracy(Full_E, Real_Spectrum, Imaginary_Spectrum, relativistic_correction, tolerance, recursion=50):
"""Calculate extra data points so that a linear interpolation is more accurate.
Parameters
----------
Full_E : numpy vector of `float`
Set of photon energies describing intervals for which each row of `imaginary_spectrum` is valid
Real_Spectrum : numpy vector of `float`
The real part of the spectrum corresponding to magnitudes at photon energies in Full_E
Imaginary_Spectrum : two-dimensional `numpy.array` of `float`
The array consists of five columns of polynomial coefficients: A_1, A_0, A_-1, A_-2, A_-3
relativistic_correction : float
The relativistic correction to the Kramers-Kronig transform.
(You can calculate the value using the `calc_relativistic_correction` function.)
tolerance : float
Level of error in linear extrapolation of data values to be allowed.
recursion : integer
Number of times an energy interval can be halved before giving up.
Returns
-------
This function returns a numpy array with three columns respectively representing photon energy, the real spectrum and the imaginary spectrum.
"""
logger.debug("Improve data accuracy")
new_points = numpy.cumsum(numpy.ones((len(Full_E)-2,1),dtype=numpy.int8))+1
Im_values = data.coeffs_to_ASF(Full_E, numpy.vstack((Imaginary_Spectrum,Imaginary_Spectrum[-1])))
#plot_Im_values = Im_values
Re_values = Real_Spectrum
E_values = Full_E
temp_Im_spectrum = Imaginary_Spectrum[1:]
count = 0
improved = 1
total_improved_points = 0
while count<recursion and numpy.sum(improved)>0:
#get E_midpoints
midpoints = (E_values[new_points-1]+E_values[new_points])/2.
#evaluate at new points
Im_midpoints = data.coeffs_to_ASF(midpoints, temp_Im_spectrum)
Re_midpoints = KK_PP(midpoints, Full_E, Imaginary_Spectrum, relativistic_correction)
#evaluate error levels
Im_error = abs((Im_values[new_points-1]+Im_values[new_points])/2. - Im_midpoints)
Re_error = abs((Re_values[new_points-1]+Re_values[new_points])/2. - Re_midpoints)
improved = (Im_error>tolerance) | (Re_error>tolerance)
logger.debug(str(numpy.sum(improved))+" points (out of "+str(len(improved))+") can be improved in pass number "+str(count+1)+".")
total_improved_points += numpy.sum(improved)
#insert new points and values
Im_values = numpy.insert(Im_values,new_points[improved],Im_midpoints[improved])
Re_values = numpy.insert(Re_values,new_points[improved],Re_midpoints[improved])
E_values = numpy.insert(E_values,new_points[improved],midpoints[improved])
#prepare for next loop
temp_Im_spectrum =numpy.repeat(temp_Im_spectrum[improved],2,axis=0)
new_points = numpy.where(numpy.insert(numpy.zeros(Im_values.shape, dtype=numpy.bool),new_points[improved],True))[0]
new_points = numpy.vstack((new_points, new_points+1)).T.flatten()
count += 1
#import matplotlib
#matplotlib.use('WXAgg')
#import pylab
#pylab.figure()
#pylab.plot(Full_E,plot_Im_values,'ok')
#pylab.plot(Full_E,Real_Spectrum,'og')
#pylab.plot(midpoints,Im_midpoints,'+b')
#pylab.plot(midpoints,Re_midpoints,'+r')
#pylab.plot(E_values,Im_values,'b-')
#pylab.plot(E_values,Re_values,'r-')
#pylab.plot(midpoints,Im_error,'b-')
#pylab.plot(midpoints,Re_error,'r-')
#pylab.xscale('log')
#pylab.show()
logger.info("Improved data accuracy by inserting "+str(total_improved_points)+" extra points.")
return numpy.vstack((E_values,Re_values,Im_values)).T
def kk_calculate_real(NearEdgeDataFile, ChemicalFormula, load_options=None, input_data_type=None, merge_points=None, add_background=False, fix_distortions=False, curve_tolerance=None, curve_recursion=50):
"""Do all data loading and processing and then calculate the kramers-Kronig transform.
Parameters
----------
NearEdgeDataFile : string
        Path to the file containing the near-edge data
ChemicalFormula : string
A standard chemical formula string consisting of element symbols, numbers and parentheses.
    load_options : dict or None
        Options passed through to ``data.load_data`` when reading the near-edge data file.
    input_data_type : string or None
        Type of the near-edge data (e.g. 'NEXAFS'); it is converted to atomic scattering factors before merging.
    merge_points : list or tuple pair of `float` values, or None
        The photon energy values (low, high) at which the near-edge and scattering factor data values
        are set equal so as to ensure continuity of the merged data set.
    add_background : boolean
        Flag passed to ``data.merge_spectra`` to add a background to the near-edge data.
    fix_distortions : boolean
        Flag passed to ``data.merge_spectra`` to fix distortions in the near-edge data.
    curve_tolerance : float or None
        If not None, extra points are inserted with ``improve_accuracy`` until linear interpolation of the output is accurate to this tolerance.
    curve_recursion : integer
        Maximum number of interval-halving passes used by ``improve_accuracy``.
Returns
-------
This function returns a numpy array with columns consisting of the photon energy, the real and the imaginary parts of the scattering factors.
"""
Stoichiometry = data.ParseChemicalFormula(ChemicalFormula)
Relativistic_Correction = calc_relativistic_correction(Stoichiometry)
Full_E, Imaginary_Spectrum = data.calculate_asf(Stoichiometry)
if NearEdgeDataFile is not None:
NearEdge_Data = data.convert_data(data.load_data(NearEdgeDataFile, load_options),FromType=input_data_type,ToType='asf')
Full_E, Imaginary_Spectrum = data.merge_spectra(NearEdge_Data, Full_E, Imaginary_Spectrum, merge_points=merge_points, add_background=add_background, fix_distortions=fix_distortions)
Real_Spectrum = KK_PP(Full_E, Full_E, Imaginary_Spectrum, Relativistic_Correction)
if curve_tolerance is not None:
output_data = improve_accuracy(Full_E,Real_Spectrum,Imaginary_Spectrum, Relativistic_Correction, curve_tolerance, curve_recursion)
else:
Imaginary_Spectrum_Values = data.coeffs_to_ASF(Full_E, numpy.vstack((Imaginary_Spectrum,Imaginary_Spectrum[-1])))
output_data = numpy.vstack((Full_E,Real_Spectrum,Imaginary_Spectrum_Values)).T
return output_data
if __name__ == '__main__':
#use argparse here to get command line arguments
#process arguments and pass to a pythonic function
#I will abuse this section of code for initial testing
#Output = kk_calculate_real('../../data/Xy_norm_bgsub.txt', 'C10SH14', input_data_type='NEXAFS')
Output = kk_calculate_real('../../data/LaAlO3/LaAlO3_Exp.csv', 'LaAlO3', input_data_type='NEXAFS', fix_distortions=True, curve_tolerance=0.05)
#Output = kk_calculate_real('../../data/GaAs/As.xmu.csv', 'GaAs', input_data_type='NEXAFS', fix_distortions=True, curve_tolerance=0.05)
Stoichiometry = data.ParseChemicalFormula('LaAlO3')
#Stoichiometry = data.ParseChemicalFormula('GaAs')
Relativistic_Correction = calc_relativistic_correction(Stoichiometry)
ASF_E, ASF_Data = data.calculate_asf(Stoichiometry)
ASF_Data3 = data.coeffs_to_linear(ASF_E, ASF_Data, 0.1)
ASF_Data2 = data.coeffs_to_ASF(ASF_E, numpy.vstack((ASF_Data,ASF_Data[-1])))
#Test_E = (Output[1:,0]+Output[0:-1,0])*0.5
#Test_E = numpy.linspace(41257.87,41259.87,num=21)
#Real_Spectrum2 = KK_PP(Test_E, Output[:,0], Im, Relativistic_Correction)
import matplotlib
matplotlib.use('WXAgg')
import pylab
pylab.figure()
pylab.plot(Output[:,0],Output[:,1],'xg-',Output[:,0],Output[:,2],'xb-')
pylab.plot(ASF_E,ASF_Data2,'+r')
#pylab.plot(ASF_E,ASF_Data22,'xr')
pylab.plot(ASF_Data3[0],ASF_Data3[1],'r-')
#pylab.plot(Test_E,Real_Spectrum2,'*y')
pylab.xscale('log')
pylab.show()
| 2.3125 | 2 |
random-images/hexxy.py | dominicschaff/random | 0 | 5620 | <reponame>dominicschaff/random
from PIL import ImageDraw, Image
from math import cos,sin,radians
from random import randint
import sys
a = "a0A1b2B3c4C5d6D7e8E9f!F,g.G/h?H<i>I:j;J'k\"K\\l|L/m M\nn\tN@o#O$p%P^q&Q*r(R)s_S-t+T=u{U}v[V]w W x X y Y z Z"
if len(a) > 128:
print("TOO MANY CHARACTERS")
sys.exit(1)
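# Each character is rendered as a cluster of 7 hexagons: hexagon k is drawn BLUE when bit k
# (1, 2, 4, ..., 64) of the character's index in `a` is set, PINK otherwise (see drawHex),
# which is why `a` may hold at most 128 characters.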
# for i in a:
# print("%s -> %d %d %d %d %d %d %d "%(i,
# 1 if a.index(i) & 1 == 1 else 0,
# 1 if a.index(i) & 2 == 2 else 0,
# 1 if a.index(i) & 4 == 4 else 0,
# 1 if a.index(i) & 8 == 8 else 0,
# 1 if a.index(i) & 16 == 16 else 0,
# 1 if a.index(i) & 32 == 32 else 0,
# 1 if a.index(i) & 64 == 64 else 0,
# ))
# sys.exit(0)
WHITE=(255,255,255)
PINK=(217,154,197)
BLUE=(103,170,249)
BLACK=(0,0,0)
img = Image.new('RGB', (2560,1600), BLACK)
id = ImageDraw.Draw(img)
def hex(offset, size):
points = []
x,y = offset
for angle in range(0, 360, 60):
x += cos(radians(angle)) * size
y += sin(radians(angle)) * size
points.append((x, y))
return points
def drawHex(id, sx,sy,s,c):
ox = sx - cos(radians(120)) * s
oy = sy - sin(radians(120)) * s
id.polygon(hex((ox-s,oy-s*2),s), fill=BLUE if c & 1 == 1 else PINK)
id.polygon(hex((ox+s,oy-s*2),s), fill=BLUE if c & 2 == 2 else PINK)
id.polygon(hex((ox-s*2,oy),s), fill=BLUE if c & 4 == 4 else PINK)
id.polygon(hex((ox,oy),s), fill=BLUE if c & 8 == 8 else PINK)
id.polygon(hex((ox+s*2,oy),s), fill=BLUE if c & 16 == 16 else PINK)
id.polygon(hex((ox-s,oy+s*2),s), fill=BLUE if c & 32 == 32 else PINK)
id.polygon(hex((ox+s,oy+s*2),s), fill=BLUE if c & 64 == 64 else PINK)
q = """This is a test
0123456789%"""
s = 10
cutOff = int(2560/(s*7))
print (cutOff)
x,y = 0,0
for c in q:
drawHex(id, s*2 + x*s*7, s*3 + y*s*7, s, a.index(c))
x+=1
if x >= cutOff or c == "\n":
x,y = 0,y+1
img.show() | 2.671875 | 3 |
src/plugins/maimaidx.py | LonelyFantasy/Chiyuki-Bot | 0 | 5621 | <reponame>LonelyFantasy/Chiyuki-Bot
import math
from collections import defaultdict
from typing import List, Dict, Tuple, Any
import asyncio
from nonebot import on_command, on_message, on_notice, on_regex, get_driver
from nonebot.log import logger
from nonebot.permission import Permission
from nonebot.typing import T_State
from nonebot.adapters import Event, Bot
from nonebot.adapters.cqhttp import Message, MessageSegment, GroupMessageEvent, PrivateMessageEvent
from src.libraries.maimaidx_guess import GuessObject
from src.libraries.tool import hash
from src.libraries.maimaidx_music import *
from src.libraries.image import *
from src.libraries.maimai_best_40 import generate
import requests
import json
import random
import time
import re
from urllib import parse
driver = get_driver()
@driver.on_startup
def _():
logger.info("Load help text successfully")
help_text: dict = get_driver().config.help_text
help_text['mai'] = ('查看舞萌相关功能', """桜千雪です、よろしく。
可用命令如下:
今日舞萌 查看今天的舞萌运势
XXXmaimaiXXX什么 随机一首歌
随个[dx/标准][绿黄红紫白]<难度> 随机一首指定条件的乐曲
查歌<乐曲标题的一部分> 查询符合条件的乐曲
[绿黄红紫白]id<歌曲编号> 查询乐曲信息或谱面信息
<歌曲别名>是什么歌 查询乐曲别名对应的乐曲
定数查歌 <定数> 查询定数对应的乐曲
定数查歌 <定数下限> <定数上限>
分数线 <难度+歌曲id> <分数线> 详情请输入“分数线 帮助”查看""")
def song_txt(music: Music):
return Message([
{
"type": "text",
"data": {
"text": f"{music.id}. {music.title}\n"
}
},
{
"type": "image",
"data": {
"file": f"https://www.diving-fish.com/covers/{music.id}.jpg"
}
},
{
"type": "text",
"data": {
"text": f"\n{'/'.join(music.level)}"
}
}
])
def inner_level_q(ds1, ds2=None):
result_set = []
diff_label = ['Bas', 'Adv', 'Exp', 'Mst', 'ReM']
if ds2 is not None:
music_data = total_list.filter(ds=(ds1, ds2))
else:
music_data = total_list.filter(ds=ds1)
for music in music_data:
for i in music.diff:
result_set.append((music['id'], music['title'], music['ds'][i], diff_label[i], music['level'][i]))
return result_set
inner_level = on_command('inner_level ', aliases={'定数查歌 '})
@inner_level.handle()
async def _(bot: Bot, event: Event, state: T_State):
argv = str(event.get_message()).strip().split(" ")
if len(argv) > 2 or len(argv) == 0:
await inner_level.finish("命令格式为\n定数查歌 <定数>\n定数查歌 <定数下限> <定数上限>")
return
if len(argv) == 1:
result_set = inner_level_q(float(argv[0]))
else:
result_set = inner_level_q(float(argv[0]), float(argv[1]))
if len(result_set) > 50:
await inner_level.finish("数据超出 50 条,请尝试缩小查询范围")
return
s = ""
for elem in result_set:
s += f"{elem[0]}. {elem[1]} {elem[3]} {elem[4]}({elem[2]})\n"
await inner_level.finish(s.strip())
spec_rand = on_regex(r"^随个(?:dx|sd|标准)?[绿黄红紫白]?[0-9]+\+?")
@spec_rand.handle()
async def _(bot: Bot, event: Event, state: T_State):
level_labels = ['绿', '黄', '红', '紫', '白']
regex = "随个((?:dx|sd|标准))?([绿黄红紫白]?)([0-9]+\+?)"
res = re.match(regex, str(event.get_message()).lower())
try:
if res.groups()[0] == "dx":
tp = ["DX"]
elif res.groups()[0] == "sd" or res.groups()[0] == "标准":
tp = ["SD"]
else:
tp = ["SD", "DX"]
level = res.groups()[2]
if res.groups()[1] == "":
music_data = total_list.filter(level=level, type=tp)
else:
music_data = total_list.filter(level=level, diff=['绿黄红紫白'.index(res.groups()[1])], type=tp)
await spec_rand.send(song_txt(music_data.random()))
except Exception as e:
print(e)
await spec_rand.finish("随机命令错误,请检查语法")
mr = on_regex(r".*maimai.*什么")
@mr.handle()
async def _(bot: Bot, event: Event, state: T_State):
await mr.finish(song_txt(total_list.random()))
search_music = on_regex(r"^查歌.+")
@search_music.handle()
async def _(bot: Bot, event: Event, state: T_State):
regex = "查歌(.+)"
name = re.match(regex, str(event.get_message())).groups()[0].strip()
if name == "":
return
res = total_list.filter(title_search=name)
await search_music.finish(Message([
{"type": "text",
"data": {
"text": f"{music['id']}. {music['title']}\n"
}} for music in res]))
query_chart = on_regex(r"^([绿黄红紫白]?)id([0-9]+)")
@query_chart.handle()
async def _(bot: Bot, event: Event, state: T_State):
regex = "([绿黄红紫白]?)id([0-9]+)"
groups = re.match(regex, str(event.get_message())).groups()
level_labels = ['绿', '黄', '红', '紫', '白']
if groups[0] != "":
try:
level_index = level_labels.index(groups[0])
level_name = ['Basic', 'Advanced', 'Expert', 'Master', 'Re: MASTER']
name = groups[1]
music = total_list.by_id(name)
chart = music['charts'][level_index]
ds = music['ds'][level_index]
level = music['level'][level_index]
file = f"https://www.diving-fish.com/covers/{music['id']}.jpg"
if len(chart['notes']) == 4:
msg = f'''{level_name[level_index]} {level}({ds})
TAP: {chart['notes'][0]}
HOLD: {chart['notes'][1]}
SLIDE: {chart['notes'][2]}
BREAK: {chart['notes'][3]}
谱师: {chart['charter']}
'''
else:
msg = f'''{level_name[level_index]} {level}({ds})
TAP: {chart['notes'][0]}
HOLD: {chart['notes'][1]}
SLIDE: {chart['notes'][2]}
TOUCH: {chart['notes'][3]}
BREAK: {chart['notes'][4]}
谱师: {chart['charter']}
'''
await query_chart.send(Message([
{
"type": "text",
"data": {
"text": f"{music['id']}. {music['title']}\n"
}
},
{
"type": "image",
"data": {
"file": f"{file}"
}
},
{
"type": "text",
"data": {
"text": msg
}
}
]))
except Exception:
await query_chart.send("未找到该谱面")
else:
name = groups[1]
music = total_list.by_id(name)
try:
file = f"https://www.diving-fish.com/covers/{music['id']}.jpg"
await query_chart.send(Message([
{
"type": "text",
"data": {
"text": f"{music['id']}. {music['title']}\n"
}
},
{
"type": "image",
"data": {
"file": f"{file}"
}
},
{
"type": "text",
"data": {
"text": f"艺术家: {music['basic_info']['artist']}\n分类: {music['basic_info']['genre']}\nBPM: {music['basic_info']['bpm']}\n版本: {music['basic_info']['from']}\n难度: {'/'.join(music['level'])}"
}
}
]))
except Exception:
await query_chart.send("未找到该乐曲")
wm_list = ['拼机', '推分', '越级', '下埋', '夜勤', '练底力', '练手法', '打旧框', '干饭', '抓绝赞', '收歌']
jrwm = on_command('今日舞萌', aliases={'今日mai'})
@jrwm.handle()
async def _(bot: Bot, event: Event, state: T_State):
qq = int(event.get_user_id())
h2 = hash(qq)
h = h2
rp = h % 100
wm_value = []
for i in range(11):
wm_value.append(h & 3)
h >>= 2
s = f"今日人品值:{rp}\n"
for i in range(11):
if wm_value[i] == 3:
s += f'宜 {wm_list[i]}\n'
elif wm_value[i] == 0:
s += f'忌 {wm_list[i]}\n'
s += "千雪提醒您:打机时不要大力拍打或滑动哦\n今日推荐歌曲:"
music = total_list[h2 % len(total_list)]
await jrwm.finish(Message([
{"type": "text", "data": {"text": s}}
] + song_txt(music)))
music_aliases = defaultdict(list)
f = open('src/static/aliases.csv', 'r', encoding='utf-8')
tmp = f.readlines()
f.close()
for t in tmp:
arr = t.strip().split('\t')
for i in range(len(arr)):
if arr[i] != "":
music_aliases[arr[i].lower()].append(arr[0])
find_song = on_regex(r".+是什么歌")
@find_song.handle()
async def _(bot: Bot, event: Event, state: T_State):
regex = "(.+)是什么歌"
name = re.match(regex, str(event.get_message())).groups()[0].strip().lower()
if name not in music_aliases:
await find_song.finish("未找到此歌曲\n舞萌 DX 歌曲别名收集计划:https://docs.qq.com/sheet/DSXhaUXVsRlhxRmtJ")
return
result_set = music_aliases[name]
if len(result_set) == 1:
music = total_list.by_title(result_set[0])
await find_song.finish(Message([{"type": "text", "data": {"text": "您要找的是不是"}}] + song_txt(music)))
else:
s = '\n'.join(result_set)
await find_song.finish(f"您要找的可能是以下歌曲中的其中一首:\n{ s }")
query_score = on_command('分数线')
query_score_text = '''此功能为查找某首歌分数线设计。
命令格式:分数线 <难度+歌曲id> <分数线>
例如:分数线 白337 100
命令将返回分数线允许的 TAP GREAT 容错以及 BREAK 50落等价的 TAP GREAT 数。
以下为 TAP GREAT 的对应表:
GREAT/GOOD/MISS
TAP 1/2.5/5
HOLD 2/5/10
SLIDE 3/7.5/15
TOUCH 1/2.5/5
BREAK 5/12.5/25(外加200落)'''
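# For reference, the handler below weights notes as TAP/TOUCH=500, HOLD=1000, SLIDE=1500,
# BREAK=2500 base points; one TAP GREAT therefore deducts 100 points (20% of a TAP), and a
# BREAK 50落 additionally loses a quarter of that BREAK's share of the extra 1% bonus.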
query_score_mes = Message([{
"type": "image",
"data": {
"file": f"base64://{str(image_to_base64(text_to_image(query_score_text)), encoding='utf-8')}"
}
}])
@query_score.handle()
async def _(bot: Bot, event: Event, state: T_State):
r = "([绿黄红紫白])(?:id)?([0-9]+)"
argv = str(event.get_message()).strip().split(" ")
if len(argv) == 1 and argv[0] == '帮助':
await query_score.send(query_score_mes)
elif len(argv) == 2:
try:
grp = re.match(r, argv[0]).groups()
level_labels = ['绿', '黄', '红', '紫', '白']
level_labels2 = ['Basic', 'Advanced', 'Expert', 'Master', 'Re:MASTER']
level_index = level_labels.index(grp[0])
chart_id = grp[1]
line = float(argv[1])
music = total_list.by_id(chart_id)
            chart: Dict[str, Any] = music['charts'][level_index]
tap = int(chart['notes'][0])
slide = int(chart['notes'][2])
hold = int(chart['notes'][1])
touch = int(chart['notes'][3]) if len(chart['notes']) == 5 else 0
brk = int(chart['notes'][-1])
total_score = 500 * tap + slide * 1500 + hold * 1000 + touch * 500 + brk * 2500
break_bonus = 0.01 / brk
break_50_reduce = total_score * break_bonus / 4
reduce = 101 - line
if reduce <= 0 or reduce >= 101:
raise ValueError
await query_chart.send(f'''{music['title']} {level_labels2[level_index]}
分数线 {line}% 允许的最多 TAP GREAT 数量为 {(total_score * reduce / 10000):.2f}(每个-{10000 / total_score:.4f}%),
BREAK 50落(一共{brk}个)等价于 {(break_50_reduce / 100):.3f} 个 TAP GREAT(-{break_50_reduce / total_score * 100:.4f}%)''')
except Exception:
await query_chart.send("格式错误或未找到乐曲,输入“分数线 帮助”以查看帮助信息")
best_40_pic = on_command('b40')
@best_40_pic.handle()
async def _(bot: Bot, event: Event, state: T_State):
username = str(event.get_message()).strip()
print(event.message_id)
if username == "":
payload = {'qq': str(event.get_user_id())}
else:
payload = {'username': username}
img, success = await generate(payload)
if success == 400:
await best_40_pic.send("未找到此玩家,请确保此玩家的用户名和查分器中的用户名相同。")
elif success == 403:
await best_40_pic.send("该用户禁止了其他人获取数据。")
else:
await best_40_pic.send(Message([
MessageSegment.reply(event.message_id),
MessageSegment.image(f"base64://{str(image_to_base64(img), encoding='utf-8')}")
]))
disable_guess_music = on_command('猜歌设置', priority=0)
@disable_guess_music.handle()
async def _(bot: Bot, event: Event):
if event.message_type != "group":
return
arg = str(event.get_message())
group_members = await bot.get_group_member_list(group_id=event.group_id)
for m in group_members:
if m['user_id'] == event.user_id:
break
su = get_driver().config.superusers
if m['role'] != 'owner' and m['role'] != 'admin' and str(m['user_id']) not in su:
await disable_guess_music.finish("只有管理员可以设置猜歌")
return
db = get_driver().config.db
c = await db.cursor()
if arg == '启用':
await c.execute(f'update guess_table set enabled=1 where group_id={event.group_id}')
elif arg == '禁用':
await c.execute(f'update guess_table set enabled=0 where group_id={event.group_id}')
else:
await disable_guess_music.finish("请输入 猜歌设置 启用/禁用")
await db.commit()
await disable_guess_music.finish("设置成功")
guess_dict: Dict[Tuple[str, str], GuessObject] = {}
guess_cd_dict: Dict[Tuple[str, str], float] = {}
guess_music = on_command('猜歌', priority=0)
async def guess_music_loop(bot: Bot, event: Event, state: T_State):
await asyncio.sleep(10)
guess: GuessObject = state["guess_object"]
if guess.is_end:
return
cycle = state["cycle"]
if cycle < 6:
asyncio.create_task(bot.send(event, f"{cycle + 1}/7 这首歌" + guess.guess_options[cycle]))
else:
asyncio.create_task(bot.send(event, Message([
MessageSegment.text("7/7 这首歌封面的一部分是:"),
MessageSegment.image("base64://" + str(guess.b64image, encoding="utf-8")),
MessageSegment.text("答案将在 30 秒后揭晓")
])))
asyncio.create_task(give_answer(bot, event, state))
return
state["cycle"] += 1
asyncio.create_task(guess_music_loop(bot, event, state))
async def give_answer(bot: Bot, event: Event, state: T_State):
await asyncio.sleep(30)
guess: GuessObject = state["guess_object"]
if guess.is_end:
return
asyncio.create_task(bot.send(event, Message([MessageSegment.text("答案是:" + f"{guess.music['id']}. {guess.music['title']}\n"), MessageSegment.image(f"https://www.diving-fish.com/covers/{guess.music['id']}.jpg")])))
del guess_dict[state["k"]]
@guess_music.handle()
async def _(bot: Bot, event: Event, state: T_State):
mt = event.message_type
k = (mt, event.user_id if mt == "private" else event.group_id)
if mt == "group":
gid = event.group_id
db = get_driver().config.db
c = await db.cursor()
await c.execute(f"select * from guess_table where group_id={gid}")
data = await c.fetchone()
if data is None:
await c.execute(f'insert into guess_table values ({gid}, 1)')
elif data[1] == 0:
await guess_music.send("本群已禁用猜歌")
return
if k in guess_dict:
if k in guess_cd_dict and time.time() > guess_cd_dict[k] - 400:
            # if more than 200 seconds have already passed, automatically end the previous round
del guess_dict[k]
else:
await guess_music.send("当前已有正在进行的猜歌")
return
whitelists = get_driver().config.whitelists
if not (mt == "group" and gid in whitelists):
if len(guess_dict) >= 5:
await guess_music.finish("千雪有点忙不过来了。现在正在猜的群有点多,晚点再试试如何?")
return
if k in guess_cd_dict and time.time() < guess_cd_dict[k]:
await guess_music.finish(f"已经猜过啦,下次猜歌会在 {time.strftime('%H:%M', time.localtime(guess_cd_dict[k]))} 可用噢")
return
guess = GuessObject()
guess_dict[k] = guess
state["k"] = k
state["guess_object"] = guess
state["cycle"] = 0
guess_cd_dict[k] = time.time() + 600
await guess_music.send("我将从热门乐曲中选择一首歌,并描述它的一些特征,请输入歌曲的【id】、【歌曲标题】或【歌曲标题中 5 个以上连续的字符】进行猜歌(DX乐谱和标准乐谱视为两首歌)。猜歌时查歌等其他命令依然可用。\n警告:这个命令可能会很刷屏,管理员可以使用【猜歌设置】指令进行设置。")
asyncio.create_task(guess_music_loop(bot, event, state))
guess_music_solve = on_message(priority=20)
@guess_music_solve.handle()
async def _(bot: Bot, event: Event, state: T_State):
mt = event.message_type
k = (mt, event.user_id if mt == "private" else event.group_id)
if k not in guess_dict:
return
ans = str(event.get_message())
guess = guess_dict[k]
# await guess_music_solve.send(ans + "|" + guess.music['id'])
if ans == guess.music['id'] or (ans.lower() == guess.music['title'].lower()) or (len(ans) >= 5 and ans.lower() in guess.music['title'].lower()):
guess.is_end = True
del guess_dict[k]
await guess_music_solve.finish(Message([
MessageSegment.reply(event.message_id),
MessageSegment.text("猜对了,答案是:" + f"{guess.music['id']}. {guess.music['title']}\n"),
MessageSegment.image(f"https://www.diving-fish.com/covers/{guess.music['id']}.jpg")
]))
| 2.109375 | 2 |
trace_analysis/trace_analysis/architecture/interface.py | hsgwa/trace_analysis | 0 | 5622 | <gh_stars>0
# Copyright 2021 Research Institute of Systems Planning, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from abc import ABCMeta, abstractmethod
from trace_analysis.callback import CallbackBase
from trace_analysis.communication import VariablePassing, Communication
from trace_analysis.node import Node
UNDEFINED_STR = "UNDEFINED"
class PathAlias:
def __init__(self, alias: str, callback_names: List[str]):
self.path_name = alias
self.callback_names = callback_names
class ArchitectureInterface(metaclass=ABCMeta):
@property
@abstractmethod
def nodes(self) -> List[Node]:
pass
@property
@abstractmethod
def path_aliases(self) -> List[PathAlias]:
pass
@property
@abstractmethod
def communications(self) -> List[Communication]:
pass
@property
@abstractmethod
def variable_passings(self) -> List[VariablePassing]:
pass
class ArchitectureImporter(ArchitectureInterface):
@abstractmethod
def __init__(self) -> None:
pass
@abstractmethod
def exec(self, path: str, ignore_topics: Optional[List[str]] = None) -> None:
pass
class ArchitectureExporter(metaclass=ABCMeta):
@abstractmethod
def exec(self, architecture: ArchitectureInterface, path) -> None:
pass
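# Hypothetical sketch (not part of the package) of how the abstract exporter is meant to
# be subclassed; real importers/exporters implement the full interface:
#   class TextArchitectureExporter(ArchitectureExporter):
#       def exec(self, architecture: ArchitectureInterface, path) -> None:
#           with open(path, "w") as f:
#               f.write("\n".join(alias.path_name for alias in architecture.path_aliases))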
| 2.1875 | 2 |
disrank/__init__.py | treehousekingcomic/disrank | 1 | 5623 | from thkc_disrank import *
| 1.085938 | 1 |
layers/gin_layer.py | JakeStevens/benchmarking-gnns | 275 | 5624 | <reponame>JakeStevens/benchmarking-gnns<gh_stars>100-1000
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl.function as fn
"""
GIN: Graph Isomorphism Networks
HOW POWERFUL ARE GRAPH NEURAL NETWORKS? (<NAME>, <NAME>, <NAME> and <NAME>, ICLR 2019)
https://arxiv.org/pdf/1810.00826.pdf
"""
class GINLayer(nn.Module):
"""
[!] code adapted from dgl implementation of GINConv
Parameters
----------
apply_func : callable activation function/layer or None
If not None, apply this function to the updated node feature,
the :math:`f_\Theta` in the formula.
aggr_type :
Aggregator type to use (``sum``, ``max`` or ``mean``).
out_dim :
        Required for the batch norm layer; should match out_dim of apply_func if not None.
dropout :
Required for dropout of output features.
graph_norm :
boolean flag for output features normalization w.r.t. graph sizes.
batch_norm :
boolean flag for batch_norm layer.
residual :
boolean flag for using residual connection.
init_eps : optional
Initial :math:`\epsilon` value, default: ``0``.
learn_eps : bool, optional
If True, :math:`\epsilon` will be a learnable parameter.
"""
def __init__(self, apply_func, aggr_type, dropout, graph_norm, batch_norm, residual=False, init_eps=0, learn_eps=False):
super().__init__()
self.apply_func = apply_func
if aggr_type == 'sum':
self._reducer = fn.sum
elif aggr_type == 'max':
self._reducer = fn.max
elif aggr_type == 'mean':
self._reducer = fn.mean
else:
raise KeyError('Aggregator type {} not recognized.'.format(aggr_type))
self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.residual = residual
self.dropout = dropout
in_dim = apply_func.mlp.input_dim
out_dim = apply_func.mlp.output_dim
if in_dim != out_dim:
self.residual = False
# to specify whether eps is trainable or not.
if learn_eps:
self.eps = torch.nn.Parameter(torch.FloatTensor([init_eps]))
else:
self.register_buffer('eps', torch.FloatTensor([init_eps]))
self.bn_node_h = nn.BatchNorm1d(out_dim)
def forward(self, g, h, snorm_n):
h_in = h # for residual connection
g = g.local_var()
g.ndata['h'] = h
g.update_all(fn.copy_u('h', 'm'), self._reducer('m', 'neigh'))
h = (1 + self.eps) * h + g.ndata['neigh']
if self.apply_func is not None:
h = self.apply_func(h)
if self.graph_norm:
h = h* snorm_n # normalize activation w.r.t. graph size
if self.batch_norm:
h = self.bn_node_h(h) # batch normalization
h = F.relu(h) # non-linear activation
if self.residual:
h = h_in + h # residual connection
h = F.dropout(h, self.dropout, training=self.training)
return h
class ApplyNodeFunc(nn.Module):
"""
This class is used in class GINNet
Update the node feature hv with MLP
"""
def __init__(self, mlp):
super().__init__()
self.mlp = mlp
def forward(self, h):
h = self.mlp(h)
return h
class MLP(nn.Module):
"""MLP with linear output"""
def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
super().__init__()
self.linear_or_not = True # default is linear model
self.num_layers = num_layers
self.output_dim = output_dim
self.input_dim = input_dim
if num_layers < 1:
raise ValueError("number of layers should be positive!")
elif num_layers == 1:
# Linear model
self.linear = nn.Linear(input_dim, output_dim)
else:
# Multi-layer model
self.linear_or_not = False
self.linears = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
self.linears.append(nn.Linear(input_dim, hidden_dim))
for layer in range(num_layers - 2):
self.linears.append(nn.Linear(hidden_dim, hidden_dim))
self.linears.append(nn.Linear(hidden_dim, output_dim))
for layer in range(num_layers - 1):
self.batch_norms.append(nn.BatchNorm1d((hidden_dim)))
def forward(self, x):
if self.linear_or_not:
# If linear model
return self.linear(x)
else:
# If MLP
h = x
for i in range(self.num_layers - 1):
h = F.relu(self.batch_norms[i](self.linears[i](h)))
return self.linears[-1](h) | 3.03125 | 3 |
music/distance/aural/diatonic/__init__.py | jedhsu/music | 0 | 5625 | <filename>music/distance/aural/diatonic/__init__.py
"""
*mus . it . dia*
The simple diatonic intervals.
"""
from .second import MinorSecond
from .second import MajorSecond
from .third import MinorThird
from .third import MajorThird
from .fourth import PerfectFourth
from .fifth import Tritone
from .fifth import PerfectFifth
from .sixth import MinorSixth
from .sixth import MajorSixth
from .seventh import MinorSeventh
from .seventh import MajorSeventh
from .eighth import Octave
__all__ = [
"MinorSecond",
"MajorSecond",
"MinorThird",
"MajorThird",
"PerfectFourth",
"Tritone",
"PerfectFifth",
"MinorSixth",
"MajorSixth",
"MinorSeventh",
"MajorSeventh",
"Octave",
]
| 1.601563 | 2 |
selenium_tests/test_functions.py | AriTheGuitarMan/AriTheGuitarMan.github.io | 0 | 5626 | <reponame>AriTheGuitarMan/AriTheGuitarMan.github.io
# this file holds some common testing functions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
depurl = "localhost:3000"
def getElement(driver, xpath):
return WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, xpath)))
def login(driver, username, password):
driver.get(depurl)
elem = getElement(driver, "//input[@id='username']")
elem.clear()
elem.send_keys(username)
elem = getElement(driver, "//input[@id='password']")
elem.clear()
elem.send_keys(password)
elem.send_keys(Keys.RETURN)
def logout(driver):
elem = getElement(driver, "//a[text()='Logout']")
elem.click() | 2.4375 | 2 |
papirus_renderer.py | ryuchihoon/WeatherStation | 0 | 5627 | #-- coding: utf-8 --
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import os
import time
import collections
from PIL import Image, ImageOps, ImageDraw, ImageFont
code_2_icono = collections.defaultdict(lambda : ['38'])
kor_2_eng = collections.defaultdict(lambda : ['UNKNOWN'])
code_2_icono['SKY_O00'] = ['38']
code_2_icono['SKY_O01'] = ['01', '08']
code_2_icono['SKY_O02'] = ['02', '09']
code_2_icono['SKY_O03'] = ['03', '10']
code_2_icono['SKY_O04'] = ['12', '40']
code_2_icono['SKY_O05'] = ['13', '41']
code_2_icono['SKY_O06'] = ['14', '42']
code_2_icono['SKY_O07'] = ['18']
code_2_icono['SKY_O08'] = ['21']
code_2_icono['SKY_O09'] = ['32']
code_2_icono['SKY_O10'] = ['04']
code_2_icono['SKY_O11'] = ['29']
code_2_icono['SKY_O12'] = ['26']
code_2_icono['SKY_O13'] = ['27']
code_2_icono['SKY_O14'] = ['28']
code_2_icono['SKY_W00'] = ['38']
code_2_icono['SKY_W01'] = ['01', '08']
code_2_icono['SKY_W02'] = ['02', '09']
code_2_icono['SKY_W03'] = ['03', '10']
code_2_icono['SKY_W04'] = ['18']
code_2_icono['SKY_W07'] = ['21']
code_2_icono['SKY_W09'] = ['12', '40']
code_2_icono['SKY_W10'] = ['21']
code_2_icono['SKY_W11'] = ['04']
code_2_icono['SKY_W12'] = ['13', '41']
code_2_icono['SKY_W13'] = ['32']
kor_2_eng[u'좋음'] = ['GOOD']
kor_2_eng[u'보통'] = ['NORMAL']
kor_2_eng[u'나쁨'] = ['BAD']
kor_2_eng[u'매우 나쁨'] = ['V BAD']
def geticonfname(code, drawNight=False):
l = code_2_icono[code]
dname = os.path.join(os.path.dirname(__file__), "resources", "weather_icons_mod")
if len(l) > 1 and drawNight:
cur_hour = time.localtime().tm_hour
is_night = cur_hour < 5 or cur_hour > 18
if is_night:
return os.path.join(dname, l[1] + '.png')
else:
return os.path.join(dname, l[0] + '.png')
else:
return os.path.join(dname, l[0] + '.png')
BLACK = 0
WHITE = 1
class PapirusRenderer:
"""Renderer for Papirus HAT"""
def __init__(self, rotate=0, font_path=None):
if font_path:
self.font_path = font_path
else:
self.font_path = "/usr/share/fonts/truetype/freefont/FreeMono.ttf"
print("rotate:",rotate)
try:
from papirus import Papirus
self.papirus = Papirus(rotate=rotate)
self.canvas_size = self.papirus.size
print("papirus size : %s"%str(self.canvas_size))
except ImportError:
print("papirus import failed")
self.papirus = None
self.canvas_size = (264,176)
def render(self, weather, weather_forecast):
canvas = Image.new('1', self.canvas_size, WHITE)
print("font_path:",self.font_path)
fname = geticonfname(weather.weather_code, drawNight=True)
print("file:",fname)
self._drawImage(canvas, fname, 20,10,(100,100))
print("cur desc : %s"%str(weather.weather_desc))
print("cur airq : %s"%str(weather.air_quality))
temperature = str(weather.cur_temperature).split('.')[0] + u" \u2103"
self._drawText(canvas, temperature, 70,115, font_size=20, center_horizontal=True)
translated = kor_2_eng[weather.air_quality][0]
print("cur airq translated: %s"%translated)
self._drawText(canvas, translated, 70,140, font_size=20, center_horizontal=True)
base_x,base_y = 145,5
for i,w in enumerate(weather_forecast):
fname = geticonfname(w.weather_code)
self._drawImage(canvas, fname, base_x, base_y+55*i, (50,50))
temperature = str(w.min_temperature) + " / " + str(w.max_temperature)
self._drawText(canvas, temperature, base_x+80, base_y+28+55*i, font_size=14, center_horizontal=True)
# update time
self._drawText(canvas, time.strftime("%Y-%m-%d %H:%M",time.localtime()), 136, 165, font_size=9, center_horizontal=True)
        if self.papirus is None:
# save a image for debugging purpose
with open("result.jpg", "wb") as fp:
canvas.save(fp)
print("result file saved")
else:
self.papirus.display(canvas)
self.papirus.update()
def _drawImage(self, canvas, image_path, x, y, size):
image = Image.open(image_path)
image = ImageOps.grayscale(image)
image = image.resize(size)
image = image.convert("1", dither=Image.FLOYDSTEINBERG)
canvas.paste(image,(x,y))
def _drawText(self, canvas, text, x, y, font_size=20, center_horizontal=False):
draw = ImageDraw.Draw(canvas)
font = ImageFont.truetype(self.font_path, font_size)
text_draw_size = draw.textsize(text, font=font)
if center_horizontal:
x = x - text_draw_size[0]/2
draw.text( (x, y) , text, font=font, fill=BLACK)
| 2.171875 | 2 |
hydrobox/discharge/__init__.py | VForWaTer/hydrobox | 4 | 5628 | from .catchment import regime, flow_duration_curve
from . import indices | 0.988281 | 1 |
scripts/convert_keras2onnx.py | ecmwf-lab/infero | 8 | 5629 | <reponame>ecmwf-lab/infero
#
# (C) Copyright 1996- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import os
import numpy as np
import argparse
import keras
import keras2onnx
if __name__ == "__main__":
"""
Lightweight script to convert a keras model into a TFlite model
"""
parser = argparse.ArgumentParser("Data Augmentation")
parser.add_argument('keras_model_path', help="Path of the input keras model")
parser.add_argument('onnx_model_path', help="Path of the output onnx model")
parser.add_argument("--verify_with", help="Check the model by passing an input numpy path")
args = parser.parse_args()
# load the keras model
model = keras.models.load_model(args.keras_model_path)
model.summary()
# do the conversion
onnx_model = keras2onnx.convert_keras(model, model.name)
# write to file
file = open(args.onnx_model_path, "wb")
file.write(onnx_model.SerializeToString())
file.close()
| 2.234375 | 2 |
src/lava/lib/dl/slayer/utils/assistant.py | timcheck/lava-dl | 37 | 5630 | # Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
"""Assistant utility for automatically load network from network
description."""
import torch
class Assistant:
"""Assistant that bundles training, validation and testing workflow.
Parameters
----------
net : torch.nn.Module
network to train.
error : object or lambda
an error object or a lambda function that evaluates error.
It is expected to take ``(output, target)`` | ``(output, label)``
        as its arguments and return a scalar value.
optimizer : torch optimizer
the learning optimizer.
stats : slayer.utils.stats
learning stats logger. If None, stats will not be logged.
Defaults to None.
classifier : slayer.classifier or lambda
classifier object or lambda function that takes output and
returns the network prediction. None means regression mode.
Classification steps are bypassed.
Defaults to None.
count_log : bool
flag to enable count log. Defaults to False.
lam : float
lagrangian to merge network layer based loss.
None means no such additional loss.
If not None, net is expected to return the accumulated loss as second
argument. It is intended to be used with layer wise sparsity loss.
Defaults to None.
Attributes
----------
net
error
optimizer
stats
classifier
count_log
lam
device : torch.device or None
        the main device memory where the network is placed. It is None at start and
gets initialized on the first call.
"""
def __init__(
self,
net, error, optimizer,
stats=None, classifier=None, count_log=False,
lam=None
):
self.net = net
self.error = error
self.optimizer = optimizer
self.classifier = classifier
self.stats = stats
self.count_log = count_log
self.lam = lam
self.device = None
def reduce_lr(self, factor=10 / 3):
"""Reduces the learning rate of the optimizer by ``factor``.
Parameters
----------
factor : float
learning rate reduction factor. Defaults to 10/3.
Returns
-------
"""
for param_group in self.optimizer.param_groups:
print('\nLearning rate reduction from', param_group['lr'])
param_group['lr'] /= factor
def train(self, input, target):
"""Training assistant.
Parameters
----------
input : torch tensor
input tensor.
target : torch tensor
ground truth or label.
Returns
-------
output
network's output.
count : optional
spike count if ``count_log`` is enabled
"""
self.net.train()
if self.device is None:
for p in self.net.parameters():
self.device = p.device
break
device = self.device
input = input.to(device)
target = target.to(device)
count = None
if self.count_log is True:
if self.lam is None:
output, count = self.net(input)
else:
output, net_loss, count = self.net(input)
else:
if self.lam is None:
output = self.net(input)
else:
output, net_loss = self.net(input)
loss = self.error(output, target)
if self.stats is not None:
self.stats.training.num_samples += input.shape[0]
self.stats.training.loss_sum += loss.cpu().data.item() \
* output.shape[0]
if self.classifier is not None: # classification
self.stats.training.correct_samples += torch.sum(
self.classifier(output) == target
).cpu().data.item()
if self.lam is not None: # add net_loss before backward step
loss += self.lam * net_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
if count is None:
return output
return output, count
def test(self, input, target):
"""Testing assistant.
Parameters
----------
input : torch tensor
input tensor.
target : torch tensor
ground truth or label.
Returns
-------
output
network's output.
count : optional
spike count if ``count_log`` is enabled
"""
self.net.eval()
if self.device is None:
for p in self.net.parameters():
self.device = p.device
break
device = self.device
with torch.no_grad():
input = input.to(device)
target = target.to(device)
count = None
if self.count_log is True:
if self.lam is None:
output, count = self.net(input)
else:
output, _, count = self.net(input)
else:
if self.lam is None:
output = self.net(input)
else:
output, _ = self.net(input)
loss = self.error(output, target)
if self.stats is not None:
self.stats.testing.num_samples += input.shape[0]
self.stats.testing.loss_sum += loss.cpu().data.item() \
* output.shape[0]
if self.classifier is not None: # classification
self.stats.testing.correct_samples += torch.sum(
self.classifier(output) == target
).cpu().data.item()
if count is None:
return output
return output, count
def valid(self, input, target):
"""Validation assistant.
Parameters
----------
input : torch tensor
input tensor.
target : torch tensor
ground truth or label.
Returns
-------
output
network's output.
count : optional
spike count if ``count_log`` is enabled
"""
self.net.eval()
with torch.no_grad():
device = self.net.device
input = input.to(device)
target = target.to(device)
count = None
if self.count_log is True:
if self.lam is None:
output, count = self.net(input)
else:
output, _, count = self.net(input)
else:
if self.lam is None:
output = self.net(input)
else:
output, _ = self.net(input)
loss = self.error(output, target)
if self.stats is not None:
self.stats.validation.num_samples += input.shape[0]
                self.stats.validation.loss_sum += loss.cpu().data.item() \
                    * output.shape[0]
if self.classifier is not None: # classification
self.stats.validation.correct_samples += torch.sum(
self.classifier(output) == target
).cpu().data.item()
if count is None:
return output
return output, count
| 2.59375 | 3 |
lstm-synthetic-wave-anomaly-detect.py | cse-icon-dataAnalytics/lstm-anomaly-detect | 178 | 5631 | <reponame>cse-icon-dataAnalytics/lstm-anomaly-detect
""" Inspired by example from
https://github.com/Vict0rSch/deep_learning/tree/master/keras/recurrent
Uses the TensorFlow backend
The basic idea is to detect anomalies in a time-series.
"""
import matplotlib.pyplot as plt
import numpy as np
import time
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from numpy import arange, sin, pi, random
np.random.seed(1234)
# Global hyper-parameters
sequence_length = 100
random_data_dup = 10  # each sample randomly duplicated between 0 and 10 times (inclusive), see dropin function
epochs = 1
batch_size = 50
def dropin(X, y):
""" The name suggests the inverse of dropout, i.e. adding more samples. See Data Augmentation section at
http://simaaron.github.io/Estimating-rainfall-from-weather-radar-readings-using-recurrent-neural-networks/
:param X: Each row is a training sequence
    :param y: The target we train on and will later predict
:return: new augmented X, y
"""
print("X shape:", X.shape)
print("y shape:", y.shape)
X_hat = []
y_hat = []
for i in range(0, len(X)):
for j in range(0, np.random.random_integers(0, random_data_dup)):
X_hat.append(X[i, :])
y_hat.append(y[i])
return np.asarray(X_hat), np.asarray(y_hat)
def gen_wave():
""" Generate a synthetic wave by adding up a few sine waves and some noise
:return: the final wave
"""
t = np.arange(0.0, 10.0, 0.01)
wave1 = sin(2 * 2 * pi * t)
noise = random.normal(0, 0.1, len(t))
wave1 = wave1 + noise
print("wave1", len(wave1))
wave2 = sin(2 * pi * t)
print("wave2", len(wave2))
t_rider = arange(0.0, 0.5, 0.01)
wave3 = sin(10 * pi * t_rider)
print("wave3", len(wave3))
insert = round(0.8 * len(t))
wave1[insert:insert + 50] = wave1[insert:insert + 50] + wave3
return wave1 + wave2
def z_norm(result):
result_mean = result.mean()
result_std = result.std()
result -= result_mean
result /= result_std
return result, result_mean
def get_split_prep_data(train_start, train_end,
test_start, test_end):
data = gen_wave()
print("Length of Data", len(data))
# train data
print "Creating train data..."
result = []
for index in range(train_start, train_end - sequence_length):
result.append(data[index: index + sequence_length])
result = np.array(result) # shape (samples, sequence_length)
result, result_mean = z_norm(result)
print "Mean of train data : ", result_mean
print "Train data shape : ", result.shape
train = result[train_start:train_end, :]
np.random.shuffle(train) # shuffles in-place
X_train = train[:, :-1]
y_train = train[:, -1]
X_train, y_train = dropin(X_train, y_train)
# test data
print "Creating test data..."
result = []
for index in range(test_start, test_end - sequence_length):
result.append(data[index: index + sequence_length])
result = np.array(result) # shape (samples, sequence_length)
result, result_mean = z_norm(result)
print "Mean of test data : ", result_mean
print "Test data shape : ", result.shape
X_test = result[:, :-1]
y_test = result[:, -1]
print("Shape X_train", np.shape(X_train))
print("Shape X_test", np.shape(X_test))
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
return X_train, y_train, X_test, y_test
def build_model():
model = Sequential()
layers = {'input': 1, 'hidden1': 64, 'hidden2': 256, 'hidden3': 100, 'output': 1}
model.add(LSTM(
input_length=sequence_length - 1,
input_dim=layers['input'],
output_dim=layers['hidden1'],
return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(
layers['hidden2'],
return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(
layers['hidden3'],
return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(
output_dim=layers['output']))
model.add(Activation("linear"))
start = time.time()
model.compile(loss="mse", optimizer="rmsprop")
print "Compilation Time : ", time.time() - start
return model
def run_network(model=None, data=None):
global_start_time = time.time()
if data is None:
print 'Loading data... '
# train on first 700 samples and test on next 300 samples (has anomaly)
X_train, y_train, X_test, y_test = get_split_prep_data(0, 700, 500, 1000)
else:
X_train, y_train, X_test, y_test = data
print '\nData Loaded. Compiling...\n'
if model is None:
model = build_model()
try:
print("Training...")
model.fit(
X_train, y_train,
batch_size=batch_size, nb_epoch=epochs, validation_split=0.05)
print("Predicting...")
predicted = model.predict(X_test)
print("Reshaping predicted")
predicted = np.reshape(predicted, (predicted.size,))
except KeyboardInterrupt:
print("prediction exception")
print 'Training duration (s) : ', time.time() - global_start_time
return model, y_test, 0
try:
plt.figure(1)
plt.subplot(311)
plt.title("Actual Test Signal w/Anomalies")
plt.plot(y_test[:len(y_test)], 'b')
plt.subplot(312)
plt.title("Predicted Signal")
plt.plot(predicted[:len(y_test)], 'g')
plt.subplot(313)
plt.title("Squared Error")
mse = ((y_test - predicted) ** 2)
plt.plot(mse, 'r')
plt.show()
except Exception as e:
print("plotting exception")
print str(e)
print 'Training duration (s) : ', time.time() - global_start_time
return model, y_test, predicted
run_network()
| 3.484375 | 3 |
hpc-historias-clinicas/historias/migrations/0007_auto_20150425_1459.py | btenaglia/hpc-historias-clinicas | 0 | 5632 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('historias', '0006_auto_20150413_0001'),
]
operations = [
migrations.AlterField(
model_name='historias',
name='fecha_ingreso',
field=models.DateField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 468359), help_text='Formato: dd/mm/yyyy', verbose_name='Fecha de Ingreso'),
preserve_default=True,
),
migrations.AlterField(
model_name='historias',
name='hora_ingreso',
field=models.TimeField(default=datetime.datetime(2015, 4, 25, 14, 59, 14, 468307), help_text='Formato: hh:mm', verbose_name='Hora de Ingreso'),
preserve_default=True,
),
migrations.AlterField(
model_name='ubicaciones',
name='sala',
field=models.CharField(max_length=10, choices=[(b'SALA 1', b'SALA 1'), (b'SALA 2', b'SALA 2'), (b'SALA 3', b'SALA 3'), (b'SALA 4', b'SALA 4'), (b'SALA 5', b'SALA 5'), (b'GAURDIA', b'GAURDIA'), (b'NEO', b'NEO'), (b'UTI', b'UTI'), (b'UCO', b'UCO'), (b'PRE PARTO', b'PRE PARTO')]),
preserve_default=True,
),
]
| 1.648438 | 2 |
venv/Lib/site-packages/har2case/__about__.py | Verckolf/MyInterfaceTest | 0 | 5633 | __title__ = 'har2case'
__description__ = 'Convert HAR(HTTP Archive) to YAML/JSON testcases for HttpRunner.'
__url__ = 'https://github.com/HttpRunner/har2case'
__version__ = '0.2.0'
__author__ = 'debugtalk'
__author_email__ = '<EMAIL>'
__license__ = 'Apache-2.0'
__copyright__ = 'Copyright 2017 debugtalk' | 1.109375 | 1 |
app_id_utils.py | woctezuma/match-steam-banners | 0 | 5634 | import os
from pathlib import Path
from data_utils import get_data_path, get_image_data_path, get_image_extension
def app_id_to_image_filename(app_id, is_horizontal_banner=False):
image_data_path = get_image_data_path(is_horizontal_banner)
image_filename = image_data_path + str(app_id) + get_image_extension()
return image_filename
def image_filename_to_app_id(image_filename):
base_name = os.path.basename(image_filename)
    app_id = os.path.splitext(base_name)[0]
return app_id
def list_app_ids(is_horizontal_banner=False):
image_data_path = get_image_data_path(is_horizontal_banner)
image_filenames = Path(image_data_path).glob("*" + get_image_extension())
app_ids = [image_filename_to_app_id(filename) for filename in image_filenames]
app_ids = sorted(app_ids, key=int)
return app_ids
def get_frozen_app_ids_filename():
frozen_app_ids_filename = get_data_path() + "frozen_app_ids.txt"
return frozen_app_ids_filename
def freeze_app_ids(app_ids, output_file_name=None):
if output_file_name is None:
output_file_name = get_frozen_app_ids_filename()
with open(output_file_name, "w", encoding="utf8") as f:
for app_id in app_ids:
f.write("{}\n".format(app_id))
return
def load_frozen_app_ids(input_file_name=None):
if input_file_name is None:
input_file_name = get_frozen_app_ids_filename()
with open(input_file_name, "r", encoding="utf8") as f:
# Do not convert to a set object, or any other conversion, because we want to keep the list order as it is.
# Just read the list from the file. That is all there is to do. Otherwise, appIDs will be scrambled!
frozen_app_ids = [app_id.strip() for app_id in f.readlines()]
return frozen_app_ids
def get_frozen_app_ids(is_horizontal_banner=False):
try:
frozen_app_ids = load_frozen_app_ids()
except FileNotFoundError:
print("Creating {}".format(get_frozen_app_ids_filename()))
frozen_app_ids = list_app_ids(is_horizontal_banner=is_horizontal_banner)
freeze_app_ids(frozen_app_ids)
return frozen_app_ids
| 2.453125 | 2 |
upload.py | sjm446/aMAZEd | 0 | 5635 | <reponame>sjm446/aMAZEd
#!/usr/bin/env python
import boto3
import random
import os
BUCKET=os.environ.get('EXPORT_S3_BUCKET_URL')
if (BUCKET != None):
s3 = boto3.client('s3')
with open("maze.txt", "rb") as f:
s3.upload_fileobj(f, BUCKET, "maze"+str(random.randrange(100000))+".txt")
else:
print("EXPORT_S3_BUCKET_URL was not set so not uploading file")
| 2.53125 | 3 |
zerver/management/commands/list_realms.py | rtzll/zulip | 0 | 5636 | <filename>zerver/management/commands/list_realms.py<gh_stars>0
import sys
from typing import Any
from argparse import ArgumentParser
from zerver.models import Realm
from zerver.lib.management import ZulipBaseCommand
class Command(ZulipBaseCommand):
help = """List realms in the server and it's configuration settings(optional).
Usage examples:
./manage.py list_realms
./manage.py list_realms --all"""
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument("--all",
dest="all",
action="store_true",
default=False,
help="Print all the configuration settings of the realms.")
def handle(self, *args: Any, **options: Any) -> None:
realms = Realm.objects.all()
outer_format = "%-5s %-40s %-40s"
inner_format = "%-40s %s"
deactivated = False
if not options["all"]:
print(outer_format % ("id", "string_id", "name"))
print(outer_format % ("--", "---------", "----"))
for realm in realms:
if realm.deactivated:
print(self.style.ERROR(outer_format % (realm.id, realm.string_id, realm.name)))
deactivated = True
else:
print(outer_format % (realm.id, realm.string_id, realm.name))
if deactivated:
print(self.style.WARNING("\nRed rows represent deactivated realms."))
sys.exit(0)
# The remaining code path is the --all case.
identifier_attributes = ["id", "name", "string_id"]
for realm in realms:
# Start with just all the fields on the object, which is
# hacky but doesn't require any work to maintain.
realm_dict = realm.__dict__
# Remove a field that is confusingly useless
del realm_dict['_state']
# Fix the one bitfield to display useful data
realm_dict['authentication_methods'] = str(realm.authentication_methods_dict())
for key in identifier_attributes:
if realm.deactivated:
print(self.style.ERROR(inner_format % (key, realm_dict[key])))
deactivated = True
else:
print(inner_format % (key, realm_dict[key]))
            for key, value in sorted(realm_dict.items()):
if key not in identifier_attributes:
if realm.deactivated:
print(self.style.ERROR(inner_format % (key, value)))
else:
print(inner_format % (key, value))
print("-" * 80)
if deactivated:
print(self.style.WARNING("\nRed is used to highlight deactivated realms."))
| 2.53125 | 3 |
tests/integration/ec2/test_connection.py | bopopescu/debpkg_python-boto | 15 | 5637 | # Copyright (c) 2006-2010 <NAME> http://garnaat.org/
# Copyright (c) 2009, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the EC2Connection
"""
import unittest
import time
import telnetlib
import socket
from nose.plugins.attrib import attr
from boto.ec2.connection import EC2Connection
from boto.exception import EC2ResponseError
class EC2ConnectionTest(unittest.TestCase):
ec2 = True
@attr('notdefault')
def test_launch_permissions(self):
# this is my user_id, if you want to run these tests you should
# replace this with yours or they won't work
user_id = '963068290131'
print '--- running EC2Connection tests ---'
c = EC2Connection()
# get list of private AMI's
rs = c.get_all_images(owners=[user_id])
assert len(rs) > 0
# now pick the first one
image = rs[0]
# temporarily make this image runnable by everyone
status = image.set_launch_permissions(group_names=['all'])
assert status
d = image.get_launch_permissions()
assert 'groups' in d
assert len(d['groups']) > 0
# now remove that permission
status = image.remove_launch_permissions(group_names=['all'])
assert status
time.sleep(10)
d = image.get_launch_permissions()
assert 'groups' not in d
def test_1_basic(self):
# create 2 new security groups
c = EC2Connection()
group1_name = 'test-%d' % int(time.time())
group_desc = 'This is a security group created during unit testing'
group1 = c.create_security_group(group1_name, group_desc)
time.sleep(2)
group2_name = 'test-%d' % int(time.time())
group_desc = 'This is a security group created during unit testing'
group2 = c.create_security_group(group2_name, group_desc)
# now get a listing of all security groups and look for our new one
rs = c.get_all_security_groups()
found = False
for g in rs:
if g.name == group1_name:
found = True
assert found
# now pass arg to filter results to only our new group
rs = c.get_all_security_groups([group1_name])
assert len(rs) == 1
# try some group to group authorizations/revocations
# first try the old style
status = c.authorize_security_group(group1.name,
group2.name,
group2.owner_id)
assert status
status = c.revoke_security_group(group1.name,
group2.name,
group2.owner_id)
assert status
# now try specifying a specific port
status = c.authorize_security_group(group1.name,
group2.name,
group2.owner_id,
'tcp', 22, 22)
assert status
status = c.revoke_security_group(group1.name,
group2.name,
group2.owner_id,
'tcp', 22, 22)
assert status
# now delete the second security group
status = c.delete_security_group(group2_name)
# now make sure it's really gone
rs = c.get_all_security_groups()
found = False
for g in rs:
if g.name == group2_name:
found = True
assert not found
group = group1
# now try to launch apache image with our new security group
rs = c.get_all_images()
img_loc = 'ec2-public-images/fedora-core4-apache.manifest.xml'
for image in rs:
if image.location == img_loc:
break
reservation = image.run(security_groups=[group.name])
instance = reservation.instances[0]
while instance.state != 'running':
print '\tinstance is %s' % instance.state
time.sleep(30)
instance.update()
# instance in now running, try to telnet to port 80
t = telnetlib.Telnet()
try:
t.open(instance.dns_name, 80)
except socket.error:
pass
# now open up port 80 and try again, it should work
group.authorize('tcp', 80, 80, '0.0.0.0/0')
t.open(instance.dns_name, 80)
t.close()
# now revoke authorization and try again
group.revoke('tcp', 80, 80, '0.0.0.0/0')
try:
t.open(instance.dns_name, 80)
except socket.error:
pass
# now kill the instance and delete the security group
instance.terminate()
# check that state and previous_state have updated
assert instance.state == 'shutting-down'
assert instance.state_code == 32
assert instance.previous_state == 'running'
assert instance.previous_state_code == 16
# unfortunately, I can't delete the sg within this script
#sg.delete()
# create a new key pair
key_name = 'test-%d' % int(time.time())
status = c.create_key_pair(key_name)
assert status
# now get a listing of all key pairs and look for our new one
rs = c.get_all_key_pairs()
found = False
for k in rs:
if k.name == key_name:
found = True
assert found
# now pass arg to filter results to only our new key pair
rs = c.get_all_key_pairs([key_name])
assert len(rs) == 1
key_pair = rs[0]
# now delete the key pair
status = c.delete_key_pair(key_name)
# now make sure it's really gone
rs = c.get_all_key_pairs()
found = False
for k in rs:
if k.name == key_name:
found = True
assert not found
# short test around Paid AMI capability
demo_paid_ami_id = 'ami-bd9d78d4'
demo_paid_ami_product_code = 'A79EC0DB'
l = c.get_all_images([demo_paid_ami_id])
assert len(l) == 1
assert len(l[0].product_codes) == 1
assert l[0].product_codes[0] == demo_paid_ami_product_code
print '--- tests completed ---'
def test_dry_run(self):
c = EC2Connection()
dry_run_msg = 'Request would have succeeded, but DryRun flag is set.'
try:
rs = c.get_all_images(dry_run=True)
self.fail("Should have gotten an exception")
except EC2ResponseError, e:
self.assertTrue(dry_run_msg in str(e))
try:
rs = c.run_instances(
image_id='ami-a0cd60c9',
instance_type='m1.small',
dry_run=True
)
self.fail("Should have gotten an exception")
except EC2ResponseError, e:
self.assertTrue(dry_run_msg in str(e))
# Need an actual instance for the rest of this...
rs = c.run_instances(
image_id='ami-a0cd60c9',
instance_type='m1.small'
)
time.sleep(120)
try:
rs = c.stop_instances(
instance_ids=[rs.instances[0].id],
dry_run=True
)
self.fail("Should have gotten an exception")
except EC2ResponseError, e:
self.assertTrue(dry_run_msg in str(e))
try:
rs = c.terminate_instances(
instance_ids=[rs.instances[0].id],
dry_run=True
)
self.fail("Should have gotten an exception")
except EC2ResponseError, e:
self.assertTrue(dry_run_msg in str(e))
# And kill it.
rs.instances[0].terminate()
| 2.015625 | 2 |
docusign_esign/models/conditional_recipient_rule_filter.py | joekohlsdorf/docusign-esign-python-client | 58 | 5638 | <filename>docusign_esign/models/conditional_recipient_rule_filter.py
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class ConditionalRecipientRuleFilter(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'operator': 'str',
'recipient_id': 'str',
'scope': 'str',
'tab_id': 'str',
'tab_label': 'str',
'tab_type': 'str',
'value': 'str'
}
attribute_map = {
'operator': 'operator',
'recipient_id': 'recipientId',
'scope': 'scope',
'tab_id': 'tabId',
'tab_label': 'tabLabel',
'tab_type': 'tabType',
'value': 'value'
}
def __init__(self, _configuration=None, **kwargs): # noqa: E501
"""ConditionalRecipientRuleFilter - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._operator = None
self._recipient_id = None
self._scope = None
self._tab_id = None
self._tab_label = None
self._tab_type = None
self._value = None
self.discriminator = None
setattr(self, "_{}".format('operator'), kwargs.get('operator', None))
setattr(self, "_{}".format('recipient_id'), kwargs.get('recipient_id', None))
setattr(self, "_{}".format('scope'), kwargs.get('scope', None))
setattr(self, "_{}".format('tab_id'), kwargs.get('tab_id', None))
setattr(self, "_{}".format('tab_label'), kwargs.get('tab_label', None))
setattr(self, "_{}".format('tab_type'), kwargs.get('tab_type', None))
setattr(self, "_{}".format('value'), kwargs.get('value', None))
@property
def operator(self):
"""Gets the operator of this ConditionalRecipientRuleFilter. # noqa: E501
# noqa: E501
:return: The operator of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._operator
@operator.setter
def operator(self, operator):
"""Sets the operator of this ConditionalRecipientRuleFilter.
# noqa: E501
:param operator: The operator of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._operator = operator
@property
def recipient_id(self):
"""Gets the recipient_id of this ConditionalRecipientRuleFilter. # noqa: E501
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document. # noqa: E501
:return: The recipient_id of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._recipient_id
@recipient_id.setter
def recipient_id(self, recipient_id):
"""Sets the recipient_id of this ConditionalRecipientRuleFilter.
Unique for the recipient. It is used by the tab element to indicate which recipient is to sign the Document. # noqa: E501
:param recipient_id: The recipient_id of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._recipient_id = recipient_id
@property
def scope(self):
"""Gets the scope of this ConditionalRecipientRuleFilter. # noqa: E501
# noqa: E501
:return: The scope of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._scope
@scope.setter
def scope(self, scope):
"""Sets the scope of this ConditionalRecipientRuleFilter.
# noqa: E501
:param scope: The scope of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._scope = scope
@property
def tab_id(self):
"""Gets the tab_id of this ConditionalRecipientRuleFilter. # noqa: E501
The unique identifier for the tab. The tabid can be retrieved with the [ML:GET call]. # noqa: E501
:return: The tab_id of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._tab_id
@tab_id.setter
def tab_id(self, tab_id):
"""Sets the tab_id of this ConditionalRecipientRuleFilter.
The unique identifier for the tab. The tabid can be retrieved with the [ML:GET call]. # noqa: E501
:param tab_id: The tab_id of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._tab_id = tab_id
@property
def tab_label(self):
"""Gets the tab_label of this ConditionalRecipientRuleFilter. # noqa: E501
The label string associated with the tab. # noqa: E501
:return: The tab_label of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._tab_label
@tab_label.setter
def tab_label(self, tab_label):
"""Sets the tab_label of this ConditionalRecipientRuleFilter.
The label string associated with the tab. # noqa: E501
:param tab_label: The tab_label of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._tab_label = tab_label
@property
def tab_type(self):
"""Gets the tab_type of this ConditionalRecipientRuleFilter. # noqa: E501
# noqa: E501
:return: The tab_type of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._tab_type
@tab_type.setter
def tab_type(self, tab_type):
"""Sets the tab_type of this ConditionalRecipientRuleFilter.
# noqa: E501
:param tab_type: The tab_type of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._tab_type = tab_type
@property
def value(self):
"""Gets the value of this ConditionalRecipientRuleFilter. # noqa: E501
Specifies the value of the tab. # noqa: E501
:return: The value of this ConditionalRecipientRuleFilter. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this ConditionalRecipientRuleFilter.
Specifies the value of the tab. # noqa: E501
:param value: The value of this ConditionalRecipientRuleFilter. # noqa: E501
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ConditionalRecipientRuleFilter, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConditionalRecipientRuleFilter):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ConditionalRecipientRuleFilter):
return True
return self.to_dict() != other.to_dict()
| 1.867188 | 2 |
conman/routes/apps.py | meshy/django-conman | 0 | 5639 | <reponame>meshy/django-conman
from django.apps import AppConfig
from django.core.checks import register
from . import checks
class RouteConfig(AppConfig):
"""The AppConfig for conman routes."""
name = 'conman.routes'
def ready(self):
"""Register checks for conman routes."""
register(checks.polymorphic_installed)
register(checks.subclasses_available)
register(checks.subclasses_in_admin)
| 2.234375 | 2 |
Other/transactionlog entries since timestamp.py | DJHig/TM1py-samples | 1 | 5640 | <filename>Other/transactionlog entries since timestamp.py
"""
Get all TM1 transactions for all cubes starting to a specific date.
"""
import configparser
config = configparser.ConfigParser()
config.read(r'..\config.ini')
from datetime import datetime
from TM1py.Services import TM1Service
with TM1Service(**config['tm1srv01']) as tm1:
# Timestamp for Message-Log parsing
timestamp = datetime(year=2018, month=2, day=15, hour=16, minute=2, second=0)
# Get all entries since timestamp
entries = tm1.server.get_transaction_log_entries(since=timestamp)
# loop through entries
for entry in entries:
# Do stuff
print(entry['TimeStamp'], entry)
| 2.96875 | 3 |
src/patteRNA/Dataset.py | AviranLab/patteRNA | 12 | 5641 | <filename>src/patteRNA/Dataset.py
import logging
import numpy as np
from scipy.stats import entropy
from patteRNA.Transcript import Transcript
from patteRNA import filelib
logger = logging.getLogger(__name__)
class Dataset:
def __init__(self, fp_observations, fp_sequences=None, fp_references=None):
self.fp_obs = fp_observations
self.fp_fasta = fp_sequences
self.fp_refs = fp_references
self.rnas = dict()
self.stats = dict()
def load_rnas(self, log_flag=False):
observations_dict = filelib.parse_observations(self.fp_obs)
observations_rnas = set(observations_dict.keys())
dataset_rnas = observations_rnas
sequences_dict = None
if self.fp_fasta:
sequences_dict = filelib.parse_fasta(self.fp_fasta)
sequences_rnas = set(sequences_dict.keys())
            # Cross-reference the input files so every transcript has both a sequence and probing data
for rna in observations_rnas.difference(sequences_rnas):
print('WARNING - No sequence found for RNA: {}'.format(rna))
sequences_dict[rna] = ''.join(['N'] * len(observations_dict[rna]))
for rna in sequences_rnas.difference(observations_rnas):
print('WARNING - No probing data found for RNA: {}'.format(rna))
observations_dict[rna] = np.tile(np.nan, len(sequences_dict[rna]))
dataset_rnas.update(sequences_rnas)
for rna_name in dataset_rnas:
if self.fp_fasta:
self.rnas[rna_name] = Transcript(rna_name, sequences_dict[rna_name], observations_dict[rna_name])
else:
self.rnas[rna_name] = Transcript(rna_name, 'N' * len(observations_dict[rna_name]),
observations_dict[rna_name])
if log_flag:
for rna in self.rnas:
self.rnas[rna].log_transform()
self.compute_stats()
def compute_stats(self):
"""
Parse all finite observations in the input file and compute some statistics on the data.
These statistics are mostly used to initialize parameters of the emission model before training.
"""
finite_obs = []
total_obs = 0
up_ref = 0
p_ref = 0
for rna in self.rnas:
finite_obs.extend(self.rnas[rna].obs[np.isfinite(self.rnas[rna].obs)])
total_obs += len(self.rnas[rna].obs)
up_ref += int(np.sum(self.rnas[rna].ref == 0))
p_ref += int(np.sum(self.rnas[rna].ref == 1))
self.stats['quantile_basis'] = np.linspace(0, 1, 1000)
self.stats['quantiles'] = np.quantile(finite_obs, self.stats["quantile_basis"])
self.stats['P25'], self.stats['P75'] = np.percentile(finite_obs, (25, 75))
self.stats['P40'], self.stats['P60'] = np.percentile(finite_obs, (40, 60))
self.stats['n_obs'] = len(finite_obs)
self.stats['up_ref'] = up_ref
self.stats['p_ref'] = p_ref
self.stats['total_obs'] = total_obs
self.stats['continuous_variance'] = np.var(finite_obs)
self.stats['minimum'] = np.min(finite_obs)
self.stats['maximum'] = np.max(finite_obs)
self.stats['finite_obs'] = finite_obs
self.stats['histogram_bins'] = np.linspace(self.stats['minimum'], self.stats['maximum'], 20)
self.stats['histogram'], _ = np.histogram(finite_obs,
bins=self.stats['histogram_bins'],
density=True)
def spawn_training_set(self, kl_div):
"""
Spawn a training set (smaller than or equal size to overall data) based on KL divergence criteria.
Transcripts are incrementally added to a training Dataset (high quality transcripts first) until
the training set's KL divergence from the overall data falls below the provided threshold.
"""
training_transcripts = []
training_obs = []
kl_div_set = 1.0
group_size = 20
logger.info(' ... sorting')
rnas_sd = sorted(self.rnas.values(), key=lambda transcript: transcript.density, reverse=True)
logger.info(' ... selecting')
while kl_div_set > kl_div and rnas_sd:
rnas = rnas_sd[:group_size]
rnas_sd[:group_size] = []
for rna in rnas:
training_transcripts.append(rna.name)
training_obs.extend(rna.obs[rna.mask_finite])
training_histogram, _ = np.histogram(training_obs,
bins=self.stats['histogram_bins'],
density=True)
kl_div_set = entropy(training_histogram, self.stats['histogram'])
training_set = self.spawn_set(rnas=training_transcripts)
training_set.compute_stats()
return training_set, kl_div_set
def pre_process(self, model, scoring=False):
if model.emission_model.type == 'DOM':
for rna in self.rnas:
model.emission_model.discretize(self.rnas[rna])
if scoring:
for rna in self.rnas.values():
model.e_step(rna)
rna.compute_log_B_ratios()
def get_emissions(self, model):
for rna in self.rnas:
model.emission_model.compute_emissions(self.rnas[rna])
def spawn_set(self, rnas):
spawned_set = Dataset(fp_observations=None, fp_sequences=None, fp_references=None)
spawned_set.rnas = {rna: self.rnas[rna] for rna in rnas}
return spawned_set
def spawn_reference_set(self):
spawned_set = Dataset(fp_observations=None, fp_references=None, fp_sequences=None)
references = [rna for rna in self.rnas if self.rnas[rna].ref is not None]
spawned_set.rnas = {rna: self.rnas[rna] for rna in references}
spawned_set.compute_stats()
return spawned_set
def clear(self):
self.rnas = None
self.stats = None
| 2.453125 | 2 |
src/Simulation/developer_0/main.py | GYRY-NEU/CS7610-Experiments | 0 | 5642 | <reponame>GYRY-NEU/CS7610-Experiments
import library
import json
@library.export
def init(args):
model = [[9.2, 0.21, 0.21],
[8.2, 0.22, 0.21],
[7.2, 1.21, 2.41],
[1.2, 2.21, 0.29]]
library.put("model", model)
ROUND = 0
library.put("ROUND", ROUND)
alpha = 0.2
library.put("alpha", alpha)
@library.export
def clientUpload(args):
# get client model
client = json.loads(args["data"])
# client round
k = "round" + str(client["round"])
# save model to buckets
library.put_bucket(k, client["model"])
# if enough models
if library.count_bucket(k) > 20:
ROUND = library.get("ROUND")
# check client rounds == current rounds
if ROUND != client["round"]:
return False
# set round to -1 to prevent clients uploading to this bucket
library.put("ROUND", -1)
model = library.get("model")
list_weights = library.get_bucket(k)
model = updateModel(model, list_weights)
# save calculated model and restore round
library.put("model", model)
library.put("ROUND", ROUND+1)
return True
def updateModel(model, list_weights):
"""
list_weights : 3D list of shape : (clientNumber,modelOuter, modelInner)
It contains all the models for each client
"""
    # this part will vary from developer to developer
    # one can just take the average,
    # or one can discard the smallest and largest values and then take the average
    # this example just takes the average without using an external library
alpha = library.get("alpha")
# getting shape of 3D array
number_clients = len(list_weights)
size_outer = len(list_weights[0])
size_inner = len(list_weights[0][0])
# constructing a new 2D array of zeros of same size
newModel = [ [0 for j in range(size_inner)] for i in range(size_outer)]
# validate new created shape
assert(len(newModel) == size_outer)
assert(len(newModel[0]) == size_inner)
# sum for all the clients
for weights in list_weights:
for outerIndex, outerList in enumerate(weights):
for innerIndex, innerVal in enumerate(outerList):
newModel[outerIndex][innerIndex] += innerVal
# average it by number of clients
for outerIndex, outerList in enumerate(newModel):
for innerIndex, innerVal in enumerate(outerList):
newModel[outerIndex][innerIndex] /= number_clients
    # now update the model using the learning rate with the formula below
    # model = (1-a) * model + a * new_model
    # the previous part and the next part could be merged for efficiency, but they are kept as two loops for readability
# Iterate over model
for outerIndex, outerList in enumerate(newModel):
for innerIndex, innerVal in enumerate(outerList):
model[outerIndex][innerIndex] *= 1-alpha
model[outerIndex][innerIndex] += alpha * newModel[outerIndex][innerIndex]
    # Finally, return the blended model
return model
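# A tiny hand-check of the update rule above (illustrative numbers, alpha assumed to be 0.2):
# with model = [[1.0]] and client models [[[2.0]], [[4.0]]], the client average is [[3.0]],
# so the blended entry becomes (1 - 0.2) * 1.0 + 0.2 * 3.0 = 1.4.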
@library.export
def getModel(args):
return library.get("model")
@library.export
def getRound(args):
return library.get("ROUND")
| 2.421875 | 2 |
molecule_ignite/test/unit/test_driver.py | ragingpastry/molecule-ignite | 17 | 5643 | <reponame>ragingpastry/molecule-ignite
from molecule import api
def test_driver_is_detected():
driver_name = __name__.split(".")[0].split("_")[-1]
assert driver_name in [str(d) for d in api.drivers()]
| 2.25 | 2 |
coffeine/pipelines.py | dengemann/meegpowreg | 6 | 5644 | import numpy as np
from coffeine.covariance_transformers import (
Diag,
LogDiag,
ExpandFeatures,
Riemann,
RiemannSnp,
NaiveVec)
from coffeine.spatial_filters import (
ProjIdentitySpace,
ProjCommonSpace,
ProjLWSpace,
ProjRandomSpace,
ProjSPoCSpace)
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import RidgeCV, LogisticRegression
def make_filter_bank_transformer(names, method='riemann',
projection_params=None,
vectorization_params=None,
categorical_interaction=None):
"""Generate pipeline for filterbank models.
Prepare filter bank models as used in [1]_. These models take as input
sensor-space covariance matrices computed from M/EEG signals in different
frequency bands. Then transformations are applied to improve the
applicability of linear regression techniques by reducing the impact of
field spread.
In terms of implementation, this involves 1) projection
(e.g. spatial filters) and 2) vectorization (e.g. taking the log on the
diagonal).
.. note::
The resulting model expects as inputs data frames in which different
        covariances (e.g. for different frequencies) are stored inside columns
indexed by ``names``.
Other columns will be passed through by the underlying column
transformers.
The pipeline also supports fitting categorical interaction effects
after projection and vectorization steps are performed.
.. note::
All essential methods from [1]_ are implemented here. In practice,
we recommend comparing `riemann', `spoc' and `diag' as a baseline.
Parameters
----------
names : list of str
The column names of the data frame corresponding to different
covariances.
method : str
The method used for extracting features from covariances. Defaults
to ``'riemann'``. Can be ``'riemann'``, ``'lw_riemann'``, ``'diag'``,
``'log_diag'``, ``'random'``, ``'naive'``, ``'spoc'``,
``'riemann_wasserstein'``.
projection_params : dict | None
The parameters for the projection step.
vectorization_params : dict | None
The parameters for the vectorization step.
categorical_interaction : str
The column in the input data frame containing a binary descriptor
used to fit 2-way interaction effects.
References
----------
[1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
Predictive regression modeling with MEG/EEG: from source power
to signals and cognitive states.
*NeuroImage*, page 116893,2020. ISSN 1053-8119.
https://doi.org/10.1016/j.neuroimage.2020.116893
"""
# put defaults here for projection and vectorization step
projection_defaults = {
'riemann': dict(scale=1, n_compo='full', reg=1.e-05),
'lw_riemann': dict(shrink=1),
'diag': dict(),
'log_diag': dict(),
'random': dict(n_compo='full'),
'naive': dict(),
'spoc': dict(n_compo='full', scale='auto', reg=1.e-05, shrink=1),
'riemann_wasserstein': dict()
}
vectorization_defaults = {
'riemann': dict(metric='riemann'),
'lw_riemann': dict(metric='riemann'),
'diag': dict(),
'log_diag': dict(),
'random': dict(),
'naive': dict(method='upper'),
'spoc': dict(),
'riemann_wasserstein': dict(rank='full')
}
assert set(projection_defaults) == set(vectorization_defaults)
if method not in projection_defaults:
raise ValueError(
f"The `method` ('{method}') you specified is unknown.")
# update defaults
projection_params_ = projection_defaults[method]
if projection_params is not None:
projection_params_.update(**projection_params)
vectorization_params_ = vectorization_defaults[method]
if vectorization_params is not None:
vectorization_params_.update(**vectorization_params)
def _get_projector_vectorizer(projection, vectorization):
return [(make_pipeline(*
[projection(**projection_params_),
vectorization(**vectorization_params_)]),
name) for name in names]
# setup pipelines (projection + vectorization step)
steps = tuple()
if method == 'riemann':
steps = (ProjCommonSpace, Riemann)
elif method == 'lw_riemann':
steps = (ProjLWSpace, Riemann)
elif method == 'diag':
steps = (ProjIdentitySpace, Diag)
elif method == 'log_diag':
steps = (ProjIdentitySpace, LogDiag)
elif method == 'random':
steps = (ProjRandomSpace, LogDiag)
elif method == 'naive':
steps = (ProjIdentitySpace, NaiveVec)
elif method == 'spoc':
steps = (ProjSPoCSpace, LogDiag)
elif method == 'riemann_wasserstein':
steps = (ProjIdentitySpace, RiemannSnp)
filter_bank_transformer = make_column_transformer(
*_get_projector_vectorizer(*steps), remainder='passthrough')
if categorical_interaction is not None:
filter_bank_transformer = ExpandFeatures(
filter_bank_transformer, expander_column=categorical_interaction)
return filter_bank_transformer
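# A hedged usage sketch (the band names, matrix sizes and DataFrame layout below are
# illustrative assumptions, not taken from this module):
#
#   import numpy as np
#   import pandas as pd
#
#   freqs = ['alpha', 'beta']                      # hypothetical frequency-band names
#   n_subjects, n_channels = 10, 4
#   X_df = pd.DataFrame({band: [np.eye(n_channels) for _ in range(n_subjects)]
#                        for band in freqs})
#   transformer = make_filter_bank_transformer(names=freqs, method='riemann')
#   features = transformer.fit_transform(X_df)     # one vectorized block per band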
def make_filter_bank_regressor(names, method='riemann',
projection_params=None,
vectorization_params=None,
categorical_interaction=None, scaling=None,
estimator=None):
"""Generate pipeline for regression with filter bank model.
Prepare filter bank models as used in [1]_. These models take as input
sensor-space covariance matrices computed from M/EEG signals in different
frequency bands. Then transformations are applied to improve the
applicability of linear regression techniques by reducing the impact of
field spread.
In terms of implementation, this involves 1) projection
(e.g. spatial filters) and 2) vectorization (e.g. taking the log on the
diagonal).
.. note::
The resulting model expects as inputs data frames in which different
        covariances (e.g. for different frequencies) are stored inside columns
indexed by ``names``.
Other columns will be passed through by the underlying column
transformers.
The pipeline also supports fitting categorical interaction effects
after projection and vectorization steps are performed.
.. note::
All essential methods from [1]_ are implemented here. In practice,
we recommend comparing `riemann', `spoc' and `diag' as a baseline.
Parameters
----------
names : list of str
The column names of the data frame corresponding to different
covariances.
method : str
The method used for extracting features from covariances. Defaults
to ``'riemann'``. Can be ``'riemann'``, ``'lw_riemann'``, ``'diag'``,
``'log_diag'``, ``'random'``, ``'naive'``, ``'spoc'``,
``'riemann_wasserstein'``.
projection_params : dict | None
The parameters for the projection step.
vectorization_params : dict | None
The parameters for the vectorization step.
categorical_interaction : str
The column in the input data frame containing a binary descriptor
used to fit 2-way interaction effects.
scaling : scikit-learn Transformer object | None
Method for re-rescaling the features. Defaults to None. If None,
StandardScaler is used.
estimator : scikit-learn Estimator object.
The estimator object. Defaults to None. If None, RidgeCV
is performed with default values.
References
----------
[1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
Predictive regression modeling with MEG/EEG: from source power
to signals and cognitive states.
*NeuroImage*, page 116893,2020. ISSN 1053-8119.
https://doi.org/10.1016/j.neuroimage.2020.116893
"""
filter_bank_transformer = make_filter_bank_transformer(
names=names, method=method, projection_params=projection_params,
vectorization_params=vectorization_params,
categorical_interaction=categorical_interaction
)
scaling_ = scaling
if scaling_ is None:
scaling_ = StandardScaler()
estimator_ = estimator
if estimator_ is None:
estimator_ = RidgeCV(alphas=np.logspace(-3, 5, 100))
filter_bank_regressor = make_pipeline(
filter_bank_transformer,
scaling_,
estimator_
)
return filter_bank_regressor
def make_filter_bank_classifier(names, method='riemann',
projection_params=None,
vectorization_params=None,
categorical_interaction=None, scaling=None,
estimator=None):
"""Generate pipeline for classification with filter bank model.
Prepare filter bank models as used in [1]_. These models take as input
sensor-space covariance matrices computed from M/EEG signals in different
frequency bands. Then transformations are applied to improve the
applicability of linear regression techniques by reducing the impact of
field spread.
In terms of implementation, this involves 1) projection
(e.g. spatial filters) and 2) vectorization (e.g. taking the log on the
diagonal).
.. note::
The resulting model expects as inputs data frames in which different
        covariances (e.g. for different frequencies) are stored inside columns
indexed by ``names``.
Other columns will be passed through by the underlying column
transformers.
The pipeline also supports fitting categorical interaction effects
after projection and vectorization steps are performed.
.. note::
All essential methods from [1]_ are implemented here. In practice,
we recommend comparing `riemann', `spoc' and `diag' as a baseline.
Parameters
----------
names : list of str
The column names of the data frame corresponding to different
covariances.
method : str
The method used for extracting features from covariances. Defaults
to ``'riemann'``. Can be ``'riemann'``, ``'lw_riemann'``, ``'diag'``,
``'log_diag'``, ``'random'``, ``'naive'``, ``'spoc'``,
``'riemann_wasserstein'``.
projection_params : dict | None
The parameters for the projection step.
vectorization_params : dict | None
The parameters for the vectorization step.
categorical_interaction : str
The column in the input data frame containing a binary descriptor
used to fit 2-way interaction effects.
scaling : scikit-learn Transformer object | None
Method for re-rescaling the features. Defaults to None. If None,
StandardScaler is used.
estimator : scikit-learn Estimator object.
The estimator object. Defaults to None. If None, LogisticRegression
is performed with default values.
References
----------
[1] <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
Predictive regression modeling with MEG/EEG: from source power
to signals and cognitive states.
*NeuroImage*, page 116893,2020. ISSN 1053-8119.
https://doi.org/10.1016/j.neuroimage.2020.116893
"""
filter_bank_transformer = make_filter_bank_transformer(
names=names, method=method, projection_params=projection_params,
vectorization_params=vectorization_params,
categorical_interaction=categorical_interaction
)
scaling_ = scaling
if scaling_ is None:
scaling_ = StandardScaler()
estimator_ = estimator
if estimator_ is None:
estimator_ = LogisticRegression(solver='liblinear')
filter_bank_regressor = make_pipeline(
filter_bank_transformer,
scaling_,
estimator_
)
return filter_bank_regressor
| 2.703125 | 3 |
submissions/Chouard/mygames.py | dysomni/aima-python | 0 | 5645 | <gh_stars>0
from games import Game
from math import nan, isnan
from queue import PriorityQueue
from copy import deepcopy
from utils import isnumber
from grading.util import print_table
class GameState:
def __init__(self, to_move, position, board, label=None):
self.to_move = to_move
self.position = position
self.board = board
self.label = label
self.scores = {'H': 0, 'V': 0}
def __str__(self):
if self.label == None:
return super(GameState, self).__str__()
return self.label
class Move:
def __init__(self, r, c, v):
self.row = r
self.col = c
self.value = v
def rcv(self):
return self.row, self.col, self.value
def __lt__(self, other):
return self.value > other.value
def q2list(mq):
list = []
while not mq.empty():
list.append(mq.get(1).rcv())
return list
def movesInRow(board, r):
mQueue = PriorityQueue()
row = board[r]
for c in range(len(row)):
if isnan(row[c]):
continue
v = row[c]
move = Move(r, c, v)
mQueue.put(move)
return q2list(mQueue)
def movesInCol(board, c):
mQueue = PriorityQueue()
for r in range(len(board)):
if isnan(board[r][c]):
continue
v = board[r][c]
move = Move(r, c, v)
mQueue.put(move)
return q2list(mQueue)
class ThinkAhead(Game):
"""
An implementation of ThinkAhead
"""
def __init__(self, state):
self.initial = state
def actions(self, state):
"Legal moves are any square not yet taken."
r, c = state.position
if state.to_move == 'H':
moves = movesInRow(state.board, r)
return moves
if state.to_move == 'V':
moves = movesInCol(state.board, c)
return moves
return []
# defines the order of play
def opponent(self, player):
if player == 'H':
return 'V'
if player == 'V':
return 'H'
return None
def result(self, state, move):
r, c, v = move
assert state.board[r][c] == v
currMover = state.to_move
nextMover = self.opponent(currMover)
newState = deepcopy(state)
newState.to_move = nextMover
newState.position = r, c
newState.board[r][c] = nan
newState.scores[currMover] += v
return newState
def utility(self, state, player):
"Player relative score"
opponent = self.opponent(player)
return state.scores[player] - state.scores[opponent]
def terminal_test(self, state):
"A state is terminal if it is won or there are no empty squares."
return len(self.actions(state)) == 0
def display(self, state):
print_table(state.board, njust='center', sep=',')
print('Score: ' + str(state.scores))
won = GameState(
to_move='H',
position=(0, 1),
board=[[nan, nan],
[9, nan]],
label='won'
)
won.scores = {'H': 9, 'V': 0}
lost = GameState(
to_move='V',
position=(0, 1),
board=[[nan, nan],
[9, nan]],
label='lost'
)
lost.scores = {'H': 0, 'V': 9}
winin1 = GameState(
to_move='H',
position=(1, 1),
board=[[nan, nan],
[9, nan]],
label='winin1'
)
losein1 = GameState(
to_move='V',
position=(0, 0),
board=[[nan, nan],
[9, nan]],
label='losein1'
)
winin2 = GameState(
to_move='H',
position=(0, 0),
board=[[nan, 3, 2],
[nan, 9, nan],
[nan, nan, 1]],
label='winin2'
)
losein2 = GameState(
to_move='V',
position=(0, 0),
board=[[nan, nan, nan],
[3, 9, nan],
[2, nan, 1]],
label='losein2'
)
losein2.maxDepth = 3
# http://www.kongregate.com/games/zolli/thinkahead-brain-trainer
stolen = GameState(
to_move='H',
position=(3, 1),
board=[[3, 8, 9, 5],
[9, 1, 3, 2],
[8, 6, 4, 4],
[9, nan, 1, 5]],
label='stolen'
)
choose1 = GameState(
to_move='H',
position=(1, 0),
board=[[3, 8, 9, 5],
[nan, 1, 3, 2],
[8, 6, 4, 4],
[nan, nan, 1, 5]],
label='choose1'
)
winby10 = GameState(
to_move='H',
position=(2, 0),
board=[[nan, nan, nan, nan],
[nan, nan, nan, nan],
[nan, 6, 4, 5],
[nan, nan, 1, 3]],
label='winby10'
)
thinkA = ThinkAhead(stolen)
def availableMoves(board):
sides = ['T', 'B', 'L', 'R']
moves = PriorityQueue()
for row in range(0, len(board)):
for col in range(0, len(board)):
if board[row][col]['winner'] == '':
for side in sides:
if side not in board[row][col]['lines']:
moves.put((row, col, side))
moveList = []
while not moves.empty():
moveList.append(moves.get(1))
return moveList
def applyMove(board, size, row, col, side, currMover):
board[row][col]['lines'].append(side)
if row <= size - 1 and row != 0 and side == 'T':
board[row - 1][col]['lines'].append('B')
if row >= 0 and row != size - 1 and side == 'B':
board[row + 1][col]['lines'].append('T')
if col <= size - 1 and col != 0 and side == 'L':
board[row][col - 1]['lines'].append('R')
if col >= 0 and col != size - 1 and side == 'R':
board[row][col + 1]['lines'].append('L')
sides = ['T', 'B', 'L', 'R']
complete = True
for side in sides:
if side in board[row][col]['lines']:
continue
complete = False
if complete:
board[row][col]['winner'] = currMover
return board
def countScore(board):
scores = {'A': 0, 'B': 0}
for row in range(0, len(board)):
for col in range(0, len(board)):
if board[row][col]['winner'] == 'A':
scores['A'] += 1
if board[row][col]['winner'] == 'B':
scores['B'] += 1
return scores
board = '''
***
***
***
'''
def printDotsBoard(board):
board_string = ''
for row in range(0, len(board)):
for col in range(0, len(board[row])):
board_string += '*'
if 'T' in board[row][col]['lines']:
board_string += '---'
else:
board_string += ' '
if col == len(board[row]) - 1:
board_string += '*\n'
for space in range(0, len(board[row])):
if 'L' in board[row][space]['lines']:
board_string += '| '
else:
board_string += ' '
if '' != board[row][space]['winner']:
board_string += board[row][space]['winner']
else:
board_string += ' '
if space == len(board[row]) - 1 and 'R' in board[row][space]['lines']:
board_string += ' |'
else:
board_string += ' '
board_string += '\n'
if row == len(board) - 1:
for col in range(0, len(board[row])):
board_string += '*'
if 'B' in board[row][col]['lines']:
board_string += '---'
else:
board_string += ' '
board_string += '*'
print(board_string)
class DotLineState:
def __init__(self, to_move, board, label=None, scores={'A': 0, 'B': 0}):
self.to_move = to_move
self.board = board
self.label = label
self.scores = scores
def __str__(self):
if self.label is None:
return super(DotLineState, self).__str__()
return self.label
class DotsAndLines(Game):
"""
An implementation of Dots and Lines
"""
def __init__(self, state):
self.initial = state
self.size = len(state.board)
def actions(self, state):
"Legal moves are any square not yet taken."
moves = availableMoves(state.board)
return moves
# defines the order of play
def opponent(self, player):
if player == 'A':
return 'B'
if player == 'B':
return 'A'
return None
def result(self, state, move):
row, col, side = move
currMover = state.to_move
nextMover = self.opponent(currMover)
newState = deepcopy(state)
newState.to_move = nextMover
newState.board = applyMove(newState.board, self.size, row, col, side, currMover)
newState.scores = countScore(newState.board)
return newState
def utility(self, state, player):
"Player relative score"
opponent = self.opponent(player)
return state.scores[player] - state.scores[opponent]
def terminal_test(self, state):
"A state is terminal if it is won or there are no empty squares."
return len(self.actions(state)) == 0
def display(self, state):
# print_table(state.board, njust='center', sep=',')
printDotsBoard(state.board)
print('Score: ' + str(state.scores))
'''
Board represents the squares, whether the top, bottom, left, and
right have been filled, and which player owns the square.
'''
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}]]
won = DotLineState(board=dotLineBoard, to_move='A', label='Won', scores={'A': 3, 'B': 1})
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}]]
lost = DotLineState(board=dotLineBoard, to_move='A', label='Lost', scores={'A': 1, 'B': 3})
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'B', 'lines': ['T', 'B', 'L', 'R']}]]
tied = DotLineState(board=dotLineBoard, to_move='A', label='Tied', scores={'A': 2, 'B': 2})
dotLineBoard = [[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}],
[{'winner': 'A', 'lines': ['T', 'B', 'L', 'R']}, {'winner': '', 'lines': ['T', 'L']}]]
winin1Dots = DotLineState(board=dotLineBoard, to_move='A', label='Win in 1', scores={'A': 2, 'B': 1})
dotLineBoard = [[{'winner': '', 'lines': ['L', 'R']}, {'winner': '', 'lines': ['T', 'L']}, {'winner': '', 'lines': ['R']}],
[{'winner': '', 'lines': ['L', 'R']}, {'winner': '', 'lines': ['L', 'R']}, {'winner': '', 'lines': ['L', 'R']}],
[{'winner': '', 'lines': ['B', 'L', 'R']}, {'winner': '', 'lines': ['L', 'B']}, {'winner': '', 'lines': ['B', 'R']}],
]
winIn5_3x3 = DotLineState(board=dotLineBoard, to_move='A', label='Win in 5', scores={'A': 0, 'B': 0})
play = DotLineState(
board=[[{'winner': '', 'lines': []}, {'winner': '', 'lines': []}],
[{'winner': '', 'lines': []}, {'winner': '', 'lines': []}]],
to_move='A', label='Start')
#amended by whh
dotLine = DotsAndLines(play)
#dotLine = DotsAndLines(winIn5_3x3)
myGames = {
dotLine: [
won,
lost,
tied,
winin1Dots,
winIn5_3x3,
play
]
}
| 3.421875 | 3 |
discordbot/stocks/options/opt_chain.py | minhhoang1023/GamestonkTerminal | 1 | 5646 | import os
import df2img
import disnake
import numpy as np
import pandas as pd
from menus.menu import Menu
from PIL import Image
import discordbot.config_discordbot as cfg
from discordbot.config_discordbot import gst_imgur, logger
from discordbot.helpers import autocrop_image
from gamestonk_terminal.stocks.options import yfinance_model
async def chain_command(
ctx,
ticker: str = None,
expiry: str = None,
opt_type: str = None,
min_sp: float = None,
max_sp: float = None,
):
"""Show calls/puts for given ticker and expiration"""
try:
# Debug
if cfg.DEBUG:
logger.debug(
"opt-chain %s %s %s %s %s", ticker, expiry, opt_type, min_sp, max_sp
)
# Check for argument
if not ticker:
raise Exception("Stock ticker is required")
dates = yfinance_model.option_expirations(ticker)
if not dates:
raise Exception("Stock ticker is invalid")
options = yfinance_model.get_option_chain(ticker, str(expiry))
calls_df = options.calls
puts_df = options.puts
column_map = {"openInterest": "oi", "volume": "vol", "impliedVolatility": "iv"}
columns = [
"strike",
"bid",
"ask",
"volume",
"openInterest",
"impliedVolatility",
]
if opt_type == "Calls":
df = calls_df[columns].rename(columns=column_map)
if opt_type == "Puts":
df = puts_df[columns].rename(columns=column_map)
min_strike = np.percentile(df["strike"], 1)
max_strike = np.percentile(df["strike"], 100)
if min_sp:
min_strike = min_sp
if max_sp:
max_strike = max_sp
        if min_sp and max_sp and min_sp > max_sp:
            min_strike, max_strike = max_strike, min_strike
df = df[df["strike"] >= min_strike]
df = df[df["strike"] <= max_strike]
df["iv"] = pd.to_numeric(df["iv"].astype(float))
formats = {"iv": "{:.2f}"}
for col, f in formats.items():
df[col] = df[col].map(lambda x: f.format(x)) # pylint: disable=W0640
df.set_index("strike", inplace=True)
title = f"Stocks: {opt_type} Option Chain for {ticker.upper()} on {expiry} [yfinance]"
embeds: list = []
# Weekly Calls Pages
i, i2, end = 0, 0, 20
df_pg = []
embeds_img = []
dindex = len(df.index)
while i < dindex:
df_pg = df.iloc[i:end]
df_pg.append(df_pg)
figp = df2img.plot_dataframe(
df_pg,
fig_size=(1000, (40 + (40 * 20))),
col_width=[3, 3, 3, 3],
tbl_cells=dict(
height=35,
),
font=dict(
family="Consolas",
size=20,
),
template="plotly_dark",
paper_bgcolor="rgba(0, 0, 0, 0)",
)
imagefile = f"opt-chain{i}.png"
df2img.save_dataframe(fig=figp, filename=imagefile)
image = Image.open(imagefile)
image = autocrop_image(image, 0)
image.save(imagefile, "PNG", quality=100)
uploaded_image = gst_imgur.upload_image(imagefile, title="something")
image_link = uploaded_image.link
embeds_img.append(
f"{image_link}",
)
embeds.append(
disnake.Embed(
title=title,
colour=cfg.COLOR,
),
)
i2 += 1
i += 20
end += 20
os.remove(imagefile)
# Author/Footer
for i in range(0, i2):
embeds[i].set_author(
name=cfg.AUTHOR_NAME,
url=cfg.AUTHOR_URL,
icon_url=cfg.AUTHOR_ICON_URL,
)
embeds[i].set_footer(
text=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
i = 0
for i in range(0, i2):
embeds[i].set_image(url=embeds_img[i])
i += 1
embeds[0].set_footer(text=f"Page 1 of {len(embeds)}")
options = [
disnake.SelectOption(label="Home", value="0", emoji="🟢"),
]
await ctx.send(embed=embeds[0], view=Menu(embeds, options))
except Exception as e:
embed = disnake.Embed(
title="ERROR Stock-Options: Expirations",
colour=cfg.COLOR,
description=e,
)
embed.set_author(
name=cfg.AUTHOR_NAME,
icon_url=cfg.AUTHOR_ICON_URL,
)
await ctx.send(embed=embed, delete_after=30.0)
| 2.484375 | 2 |
scripts/get_lenderprofit.py | xujiahuayz/premfin | 4 | 5647 | <filename>scripts/get_lenderprofit.py
#%% import packages
import numpy as np
import pandas as pd
import multiprocessing
from time import time
import json
from premiumFinance.constants import (
MORTALITY_TABLE_CLEANED_PATH,
PROCESSED_PROFITABILITY_PATH,
)
from premiumFinance.financing import calculate_lender_profit, yield_curve
mortality_experience = pd.read_excel(MORTALITY_TABLE_CLEANED_PATH)
#%% calculate profit rate
def get_average_profitability(
is_level_premium=True,
lapse_assumption=True,
policyholder_rate=yield_curve,
statutory_interest=0.035,
premium_markup=0.0,
cash_interest=0.001,
lender_coc=0.01,
data_frame=mortality_experience,
):
profit_columns = data_frame.apply(
lambda row: calculate_lender_profit(
row=row,
is_level_premium=is_level_premium,
lapse_assumption=lapse_assumption,
policyholder_rate=policyholder_rate,
statutory_interest=statutory_interest,
premium_markup=premium_markup,
cash_interest=cash_interest,
lender_coc=lender_coc,
),
axis=1,
result_type="expand",
)
data_frame[["Breakeven Loan rate", "Lender profit"]] = profit_columns
data_frame["Dollar profit"] = (
data_frame["Lender profit"] * data_frame["Amount Exposed"]
)
average_profitability = (
data_frame["Dollar profit"].sum() / data_frame["Amount Exposed"].sum()
)
return average_profitability, data_frame
def tempfunc_t(x):
a, _ = get_average_profitability(lender_coc=x, lapse_assumption=True)
return a
def tempfunc_f(x):
a, _ = get_average_profitability(lender_coc=x, lapse_assumption=False)
return a
lender_coc_value = np.arange(start=0.01, stop=0.2, step=0.01)
#%% tbd
if __name__ == "__main__":
pool = multiprocessing.Pool()
start_time = time()
foo = []
for tempfunc in (tempfunc_t, tempfunc_f):
foo.append(
pool.map(
tempfunc,
lender_coc_value,
)
)
print(f"it took {time() - start_time}")
lender_profitability = {
"lender_coc": lender_coc_value.tolist(),
"profitability": foo,
}
with open(PROCESSED_PROFITABILITY_PATH, "w") as outfile:
json.dump(lender_profitability, outfile)
| 2.34375 | 2 |
dashboard/dashboard/common/layered_cache.py | BearerPipelineTest/catapult | 0 | 5648 | <reponame>BearerPipelineTest/catapult<gh_stars>0
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Caches processed query results in memcache and datastore.
Memcache is not very reliable for the perf dashboard. Prometheus team explained
that memcache is LRU and shared between multiple applications, so their activity
may result in our data being evicted. To prevent this, we cache processed
query results in the data store. Using NDB, the values are also cached in
memcache if possible. This improves performance because doing a get()
for a key which has a single BlobProperty is much quicker than a complex query
over a large dataset.
(Background: http://g/prometheus-discuss/othVtufGIyM/wjAS5djyG8kJ)
When an item is cached, layered_cache does the following:
1) Namespaces the key based on whether datastore_hooks says the request is
internal_only.
2) Pickles the value (memcache does this internally), and adds a data store
entity with the key and a BlobProperty with the pickled value.
Retrieving values checks memcache via NDB first, and if datastore is used it
unpickles.
When an item is removed from the cache, it is removed from both internal and
external caches, since removals are usually caused by large changes that affect
both caches.
Although this module contains ndb.Model classes, these are not intended
to be used directly by other modules.
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import six.moves.cPickle as cPickle
import datetime
import logging
from google.appengine.api import datastore_errors
from google.appengine.runtime import apiproxy_errors
from google.appengine.ext import ndb
from dashboard.common import datastore_hooks
from dashboard.common import namespaced_stored_object
from dashboard.common import stored_object
class CachedPickledString(ndb.Model):
value = ndb.BlobProperty()
expire_time = ndb.DateTimeProperty()
@classmethod
def NamespacedKey(cls, key, namespace):
return ndb.Key(cls.__name__,
namespaced_stored_object.NamespaceKey(key, namespace))
@classmethod
def GetExpiredKeys(cls):
"""Gets keys of expired entities.
Returns:
List of keys for items which are expired.
"""
current_time = datetime.datetime.now()
query = cls.query(cls.expire_time < current_time)
query = query.filter(cls.expire_time != None)
return query.fetch(keys_only=True)
def Get(key):
"""Gets the value from the datastore."""
if key is None:
return None
namespaced_key = namespaced_stored_object.NamespaceKey(key)
entity = ndb.Key('CachedPickledString',
namespaced_key).get(read_policy=ndb.EVENTUAL_CONSISTENCY)
if entity:
return cPickle.loads(entity.value)
return stored_object.Get(key)
def GetExternal(key):
"""Gets the value from the datastore for the externally namespaced key."""
if key is None:
return None
namespaced_key = namespaced_stored_object.NamespaceKey(
key, datastore_hooks.EXTERNAL)
entity = ndb.Key('CachedPickledString',
namespaced_key).get(read_policy=ndb.EVENTUAL_CONSISTENCY)
if entity:
return cPickle.loads(entity.value)
return stored_object.Get(key)
def Set(key, value, days_to_keep=None, namespace=None):
"""Sets the value in the datastore.
Args:
key: The key name, which will be namespaced.
value: The value to set.
days_to_keep: Number of days to keep entity in datastore, default is None.
Entity will not expire when this value is 0 or None.
namespace: Optional namespace, otherwise namespace will be retrieved
using datastore_hooks.GetNamespace().
"""
# When number of days to keep is given, calculate expiration time for
# the entity and store it in datastore.
# Once the entity expires, it will be deleted from the datastore.
expire_time = None
if days_to_keep:
expire_time = datetime.datetime.now() + datetime.timedelta(
days=days_to_keep)
namespaced_key = namespaced_stored_object.NamespaceKey(key, namespace)
try:
CachedPickledString(
id=namespaced_key, value=cPickle.dumps(value),
expire_time=expire_time).put()
except datastore_errors.BadRequestError as e:
logging.warning('BadRequestError for key %s: %s', key, e)
except apiproxy_errors.RequestTooLargeError as e:
stored_object.Set(key, value)
def SetExternal(key, value, days_to_keep=None):
"""Sets the value in the datastore for the externally namespaced key.
Needed for things like /add_point that update internal/external data at the
same time.
Args:
key: The key name, which will be namespaced as externally_visible.
value: The value to set.
days_to_keep: Number of days to keep entity in datastore, default is None.
Entity will not expire when this value is 0 or None.
"""
Set(key, value, days_to_keep, datastore_hooks.EXTERNAL)
@ndb.synctasklet
def Delete(key):
"""Clears the value from the datastore."""
yield DeleteAsync(key)
@ndb.tasklet
def DeleteAsync(key):
unnamespaced_future = stored_object.DeleteAsync(key)
# See the comment in stored_object.DeleteAsync() about this get().
entities = yield ndb.get_multi_async([
CachedPickledString.NamespacedKey(key, datastore_hooks.INTERNAL),
CachedPickledString.NamespacedKey(key, datastore_hooks.EXTERNAL),
])
keys = [entity.key for entity in entities if entity]
yield (unnamespaced_future, ndb.delete_multi_async(keys))
def DeleteAllExpiredEntities():
"""Deletes all expired entities from the datastore."""
ndb.delete_multi(CachedPickledString.GetExpiredKeys())
| 1.796875 | 2 |
hypergbm/tests/cuml_/run_experiment_cuml.py | BigAndSweet/HyperGBM | 0 | 5649 | # -*- coding:utf-8 -*-
"""
"""
import cudf
from hypergbm import make_experiment
from hypernets.tabular import get_tool_box
from hypernets.tabular.datasets import dsutils
def main(target='y', dtype=None, max_trials=3, drift_detection=False, clear_cache=True, **kwargs):
tb = get_tool_box(cudf.DataFrame)
assert isinstance(tb, type) and tb.__name__ == 'CumlToolBox'
print("preparing...")
df = dsutils.load_bank()
if dtype is not None:
df[target] = df[target].astype(dtype)
df, = tb.from_local(df)
assert isinstance(df, cudf.DataFrame)
df_train, df_test = tb.train_test_split(df, test_size=0.5, random_state=123)
X_test = df_test
y_test = X_test.pop(target)
exp = make_experiment(df_train, target=target,
test_data=X_test.copy(),
max_trials=max_trials,
drift_detection=drift_detection,
clear_cache=clear_cache,
**kwargs)
print('experiment:', f'{[s.name for s in exp.steps]}', 'random_state', exp.random_state)
print("training...")
estimator = exp.run()
print('estimator pipeline:', f'{[s[0] for s in estimator.steps]}')
print("scoring...")
y_pred = estimator.predict(X_test)
y_proba = estimator.predict_proba(X_test)
task = exp.task
if task == 'regression':
metrics = ['mse', 'mae', 'msle', 'rmse', 'r2']
else:
metrics = ['auc', 'accuracy', 'f1', 'recall']
result = tb.metrics.calc_score(y_test, y_pred, y_proba, task=task, metrics=metrics,
pos_label=kwargs.get('pos_label', None))
print(result)
return exp, estimator
if __name__ == '__main__':
main(target='y', reward_metric='auc', ensemble_size=10, pos_label='yes', log_level='info', max_trials=10)
# main(target='y', max_trials=10, cv=False, ensemble_size=0, verbose=0, pos_label='yes', )
# main(target='day', reward_metric='f1', ensemble_size=10, log_level='info', max_trials=5)
# main(target='day', dtype='str', reward_metric='f1', ensemble_size=0, log_level='info', max_trials=6)
# main(target='age', dtype='float', ensemble_size=10, log_level='info', max_trials=8)
| 2.28125 | 2 |
Inserter.py | DarthSpector/Poster-Adder | 0 | 5650 | <gh_stars>0
def pictureInserter(og, address, list):
    # Append a TMDB poster URL line to each file in `og`; files and poster
    # paths are paired by index (the `list` parameter name shadows the builtin).
    j = 0
    for i in og:
        with open(address + '/' + i, "a") as file1:
            x = "\ncover::https://image.tmdb.org/t/p/original/" + list[j]
            file1.write(x)
        j = j + 1
| 2.828125 | 3 |
datasets/imagenet.py | xhchrn/open_lth | 9 | 5651 | <gh_stars>1-10
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import concurrent.futures
import numpy as np
import os
from PIL import Image
import torchvision
from datasets import base
from platforms.platform import get_platform
def _get_samples(root, y_name, y_num):
y_dir = os.path.join(root, y_name)
if not get_platform().isdir(y_dir): return []
output = [(os.path.join(y_dir, f), y_num) for f in get_platform().listdir(y_dir) if f.lower().endswith('jpeg')]
return output
class Dataset(base.ImageDataset):
"""ImageNet"""
def __init__(self, loc: str, image_transforms):
# Load the data.
classes = sorted(get_platform().listdir(loc))
samples = []
if get_platform().num_workers > 0:
executor = concurrent.futures.ThreadPoolExecutor(max_workers=get_platform().num_workers)
futures = [executor.submit(_get_samples, loc, y_name, y_num) for y_num, y_name in enumerate(classes)]
for d in concurrent.futures.wait(futures)[0]: samples += d.result()
else:
for y_num, y_name in enumerate(classes):
samples += _get_samples(loc, y_name, y_num)
examples, labels = zip(*samples)
super(Dataset, self).__init__(
np.array(examples), np.array(labels), image_transforms,
[torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
@staticmethod
def num_train_examples(): return 1281167
@staticmethod
def num_test_examples(): return 50000
@staticmethod
def num_classes(): return 1000
@staticmethod
def _augment_transforms():
return [
torchvision.transforms.RandomResizedCrop(224, scale=(0.1, 1.0), ratio=(0.8, 1.25)),
torchvision.transforms.RandomHorizontalFlip()
]
@staticmethod
def _transforms():
return [torchvision.transforms.Resize(256), torchvision.transforms.CenterCrop(224)]
@staticmethod
def get_train_set(use_augmentation, resize):
transforms = Dataset._augment_transforms() if use_augmentation else Dataset._transforms()
return Dataset(os.path.join(get_platform().imagenet_root, 'train'), transforms)
@staticmethod
def get_test_set(resize):
return Dataset(os.path.join(get_platform().imagenet_root, 'val'), Dataset._transforms())
@staticmethod
def example_to_image(example):
with get_platform().open(example, 'rb') as fp:
return Image.open(fp).convert('RGB')
DataLoader = base.DataLoader
| 2.375 | 2 |
sm4.py | ZelKnow/sm4 | 0 | 5652 | <filename>sm4.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : sm4.py
@Description : Implementation of the SM4 block cipher
@Date : 2021/10/28 15:59:51
@Author : ZelKnow
@Github : https://github.com/ZelKnow
"""
__author__ = "ZelKnow"
from argparse import ArgumentParser, ArgumentError
from binascii import hexlify, unhexlify
from utils import S_BOX, BLOCK_BYTE, FK, CK, BLOCK_HEX
from utils import rotl, num2hex, bytes_to_list, list_to_bytes, padding, unpadding
ENCRYPT = 0  # encryption
DECRYPT = 1  # decryption
class CryptSM4(object):
def __init__(self):
self.rk = []
def T(self, A, L_func):
"""合成置换函数T
T(.) = L(\tau(.))
Args:
A (int): 输入数据
L_func (function): 线性变换L
Returns:
int: 输出数据
"""
B = [S_BOX[(A >> i) & (0x000000ff)] for i in range(0, 32, 8)]
B = [B[i] << (i * 8) for i in range(4)]
C = L_func(sum(B))
return C
def L(self, input):
"""线性变换L,用于轮函数中
L(B) = B ^ (B <<< 2) ^ (B <<< 10) ^ (B <<< 18) ^ (B <<< 24)
Args:
input (int): 输入数据
Returns:
int: 输出数据
"""
return input ^ rotl(input, 2) ^ rotl(input, 10) ^ rotl(
input, 18) ^ rotl(input, 24)
def L_prime(self, input):
"""线性变换L',用于密钥扩展算法
L'(B) = B ^ (B <<< 13) ^ (B <<< 23)
Args:
input (int): 输入数据
Returns:
int: 输出数据
"""
return input ^ rotl(input, 13) ^ rotl(input, 23)
def check_key_iv(self, key_iv):
"""检验key或iv的合法性并转换成字节串
Args:
key_iv (int, str or bytes): key或iv
Raises:
TypeError: 密钥或初始化向量类型错误
ValueError: 密钥或初始化向量长度过长
Returns:
bytes: key或iv
"""
if isinstance(key_iv, str):
key_iv = key_iv.encode(encoding='UTF8')
elif isinstance(key_iv, int):
print(len(num2hex(key_iv, width=32)))
key_iv = unhexlify(num2hex(key_iv, width=32))
elif not isinstance(key_iv, bytes):
raise TypeError("密钥或初始化向量类型错误")
if len(key_iv) > BLOCK_BYTE:
            raise ValueError('the key or initialization vector must not be longer than {} bytes'.format(BLOCK_BYTE))
return unhexlify('00') * (BLOCK_BYTE - len(key_iv)) + key_iv
def set_key(self, key):
"""设置key
Args:
key (int, str or bytes): 密钥
"""
key = self.check_key_iv(key)
input = bytes_to_list(hexlify(key), BLOCK_HEX / 4)
input = [int(i, 16) for i in input]
        K = [input[i] ^ FK[i] for i in range(4)]  # holds the round keys
        for i in range(32):  # key expansion algorithm
K.append(K[i] ^ self.T(K[i + 1] ^ K[i + 2] ^ K[i + 3]
^ CK[i], self.L_prime))
self.rk = K[4:]
def F(self, X, i):
"""轮函数F
F = X_0 ^ T(X_1 ^ X_2 ^ X_3 ^ rk)
其中输入为(X_0, X_1, X_2, X_3),轮密钥为rk
Args:
X (list): 输入
i (int): 轮密钥的下标
Returns:
int: 输出
"""
return X[0] ^ self.T(X[1] ^ X[2] ^ X[3] ^ self.rk[i], self.L)
def _crypt(self, x, mode=ENCRYPT):
"""加解密函数
Args:
x (int): 需加解密的数据
mode (int, optional): 加密或解密. Defaults to ENCRYPT.
Returns:
int: 输出
"""
input = [(x >> i) & (0xffffffff) for i in reversed(range(0, 128, 32))]
        # the round keys are applied in opposite orders for encryption and decryption
for i in range(32) if mode == ENCRYPT else reversed(range(32)):
            input.append(self.F(input[-4:], i))  # 32 rounds of iteration
output = input[-4:]
        output = [output[i] << (i * 32) for i in range(4)]  # reverse transformation
return sum(output)
def encrypt(self, x):
"""加密函数
Args:
x (int): 需加密的数据
Returns:
int: 输出
"""
return self._crypt(x, ENCRYPT)
def decrypt(self, x):
"""解密函数
Args:
x (int): 需解密的数据
Returns:
int: 输出
"""
return self._crypt(x, DECRYPT)
def _crypt_ECB(self, input, mode=ENCRYPT):
"""ECB加解密函数
Args:
x (int): 需加解密的数据
mode (int, optional): 加密或解密. Defaults to ENCRYPT.
Returns:
int: 输出
"""
input_list = bytes_to_list(input, BLOCK_BYTE) # 将输入拆分成block
input_list = [int(hexlify(i), 16) for i in input_list]
output_list = [self._crypt(x, mode) for x in input_list] # 分别加解密
output_list = [
unhexlify(num2hex(o, width=BLOCK_HEX)) for o in output_list
] # 转成字节流
return list_to_bytes(output_list) # 合并
def encrypt_ECB(self, plain_text):
"""ECB加密函数
Args:
x (int): 需加密的数据
Returns:
int: 输出
"""
return self._crypt_ECB(padding(plain_text), ENCRYPT)
def decrypt_ECB(self, cipher_text):
"""ECB解密函数
Args:
x (int): 需解密的数据
Returns:
int: 输出
"""
try:
cipher_text = unhexlify(cipher_text)
except:
pass
return unpadding(self._crypt_ECB(cipher_text, DECRYPT))
def _crypt_CBC(self, input, iv, mode=ENCRYPT):
"""CBC加解密函数
Args:
x (int): 需加解密的数据
mode (int, optional): 加密或解密. Defaults to ENCRYPT.
Returns:
int: 输出
"""
iv = int(hexlify(self.check_key_iv(iv)), 16) # 初始化向量
input_list = bytes_to_list(input, BLOCK_BYTE) # 拆分成block
input_list = [int(hexlify(i), 16) for i in input_list]
output_list = []
for x in input_list:
if mode == ENCRYPT:
output_list.append(self._crypt(x ^ iv, mode))
iv = output_list[-1]
else:
output_list.append(self._crypt(x, mode) ^ iv)
iv = x
output_list = [
unhexlify(num2hex(o, width=BLOCK_HEX)) for o in output_list
]
return list_to_bytes(output_list)
def encrypt_CBC(self, plain_text, iv):
"""CBC加密函数
Args:
x (int): 需加密的数据
Returns:
int: 输出
"""
return self._crypt_CBC(padding(plain_text), iv, ENCRYPT)
def decrypt_CBC(self, cipher_text, iv):
"""CBC解密函数
Args:
x (int): 需解密的数据
Returns:
int: 输出
"""
try:
cipher_text = unhexlify(cipher_text)
except:
pass
return unpadding(self._crypt_CBC(cipher_text, iv, DECRYPT))
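
# --- Hedged usage sketch (added for illustration; not part of the original
# module). It assumes the CryptSM4 class above and its padding helpers work as
# defined; the key and IV literals below are placeholders only.
def _sm4_example_round_trip():
    """Encrypt and decrypt a short message in ECB and CBC mode."""
    crypt_sm4 = CryptSM4()
    crypt_sm4.set_key(b'0123456789abcdef')               # 16-byte (128-bit) key
    ecb_cipher = crypt_sm4.encrypt_ECB(b'hello sm4')
    iv = b'fedcba9876543210'                             # 16-byte IV, illustrative only
    cbc_cipher = crypt_sm4.encrypt_CBC(b'hello sm4', iv)
    return (crypt_sm4.decrypt_ECB(ecb_cipher),
            crypt_sm4.decrypt_CBC(cbc_cipher, iv))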
if __name__ == '__main__':
    parser = ArgumentParser(description="SM4 encryption/decryption")
    parser.add_argument('crypt', choices=['encrypt', 'decrypt'], help='encrypt or decrypt')
    parser.add_argument('mode', choices=['ecb', 'cbc'], help='block cipher mode')
    parser.add_argument('source', help='data to encrypt/decrypt')
    parser.add_argument('key', help='cipher key')
    parser.add_argument('--iv', help='initialization vector, used in CBC mode')
    parser.add_argument('--source_type',
                        choices=['input', 'bin_file', 'image'],
                        help='type of the source data',
                        default='input')
    parser.add_argument('--output', help='output file name; writes to standard output if omitted')
args = parser.parse_args()
c = CryptSM4()
c.set_key(args.key)
if args.mode == 'cbc' and args.iv is None:
        raise ArgumentError("an initialization vector (--iv) is required in CBC mode")
if args.source_type == 'input':
input = args.source
if input[:2].lower() == '0x':
input = int(input[2:], 16)
elif args.source_type == 'bin_file':
with open(args.source, 'rb') as f:
input = f.read()
else:
from PIL import Image
import numpy as np
source = Image.open(args.source)
img = np.array(source.convert('RGBA'))
shape = img.shape
size = img.size
input = unhexlify(''.join([num2hex(i, width=2)
for i in img.flatten()]))
if args.crypt == 'encrypt':
output = c.encrypt_ECB(input) if args.mode == 'ecb' else c.encrypt_CBC(
input, args.iv)
else:
output = c.decrypt_ECB(input) if args.mode == 'ecb' else c.decrypt_CBC(
input, args.iv)
if args.source_type == 'image':
output = hexlify(output).decode()
output = output[:size * 2]
output = [[int(output[i + j:i + j + 2], 16) for j in range(0, 8, 2)]
for i in range(0, len(output), 8)]
output = np.array(output)
output = Image.fromarray(output.reshape(shape).astype('uint8'))
output.save(args.output)
elif args.output:
with open(args.output, "wb") as f:
f.write(output)
else:
try:
print(output.decode())
except:
print(hexlify(output).decode())
| 2.71875 | 3 |
sendotp/sendotp.py | saadmk11/sendotp-python | 5 | 5653 | <reponame>saadmk11/sendotp-python<filename>sendotp/sendotp.py
import json
import requests
from random import randint
class sendotp:
    def __init__(self, key, msg=None):
        self.baseUrl = "http://control.msg91.com"
        self.authkey = key
        if msg is None:
            self.msg = "Your otp is {{otp}}. Please do not share it with anybody"
        else:
            self.msg = msg
def actionURLBuilder(self, actionurl):
# print self.baseUrl + '/api/' +str(actionurl)
print (actionurl)
return self.baseUrl + '/api/' + str(actionurl)
def generateOtp(self):
return randint(1000, 9999)
def send(self, contactNumber, senderId, otp):
values = {
'authkey': self.authkey,
'mobile': contactNumber,
'message': self.msg.replace("{{otp}}", str(otp)),
'sender': senderId,
'otp': otp
}
print (self.call('sendotp.php', values))
return otp
def retry(self, contactNumber, retrytype='voice'):
values = {
'authkey': self.authkey,
'mobile': contactNumber,
'retrytype': retrytype
}
print (values)
response = self.call('retryotp.php', values)
        return response
def verify(self, contactNumber, otp):
values = {
'authkey': self.authkey,
'mobile': contactNumber,
'otp': otp
}
response = self.call('verifyRequestOTP.php', values)
        return response
def call(self, actionurl, args):
url = self.actionURLBuilder(actionurl)
print (url)
payload = (args)
response = requests.post(url, data=payload, verify=False)
print (response.text)
return response.status_code
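
# Hedged usage sketch (added for illustration; not part of the upstream
# library). The auth key, sender id and phone number below are placeholders.
#
#     client = sendotp('YOUR_MSG91_AUTH_KEY', 'Your otp is {{otp}}')
#     otp = client.send('919999999999', 'SENDER', client.generateOtp())
#     client.verify('919999999999', otp)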
| 2.984375 | 3 |
leetcode/1021-remove-outermost-parentheses.py | tjeubaoit/algorithm | 0 | 5654 | class Solution:
    def removeOuterParentheses(self, s: str) -> str:
        ans = []
        ct = 0  # current nesting depth
        for ch in s:
            if ch == '(':
                ct += 1
                if ct != 1:  # skip the outermost opening parenthesis
                    ans.append(ch)
            else:
                ct -= 1
                if ct != 0:  # skip the outermost closing parenthesis
                    ans.append(ch)
        return ''.join(ans)
if __name__ == '__main__':
# s = '(()())(())'
# s = '(()())(())(()(()))'
s = '()()'
ret = Solution().removeOuterParentheses(s)
print(ret)
| 3.375 | 3 |
venv/Scripts/ex049.py | SamuelNunesDev/starting_point_in_python | 0 | 5655 | n = int(input('Digite um número para ver sua tabuada: '))
for c in range(0, 11):
print(f'{n} * {c} = {n * c}')
| 3.875 | 4 |
js2py/evaljs.py | inprod/Js2Py | 0 | 5656 | <reponame>inprod/Js2Py
# coding=utf-8
from .translators import translate_js, DEFAULT_HEADER
from .es6 import js6_to_js5
import sys
import time
import json
import six
import os
import hashlib
import codecs
__all__ = [
'EvalJs', 'translate_js', 'import_js', 'eval_js', 'translate_file',
'eval_js6', 'translate_js6', 'run_file', 'disable_pyimport',
'get_file_contents', 'write_file_contents'
]
DEBUG = False
def disable_pyimport():
import pyjsparser.parser
pyjsparser.parser.ENABLE_PYIMPORT = False
def path_as_local(path):
if os.path.isabs(path):
return path
# relative to cwd
return os.path.join(os.getcwd(), path)
def import_js(path, lib_name, globals):
"""Imports from javascript source file.
globals is your globals()"""
with codecs.open(path_as_local(path), "r", "utf-8") as f:
js = f.read()
e = EvalJs()
e.execute(js)
var = e.context['var']
globals[lib_name] = var.to_python()
def get_file_contents(path_or_file):
if hasattr(path_or_file, 'read'):
js = path_or_file.read()
else:
with codecs.open(path_as_local(path_or_file), "r", "utf-8") as f:
js = f.read()
return js
def write_file_contents(path_or_file, contents):
if hasattr(path_or_file, 'write'):
path_or_file.write(contents)
else:
with open(path_as_local(path_or_file), 'w') as f:
f.write(contents)
def translate_file(input_path, output_path):
'''
Translates input JS file to python and saves the it to the output path.
It appends some convenience code at the end so that it is easy to import JS objects.
For example we have a file 'example.js' with: var a = function(x) {return x}
translate_file('example.js', 'example.py')
    Now example.py can be easily imported and used:
>>> from example import example
>>> example.a(30)
30
'''
js = get_file_contents(input_path)
py_code = translate_js(js)
lib_name = os.path.basename(output_path).split('.')[0]
head = '__all__ = [%s]\n\n# Don\'t look below, you will not understand this Python code :) I don\'t.\n\n' % repr(
lib_name)
tail = '\n\n# Add lib to the module scope\n%s = var.to_python()' % lib_name
out = head + py_code + tail
write_file_contents(output_path, out)
def run_file(path_or_file, context=None):
''' Context must be EvalJS object. Runs given path as a JS program. Returns (eval_value, context).
'''
if context is None:
context = EvalJs()
if not isinstance(context, EvalJs):
raise TypeError('context must be the instance of EvalJs')
eval_value = context.eval(get_file_contents(path_or_file))
return eval_value, context
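
# Hedged usage sketch for run_file (added for illustration): 'program.js' and
# 'someGlobal' are placeholders, not part of the js2py API.
#
#     result, ctx = run_file('program.js')
#     print(result)           # value of the last evaluated expression
#     print(ctx.someGlobal)   # globals defined by the script are reachable on ctx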
def eval_js(js):
"""Just like javascript eval. Translates javascript to python,
executes and returns python object.
js is javascript source code
EXAMPLE:
>>> import js2py
>>> add = js2py.eval_js('function add(a, b) {return a + b}')
>>> add(1, 2) + 3
6
>>> add('1', 2, 3)
u'12'
>>> add.constructor
function Function() { [python code] }
NOTE: For Js Number, String, Boolean and other base types returns appropriate python BUILTIN type.
For Js functions and objects, returns Python wrapper - basically behaves like normal python object.
If you really want to convert object to python dict you can use to_dict method.
"""
e = EvalJs()
return e.eval(js)
def eval_js6(js):
"""Just like eval_js but with experimental support for js6 via babel."""
return eval_js(js6_to_js5(js))
def translate_js6(js):
"""Just like translate_js but with experimental support for js6 via babel."""
return translate_js(js6_to_js5(js))
class EvalJs(object):
"""This class supports continuous execution of javascript under same context.
>>> ctx = EvalJs()
>>> ctx.execute('var a = 10;function f(x) {return x*x};')
>>> ctx.f(9)
81
>>> ctx.a
10
context is a python dict or object that contains python variables that should be available to JavaScript
For example:
>>> ctx = EvalJs({'a': 30})
>>> ctx.execute('var x = a')
>>> ctx.x
30
You can enable JS require function via enable_require. With this feature enabled you can use js modules
from npm, for example:
>>> ctx = EvalJs(enable_require=True)
>>> ctx.execute("var esprima = require('esprima');")
>>> ctx.execute("esprima.parse('var a = 1')")
You can run interactive javascript console with console method!"""
def __init__(self, context={}, enable_require=False):
self.__dict__['_context'] = {}
exec (DEFAULT_HEADER, self._context)
self.__dict__['_var'] = self._context['var'].to_python()
if enable_require:
def _js_require_impl(npm_module_name):
from .node_import import require
from .base import to_python
return require(to_python(npm_module_name), context=self._context)
setattr(self._var, 'require', _js_require_impl)
if not isinstance(context, dict):
try:
context = context.__dict__
except:
raise TypeError(
'context has to be either a dict or have __dict__ attr')
for k, v in six.iteritems(context):
setattr(self._var, k, v)
def execute(self, js=None, use_compilation_plan=False):
"""executes javascript js in current context
During initial execute() the converted js is cached for re-use. That means next time you
run the same javascript snippet you save many instructions needed to parse and convert the
js code to python code.
        This cache causes minor overhead (a cache dict is updated) but the Js=>Py conversion process
is typically expensive compared to actually running the generated python code.
Note that the cache is just a dict, it has no expiration or cleanup so when running this
in automated situations with vast amounts of snippets it might increase memory usage.
"""
try:
cache = self.__dict__['cache']
except KeyError:
cache = self.__dict__['cache'] = {}
hashkey = hashlib.md5(js.encode('utf-8')).digest()
try:
compiled = cache[hashkey]
except KeyError:
code = translate_js(
js, '', use_compilation_plan=use_compilation_plan)
compiled = cache[hashkey] = compile(code, '<EvalJS snippet>',
'exec')
exec (compiled, self._context)
def eval(self, expression, use_compilation_plan=False):
"""evaluates expression in current context and returns its value"""
code = 'PyJsEvalResult = eval(%s)' % json.dumps(expression)
self.execute(code, use_compilation_plan=use_compilation_plan)
return self['PyJsEvalResult']
def eval_js6(self, expression, use_compilation_plan=False):
"""same as eval, except that the JS code gets translated from es6 to es5 before being executed."""
es5_expression = js6_to_js5(expression)
return self.eval(es5_expression, use_compilation_plan)
def execute_debug(self, js):
"""executes javascript js in current context
as opposed to the (faster) self.execute method, you can use your regular debugger
to set breakpoints and inspect the generated python code
"""
code = translate_js(js, '')
# make sure you have a temp folder:
filename = 'temp' + os.sep + '_' + hashlib.md5(
code.encode("utf-8")).hexdigest() + '.py'
try:
with open(filename, mode='w') as f:
f.write(code)
with open(filename, "r") as f:
pyCode = compile(f.read(), filename, 'exec')
exec(pyCode, self._context)
except Exception as err:
raise err
finally:
os.remove(filename)
try:
os.remove(filename + 'c')
except:
pass
def eval_debug(self, expression):
"""evaluates expression in current context and returns its value
as opposed to the (faster) self.execute method, you can use your regular debugger
to set breakpoints and inspect the generated python code
"""
code = 'PyJsEvalResult = eval(%s)' % json.dumps(expression)
self.execute_debug(code)
return self['PyJsEvalResult']
@property
def context(self):
return self._context
def __getattr__(self, var):
return getattr(self._var, var)
def __getitem__(self, var):
return getattr(self._var, var)
def __setattr__(self, var, val):
return setattr(self._var, var, val)
def __setitem__(self, var, val):
return setattr(self._var, var, val)
def console(self):
"""starts to interact (starts interactive console) Something like code.InteractiveConsole"""
while True:
if six.PY2:
code = raw_input('>>> ')
else:
code = input('>>>')
try:
print(self.eval(code))
except KeyboardInterrupt:
break
except Exception as e:
import traceback
if DEBUG:
sys.stderr.write(traceback.format_exc())
else:
sys.stderr.write('EXCEPTION: ' + str(e) + '\n')
time.sleep(0.01)
#print x
if __name__ == '__main__':
#with open('C:\Users\Piotrek\Desktop\esprima.js', 'rb') as f:
# x = f.read()
e = EvalJs()
e.execute('square(x)')
#e.execute(x)
e.console()
| 2.296875 | 2 |
setup.py | mvduin/py-uio | 38 | 5657 | #!/usr/bin/python3
from setuptools import setup, find_packages
setup(
package_dir = { '': 'src' },
packages = find_packages( where='src' ),
)
| 1.429688 | 1 |
tools/verity_utils.py | FabriSC/Alioth-SC | 3 | 5658 | #!/usr/bin/env python
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os.path
import shlex
import struct
import common
import sparse_img
from rangelib import RangeSet
logger = logging.getLogger(__name__)
OPTIONS = common.OPTIONS
BLOCK_SIZE = common.BLOCK_SIZE
FIXED_SALT = "aee087a5be3b982978c923f566a94613496b417f2af592639bc80d141e34dfe7"
class BuildVerityImageError(Exception):
"""An Exception raised during verity image building."""
def __init__(self, message):
Exception.__init__(self, message)
def GetVerityFECSize(image_size):
cmd = ["fec", "-s", str(image_size)]
output = common.RunAndCheckOutput(cmd, verbose=False)
return int(output)
def GetVerityTreeSize(image_size):
cmd = ["build_verity_tree", "-s", str(image_size)]
output = common.RunAndCheckOutput(cmd, verbose=False)
return int(output)
def GetVerityMetadataSize(image_size):
cmd = ["build_verity_metadata", "size", str(image_size)]
output = common.RunAndCheckOutput(cmd, verbose=False)
return int(output)
def GetVeritySize(image_size, fec_supported):
verity_tree_size = GetVerityTreeSize(image_size)
verity_metadata_size = GetVerityMetadataSize(image_size)
verity_size = verity_tree_size + verity_metadata_size
if fec_supported:
fec_size = GetVerityFECSize(image_size + verity_size)
return verity_size + fec_size
return verity_size
def GetSimgSize(image_file):
simg = sparse_img.SparseImage(image_file, build_map=False)
return simg.blocksize * simg.total_blocks
def ZeroPadSimg(image_file, pad_size):
blocks = pad_size // BLOCK_SIZE
logger.info("Padding %d blocks (%d bytes)", blocks, pad_size)
simg = sparse_img.SparseImage(image_file, mode="r+b", build_map=False)
simg.AppendFillChunk(0, blocks)
def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path,
padding_size):
cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
verity_path, verity_fec_path]
common.RunAndCheckOutput(cmd)
def BuildVerityTree(sparse_image_path, verity_image_path):
cmd = ["build_verity_tree", "-A", FIXED_SALT, sparse_image_path,
verity_image_path]
output = common.RunAndCheckOutput(cmd)
root, salt = output.split()
return root, salt
def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
block_device, signer_path, key, signer_args,
verity_disable):
cmd = ["build_verity_metadata", "build", str(image_size),
verity_metadata_path, root_hash, salt, block_device, signer_path, key]
if signer_args:
cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
if verity_disable:
cmd.append("--verity_disable")
common.RunAndCheckOutput(cmd)
def Append2Simg(sparse_image_path, unsparse_image_path, error_message):
"""Appends the unsparse image to the given sparse image.
Args:
sparse_image_path: the path to the (sparse) image
unsparse_image_path: the path to the (unsparse) image
Raises:
BuildVerityImageError: On error.
"""
cmd = ["append2simg", sparse_image_path, unsparse_image_path]
try:
common.RunAndCheckOutput(cmd)
except:
logger.exception(error_message)
raise BuildVerityImageError(error_message)
def Append(target, file_to_append, error_message):
"""Appends file_to_append to target.
Raises:
BuildVerityImageError: On error.
"""
try:
with open(target, 'ab') as out_file, \
open(file_to_append, 'rb') as input_file:
for line in input_file:
out_file.write(line)
except IOError:
logger.exception(error_message)
raise BuildVerityImageError(error_message)
def CreateVerityImageBuilder(prop_dict):
"""Returns a verity image builder based on the given build properties.
Args:
prop_dict: A dict that contains the build properties. In particular, it will
look for verity-related property values.
Returns:
A VerityImageBuilder instance for Verified Boot 1.0 or Verified Boot 2.0; or
None if the given build doesn't support Verified Boot.
"""
partition_size = prop_dict.get("partition_size")
# partition_size could be None at this point, if using dynamic partitions.
if partition_size:
partition_size = int(partition_size)
# Verified Boot 1.0
verity_supported = prop_dict.get("verity") == "true"
is_verity_partition = "verity_block_device" in prop_dict
if verity_supported and is_verity_partition:
if OPTIONS.verity_signer_path is not None:
signer_path = OPTIONS.verity_signer_path
else:
signer_path = prop_dict["verity_signer_cmd"]
return Version1VerityImageBuilder(
partition_size,
prop_dict["verity_block_device"],
prop_dict.get("verity_fec") == "true",
signer_path,
prop_dict["verity_key"] + ".pk8",
OPTIONS.verity_signer_args,
"verity_disable" in prop_dict)
# Verified Boot 2.0
if (prop_dict.get("avb_hash_enable") == "true" or
prop_dict.get("avb_hashtree_enable") == "true"):
# key_path and algorithm are only available when chain partition is used.
key_path = prop_dict.get("avb_key_path")
algorithm = prop_dict.get("avb_algorithm")
# Image uses hash footer.
if prop_dict.get("avb_hash_enable") == "true":
return VerifiedBootVersion2VerityImageBuilder(
prop_dict["partition_name"],
partition_size,
VerifiedBootVersion2VerityImageBuilder.AVB_HASH_FOOTER,
prop_dict["avb_avbtool"],
key_path,
algorithm,
prop_dict.get("avb_salt"),
prop_dict["avb_add_hash_footer_args"])
# Image uses hashtree footer.
return VerifiedBootVersion2VerityImageBuilder(
prop_dict["partition_name"],
partition_size,
VerifiedBootVersion2VerityImageBuilder.AVB_HASHTREE_FOOTER,
prop_dict["avb_avbtool"],
key_path,
algorithm,
prop_dict.get("avb_salt"),
prop_dict["avb_add_hashtree_footer_args"])
return None
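
# Hedged usage sketch (added for illustration; not taken from an actual build).
# The property values below are made-up examples of an AVB hashtree partition,
# and CalculateMaxImageSize() only works if avbtool is on PATH.
#
#   example_prop_dict = {
#       "partition_name": "vendor",
#       "partition_size": str(512 * 1024 * 1024),
#       "avb_hashtree_enable": "true",
#       "avb_avbtool": "avbtool",
#       "avb_add_hashtree_footer_args": "",
#   }
#   builder = CreateVerityImageBuilder(example_prop_dict)
#   max_image_size = builder.CalculateMaxImageSize()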
class VerityImageBuilder(object):
"""A builder that generates an image with verity metadata for Verified Boot.
A VerityImageBuilder instance handles the works for building an image with
verity metadata for supporting Android Verified Boot. This class defines the
common interface between Verified Boot 1.0 and Verified Boot 2.0. A matching
builder will be returned based on the given build properties.
More info on the verity image generation can be found at the following link.
https://source.android.com/security/verifiedboot/dm-verity#implementation
"""
def CalculateMaxImageSize(self, partition_size):
"""Calculates the filesystem image size for the given partition size."""
raise NotImplementedError
def CalculateDynamicPartitionSize(self, image_size):
"""Calculates and sets the partition size for a dynamic partition."""
raise NotImplementedError
def PadSparseImage(self, out_file):
"""Adds padding to the generated sparse image."""
raise NotImplementedError
def Build(self, out_file):
"""Builds the verity image and writes it to the given file."""
raise NotImplementedError
class Version1VerityImageBuilder(VerityImageBuilder):
"""A VerityImageBuilder for Verified Boot 1.0."""
def __init__(self, partition_size, block_dev, fec_supported, signer_path,
signer_key, signer_args, verity_disable):
self.version = 1
self.partition_size = partition_size
self.block_device = block_dev
self.fec_supported = fec_supported
self.signer_path = signer_path
self.signer_key = signer_key
self.signer_args = signer_args
self.verity_disable = verity_disable
self.image_size = None
self.verity_size = None
def CalculateDynamicPartitionSize(self, image_size):
# This needs to be implemented. Note that returning the given image size as
# the partition size doesn't make sense, as it will fail later.
raise NotImplementedError
def CalculateMaxImageSize(self, partition_size=None):
"""Calculates the max image size by accounting for the verity metadata.
Args:
partition_size: The partition size, which defaults to self.partition_size
if unspecified.
Returns:
The size of the image adjusted for verity metadata.
"""
if partition_size is None:
partition_size = self.partition_size
assert partition_size > 0, \
"Invalid partition size: {}".format(partition_size)
hi = partition_size
if hi % BLOCK_SIZE != 0:
hi = (hi // BLOCK_SIZE) * BLOCK_SIZE
# verity tree and fec sizes depend on the partition size, which
# means this estimate is always going to be unnecessarily small
verity_size = GetVeritySize(hi, self.fec_supported)
lo = partition_size - verity_size
result = lo
# do a binary search for the optimal size
while lo < hi:
i = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
v = GetVeritySize(i, self.fec_supported)
if i + v <= partition_size:
if result < i:
result = i
verity_size = v
lo = i + BLOCK_SIZE
else:
hi = i
self.image_size = result
self.verity_size = verity_size
logger.info(
"Calculated image size for verity: partition_size %d, image_size %d, "
"verity_size %d", partition_size, result, verity_size)
return result
def Build(self, out_file):
"""Creates an image that is verifiable using dm-verity.
Args:
out_file: the output image.
    Raises:
AssertionError: On invalid partition sizes.
BuildVerityImageError: On other errors.
"""
image_size = int(self.image_size)
tempdir_name = common.MakeTempDir(suffix="_verity_images")
# Get partial image paths.
verity_image_path = os.path.join(tempdir_name, "verity.img")
verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")
# Build the verity tree and get the root hash and salt.
root_hash, salt = BuildVerityTree(out_file, verity_image_path)
# Build the metadata blocks.
BuildVerityMetadata(
image_size, verity_metadata_path, root_hash, salt, self.block_device,
self.signer_path, self.signer_key, self.signer_args,
self.verity_disable)
padding_size = self.partition_size - self.image_size - self.verity_size
assert padding_size >= 0
# Build the full verified image.
Append(
verity_image_path, verity_metadata_path,
"Failed to append verity metadata")
if self.fec_supported:
# Build FEC for the entire partition, including metadata.
verity_fec_path = os.path.join(tempdir_name, "verity_fec.img")
BuildVerityFEC(
out_file, verity_image_path, verity_fec_path, padding_size)
Append(verity_image_path, verity_fec_path, "Failed to append FEC")
Append2Simg(
out_file, verity_image_path, "Failed to append verity data")
def PadSparseImage(self, out_file):
sparse_image_size = GetSimgSize(out_file)
if sparse_image_size > self.image_size:
raise BuildVerityImageError(
"Error: image size of {} is larger than partition size of "
"{}".format(sparse_image_size, self.image_size))
ZeroPadSimg(out_file, self.image_size - sparse_image_size)
class VerifiedBootVersion2VerityImageBuilder(VerityImageBuilder):
"""A VerityImageBuilder for Verified Boot 2.0."""
AVB_HASH_FOOTER = 1
AVB_HASHTREE_FOOTER = 2
def __init__(self, partition_name, partition_size, footer_type, avbtool,
key_path, algorithm, salt, signing_args):
self.version = 2
self.partition_name = partition_name
self.partition_size = partition_size
self.footer_type = footer_type
self.avbtool = avbtool
self.algorithm = algorithm
self.key_path = key_path
self.salt = salt
self.signing_args = signing_args
self.image_size = None
def CalculateMinPartitionSize(self, image_size, size_calculator=None):
"""Calculates min partition size for a given image size.
This is used when determining the partition size for a dynamic partition,
    which should cover the given image size (for filesystem files) as well as
the verity metadata size.
Args:
image_size: The size of the image in question.
size_calculator: The function to calculate max image size
for a given partition size.
Returns:
The minimum partition size required to accommodate the image size.
"""
if size_calculator is None:
size_calculator = self.CalculateMaxImageSize
# Use image size as partition size to approximate final partition size.
image_ratio = size_calculator(image_size) / float(image_size)
# Prepare a binary search for the optimal partition size.
lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - BLOCK_SIZE
# Ensure lo is small enough: max_image_size should <= image_size.
delta = BLOCK_SIZE
max_image_size = size_calculator(lo)
while max_image_size > image_size:
image_ratio = max_image_size / float(lo)
lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - delta
delta *= 2
max_image_size = size_calculator(lo)
hi = lo + BLOCK_SIZE
# Ensure hi is large enough: max_image_size should >= image_size.
delta = BLOCK_SIZE
max_image_size = size_calculator(hi)
while max_image_size < image_size:
image_ratio = max_image_size / float(hi)
hi = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE + delta
delta *= 2
max_image_size = size_calculator(hi)
partition_size = hi
# Start to binary search.
while lo < hi:
mid = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
max_image_size = size_calculator(mid)
if max_image_size >= image_size: # if mid can accommodate image_size
if mid < partition_size: # if a smaller partition size is found
partition_size = mid
hi = mid
else:
lo = mid + BLOCK_SIZE
logger.info(
"CalculateMinPartitionSize(%d): partition_size %d.", image_size,
partition_size)
return partition_size
def CalculateDynamicPartitionSize(self, image_size):
self.partition_size = self.CalculateMinPartitionSize(image_size)
return self.partition_size
def CalculateMaxImageSize(self, partition_size=None):
"""Calculates max image size for a given partition size.
Args:
partition_size: The partition size, which defaults to self.partition_size
if unspecified.
Returns:
The maximum image size.
Raises:
BuildVerityImageError: On error or getting invalid image size.
"""
if partition_size is None:
partition_size = self.partition_size
assert partition_size > 0, \
"Invalid partition size: {}".format(partition_size)
add_footer = ("add_hash_footer" if self.footer_type == self.AVB_HASH_FOOTER
else "add_hashtree_footer")
cmd = [self.avbtool, add_footer, "--partition_size",
str(partition_size), "--calc_max_image_size"]
cmd.extend(shlex.split(self.signing_args))
proc = common.Run(cmd)
output, _ = proc.communicate()
if proc.returncode != 0:
raise BuildVerityImageError(
"Failed to calculate max image size:\n{}".format(output))
image_size = int(output)
if image_size <= 0:
raise BuildVerityImageError(
"Invalid max image size: {}".format(output))
self.image_size = image_size
return image_size
def PadSparseImage(self, out_file):
# No-op as the padding is taken care of by avbtool.
pass
def Build(self, out_file):
"""Adds dm-verity hashtree and AVB metadata to an image.
Args:
out_file: Path to image to modify.
"""
add_footer = ("add_hash_footer" if self.footer_type == self.AVB_HASH_FOOTER
else "add_hashtree_footer")
cmd = [self.avbtool, add_footer,
"--partition_size", str(self.partition_size),
"--partition_name", self.partition_name,
"--image", out_file]
if self.key_path and self.algorithm:
cmd.extend(["--key", self.key_path, "--algorithm", self.algorithm])
if self.salt:
cmd.extend(["--salt", self.salt])
cmd.extend(shlex.split(self.signing_args))
proc = common.Run(cmd)
output, _ = proc.communicate()
if proc.returncode != 0:
raise BuildVerityImageError("Failed to add AVB footer: {}".format(output))
class HashtreeInfoGenerationError(Exception):
"""An Exception raised during hashtree info generation."""
def __init__(self, message):
Exception.__init__(self, message)
class HashtreeInfo(object):
def __init__(self):
self.hashtree_range = None
self.filesystem_range = None
self.hash_algorithm = None
self.salt = None
self.root_hash = None
def CreateHashtreeInfoGenerator(partition_name, block_size, info_dict):
generator = None
if (info_dict.get("verity") == "true" and
info_dict.get("{}_verity_block_device".format(partition_name))):
partition_size = info_dict["{}_size".format(partition_name)]
fec_supported = info_dict.get("verity_fec") == "true"
generator = VerifiedBootVersion1HashtreeInfoGenerator(
partition_size, block_size, fec_supported)
return generator
class HashtreeInfoGenerator(object):
def Generate(self, image):
raise NotImplementedError
def DecomposeSparseImage(self, image):
raise NotImplementedError
def ValidateHashtree(self):
raise NotImplementedError
class VerifiedBootVersion1HashtreeInfoGenerator(HashtreeInfoGenerator):
"""A class that parses the metadata of hashtree for a given partition."""
def __init__(self, partition_size, block_size, fec_supported):
"""Initialize VerityTreeInfo with the sparse image and input property.
Arguments:
partition_size: The whole size in bytes of a partition, including the
filesystem size, padding size, and verity size.
block_size: Expected size in bytes of each block for the sparse image.
fec_supported: True if the verity section contains fec data.
"""
self.block_size = block_size
self.partition_size = partition_size
self.fec_supported = fec_supported
self.image = None
self.filesystem_size = None
self.hashtree_size = None
self.metadata_size = None
prop_dict = {
'partition_size': str(partition_size),
'verity': 'true',
'verity_fec': 'true' if fec_supported else None,
# 'verity_block_device' needs to be present to indicate a verity-enabled
# partition.
'verity_block_device': '',
# We don't need the following properties that are needed for signing the
# verity metadata.
'verity_key': '',
'verity_signer_cmd': None,
}
self.verity_image_builder = CreateVerityImageBuilder(prop_dict)
self.hashtree_info = HashtreeInfo()
def DecomposeSparseImage(self, image):
"""Calculate the verity size based on the size of the input image.
Since we already know the structure of a verity enabled image to be:
[filesystem, verity_hashtree, verity_metadata, fec_data]. We can then
calculate the size and offset of each section.
"""
self.image = image
assert self.block_size == image.blocksize
assert self.partition_size == image.total_blocks * self.block_size, \
"partition size {} doesn't match with the calculated image size." \
" total_blocks: {}".format(self.partition_size, image.total_blocks)
adjusted_size = self.verity_image_builder.CalculateMaxImageSize()
assert adjusted_size % self.block_size == 0
verity_tree_size = GetVerityTreeSize(adjusted_size)
assert verity_tree_size % self.block_size == 0
metadata_size = GetVerityMetadataSize(adjusted_size)
assert metadata_size % self.block_size == 0
self.filesystem_size = adjusted_size
self.hashtree_size = verity_tree_size
self.metadata_size = metadata_size
self.hashtree_info.filesystem_range = RangeSet(
data=[0, adjusted_size // self.block_size])
self.hashtree_info.hashtree_range = RangeSet(
data=[adjusted_size // self.block_size,
(adjusted_size + verity_tree_size) // self.block_size])
def _ParseHashtreeMetadata(self):
"""Parses the hash_algorithm, root_hash, salt from the metadata block."""
metadata_start = self.filesystem_size + self.hashtree_size
metadata_range = RangeSet(
data=[metadata_start // self.block_size,
(metadata_start + self.metadata_size) // self.block_size])
meta_data = b''.join(self.image.ReadRangeSet(metadata_range))
# More info about the metadata structure available in:
# system/extras/verity/build_verity_metadata.py
META_HEADER_SIZE = 268
header_bin = meta_data[0:META_HEADER_SIZE]
header = struct.unpack("II256sI", header_bin)
# header: magic_number, version, signature, table_len
assert header[0] == 0xb001b001, header[0]
table_len = header[3]
verity_table = meta_data[META_HEADER_SIZE: META_HEADER_SIZE + table_len]
table_entries = verity_table.rstrip().split()
# Expected verity table format: "1 block_device block_device block_size
# block_size data_blocks data_blocks hash_algorithm root_hash salt"
assert len(table_entries) == 10, "Unexpected verity table size {}".format(
len(table_entries))
assert (int(table_entries[3]) == self.block_size and
int(table_entries[4]) == self.block_size)
assert (int(table_entries[5]) * self.block_size == self.filesystem_size and
int(table_entries[6]) * self.block_size == self.filesystem_size)
self.hashtree_info.hash_algorithm = table_entries[7].decode()
self.hashtree_info.root_hash = table_entries[8].decode()
self.hashtree_info.salt = table_entries[9].decode()
def ValidateHashtree(self):
"""Checks that we can reconstruct the verity hash tree."""
# Writes the filesystem section to a temp file; and calls the executable
# build_verity_tree to construct the hash tree.
adjusted_partition = common.MakeTempFile(prefix="adjusted_partition")
with open(adjusted_partition, "wb") as fd:
self.image.WriteRangeDataToFd(self.hashtree_info.filesystem_range, fd)
generated_verity_tree = common.MakeTempFile(prefix="verity")
root_hash, salt = BuildVerityTree(adjusted_partition, generated_verity_tree)
# The salt should be always identical, as we use fixed value.
assert salt == self.hashtree_info.salt, \
"Calculated salt {} doesn't match the one in metadata {}".format(
salt, self.hashtree_info.salt)
if root_hash != self.hashtree_info.root_hash:
logger.warning(
"Calculated root hash %s doesn't match the one in metadata %s",
root_hash, self.hashtree_info.root_hash)
return False
# Reads the generated hash tree and checks if it has the exact same bytes
# as the one in the sparse image.
with open(generated_verity_tree, 'rb') as fd:
return fd.read() == b''.join(self.image.ReadRangeSet(
self.hashtree_info.hashtree_range))
def Generate(self, image):
"""Parses and validates the hashtree info in a sparse image.
Returns:
hashtree_info: The information needed to reconstruct the hashtree.
Raises:
HashtreeInfoGenerationError: If we fail to generate the exact bytes of
the hashtree.
"""
self.DecomposeSparseImage(image)
self._ParseHashtreeMetadata()
if not self.ValidateHashtree():
raise HashtreeInfoGenerationError("Failed to reconstruct the verity tree")
return self.hashtree_info
def CreateCustomImageBuilder(info_dict, partition_name, partition_size,
key_path, algorithm, signing_args):
builder = None
if info_dict.get("avb_enable") == "true":
builder = VerifiedBootVersion2VerityImageBuilder(
partition_name,
partition_size,
VerifiedBootVersion2VerityImageBuilder.AVB_HASHTREE_FOOTER,
info_dict.get("avb_avbtool"),
key_path,
algorithm,
# Salt is None because custom images have no fingerprint property to be
# used as the salt.
None,
signing_args)
return builder
| 1.992188 | 2 |
orgviz/dones.py | tkf/orgviz | 8 | 5659 | <filename>orgviz/dones.py
#!/usr/bin/env python
"""org archive to html table converter"""
import os
import datetime
import itertools
from .utils.date import minutestr, total_minutes
def rootname_from_archive_olpath(node):
"""
Find rootname from ARCHIVE_OLPATH property.
Return None if not found.
"""
olpath = node.get_property('ARCHIVE_OLPATH')
if olpath:
olpathlist = olpath.split('/', 1)
if len(olpathlist) > 1:
(rootname, dummy) = olpathlist
else:
rootname = olpath
return rootname
return None
def find_rootname(node):
"""
Find rootname given node
"""
rootname = rootname_from_archive_olpath(node)
if not rootname:
n = node
p = node.parent
while not p.is_root():
n = p
p = p.parent
# n is root node
rootname = rootname_from_archive_olpath(n) or n.heading
return rootname
def key_row_from_node(node):
"""
    Return a two-tuple (key, row) whose elements are
    a key object for sorting the table and a dictionary which has the following
    keywords: heading, closed, scheduled, effort, clocksum, rootname.
"""
heading = node.heading
# find rootname
rootname = find_rootname(node)
if heading == rootname:
rootname = ""
# calc clocksum if CLOCK exists
clocksum = ''
clocklist = node.clock
if clocklist:
clocksum = sum([total_minutes(c.duration) for c in clocklist])
closed = node.closed
scheduled = node.scheduled
effort = node.get_property('Effort')
row = dict(
heading=heading,
closed=closed and closed.start.strftime('%a %d %b %H:%M'),
scheduled=scheduled and scheduled.start.strftime('%a %d %b %H:%M'),
effort=effort and minutestr(effort),
clocksum=clocksum and minutestr(clocksum),
rootname=rootname,
)
return (closed.start if closed else None, row)
def unique_name_from_paths(pathlist):
namelist = []
for path in pathlist:
name = os.path.basename(path)
if name in namelist:
name_orig = name
i = 1
            while name in namelist:
name = "%s <%d>" % (name_orig, i)
i += 1
namelist.append(name)
return namelist
def sameday(dt1, dt2):
return (isinstance(dt1, datetime.date) and
isinstance(dt2, datetime.date) and
dt1.year == dt2.year and
dt1.month == dt2.month and
dt1.day == dt2.day)
def table_add_oddday(key_table):
"""
Add oddday key in each rows of key_table *IN PLACE*.
Note that key should be a ``datetime.date`` object.
"""
previous = None
odd = True
for (key, row) in key_table:
this = key
if not sameday(this, previous):
odd = not odd
row['oddday'] = odd
previous = this
def get_data(orgnodes_list, orgpath_list, done, num=100):
"""
Get data for rendering jinja2 template. Data is dictionary like this:
table: list of `row`
list of row generated by ``row_from_node``
orgpathname_list: list of `orgpathname`
orgpathname: dict
contains `orgpath` and `orgname`.
`orgname` is short and unique name for `orgpath`.
title: str
a title
"""
key_table = []
orgname_list = unique_name_from_paths(orgpath_list)
for (nodelist, orgname) in zip(orgnodes_list, orgname_list):
for node in nodelist:
if node.todo == done:
(key, row) = key_row_from_node(node)
if key:
row['orgname'] = orgname
key_table.append((key, row))
orgpathname_list = [
dict(orgpath=orgpath, orgname=orgname)
for (orgpath, orgname) in zip(orgpath_list, orgname_list)]
key_table.sort(reverse=True)
table_add_oddday(key_table)
table = list(itertools.islice((row for (key, row) in key_table), num))
return dict(table=table, orgpathname_list=orgpathname_list,
title='Recently archived tasks')
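
# Hedged sketch of the structure returned by get_data() (added for
# illustration; all field values below are made up):
#
#     {'table': [{'heading': 'Write report', 'closed': 'Mon 01 Jan 10:00',
#                 'scheduled': None, 'effort': '1:00', 'clocksum': '0:45',
#                 'rootname': 'work', 'orgname': 'todo.org', 'oddday': True}],
#      'orgpathname_list': [{'orgpath': '~/org/todo.org', 'orgname': 'todo.org'}],
#      'title': 'Recently archived tasks'}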
| 2.6875 | 3 |
tests/python/unittest/test_lang_tag.py | ravikumarvc/incubator-tvm | 3 | 5660 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import tvm
@tvm.tag_scope(tag="conv")
def compute_conv(data, weight):
N, IC, H, W = data.shape
OC, IC, KH, KW = weight.shape
OH = H - KH + 1
OW = W - KW + 1
ic = tvm.reduce_axis((0, IC), name='ic')
dh = tvm.reduce_axis((0, KH), name='dh')
dw = tvm.reduce_axis((0, KW), name='dw')
return tvm.compute((N, OC, OH, OW), lambda i, oc, h, w: \
tvm.sum(data[i, ic, h+dh, w+dw] * weight[oc, ic, dh, dw],
axis=[ic, dh, dw]))
def test_with():
n = tvm.size_var('n')
m = tvm.size_var('m')
l = tvm.size_var('l')
A = tvm.placeholder((n, l), name='A')
B = tvm.placeholder((m, l), name='B')
with tvm.tag_scope(tag="gemm"):
k = tvm.reduce_axis((0, l), name='k')
C = tvm.compute((n, m), lambda i, j: tvm.sum(A[i, k] * B[j, k], axis=k),
attrs={"hello" : 1, "arr": [10, 12]})
assert C.op.tag == 'gemm'
assert "hello" in C.op.attrs
assert "xx" not in C.op.attrs
assert C.op.attrs["hello"].value == 1
CC = tvm.load_json(tvm.save_json(C))
assert CC.op.attrs["hello"].value == 1
assert CC.op.attrs["arr"][0].value == 10
# str format happened to be json compatible
assert json.loads(str(CC.op.attrs))["arr"][1] == 12
def test_decorator():
n = tvm.size_var('n')
c = tvm.size_var('c')
h = tvm.size_var('h')
w = tvm.size_var('w')
kh = tvm.size_var('kh')
kw = tvm.size_var('kw')
A = tvm.placeholder((n, c, h, w), name='A')
B = tvm.placeholder((c, c, kh, kw), name='B')
C = compute_conv(A, B)
assert C.op.tag == 'conv'
assert len(C.op.attrs) == 0
def test_nested():
n = tvm.size_var('n')
c = tvm.size_var('c')
h = tvm.size_var('h')
w = tvm.size_var('w')
kh = tvm.size_var('kh')
kw = tvm.size_var('kw')
A = tvm.placeholder((n, c, h, w), name='A')
B = tvm.placeholder((c, c, kh, kw), name='B')
try:
with tvm.tag_scope(tag='conv'):
C = compute_conv(A, B)
assert False
except ValueError:
pass
if __name__ == "__main__":
test_with()
test_decorator()
test_nested()
| 2.03125 | 2 |
doepy/case_studies/discrete_time/MSFB2014.py | scwolof/doepy | 1 | 5661 | <reponame>scwolof/doepy<gh_stars>1-10
"""
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from scipy.integrate import odeint
from ..continuous_time import MSFB2014
"""
<NAME>, <NAME>, <NAME> and <NAME> (2014)
"Active fault diagnosis for nonlinear systems with probabilistic uncertainties"
IFAC Proceedings (2014): 7079-7084
"""
class Model (MSFB2014.Model):
def __init__ (self, name):
super().__init__(name)
def __call__ (self, x, u, p):
f = lambda x,t: self._ode_func(x,u,p)
t = np.linspace(0, self.dt, 51)
X = odeint(f, x, t)
return X[-1]
class M1 (Model):
"""
Nominal scenario (no fault)
"""
def __init__ (self):
super().__init__('M1')
self._ode_func = MSFB2014.M1()
self.p0 = self._ode_func.p0
class M2 (Model):
"""
Multiplicative actuator fault in inlet pump
"""
def __init__ (self):
super().__init__('M2')
self._ode_func = MSFB2014.M2()
self.p0 = self._ode_func.p0
class M3 (Model):
"""
Circular leak in tank
"""
def __init__ (self):
super().__init__('M3')
self._ode_func = MSFB2014.M3()
self.p0 = self._ode_func.p0
class DataGen (M2):
def __init__ (self):
super().__init__()
self.true_param = np.array([ 0.97, 0.82, 0.96, 0.67 ])
def __call__ (self, x, u):
return super().__call__(x, u, self.true_param)
def get ():
return DataGen(), [M1(), M2(), M3()] | 1.726563 | 2 |
house_code/tutorials_altered/3D_positioning_and_orientation.py | mukobi/Pozyx-Gabe | 1 | 5662 | #!/usr/bin/env python
"""
The pozyx ranging demo (c) Pozyx Labs
please check out https://www.pozyx.io/Documentation/Tutorials/getting_started/Python
This demo requires one (or two) pozyx shields. It demonstrates the 3D orientation and the functionality
to remotely read register data from a pozyx device. Connect one of the Pozyx devices with USB and run this script.
This demo reads the following sensor data:
- pressure
- acceleration
- magnetic field strength
- angular velocity
- the heading, roll and pitch
- the quaternion rotation describing the 3D orientation of the device. This can be used to transform from the body coordinate system to the world coordinate system.
- the linear acceleration (the acceleration excluding gravity)
- the gravitational vector
The data can be viewed in the Processing sketch orientation_3D.pde
"""
from time import time
from time import sleep
from pypozyx import *
from pypozyx.definitions.bitmasks import POZYX_INT_MASK_IMU
from pythonosc.osc_message_builder import OscMessageBuilder
from pythonosc.udp_client import SimpleUDPClient
from modules.user_input_config_functions import UserInputConfigFunctions as UserInput
from modules.file_writing import SensorAndPositionFileWriting as FileWriting
from modules.console_logging_functions import ConsoleLoggingFunctions as ConsoleLogging
import time as t
class Orientation3D(object):
"""Reads out all sensor data from either a local or remote Pozyx"""
def __init__(self, pozyx, osc_udp_client, anchors, algorithm=POZYX_POS_ALG_UWB_ONLY,
dimension=POZYX_3D, height=1000, remote_id=None):
self.pozyx = pozyx
self.osc_udp_client = osc_udp_client
self.anchors = anchors
self.algorithm = algorithm
self.dimension = dimension
self.height = height
self.remote_id = remote_id
def setup(self):
"""There is no specific setup functionality"""
self.current_time = time()
"""Sets up the Pozyx for positioning by calibrating its anchor list."""
print("------------POZYX POSITIONING V1.0 -------------")
print("NOTES: ")
print("- No parameters required.")
print()
print("- System will auto start configuration")
print()
print("- System will auto start positioning")
print("------------POZYX POSITIONING V1.0 --------------")
print()
print("START Ranging: ")
self.pozyx.clearDevices(self.remote_id)
self.setAnchorsManual()
self.printPublishConfigurationResult()
def loop(self):
"""Gets new IMU sensor data"""
# check sensor data status
sensor_data = SensorData()
calibration_status = SingleRegister()
if self.remote_id is not None or self.pozyx.checkForFlag(POZYX_INT_MASK_IMU, 0.01) == POZYX_SUCCESS:
status = self.pozyx.getAllSensorData(sensor_data, self.remote_id)
status &= self.pozyx.getCalibrationStatus(calibration_status, self.remote_id)
if status == POZYX_SUCCESS:
# check position status
position = Coordinates()
status = self.pozyx.doPositioning(
position, self.dimension, self.height, self.algorithm, remote_id=self.remote_id)
if status == POZYX_SUCCESS:
# self.print_publish_position(position)
self.publishSensorData(sensor_data, calibration_status)
return sensor_data, position
else:
pass
# self.print_publish_error_code("positioning")
return "Error, no data to print for this line"
def publishSensorData(self, sensor_data, calibration_status):
"""Makes the OSC sensor data package and publishes it"""
self.msg_builder = OscMessageBuilder("/sensordata")
self.msg_builder.add_arg(int(1000 * (time() - self.current_time)))
current_time = time()
self.addSensorData(sensor_data)
self.addCalibrationStatus(calibration_status)
self.osc_udp_client.send(self.msg_builder.build())
def addSensorData(self, sensor_data):
"""Adds the sensor data to the OSC message"""
self.msg_builder.add_arg(sensor_data.pressure)
self.addComponentsOSC(sensor_data.acceleration)
self.addComponentsOSC(sensor_data.magnetic)
self.addComponentsOSC(sensor_data.angular_vel)
self.addComponentsOSC(sensor_data.euler_angles)
self.addComponentsOSC(sensor_data.quaternion)
self.addComponentsOSC(sensor_data.linear_acceleration)
self.addComponentsOSC(sensor_data.gravity_vector)
def addComponentsOSC(self, component):
"""Adds a sensor data component to the OSC message"""
for data in component.data:
self.msg_builder.add_arg(float(data))
def addCalibrationStatus(self, calibration_status):
"""Adds the calibration status data to the OSC message"""
self.msg_builder.add_arg(calibration_status[0] & 0x03)
self.msg_builder.add_arg((calibration_status[0] & 0x0C) >> 2)
self.msg_builder.add_arg((calibration_status[0] & 0x30) >> 4)
self.msg_builder.add_arg((calibration_status[0] & 0xC0) >> 6)
def setAnchorsManual(self):
"""Adds the manually measured anchors to the Pozyx's device list one for one."""
status = self.pozyx.clearDevices(self.remote_id)
for anchor in self.anchors:
status &= self.pozyx.addDevice(anchor, self.remote_id)
        if len(self.anchors) > 4:
            status &= self.pozyx.setSelectionOfAnchors(POZYX_ANCHOR_SEL_AUTO, len(self.anchors))
return status
def printPublishConfigurationResult(self):
"""Prints and potentially publishes the anchor configuration result in a human-readable way."""
list_size = SingleRegister()
status = self.pozyx.getDeviceListSize(list_size, self.remote_id)
print("List size: {0}".format(list_size[0]))
if list_size[0] != len(self.anchors):
self.printPublishErrorCode("configuration")
return
device_list = DeviceList(list_size=list_size[0])
status = self.pozyx.getDeviceIds(device_list, self.remote_id)
print("Calibration result:")
print("Anchors found: {0}".format(list_size[0]))
print("Anchor IDs: ", device_list)
for i in range(list_size[0]):
anchor_coordinates = Coordinates()
status = self.pozyx.getDeviceCoordinates(
device_list[i], anchor_coordinates, self.remote_id)
print("ANCHOR,0x%0.4x, %s" % (device_list[i], str(anchor_coordinates)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message(
"/anchor", [device_list[i], int(anchor_coordinates.x), int(anchor_coordinates.y), int(anchor_coordinates.z)])
sleep(0.025)
def printPublishErrorCode(self, operation):
"""Prints the Pozyx's error and possibly sends it as a OSC packet"""
error_code = SingleRegister()
network_id = self.remote_id
if network_id is None:
self.pozyx.getErrorCode(error_code)
print("ERROR %s, local error code %s" % (operation, str(error_code)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message("/error", [operation, 0, error_code[0]])
return
status = self.pozyx.getErrorCode(error_code, self.remote_id)
if status == POZYX_SUCCESS:
print("ERROR %s on ID %s, error code %s" %
(operation, "0x%0.4x" % network_id, str(error_code)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message(
"/error", [operation, network_id, error_code[0]])
else:
self.pozyx.getErrorCode(error_code)
print("ERROR %s, couldn't retrieve remote error code, local error code %s" %
(operation, str(error_code)))
if self.osc_udp_client is not None:
self.osc_udp_client.send_message("/error", [operation, 0, -1])
# should only happen when not being able to communicate with a remote Pozyx.
if __name__ == '__main__':
# shortcut to not have to find out the port yourself
serial_port = get_serial_ports()[0].device
remote_id = 0x6110 # remote device network ID
remote = True # whether to use a remote device
# if not remote:
# remote_id = None
index = 0
previous_cycle_time = 0
current_cycle_time = 0
attributes_to_log = ["acceleration"]
to_use_file = False
filename = None
"""User input configuration section, comment out to use above settings"""
remote = UserInput.use_remote()
remote_id = UserInput.get_remote_id(remote)
to_use_file = UserInput.use_file()
filename = UserInput.get_filename(to_use_file)
attributes_to_log = UserInput.get_multiple_attributes_to_log()
use_processing = True
ip = "127.0.0.1"
network_port = 8888
anchors = [DeviceCoordinates(0x6863, 1, Coordinates(0, 0, 2000)),
DeviceCoordinates(0x615a, 1, Coordinates(0, 18288, 1000)),
DeviceCoordinates(0x607c, 1, Coordinates(18288, 0, 1000)),
DeviceCoordinates(0x6134, 1, Coordinates(18288, 18288, 2000))]
# algorithm = POZYX_POS_ALG_UWB_ONLY # positioning algorithm to use
algorithm = POZYX_POS_ALG_TRACKING # tracking positioning algorithm
dimension = POZYX_3D # positioning dimension
height = 1000 # height of device, required in 2.5D positioning
pozyx = PozyxSerial(serial_port)
osc_udp_client = SimpleUDPClient(ip, network_port)
o = Orientation3D(pozyx, osc_udp_client, anchors, algorithm, dimension, height, remote_id)
o.setup()
logfile = None
if to_use_file:
logfile = open(filename, 'a')
FileWriting.write_sensor_and_position_header_to_file(logfile)
start = ConsoleLogging.get_time()
try:
while True:
# updates elapsed time and time difference
elapsed = ConsoleLogging.get_elapsed_time(ConsoleLogging, start)
previous_cycle_time = current_cycle_time
current_cycle_time = elapsed
time_difference = current_cycle_time - previous_cycle_time
            # store the loop results as a tuple or an error message
loop_results = o.loop()
if type(loop_results) == tuple:
one_cycle_sensor_data, one_cycle_position = loop_results
formatted_data_dictionary = ConsoleLogging.format_sensor_data(
one_cycle_sensor_data, attributes_to_log)
if type(formatted_data_dictionary) == dict:
formatted_data_dictionary["Position"] = [
"x:", one_cycle_position.x, "y:", one_cycle_position.y, "z:", one_cycle_position.z]
ConsoleLogging.log_sensor_data_to_console(index, elapsed, formatted_data_dictionary)
if to_use_file:
FileWriting.write_sensor_and_position_data_to_file(
index, elapsed, time_difference,
logfile, one_cycle_sensor_data, one_cycle_position)
            # if the loop didn't return a tuple, it returned an error string
else:
error_string = loop_results
ConsoleLogging.print_data_error_message(index, elapsed, error_string)
index += 1 # increment data index
    # this allows Windows users to exit the while loop by pressing ctrl+c
except KeyboardInterrupt:
pass
if to_use_file:
logfile.close()
| 2.71875 | 3 |
hdfs_kernel/exceptions.py | Jasper912/jupyter-hdfs-kernel | 3 | 5663 | <reponame>Jasper912/jupyter-hdfs-kernel
#!/usr/bin/env python
# -*- coding=utf-8 -*-
#
# Author: huangnj
# Time: 2019/09/27
import traceback
from functools import wraps
from hdfs_kernel.constants import EXPECTED_ERROR_MSG, INTERNAL_ERROR_MSG
from hdfs.util import HdfsError
# == EXCEPTIONS ==
class SessionManagementException(Exception):
pass
class CommandNotAllowedException(Exception):
pass
class CommandExecuteException(Exception):
pass
# option parse Error
class OptionParsingError(RuntimeError):
pass
class OptionParsingExit(Exception):
def __init__(self, status, msg):
self.msg = msg
self.status = status
# == DECORATORS FOR EXCEPTION HANDLING ==
EXPECTED_EXCEPTIONS = [HdfsError, SessionManagementException, CommandNotAllowedException,
CommandExecuteException, OptionParsingExit, OptionParsingError]
def handle_expected_exceptions(f):
"""A decorator that handles expected exceptions. Self can be any object with
an "ipython_display" attribute.
Usage:
@handle_expected_exceptions
def fn(self, ...):
etc..."""
exceptions_to_handle = tuple(EXPECTED_EXCEPTIONS)
# Notice that we're NOT handling e.DataFrameParseException here. That's because DataFrameParseException
# is an internal error that suggests something is wrong with LivyClientLib's implementation.
@wraps(f)
def wrapped(self, *args, **kwargs):
try:
out = f(self, *args, **kwargs)
except exceptions_to_handle as err:
# Do not log! as some messages may contain private client information
self.send_error(EXPECTED_ERROR_MSG.format(err))
return None
else:
return out
return wrapped
def wrap_unexpected_exceptions(f, execute_if_error=None):
"""A decorator that catches all exceptions from the function f and alerts the user about them.
    Self can be any object with a "logger" attribute and a "send_error" method.
All exceptions are logged as "unexpected" exceptions, and a request is made to the user to file an issue
at the Github repository. If there is an error, returns None if execute_if_error is None, or else
returns the output of the function execute_if_error.
Usage:
@wrap_unexpected_exceptions
def fn(self, ...):
..etc """
@wraps(f)
def wrapped(self, *args, **kwargs):
try:
out = f(self, *args, **kwargs)
except Exception as e:
self.logger.error(u"ENCOUNTERED AN INTERNAL ERROR: {}\n\tTraceback:\n{}".format(e, traceback.format_exc()))
self.send_error(INTERNAL_ERROR_MSG.format(e))
return None if execute_if_error is None else execute_if_error()
else:
return out
return wrapped
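
# Hedged usage sketch (added for illustration): a minimal object providing the
# "logger" and "send_error" attributes the decorators rely on; every name below
# is a placeholder.
#
#     class DummyHandler(object):
#         def __init__(self, logger):
#             self.logger = logger
#
#         def send_error(self, msg):
#             print(msg)
#
#         @wrap_unexpected_exceptions
#         @handle_expected_exceptions
#         def run(self, command):
#             raise CommandExecuteException("failed: {}".format(command))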
| 2.25 | 2 |
dashboard/tests/test_inventory.py | vishalvvr/transtats | 0 | 5664 | # Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from fixture import DjangoFixture
from fixture.style import NamedDataStyle
from fixture.django_testcase import FixtureTestCase
from dashboard.managers.inventory import InventoryManager
from dashboard.models import Product
from dashboard.tests.testdata.db_fixtures import (
LanguageData, LanguageSetData, PlatformData, ProductData, ReleaseData
)
db_fixture = DjangoFixture(style=NamedDataStyle())
class InventoryManagerTest(FixtureTestCase):
inventory_manager = InventoryManager()
fixture = db_fixture
datasets = [LanguageData, LanguageSetData, PlatformData, ProductData, ReleaseData]
def test_get_locales(self):
"""
Test get_locales
"""
japanese_locale = self.inventory_manager.get_locales(pick_locales=['ja_JP'])
self.assertEqual(len(japanese_locale), 1)
self.assertEqual(japanese_locale[0].lang_name, 'Japanese')
self.assertEqual(japanese_locale[0].locale_alias, 'ja')
self.assertEqual(japanese_locale[0].locale_script, 'Hani')
def test_get_active_locales_count(self):
"""
Test get_active_locales_count
"""
active_locales = self.inventory_manager.get_active_locales_count()
self.assertEqual(active_locales, 3)
def test_get_locale_alias(self):
"""
Test get_locale_alias
"""
locale_alias = self.inventory_manager.get_locale_alias('fr_FR')
self.assertEqual(locale_alias, 'fr')
locale_alias = self.inventory_manager.get_locale_alias('de_DE')
self.assertEqual(locale_alias, 'de_DE')
def test_get_alias_locale(self):
"""
Test get_alias_locale
"""
alias_locale = self.inventory_manager.get_alias_locale('fr')
self.assertEqual(alias_locale, 'fr_FR')
alias_locale = self.inventory_manager.get_alias_locale('de_DE')
self.assertEqual(alias_locale, 'de_DE')
def test_get_locales_set(self):
"""
Test get_locales_set
"""
active_locales, inactive_locales, aliases = \
self.inventory_manager.get_locales_set()
self.assertEqual(len(active_locales), 3)
self.assertEqual(len(inactive_locales), 1)
self.assertEqual(len(aliases), 4)
def test_get_locale_lang_tuple(self):
"""
Test get_locale_lang_tuple
"""
ru_tuple = ('ru_RU', 'Russian')
fr_tuple = ('fr_FR', 'French')
locale_lang_tuple = self.inventory_manager.get_locale_lang_tuple()
self.assertEqual(len(locale_lang_tuple), 3)
locale_lang_tuple = self.inventory_manager.get_locale_lang_tuple(locales=['fr_FR', 'ru_RU'])
self.assertEqual(len(locale_lang_tuple), 2)
self.assertTupleEqual(locale_lang_tuple[0], ru_tuple)
self.assertTupleEqual(locale_lang_tuple[1], fr_tuple)
def test_get_langset(self):
"""
        Test get_langset
"""
lang_set = self.inventory_manager.get_langset(langset_slug='custom-set')
self.assertEqual(lang_set.lang_set_name, 'Custom Set')
self.assertEqual(lang_set.lang_set_color, 'Peru')
def test_get_langsets(self):
"""
Test get_langsets
"""
lang_sets = self.inventory_manager.get_langsets(
fields=['lang_set_name', 'locale_ids']
)
self.assertEqual(len(lang_sets), 2)
self.assertNotIn('lang_set_color', vars(lang_sets[0]))
self.assertListEqual(lang_sets[0].locale_ids, ['fr_FR', 'ja_JP'])
def test_get_locale_groups(self):
"""
Test get_locale_groups
"""
locale_groups = self.inventory_manager.get_locale_groups('ja_JP')
self.assertDictEqual(locale_groups, {'ja_JP': ['custom-set', 'f27-set']})
def test_get_all_locales_groups(self):
"""
Test get_all_locales_groups
"""
groups_of_all_locales = self.inventory_manager.get_all_locales_groups()
self.assertDictEqual(groups_of_all_locales,
{'ja_JP': ['custom-set', 'f27-set'], 'fr_FR': ['custom-set', 'f27-set'],
'ru_RU': ['f27-set'], 'ko_KR': []})
def test_get_translation_platforms(self):
"""
Test get_translation_platforms
"""
transplatforms = self.inventory_manager.get_translation_platforms(engine='zanata')
self.assertEqual(transplatforms[1].api_url, 'https://translate.zanata.org')
self.assertEqual(transplatforms[1].platform_slug, 'ZNTAPUB')
def test_get_ci_platforms(self):
"""
        Test get_translation_platforms with ci=True (CI platforms)
"""
ciplatforms = self.inventory_manager.get_translation_platforms(ci=True)
self.assertEqual(ciplatforms[0].api_url, 'https://cloud.memsource.com/web')
self.assertEqual(ciplatforms[0].platform_slug, 'MSRCPUB')
def test_get_transplatforms_set(self):
"""
Test get_transplatforms_set
"""
active_platforms, inactive_platforms = self.inventory_manager.get_transplatforms_set()
self.assertEqual(len(active_platforms), 3)
self.assertEqual(len(inactive_platforms), 0)
def test_get_engine_from_slug(self):
"""
Test get_engine_from_slug
"""
platform_engine = self.inventory_manager.get_engine_from_slug(
PlatformData.platform_zanata_fedora.platform_slug
)
self.assertEqual(platform_engine, 'zanata')
platform_engine = self.inventory_manager.get_engine_from_slug(
PlatformData.platform_memsource_cloud.platform_slug
)
self.assertEqual(platform_engine, 'memsource')
def test_get_transplatform_slug_url(self):
"""
test get_transplatform_slug_url
"""
slug_url_tuple = self.inventory_manager.get_transplatform_slug_url()
self.assertTupleEqual(slug_url_tuple, (('MSRCPUB', 'https://cloud.memsource.com/web'),
('ZNTAFED', 'https://fedora.zanata.org'),
('ZNTAPUB', 'https://translate.zanata.org')))
def test_get_relbranch_locales(self):
"""
Test get_relbranch_locales
"""
relbranch_locales = self.inventory_manager.get_relbranch_locales("nonexisting-relbranch")
self.assertFalse(relbranch_locales)
relbranch_locales = self.inventory_manager.get_relbranch_locales('fedora-27')
self.assertListEqual(relbranch_locales, ['ja_JP', 'fr_FR', 'ru_RU'])
def test_get_release_streams(self):
"""
Test get_release_streams
"""
relstream_fedora = Product.objects.get(product_name='Fedora')
relstream_rhel = Product.objects.get(product_name='RHEL')
release_streams = self.inventory_manager.get_release_streams()
self.assertEqual(len(release_streams), 2)
self.assertIn(relstream_fedora, release_streams)
self.assertIn(relstream_rhel, release_streams)
release_streams = self.inventory_manager.get_release_streams(stream_slug='RHEL')
self.assertEqual(len(release_streams), 1)
self.assertIn(relstream_rhel, release_streams)
release_streams = self.inventory_manager.get_release_streams(only_active=True)
self.assertEqual(len(release_streams), 1)
self.assertIn(relstream_fedora, release_streams)
def test_get_relstream_slug_name(self):
"""
Test get_relstream_slug_name
"""
relstream_slug_name_tuple = self.inventory_manager.get_relstream_slug_name()
self.assertEqual(len(relstream_slug_name_tuple), 1)
self.assertTupleEqual(relstream_slug_name_tuple[0], ('fedora', 'Fedora'))
def test_get_relstream_build_tags(self):
"""
Test get_relstream_build_tags
"""
tags = self.inventory_manager.get_relstream_build_tags(stream_slug='fedora')
self.assertIsInstance(tags, dict)
self.assertDictEqual(tags, {'fedora': ['f28', 'f29', 'rawhide']})
| 1.960938 | 2 |
web_console_v2/api/fedlearner_webconsole/rpc/server.py | nolanliou/fedlearner | 0 | 5665 | # Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=broad-except, cyclic-import
import logging
import threading
from concurrent import futures
import grpc
from fedlearner_webconsole.proto import (
service_pb2, service_pb2_grpc,
common_pb2
)
from fedlearner_webconsole.db import db
from fedlearner_webconsole.project.models import Project
from fedlearner_webconsole.workflow.models import (
Workflow, WorkflowState, TransactionState
)
from fedlearner_webconsole.exceptions import (
UnauthorizedException
)
class RPCServerServicer(service_pb2_grpc.WebConsoleV2ServiceServicer):
def __init__(self, server):
self._server = server
def CheckConnection(self, request, context):
try:
return self._server.check_connection(request)
except UnauthorizedException as e:
return service_pb2.CheckConnectionResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNAUTHORIZED,
msg=repr(e)))
except Exception as e:
logging.error('CheckConnection rpc server error: %s', repr(e))
return service_pb2.CheckConnectionResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNKNOWN_ERROR,
msg=repr(e)))
def UpdateWorkflowState(self, request, context):
try:
return self._server.update_workflow_state(request)
except UnauthorizedException as e:
return service_pb2.UpdateWorkflowStateResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNAUTHORIZED,
msg=repr(e)))
except Exception as e:
logging.error('UpdateWorkflowState rpc server error: %s', repr(e))
return service_pb2.UpdateWorkflowStateResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNKNOWN_ERROR,
msg=repr(e)))
def GetWorkflow(self, request, context):
try:
return self._server.get_workflow(request)
except UnauthorizedException as e:
return service_pb2.GetWorkflowResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNAUTHORIZED,
msg=repr(e)))
except Exception as e:
logging.error('GetWorkflow rpc server error: %s', repr(e))
return service_pb2.GetWorkflowResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_UNKNOWN_ERROR,
msg=repr(e)))
class RpcServer(object):
def __init__(self):
self._lock = threading.Lock()
self._started = False
self._server = None
self._app = None
def start(self, app):
assert not self._started, "Already started"
self._app = app
listen_port = app.config.get('GRPC_LISTEN_PORT', 1999)
with self._lock:
self._server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10))
service_pb2_grpc.add_WebConsoleV2ServiceServicer_to_server(
RPCServerServicer(self), self._server)
self._server.add_insecure_port('[::]:%d' % listen_port)
self._server.start()
self._started = True
def stop(self):
if not self._started:
return
with self._lock:
self._server.stop(None).wait()
del self._server
self._started = False
def check_auth_info(self, auth_info):
logging.debug('auth_info: %s', auth_info)
project = Project.query.filter_by(
name=auth_info.project_name).first()
if project is None:
raise UnauthorizedException('Invalid project')
project_config = project.get_config()
# TODO: fix token verification
# if project_config.token != auth_info.auth_token:
# raise UnauthorizedException('Invalid token')
if project_config.domain_name != auth_info.target_domain:
raise UnauthorizedException('Invalid domain')
source_party = None
for party in project_config.participants:
if party.domain_name == auth_info.source_domain:
source_party = party
if source_party is None:
raise UnauthorizedException('Invalid domain')
return project, source_party
def check_connection(self, request):
with self._app.app_context():
_, party = self.check_auth_info(request.auth_info)
logging.debug(
'received check_connection from %s', party.domain_name)
return service_pb2.CheckConnectionResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_SUCCESS))
def update_workflow_state(self, request):
with self._app.app_context():
project, party = self.check_auth_info(request.auth_info)
logging.debug(
'received update_workflow_state from %s: %s',
party.domain_name, request)
name = request.workflow_name
state = WorkflowState(request.state)
target_state = WorkflowState(request.target_state)
transaction_state = TransactionState(request.transaction_state)
workflow = Workflow.query.filter_by(
name=request.workflow_name,
project_id=project.id).first()
if workflow is None:
assert state == WorkflowState.NEW
assert target_state == WorkflowState.READY
workflow = Workflow(
name=name,
project_id=project.id,
state=state, target_state=target_state,
transaction_state=transaction_state)
db.session.add(workflow)
db.session.commit()
db.session.refresh(workflow)
workflow.update_state(
state, target_state, transaction_state)
db.session.commit()
return service_pb2.UpdateWorkflowStateResponse(
status=common_pb2.Status(
code=common_pb2.STATUS_SUCCESS),
transaction_state=workflow.transaction_state.value)
def _filter_workflow(self, workflow, modes):
# filter peer-readable and peer-writable variables
if workflow is None:
return
var_list = [
i for i in workflow.variables if i.access_mode in modes]
workflow.ClearField('variables')
for i in var_list:
workflow.variables.append(i)
for job_def in workflow.job_definitions:
var_list = [
i for i in job_def.variables if i.access_mode in modes]
job_def.ClearField('variables')
for i in var_list:
job_def.variables.append(i)
def get_workflow(self, request):
with self._app.app_context():
project, party = self.check_auth_info(request.auth_info)
workflow = Workflow.query.filter_by(
name=request.workflow_name,
project_id=project.id).first()
assert workflow is not None
config = workflow.get_config()
self._filter_workflow(
config,
[
common_pb2.Variable.PEER_READABLE,
common_pb2.Variable.PEER_WRITABLE
])
# job details
jobs = [service_pb2.JobDetail(
name=job.name, state=job.get_state_for_front())
for job in workflow.get_jobs()]
# fork info
forked_from = ''
if workflow.forked_from:
forked_from = Workflow.query.get(workflow.forked_from).name
return service_pb2.GetWorkflowResponse(
name=request.workflow_name,
status=common_pb2.Status(
code=common_pb2.STATUS_SUCCESS),
config=config,
jobs=jobs,
state=workflow.state.value,
target_state=workflow.target_state.value,
transaction_state=workflow.transaction_state.value,
forkable=workflow.forkable,
forked_from=forked_from,
reuse_job_names=workflow.get_reuse_job_names(),
peer_reuse_job_names=workflow.get_peer_reuse_job_names(),
fork_proposal_config=workflow.get_fork_proposal_config()
)
rpc_server = RpcServer()
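# Minimal wiring sketch (the app factory import below is an assumption made
# for illustration; only ``start``/``stop`` and the GRPC_LISTEN_PORT config key
# come from the class above):
#
#     from fedlearner_webconsole.app import create_app  # hypothetical import
#     app = create_app()
#     app.config['GRPC_LISTEN_PORT'] = 1999
#     rpc_server.start(app)
#     ...
#     rpc_server.stop()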
| 1.804688 | 2 |
chapter15/async_aiohttp.py | haru-256/ExpertPython3_Source | 9 | 5666 | <reponame>haru-256/ExpertPython3_Source<filename>chapter15/async_aiohttp.py<gh_stars>1-10
"""
Sample code from the "Asynchronous programming" section:
how to send HTTP requests asynchronously using aiohttp.
"""
import asyncio
import time
import aiohttp
from asyncrates import get_rates
SYMBOLS = ('USD', 'EUR', 'PLN', 'NOK', 'CZK')
BASES = ('USD', 'EUR', 'PLN', 'NOK', 'CZK')
async def fetch_rates(session, place):
return await get_rates(session, place)
async def present_result(result):
base, rates = (await result)
rates_line = ", ".join(
[f"{rates[symbol]:7.03} {symbol}" for symbol in SYMBOLS]
)
print(f"1 {base} = {rates_line}")
async def main():
async with aiohttp.ClientSession() as session:
await asyncio.wait([
asyncio.create_task(present_result(fetch_rates(session, base)))
for base in BASES
])
if __name__ == "__main__":
started = time.time()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
elapsed = time.time() - started
print()
    print(f"Elapsed time: {elapsed:.2f}s")
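    # Note: on Python 3.7+ the explicit event-loop handling above can be
    # replaced with a single call that creates and closes the loop itself:
    #
    #     asyncio.run(main())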
| 2.953125 | 3 |
experiments/nginx/run.py | OleksiiOleksenko/intel_mpx_explained | 15 | 5667 | #!/usr/bin/env python
from __future__ import print_function
import logging
import os
import signal
from time import sleep
from subprocess import Popen, PIPE
import socket
from core.common_functions import *
from core.run import Runner
class NginxPerf(Runner):
"""
Runs Nginx
"""
name = "nginx"
exp_name = "nginx"
bench_suite = False
benchmarks = {"nginx": ""}
test_benchmarks = {"nginx": ""}
client_numbers = [1, 5, 9, 13, 17, 21, 25, 29]
ab = "ab"
duration = 20 # in seconds
requests_num = 1000000 # some huge number so we always take 20 seconds
def __init__(self, *args, **kwargs):
super(NginxPerf, self).__init__(*args, **kwargs)
if self.config.input_type == "test":
self.client_numbers = (1,)
def per_benchmark_action(self, type_, benchmark, args):
self.log_build(type_, benchmark)
build_path = "/".join([self.dirs["build"], type_])
self.current_exe = build_path + '/sbin/' + benchmark
build_benchmark(
b=benchmark,
t=type_,
makefile=self.dirs['bench_src'],
build_path=build_path
)
# generate an input file
with open(build_path + "/html/index.html", "w") as f:
f.write("<html><body><h1>It works!</h1>")
random_text = my_check_output("lorem -p 10")
f.write(random_text)
f.write("</body></html>")
# config Nginx
replace_in_file(build_path + "/conf/nginx.conf", "listen 80;", "listen 8080;", ignoreifcontains=True)
replace_in_file(build_path + "/conf/nginx.conf", "worker_processes 1;", "worker_processes auto;", ignoreifcontains=True)
def per_thread_action(self, type_, benchmark, args, thread_num):
servercmd = "{action} {exe} -g \"daemon off;\"".format(
action=self.action,
exe=self.current_exe,
)
logging.debug("Server command: %s" % servercmd)
# by default start client on local machine
if env.get("CLIENT_MACHINE"):
ssh = "ssh %s" % env["CLIENT_MACHINE"]
logging.debug("Using remote client: %s" % env["CLIENT_MACHINE"])
else:
ssh = ""
logging.debug("Using local client (use CLIENT_MACHINE env var to specify remote client)")
myip = [l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
with open(self.dirs["log_file"], "a") as f:
for client_number in self.client_numbers:
# start server
my_check_output("pkill -9 nginx > /dev/null || true") # for sanity
sleep(1)
server = Popen(servercmd, shell=True, stdout=PIPE, stderr=PIPE, preexec_fn=os.setsid)
sleep(1)
# start client (possibly on another machine)
msg = self.run_message.format(input=client_number, **locals())
self.log_run(msg)
f.write("[run] " + msg + "\n")
out = my_check_output("{ssh} {ab} -k -t {duration} -n {requests_num} -c {client_number} http://{myip}:8080/".format(
ab=self.ab,
duration=self.duration,
requests_num=self.requests_num,
**locals()
))
f.write("===== client =====\n")
f.write(out)
# log and stop server
f.write("===== return code is %s =====\n" % str(server.poll()))
try:
os.killpg(server.pid, signal.SIGINT)
except:
pass
f.write("===== stdout =====\n")
for line in server.stdout:
f.write(line.decode('utf-8'))
f.write("===== stderr =====\n")
for line in server.stderr:
f.write(line.decode('utf-8'))
sleep(1)
def set_logging(self):
self.num_benchmarks = len(self.benchmarks) * len(self.types) * self.num_runs * len(self.client_numbers)
logging.info("Total runs: %d" % self.num_benchmarks)
def main(benchmark_name=None):
runner = NginxPerf()
runner.main()
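# Standalone entry point sketch: the surrounding experiment framework normally
# imports this module and calls main() itself (an assumption based on the
# signature above), but the guard below also allows running the file directly.
if __name__ == "__main__":
    main()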
| 2.359375 | 2 |
tavi/test/unit/base/document_no_fields_test.py | verdammelt/tavi | 0 | 5668 | <gh_stars>0
# -*- coding: utf-8 -*-
import unittest
from tavi.base.documents import BaseDocument
class BaseDocumentNoFieldsTest(unittest.TestCase):
class NoFieldsSample(BaseDocument):
pass
def setUp(self):
super(BaseDocumentNoFieldsTest, self).setUp()
self.no_fields_sample = self.NoFieldsSample()
def test_get_fields(self):
self.assertEqual([], self.no_fields_sample.fields)
def test_get_errors(self):
self.assertEqual(0, self.no_fields_sample.errors.count)
def test_valid(self):
self.assertEqual(True, self.no_fields_sample.valid)
def test_get_field_values(self):
self.assertEqual({}, self.no_fields_sample.field_values)
| 2.328125 | 2 |
seqparse/test/test_seqparse.py | hoafaloaf/seqparse | 1 | 5669 | """Test file sequence discovery on disk."""
# "Future" Libraries
from __future__ import print_function
# Standard Libraries
import os
import unittest
# Third Party Libraries
import mock
from builtins import range
from future.utils import lrange
from . import (DirEntry, generate_entries, initialise_mock_scandir_data,
mock_scandir_deep)
from .. import (__version__, get_parser, get_sequence, get_version, invert,
validate_frame_sequence)
from ..sequences import FileSequence, FrameChunk, FrameSequence
###############################################################################
# class: TestSeqparseModule
class TestSeqparseModule(unittest.TestCase):
"""Test file discovery on the seqparse module."""
_test_ext = "exr"
_test_file_name = "TEST_DIR"
_test_root = "test_dir"
_singletons = ["singleton0.jpg", "singleton1.jpg"]
def setUp(self):
"""Set up the test case."""
pass
@mock.patch("seqparse.seqparse.scandir")
def test_singletons(self, mock_api_call):
"""Seqparse: Test file singleton discovery from disk location."""
# Expected outputs ...
output = [os.path.join(self._test_root, x) for x in self._singletons]
entries = list()
for file_name in output:
entries.append(DirEntry(file_name))
mock_api_call.return_value = iter(entries)
parser = get_parser()
parser.scan_path(self._test_root)
file_names = parser.singletons
self.assertIn(self._test_root, file_names)
self.assertEqual(self._test_root, file_names[self._test_root].path)
self.assertEqual(len(file_names), 1)
self.assertEqual(
len(file_names[self._test_root]), len(self._singletons))
self.assertEqual(
sorted(self._singletons), sorted(file_names[self._test_root]))
# Check parser output ...
self.assertEqual(sorted(map(str, parser.output())), output)
# Test seqs_only option ...
self.assertEqual(sorted(parser.output(seqs_only=True)), [])
@mock.patch("seqparse.seqparse.scandir")
def test_single_padded_file(self, mock_api_call):
"""Seqparse: Test single padded file sequence discovery."""
frames = {4: [1]}
# Expected outputs ...
frame_seq_output = "0001"
file_seq_output = ".".join(
(self._test_file_name, frame_seq_output, self._test_ext))
final_output = os.path.join(self._test_root, file_seq_output)
input_entries = generate_entries(
ext=self._test_ext,
frames=frames,
name=self._test_file_name,
root=self._test_root)
mock_api_call.return_value = iter(input_entries)
parser = get_parser()
parser.scan_path(self._test_root)
data = parser.sequences
test_output = list(parser.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
# Check the structure of the sequences property.
self.assertIn(self._test_root, data)
self.assertEqual(len(data), 1)
self.assertIn(self._test_file_name, data[self._test_root])
self.assertEqual(len(data[self._test_root]), 1)
# Now check the file sequence itself.
file_seq = data[self._test_root][self._test_file_name]
test_output = list(file_seq.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
self.assertIn(self._test_ext, file_seq)
self.assertEqual(len(file_seq), 1)
self.assertTrue(4 in file_seq[self._test_ext])
self.assertEqual(len(file_seq[self._test_ext]), 1)
# And finally, the file sequence.
file_seq = file_seq[self._test_ext][4]
self.assertEqual(len(file_seq), len(frames[4]))
self.assertEqual(str(file_seq), final_output)
@mock.patch("seqparse.seqparse.scandir")
def test_simple_sequence(self, mock_api_call):
"""Seqparse: Test simple file sequence discovery."""
frames = {4: [0, 1, 2, 3, 4]}
# Expected outputs ...
frame_seq_output = "0000-0004"
file_seq_output = ".".join(
(self._test_file_name, frame_seq_output, self._test_ext))
final_output = os.path.join(self._test_root, file_seq_output)
input_entries = generate_entries(
ext=self._test_ext,
frames=frames,
name=self._test_file_name,
root=self._test_root)
mock_api_call.return_value = iter(input_entries)
parser = get_parser()
parser.scan_path(self._test_root)
data = parser.sequences
test_output = list(parser.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
# Check the structure of the sequences property.
self.assertIn(self._test_root, data)
self.assertEqual(len(data), 1)
self.assertIn(self._test_file_name, data[self._test_root])
self.assertEqual(len(data[self._test_root]), 1)
# Now check the file sequence itself.
file_seq = data[self._test_root][self._test_file_name]
test_output = list(file_seq.output())
self.assertEqual(len(test_output), 1)
self.assertEqual(str(test_output[0]), final_output)
self.assertIn(self._test_ext, file_seq)
self.assertEqual(len(file_seq), 1)
self.assertTrue(4 in file_seq[self._test_ext])
self.assertEqual(len(file_seq[self._test_ext]), 1)
# And finally, the file sequence.
file_seq = file_seq[self._test_ext][4]
self.assertEqual(len(file_seq), len(frames[4]))
self.assertEqual(str(file_seq), final_output)
@mock.patch("seqparse.seqparse.scandir")
def test_complex_sequence(self, mock_api_call):
"""Seqparse: Test complex file sequence discovery."""
frames = {
1: [5, 6, 7, 8, 114, 199, 2000],
3: [8, 9, 10, 12],
4: [0, 1, 2, 3, 4, 5, 6, 8, 10, 12, 101]
}
input_entries = generate_entries(
ext=self._test_ext,
frames=frames,
name=self._test_file_name,
root=self._test_root)
# Expected output frame sequences. Note how frames 114, 199 move to the
# "pad 3" group and 2000 moves to the "pad 4" group!
output_seqs = {
1: "5-8",
3: "008-010,012,114,199",
4: "0000-0006,0008-0012x2,0101,2000"
}
# Expected final output (where "/" is os.sep):
# test_dir/TEST_DIR.5-8.exr
# test_dir/TEST_DIR.008-010,012,114,199.exr
# test_dir/TEST_DIR.0000-0006,0008-0012x2,0101,2000.exr
mock_api_call.return_value = iter(input_entries)
parser = get_parser()
parser.scan_path(self._test_root)
final_output = list()
for pad, seq_frames in sorted(output_seqs.items()):
bits = (self._test_file_name, seq_frames, self._test_ext)
output_seqs[pad] = os.path.join(self._test_root, ".".join(bits))
final_output.append(output_seqs[pad])
data = parser.sequences
# Check the structure of the sequences property.
self.assertIn(self._test_root, data)
self.assertEqual(len(data), 1)
self.assertIn(self._test_file_name, data[self._test_root])
self.assertEqual(len(data[self._test_root]), 1)
# Now check the file sequence itself.
file_seq = data[self._test_root][self._test_file_name]
test_output = list(file_seq.output())
self.assertEqual(len(test_output), 3)
self.assertEqual(list(map(str, test_output)), final_output)
self.assertIn(self._test_ext, file_seq)
self.assertEqual(len(file_seq), 1)
self.assertEqual(set(file_seq[self._test_ext]), set(output_seqs))
# And finally, the file sequences.
for pad in sorted(output_seqs):
self.assertEqual(output_seqs[pad],
str(file_seq[self._test_ext][pad]))
@mock.patch("seqparse.seqparse.scandir")
def test_nested_sequences(self, mock_api_call):
"""Seqparse: Test file sequence discovery in nested directories."""
mock_api_call.side_effect = mock_scandir_deep
print("\n\n SEQUENCES\n ---------")
initialise_mock_scandir_data(self._test_root)
parser = get_parser()
parser.scan_path(self._test_root)
for seq in parser.output():
print(" ", seq)
print("\n MAX LEVELS\n ----------")
for max_levels in range(-1, 4):
initialise_mock_scandir_data(self._test_root)
parser = get_parser()
parser.scan_path(self._test_root, max_levels=max_levels)
expected_seqs = max_levels + 2
if max_levels == -1:
expected_seqs = 5
seqs = list(parser.output())
blurb = " o max_levels == {:d}: {:d} ({:d} expected) entries"
print(blurb.format(max_levels, len(seqs), expected_seqs))
for seq in seqs:
print(" -", seq)
self.assertEqual(len(seqs), expected_seqs)
print("\n MIN LEVELS\n ----------")
for min_levels in range(-1, 4):
initialise_mock_scandir_data(self._test_root)
parser = get_parser()
parser.scan_path(self._test_root, min_levels=min_levels)
expected_seqs = 3 - min_levels
if min_levels == -1:
expected_seqs = 5
seqs = list(parser.output())
blurb = " o min_levels == {:d}: {:d} ({:d} expected) entries"
print(blurb.format(min_levels, len(seqs), expected_seqs))
for seq in seqs:
print(" -", seq)
self.assertEqual(len(seqs), expected_seqs)
print("")
def test_valid_frame_sequences(self):
"""Seqparse: Test validity of simple frame ranges."""
good_frame_seqs = [
"0001", ",0001", "0001,", "0001-0001", "0001-0001x0",
"0001-0003x3", "0001,0003", "0001,,0003", "0001-0010",
"0001-0010x0", "0001-0011x2", "0001-0012x2", "0001-0005,0007-0010",
"0001-0005x2,0007-0010", "0001-0005,0007-0011x2",
"0001-0005,0006,0008-0012x2", "0001,0003-0007,0009-0015x2",
"3,1,5,7", "01-05,03-07"
]
bad_frame_seqs = [
"-0001", "0001-", "0001x2", "x2", "0001,0003x2", "0001-0005x",
"0010-0001", "x", ",", ",,", ""
]
print("\n\n GOOD SEQUENCES\n --------------")
for frame_seq in good_frame_seqs:
output = validate_frame_sequence(frame_seq)
print(' o {!r} --> {!r}'.format(frame_seq, output))
self.assertTrue(output)
print("\n BAD SEQUENCES\n -------------")
for frame_seq in bad_frame_seqs:
print(' o {!r}'.format(frame_seq))
self.assertFalse(validate_frame_sequence(frame_seq))
print("")
def test_add_file_sequence(self):
"""Seqparse: Test file sequence addition via seqparse.add_file."""
input_file = ".".join((self._test_file_name, "0005", self._test_ext))
input_file = os.path.join(self._test_root, input_file)
# Expected outputs ...
input_frame_seq = "0000-0004"
output_frame_seq = "0000-0005"
input_file_seq = ".".join(
(self._test_file_name, input_frame_seq, self._test_ext))
input_file_seq = os.path.join(self._test_root, input_file_seq)
output_file_seq = ".".join(
(self._test_file_name, output_frame_seq, self._test_ext))
output_file_seq = os.path.join(self._test_root, output_file_seq)
print("\n\n INPUT FILES\n -----------")
print(" o", input_file_seq)
print(" o", input_file)
parser = get_parser()
parser.add_file(input_file_seq)
parser.add_file(input_file)
output = list(parser.output())
print("\n OUTPUT FILES\n ------------")
for line in output:
print(" o", line)
print("\n EXPECTED OUTPUT\n ---------------")
print(" o", output_file_seq)
print("")
self.assertEqual(len(output), 1)
self.assertEqual(str(output[0]), output_file_seq)
input_frame_seq = "0000-0002,,0003-0005"
input_file_seq = ".".join(
(self._test_file_name, input_frame_seq, self._test_ext))
input_file_seq = os.path.join(self._test_root, input_file_seq)
print("\n INPUT FILES\n -----------")
print(" o", input_file_seq)
print(" o", input_file)
parser = get_parser()
parser.add_file(input_file_seq)
parser.add_file(input_file)
output = list(parser.output())
print("\n OUTPUT FILES\n ------------")
for line in output:
print(" o", line)
print("\n EXPECTED OUTPUT\n ---------------")
print(" o", output_file_seq)
print("")
self.assertEqual(len(output), 1)
self.assertEqual(str(output[0]), output_file_seq)
@mock.patch("seqparse.seqparse.scandir")
def test_inversion(self, mock_api_call):
"""Seqparse: Test usage of the "missing" option in Seqparse.output."""
file_path = os.path.join(self._test_root, self._test_file_name)
chunk_in = FrameChunk(first=1, last=11, step=2, pad=4)
fseq = FileSequence(
name=file_path, ext=self._test_ext, frames=chunk_in)
input_entries = [DirEntry(x) for x in fseq]
mock_api_call.return_value = input_entries
chunk_out = FrameChunk(first=2, last=10, step=2, pad=4)
expected = FileSequence(
name=file_path, ext=self._test_ext, frames=chunk_out)
parser = get_parser()
parser.scan_path(self._test_root)
inverted = list(parser.output(missing=True))
self.assertEqual(len(inverted), 1)
print("\n\n SEQUENCE\n --------")
print(" input files: ", fseq)
print(" expected files:", expected)
print(" inverted files:", inverted[0])
self.assertEqual(str(inverted[0]), str(expected))
fseq = FileSequence(
name=file_path, ext=self._test_ext, frames=[1, 2, 3, 4, 6], pad=4)
input_entries = [DirEntry(x) for x in fseq]
mock_api_call.return_value = input_entries
expected = FileSequence(
name=file_path, ext=self._test_ext, frames=[5], pad=4)
parser = get_parser()
parser.scan_path(self._test_root)
inverted = list(parser.output(missing=True))
self.assertEqual(len(inverted), 1)
print("\n\n SEQUENCE\n --------")
print(" input files: ", fseq)
print(" expected files:", expected)
print(" inverted files:", inverted[0])
self.assertEqual(str(inverted[0]), str(expected))
@mock.patch("seqparse.seqparse.scandir")
def test_scan_options(self, mock_api_call):
"""Seqparse: Make sure scan_options works as expected."""
frames = {4: (1, 2, 3, 4, 6)}
input_entries = generate_entries(
name="test", ext="py", frames=frames, root=self._test_root)
input_entries.extend(
generate_entries(
name=".test", ext="py", frames=frames, root=self._test_root))
input_entries.append(
DirEntry(os.path.join(self._test_root, "pony.py")))
mock_api_call.return_value = input_entries
parser = get_parser()
parser.scan_options["stat"] = True
parser.scan_path(self._test_root)
output = list(parser.output())
expected = [
os.path.join(self._test_root, "test.0001-0004,0006.py"),
os.path.join(self._test_root, "pony.py")
]
self.assertEqual(len(output), 2)
self.assertEqual(list(map(str, output)), expected)
self.assertEqual(output[0].ctime, 1490908340)
self.assertEqual(output[0].mtime, 1490908305)
self.assertEqual(output[0].size, 36520)
parser = get_parser()
parser.scan_options["all"] = True
parser.scan_path(self._test_root)
output = list(parser.output())
expected = [
os.path.join(self._test_root, ".test.0001-0004,0006.py"),
os.path.join(self._test_root, "test.0001-0004,0006.py"),
os.path.join(self._test_root, "pony.py")
]
self.assertEqual(len(output), 3)
self.assertEqual(list(map(str, output)), expected)
def test_api_calls(self):
"""Seqparse: Test API calls at root of module."""
chunk = FrameChunk(first=1, last=7, step=2, pad=4)
seq = get_sequence(lrange(1, 8, 2), pad=4)
self.assertTrue(isinstance(seq, FrameSequence))
self.assertEqual(str(seq), "0001-0007x2")
expected = FrameChunk(first=2, last=6, step=2, pad=4)
inverted = invert(chunk)
self.assertEqual(str(inverted), str(expected))
inverted = invert(seq)
self.assertEqual(str(inverted), str(expected))
with self.assertRaises(TypeError):
invert(get_parser())
self.assertEqual(get_version(), __version__)
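# Convenience guard so this file can also be executed directly; the project's
# regular test runner discovers the TestCase above on its own.
if __name__ == "__main__":
    unittest.main()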
| 2.578125 | 3 |
deliveroo_scraping.py | ragreener1/deliveroo-scraping | 0 | 5670 | <filename>deliveroo_scraping.py
import urllib.request
import pandas as pd
import sqlite3
import re
from bs4 import BeautifulSoup
# Parameters
postcodes_list = ["W1F7EY"]
db_name = "scraped.db"
# This is so that Deliveroo think the scraper is Google Chrome
# as opposed to a web scraper
hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11' +
'(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*' +
';q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
def process_menu(doc, url, tags_df, tag_type, restaurants, restaurants_to_tags,
menu_sections, menu_items):
# This function processes the menu
# This gets the restaurant_name by finding the <h1> tag with the CSS class
# restaurant_name
restaurant_name = doc.find("h1", class_="restaurant__name", text=True).text
# This gets the deliveroo_name by selecting the appropriate part from the
# URL
# This will fail on restaurants not in London
deliveroo_name = re.findall(
'(?<=https://deliveroo.co.uk/menu/london/)(.*)(?=\\?postcode=)',
url)[0]
# This adds this to the restaurants dataframe
# This isn't very efficient, if you were wanting to scrape large numbers
# you wouldn't want to use .append
restaurants = restaurants.append(
{"name": restaurant_name, "deliveroo_name": deliveroo_name},
ignore_index=True)
# This gets the restaurant_id by finding the index of what as inserted
# Again this isn't very efficient
restaurant_id = restaurants[
(restaurants == [restaurant_name, deliveroo_name]).all(
axis=1)].index[0]
restaurant_tags = []
# Deal with tags
# Start by finding all <small> tags with the CSS class tag
for tag in doc.find_all("small", class_="tag"):
# The second element of the <small> CSS class is the type of the tag
# this could be locale or food etc.
tagtype = tag['class'][1]
# The name of the tag is what is inside the <small>
name = tag.text
# See if the tagtype exists in the tag_type dataframe
type_matches = tag_type[(tag_type == [tagtype]).all(axis=1)]
# If it doesn't
if len(type_matches) == 0:
# Add it (again not very efficient)
tag_type = tag_type.append({"name": tagtype}, ignore_index=True)
# Update the matches
type_matches = tag_type[(tag_type == [tagtype]).all(axis=1)]
# See if the tag already exists in the tags_df dataframe
matches = tags_df[
(tags_df == [name, type_matches.index[0]]).all(axis=1)]
# If it doesn't
if len(matches) == 0:
# Add it
entry = {"name": name, "type": type_matches.index[0]}
tags_df = tags_df.append(entry, ignore_index=True)
matches = tags_df[(tags_df == [name, type_matches.index[0]]).all(
axis=1)]
# Add the tag to a list of tags for that restaurant
restaurant_tags.append(matches.index[0])
# For each tag
for tag in restaurant_tags:
# Add this to restaurants_to_tags df
restaurants_to_tags = restaurants_to_tags.append(
{"restaurant_id": restaurant_id, "tag_id": tag}, ignore_index=True)
# For each category (in the menu, e.g. Sides, Mains, Desserts, Drinks -
# different for every restaurant though!) process the menu items
# This is found by looking for <div> tags with the CSS class
# menu-index-page__menu-category
categories = doc.find_all("div", class_="menu-index-page__menu-category")
for category in categories:
# the category name is inside the h3 inside the div
category_name = category.h3.text
# Add the category to the menu_sections data frame. Again this isn't
# efficient.
menu_sections = menu_sections.append(
{"restaurant_id": restaurant_id, "name": category_name},
ignore_index=True)
# Get the id in the menu_sections data frame
category_id = menu_sections[
(menu_sections == [restaurant_id, category_name]).all(
axis=1)].index[0]
# Get each of the items in that category
category_items = []
# For each menu item. Found by looking for <div> inside the category
# with the CSS class menu-index-page__item_content
items_html = category.find_all("div",
class_="menu-index-page__item-content")
for menu_item in items_html:
# The name is the <h6> with the CSS class
# menu-index-page__item-title
item_name = \
menu_item.find("h6", class_="menu-index-page__item-title").text
# The price is the <span> with the CSS class
# menu-index-page__item-price. The £ symbol is dropped, it is then
# converted to a floating-point number (decimal), multiplied by 100
# so that it is in pence. It is then converted to an integer.
#
# https://stackoverflow.com/questions/3730019/why-not-use-double-or-float-to-represent-currency
price_as_text = \
menu_item.find("span", class_="menu-index-page__item-price")\
.text[1:]
price_as_float = float(price_as_text)
item_price = int(price_as_float * 100)
# If an item is popular it has a <span> with the CSS class
# menu-index-page__item-popular
# So this tries to find it, if it exists is_item_popular = True,
# False otherwise.
is_item_popular = menu_item.find(
"span", class_="menu-index-page__item-popular") is not None
# Add this menu_item to category_items
category_items.append(
{"menu_section_id": category_id,
"name": item_name,
"price_in_pence": item_price,
"is_popular": is_item_popular}
)
# Add all the menu items in that category to the menu_items data frame,
# this is more efficient than doing this one at a time
menu_items = menu_items.append(category_items, ignore_index=True)
# Return the updated dataframes
return (tags_df, tag_type, restaurants, restaurants_to_tags, menu_sections,
menu_items)
def get_restaurant_and_process_menu(url, tags_df, tag_type, restaurants,
restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs,
postcodes):
# This functions gets the restaurant and then processes its menu if it
# hasn't been processed before
# Get the deliveroo name from the url
deliveroo_name = re.findall(
'(?<=https://deliveroo.co.uk/menu/london/)(.*)(?=\\?postcode=)',
url)[0]
# If this restaurant hasn't been seen before
    if deliveroo_name not in restaurants['deliveroo_name'].values:
# Get the webpage
request = urllib.request.Request(url, headers=hdr)
page = urllib.request.urlopen(request)
soup = BeautifulSoup(page)
# Try and process the menu, if it doesn't work handle it nicely
try:
(tags_df, tag_type, restaurants, restaurants_to_tags,
menu_sections, menu_items) = process_menu(soup, url, tags_df,
tag_type, restaurants,
restaurants_to_tags,
menu_sections,
menu_items)
except Exception:
print(f"Fail on {url}")
# Get the postcode from the URL
    postcode = re.findall('(?<=\\?postcode=)(.*)', url)[0]
    # Find where it is in the postcodes data frame
    postcodes_index = postcodes[postcodes['post_code'] == postcode].index[0]
# Find the restaurants id in the restaurants dataframe using the deliveroo
# name
    restaurant_index = \
        restaurants[restaurants['deliveroo_name'] == deliveroo_name].index[0]
# Add an entry to restaurants_to_locs saying that this restaurant is
# available at this location
restaurants_to_locs = restaurants_to_locs.append(
{"restaurant_id": restaurant_index, "loc_id": postcodes_index},
ignore_index=True)
# Return the amended dataframes
return (tags_df, tag_type, restaurants, restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs)
def process_restaurants_for_postcode(postcode, tags_df, tag_type, restaurants,
restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs,
postcodes):
# This function processes the restaurants for the postcodes
# Add the postcode to the URL - it doesn't matter that it says camden, it
# will update as appropriate.
url = "https://deliveroo.co.uk/restaurants/london/camden" \
f"?postcode={postcode}&sort=time"
# Create the HTTP request
request = urllib.request.Request(url, headers=hdr)
# Get the page
page = urllib.request.urlopen(request)
soup = BeautifulSoup(page)
# For every link in the page
for i, link in enumerate(soup.find_all("a")):
print(i)
# Get the destination of the link
destination = link.get("href")
# If it's to a menu, get the restaurant and process the menu
        if destination and "/menu" in destination:
(tags_df, tag_type, restaurants, restaurants_to_tags,
menu_sections, menu_items, restaurants_to_locs) = \
get_restaurant_and_process_menu(
"https://deliveroo.co.uk" + destination, tags_df, tag_type,
restaurants, restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs, postcodes)
# Return the amended dataframes
return (tags_df, tag_type, restaurants, restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs)
def process_all_restaurants(postcodes, db_name):
# This function processes all of the postcodes
# Create the dataframes
tags_df = pd.DataFrame({"name": [], "type": []})\
.astype({"name": "str", "type": "int32"})
tag_type = pd.DataFrame({"name": []})
restaurants = pd.DataFrame({"name": [], "deliveroo_name": []})\
.astype({"name": "str", "deliveroo_name": "str"})
restaurants_to_tags = pd.DataFrame({"restaurant_id": [], "tag_id": []})\
.astype({"restaurant_id": "int64", "tag_id": "int64"})
menu_sections = pd.DataFrame({"restaurant_id": [], "name": []})\
.astype({"restaurant_id": "int64", "name": "str"})
menu_items = pd.DataFrame(
{"menu_section_id": [],
"name": [],
"price_in_pence": [],
"is_popular": []}).astype(
{"menu_section_id": "int64",
"name": "str",
"price_in_pence": "int64",
"is_popular": "bool"})
restaurants_to_locs = pd.DataFrame({"restaurant_id": [], "loc_id": []})\
.astype({"restaurant_id": "int64", "loc_id": "int64"})
for post_code in postcodes['post_code']:
(tags_df, tag_type, restaurants, restaurants_to_tags, menu_sections,
menu_items, restaurants_to_locs) =\
process_restaurants_for_postcode(post_code, tags_df, tag_type,
restaurants, restaurants_to_tags,
menu_sections, menu_items,
restaurants_to_locs, postcodes)
# Write to db
cnx = sqlite3.connect(db_name)
postcodes.to_sql("POSTCODES", cnx, index_label="id")
restaurants.to_sql("RESTAURANTS", cnx, index_label="id")
restaurants_to_locs.to_sql("RESTAURANTS_AVAILABLE", cnx, index_label="id")
menu_items.to_sql("MENU_ITEMS", cnx, index_label="id")
menu_sections.to_sql("MENU_SECTIONS", cnx, index_label="id")
tags_df.to_sql("CATEGORIES", cnx, index_label="id")
tag_type.to_sql("CATEGORY_TYPES", cnx, index_label="id")
restaurants_to_tags.to_sql("RESTAURANT_CATEGORIES", cnx, index_label="id")
cnx.close()
if __name__ == "__main__":
postcodes_df = pd.DataFrame({
'post_code': postcodes_list
})
process_all_restaurants(postcodes_df, db_name)
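    # Optional sanity check (a small sketch, not required by the scraper):
    # read back a few rows from the tables written above.
    cnx = sqlite3.connect(db_name)
    print(pd.read_sql_query("SELECT * FROM RESTAURANTS LIMIT 5", cnx))
    print(pd.read_sql_query("SELECT * FROM MENU_ITEMS LIMIT 5", cnx))
    cnx.close()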
| 3.59375 | 4 |
wouso/core/security/admin.py | AlexandruGhergut/wouso | 117 | 5671 | from django.contrib import admin
from wouso.core.security.models import Report
admin.site.register(Report)
| 1.0625 | 1 |
DataWrangling/TTNData2Gsheet_Auto.py | diliprk/SmartCityVisualization | 0 | 5672 | <filename>DataWrangling/TTNData2Gsheet_Auto.py
#### Reading data from The Things Network and automatically storing it to a Google Spreadsheet
# Author: <NAME>
# Email: <EMAIL>
# Date: 19/01/2018
# Revision: version#1
# License: MIT License
import pandas as pd
import requests
from df2gspread import df2gspread as d2g
import time
## Set Initial Time Duration in mins to query TTN Data:
time_duration = 5
# Insert spreadsheet file id of Google Spreadsheet
spreadsheet = '1ftXlebCTDp5tTxvlm5K3Sv1oNttDHR7s1xTi-i-ZR_o' ## Google SpreadSheet Title: TTN_Live_DataLogger
# Insert Sheet Name
wks_name = 'Sheet1'
def queryttndata(time_duration):
'''
This function queries data from TTN Swagger API based on a time duration which is given as an input
'''
headers = {'Accept': 'application/json','Authorization': 'key <KEY>'}
## Set query duration in minutes
querytime = str(time_duration) + 'm'
params = (('last', querytime),)
response = requests.get('https://vehiclecounter.data.thethingsnetwork.org/api/v2/query', headers=headers, params=params).json()
df_raw = pd.DataFrame.from_dict(response)
return df_raw
def cleandf(df):
'''
In this function we pass as input the raw dataframe from TTN in JSON format to clean and optimize the data.
This function is customized and unique to every dataset
'''
df.rename(columns={'time': 'TTNTimeStamp'}, inplace=True)
df['TTNTimeStamp'] = pd.to_datetime(df['TTNTimeStamp'])
df['TTNTimeStamp'] = df['TTNTimeStamp'] + pd.Timedelta(hours=1) ## Offset Time by 1 hour to fix TimeZone Error of Swagger API TimeStamps
df['TTNTimeStamp'] = df['TTNTimeStamp'].values.astype('datetime64[s]')
drop_cols = ['raw','device_id']
df = df.drop(drop_cols, 1)
df.reset_index()
df = df.reindex(['TTNTimeStamp','Count'], axis=1)
print("Latest Data:")
print(df.tail(1),'\n')
return df
while True:
#begin your infinite loop
df_raw = queryttndata(time_duration)
df_clean = cleandf(df_raw)
d2g.upload(df_clean, spreadsheet,wks_name,col_names=True,clean=True) # Write dataframe to Google Spreadsheet
df_clean.to_csv('TTN_VehicleCountData.csv', date_format="%d/%m/%Y %H:%M:%S",index=True) # Save DataFrame locally
time.sleep(60) # Call function every 60 seconds
    time_duration += 1 ## Increment query duration by 1 minute at the end of every loop iteration
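# A common refinement (sketch only, not part of the current behaviour): wrap
# the polling loop above in a try/except so the script stops cleanly on Ctrl+C:
#
#     try:
#         while True:
#             ...  # query, upload, sleep
#     except KeyboardInterrupt:
#         print("Stopped polling TTN.")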
| 3.140625 | 3 |
alleycat/reactive/property.py | mysticfall/alleycat-reactive | 14 | 5673 | from __future__ import annotations
from typing import TypeVar, Generic, Callable, Optional, Any, cast, Tuple
import rx
from returns import pipeline
from returns.functions import identity
from returns.maybe import Maybe, Nothing
from rx import Observable
from rx.subject import BehaviorSubject
from . import ReactiveValue, ReactiveView
from .value import Modifier
T = TypeVar("T")
class ReactiveProperty(Generic[T], ReactiveValue[T]):
def __init__(
self,
init_value: Maybe[T] = Nothing,
read_only=False,
modifier: Callable[[Any], Modifier] = lambda _: identity,
validator: Callable[[Any, T], T] = lambda _, v: v) -> None:
super().__init__(read_only)
self._init_value = init_value
self._modifier = modifier
self._validator = validator
@property
def init_value(self) -> Maybe[T]:
return self._init_value
@property
def validator(self) -> Callable[[T, Any], T]:
return self._validator
@property
def modifier(self) -> Callable[[Any], Modifier]:
return self._modifier
def as_view(self) -> ReactiveView[T]:
return ReactiveView(self.context, self.read_only)
def pipe(self, modifiers: Callable[[Any], Tuple[Modifier, ...]]) -> ReactiveProperty:
def stack(obj: Any):
# FIXME: Not sure why both PyCharm and Mypy fails to resolve pipeline.pipe(). Should investigate later.
# noinspection PyUnresolvedReferences
return pipeline.pipe(*([self.modifier(obj)] + list(modifiers(obj)))) # type:ignore
return ReactiveProperty(self.init_value, self.read_only, stack, self.validator)
def validate(self, validator: Callable[[Any, T], T]) -> ReactiveProperty[T]:
if validator is None:
raise ValueError("Argument 'modifier' is required.")
def validate(obj: Any, v: T) -> T:
return validator(obj, self.validator(obj, v))
return ReactiveProperty(self.init_value, self.read_only, self.modifier, validate)
class PropertyData(ReactiveValue.Data[T]):
def __init__(
self,
name: str,
init_value: Maybe[T],
modifier: Modifier,
validator: Callable[[T], T]):
assert name is not None
assert init_value is not None
assert modifier is not None
assert validator is not None
self._validator = validator
self._property: Optional[BehaviorSubject] = None
obs: Observable
if init_value != Nothing:
self._property = BehaviorSubject(init_value.map(validator).unwrap())
obs = self._property
else:
obs = rx.empty()
super().__init__(name, obs, modifier)
# Must override to appease Mypy... I hate Python.
@property
def value(self) -> T:
return super().value
@value.setter
def value(self, value: T):
self._check_disposed()
if self.initialized:
assert self._property is not None
self._property.on_next(self.validator(value))
else:
self._property = BehaviorSubject(self.validator(value))
self.observable = self._property
@property
def validator(self) -> Callable[[T], T]:
return self._validator
def dispose(self) -> None:
assert self._property is not None
self._check_disposed()
self._property.on_completed()
super().dispose()
def _create_data(self, obj: Any) -> PropertyData:
assert obj is not None
assert self.name is not None
def validate(v: T) -> T:
return self.validator(obj, v)
return self.PropertyData(self.name, self.init_value, self.modifier(obj), validate)
def _get_data(self, obj: Any) -> PropertyData:
assert obj is not None
return cast(ReactiveProperty.PropertyData, super()._get_data(obj))
def _set_value(self, obj: Any, data: ReactiveValue.Data, value: Any) -> None:
assert obj is not None
assert isinstance(data, ReactiveProperty.PropertyData)
data.value = value
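# Minimal usage sketch based only on the definitions above (the class and
# attribute names are hypothetical; attaching the descriptor to a class and
# notifying observers is handled by the ReactiveValue base class):
#
#     from returns.maybe import Some
#
#     class Enemy:
#         hp = ReactiveProperty(Some(100)).validate(lambda obj, v: max(v, 0))
#
#     e = Enemy()
#     e.hp = -10   # the validator clamps the stored value to 0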
| 2.109375 | 2 |
utils/mask/converter.py | csgcmai/cvat | 4 | 5674 | #!/usr/bin/env python
#
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import absolute_import, division, print_function
import argparse
import os
import glog as log
import numpy as np
import cv2
from lxml import etree
from tqdm import tqdm
def parse_args():
"""Parse arguments of command line"""
parser = argparse.ArgumentParser(
fromfile_prefix_chars='@',
description='Convert CVAT XML annotations to masks'
)
parser.add_argument(
'--cvat-xml', metavar='FILE', required=True,
help='input file with CVAT annotation in xml format'
)
parser.add_argument(
'--background-color', metavar='COLOR_BGR', default="0,0,0",
help='specify background color (by default: 0,0,0)'
)
parser.add_argument(
'--label-color', metavar='LABEL:COLOR_BGR', action='append',
default=[],
help="specify a label's color (e.g. 255 or 255,0,0). The color will " +
"be interpreted in accordance with the mask format."
)
parser.add_argument(
'--mask-bitness', type=int, choices=[8, 24], default=8,
help='choose bitness for masks'
)
parser.add_argument(
'--output-dir', metavar='DIRECTORY', required=True,
help='directory for output masks'
)
return parser.parse_args()
def parse_anno_file(cvat_xml):
root = etree.parse(cvat_xml).getroot()
anno = []
for image_tag in root.iter('image'):
image = {}
for key, value in image_tag.items():
image[key] = value
image['shapes'] = []
for poly_tag in image_tag.iter('polygon'):
polygon = {'type': 'polygon'}
for key, value in poly_tag.items():
polygon[key] = value
image['shapes'].append(polygon)
for box_tag in image_tag.iter('box'):
box = {'type': 'box'}
for key, value in box_tag.items():
box[key] = value
box['points'] = "{0},{1};{2},{1};{2},{3};{0},{3}".format(
box['xtl'], box['ytl'], box['xbr'], box['ybr'])
image['shapes'].append(box)
image['shapes'].sort(key=lambda x: int(x.get('z_order', 0)))
anno.append(image)
return anno
def create_mask_file(mask_path, width, height, bitness, color_map, background, shapes):
mask = np.zeros((height, width, bitness // 8), dtype=np.uint8)
for shape in shapes:
color = color_map.get(shape['label'], background)
points = [tuple(map(float, p.split(','))) for p in shape['points'].split(';')]
points = np.array([(int(p[0]), int(p[1])) for p in points])
mask = cv2.fillPoly(mask, [points], color=color)
cv2.imwrite(mask_path, mask)
def to_scalar(value, dim):
    scalar = list(map(int, value.split(',')))
if len(scalar) < dim:
scalar.extend([scalar[-1]] * dim)
return tuple(scalar[0:dim])
def main():
args = parse_args()
anno = parse_anno_file(args.cvat_xml)
color_map = {}
dim = args.mask_bitness // 8
for item in args.label_color:
label, color = item.split(':')
color_map[label] = to_scalar(color, dim)
background = to_scalar(args.background_color, dim)
for image in tqdm(anno, desc='Generate masks'):
mask_path = os.path.join(args.output_dir, os.path.splitext(image['name'])[0] + '.png')
mask_dir = os.path.dirname(mask_path)
if mask_dir:
os.makedirs(mask_dir, exist_ok=True)
create_mask_file(mask_path, int(image['width']), int(image['height']),
args.mask_bitness, color_map, background, image['shapes'])
if __name__ == "__main__":
main()
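# Example invocation (file and label names are placeholders):
#
#   python converter.py --cvat-xml annotations.xml \
#       --background-color 0,0,0 \
#       --label-color car:255,0,0 --label-color person:0,255,0 \
#       --mask-bitness 24 --output-dir masks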
| 2.609375 | 3 |
examples/plot_afq_callosal.py | gkiar/pyAFQ | 0 | 5675 | <reponame>gkiar/pyAFQ
"""
==========================
Callosal bundles using AFQ API
==========================
An example using the AFQ API to find callosal bundles using the templates from:
http://hdl.handle.net/1773/34926
"""
import os.path as op
import plotly
from AFQ import api
from AFQ.mask import RoiMask
import AFQ.data as afd
##########################################################################
# Get some example data
# ---------------------
#
# Retrieves `Stanford HARDI dataset <https://purl.stanford.edu/ng782rw8378>`_.
#
afd.organize_stanford_data(clear_previous_afq=True)
##########################################################################
# Set tractography parameters (optional)
# ---------------------
# We make this tracking_params which we will pass to the AFQ object
# which specifies that we want 100,000 seeds randomly distributed
# in the ROIs of every bundle.
#
# We only do this to make this example faster and consume less space.
tracking_params = dict(seed_mask=RoiMask(),
n_seeds=10000,
random_seeds=True,
rng_seed=42)
##########################################################################
# Initialize an AFQ object:
# -------------------------
#
# We specify bundle_info as the default bundles list (api.BUNDLES) plus the
# callosal bundle list. This tells the AFQ object to use bundles from both
# the standard and callosal templates.
myafq = api.AFQ(bids_path=op.join(afd.afq_home,
'stanford_hardi'),
dmriprep='vistasoft',
bundle_info=api.BUNDLES + api.CALLOSUM_BUNDLES,
tracking_params=tracking_params)
##########################################################################
# Visualizing bundles and tract profiles:
# ---------------------------------------
# This would run the script and visualize the bundles using the plotly
# interactive visualization, which should automatically open in a
# new browser window.
bundle_html = myafq.viz_bundles(export=True, n_points=50)
plotly.io.show(bundle_html[0])
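##########################################################################
# The returned figure is a regular plotly figure, so it can also be saved to
# disk (the file name here is just an example):
#
# plotly.io.write_html(bundle_html[0], file='bundles.html')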
| 1.820313 | 2 |
latest/probe.py | Soldie/Nscan-scanner-ip | 574 | 5676 | import time
import Queue
import random
import socket
import struct
import logging
import threading
from convert import *
from protocol import ethernet, ip, tcp, udp
ETH_P_IP = 0x0800 # IP protocol
ETH_P_ALL = 0x0003 # Every packet
NSCRIPT_PATH = 'nscript' # NSCRIPT PATH
PAYLOAD = {
53:('\x5d\x0d\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x06'
'google\x03com\x00\x00\x01\x00\x01'), # 'google.com' DNS Lookup
161:('\x30\x26\x02\x01\x01\x04\x06public\xa1\x19\x02'
'\x04\x56\x9f\x5a\xdd\x02\x01\x00\x02\x01\x00\x30\x0b\x30\x09\x06'
'\x05\x2b\x06\x01\x02\x01\x05\x00'), # SNMP GetNextRequest|public|2c version|1.3.6.1.2.1
123:('\x17\x00\x02\x05'), # NTP systats commands lacks 38 null bytes (just to save bandwidth)
    1900:('M-SEARCH * HTTP/1.1\r\nHOST: 239.255.255.250:1900\r\n'
'MAN: "ssdp:discover"\r\nMX: 2\r\nST: ssdp:all\r\n\r\n')
}
class Generator(object):
def __init__(self, size):
self.size = size
self.inc = size/4
if self.inc<1:
self.inc = 1
self.base = -self.inc
self.num = self.base
self.index = 0
def __iter__(self):
return self
def next(self):
if (self.num+self.inc)>=self.size:
self.next_index()
self.next_base()
self.num = self.num + self.inc
return self.num
def next_base(self):
self.base = 0
self.base-= self.index
self.num = self.base
def next_index(self):
self.index+=1
if self.index>=self.inc:
raise StopIteration
def suspend(self):
return self.size, self.inc, self.base, self.num, self.index
def resume(self, size, inc, base, num, index):
self.size = size
self.inc = inc
self.base = base
self.num = num
self.index = index
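# Illustration of the traversal order (not used by the scanner itself): the
# generator visits offsets in interleaved strides rather than linearly, e.g.
#
#     >>> list(Generator(8))
#     [0, 2, 4, 6, 1, 3, 5, 7]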
class ScriptEngine(object):
def __init__(self, imports):
self.imports = imports
self.event = threading.Event()
self.queues = {}
self.thread = []
def Load(self):
for script in self.imports:
q = Queue.Queue()
s = __import__('{}.{}'.format(NSCRIPT_PATH, script),
fromlist=[NSCRIPT_PATH])
t = threading.Thread(target=s.run,
args=(q, self.event))
self.thread.append(t)
t.setDaemon(True)
t.start()
self.queues[script] = q
def Feed(self, host, port):
for scr in self.imports:
for r in self.imports[scr]:
if port in xrange(r[0], r[1]):
self.queues[scr].put((host, port))
break
def Cleanup(self):
while Alive(self.thread):
time.sleep(10)
class nscan(object):
def __init__(self, options):
self.options = options
self.hosts = self.split(options.hosts, options.threads)
self.ports = options.ports
self.srcp = random.randint(1, 65535)#self.PickPort() # source port
self.smac = options.smac
self.dmac = options.dmac
self.ifname = options.ifname
self.siface = options.siface
self.diface = options.diface
self.banner = options.banner
self.count = options.count
self.cooldown = options.cooldown
self.queue = Queue.Queue()
if options.stype.upper()=='U':
self.stype = socket.IPPROTO_UDP
else:
self.stype = socket.IPPROTO_TCP
self.events = {
'send': threading.Event(),
'recv': threading.Event()}
self.threads = {
'send': [],
'recv': None}
def __Transport(self, src, dst=0):
if self.stype==socket.IPPROTO_TCP:
transport = tcp.TCP(src, dst)
transport.seqn = 0xDEADC0DE
else:
transport = udp.UDP(src, dst)
return transport
def __Pack(self, transport, src, dst):
if self.stype==socket.IPPROTO_TCP:
transport.payload = ''
else:
transport.payload = PAYLOAD.get(transport.dstp, '\x00\r\n\r\n')
packed = transport.pack(src, dst)
return packed + transport.payload
def __CookieCheck(self, data):
check = False
dstp = struct.unpack('!H', data[22:24])[0]
if self.stype==socket.IPPROTO_UDP:
if dstp==self.srcp:
check = True
else:
ackn = struct.unpack('!L', data[28:32])[0]
flags = struct.unpack('B', data[33])[0] & 0b010010 # SYN-ACK
if dstp==self.srcp and ackn==0xDEADC0DF and flags==18:
check = True
return check
def init(self):
generators = []
for h in self.hosts:
g = Generator(h[1]-h[0])
generators.append(g)
t = threading.Thread(target=self.send, args=(h, self.srcp, g))
t.setDaemon(True)
self.threads['send'].append(t)
t = threading.Thread(target=self.recv)
t.setDaemon(True)
self.threads['recv'] = t
if 'resume' in dir(self.options):
i = 0
for g in generators:
g.resume(*self.options.indexes[i])
i+=1
return self.threads, self.events, self.queue, generators
def run(self):
self.events['send'].set()
self.events['recv'].set()
for t in self.threads['send']:
t.start()
self.threads['recv'].start()
def send(self, hosts, srcp, gen):
if 'ppp' in self.ifname:
family = socket.AF_INET
proto = socket.IPPROTO_RAW
eth = ''
else:
family = socket.AF_PACKET
proto = ETH_P_IP
eth = ethernet.ETHER(mac2byte(self.smac), mac2byte(self.dmac), ETH_P_IP).pack()
sock = socket.socket(family, socket.SOCK_RAW, proto)
transport = self.__Transport(srcp, 0)
npacket = 0
self.events['send'].wait()
target = hosts[0]
while self.events['send'].isSet():
try:
target = hosts[0] + gen.next()
iph = ip.IP(self.diface, dec2dot(target), self.stype)
except StopIteration:
break
for port_list in self.ports:
for port in range(port_list[0], port_list[1]):
if self.events['send'].isSet():
transport.dstp = port
packet = eth + iph.pack() + self.__Pack(transport, iph.src, iph.dst) #tcph.pack(iph.src, iph.dst)
sock.sendto(packet, (dec2dot(target), 0)) # self.ifname
npacket+=1
if not npacket%self.cooldown[0]:
time.sleep(self.cooldown[1])
else:
break
logging.info('[SEND] Sent: {} packets'.format(npacket))
sock.close()
def recv(self):
sock = socket.socket(socket.AF_INET,
socket.SOCK_RAW,
self.stype)
sock.bind(('', self.srcp))
sock.settimeout(5)
self.events['recv'].wait()
counter = 0
while self.events['recv'].isSet():
try:
data, sa_ll = sock.recvfrom(65535)
if self.__CookieCheck(data):
self.queue.put(Extract(data))
counter += 1
if counter==self.count:
self.events['send'].clear()
break
except socket.timeout:
continue
sock.close()
logging.info('[RECV] Received: {} packets'.format(counter))
def split(self, hosts, n):
'''
Split host range into n parts (multithreaded)
'''
nhosts = hosts[1] - hosts[0] # number of hosts
nparts = nhosts/n + 1
host_parts = []
start = hosts[0]
while True:
if len(host_parts)<n-1:
end = start + nparts
host_parts.append((start, end))
start = end
else:
host_parts.append((start, hosts[1]))
break
return host_parts
def PickPort(self):
while True:
srcp = random.randrange(10000, 65535)
if srcp not in self.sport:
self.sport.append(srcp)
break
return srcp
def Extract(packet):
src = socket.inet_ntoa(packet[12:16])
srcp = struct.unpack('!H', packet[20:22])[0]
return src, srcp
def Alive(thread_list):
''' check if thread is alive '''
alive = False
for t in thread_list:
if t.isAlive():
alive = True
break
return alive
| 2.125 | 2 |
parsy-backend/flaskApp/assignment/views.py | dstambler17/Parsy.io | 0 | 5677 | import sys
from flask import Blueprint, request, jsonify
from flaskApp import db
from flaskApp.assignment.utils import *
from flaskApp.error.error_handlers import *
import json
from flaskApp.helpers import getAssignmentData
assignment = Blueprint('assignment', __name__)
@assignment.route('/restoreAssignment/<calID>/<courseID>', methods=['POST'])
def restore_assignment(calID, courseID):
try:
DbAssignmentUtils.restore_all_original_assignment(calID, courseID)
return jsonify({"restore" : "success"}), 201
except (NotFound) as e:
return jsonify(e.body), e.status_code
@assignment.route('/getAssignment/<calID>/<courseID>/<assignment>', methods=['GET'])
def get_assignment_details(calID, courseID, assignment):
try:
res = DbAssignmentUtils.get_assignment_slot_details(calID, courseID, assignment)
return jsonify(res), 200
except (NotFound) as e:
return jsonify(e.body), e.status_code
@assignment.route('/deleteAssignment/<calID>/<courseID>', methods=['DELETE'])
def delete_assignment(calID, courseID):
try:
request_body = json.loads(request.get_data())
DbAssignmentUtils.delete_assignment_slot(calID, courseID, request_body)
return jsonify({}), 204
except (NotFound, BadRequest) as e:
return jsonify(e.body), e.status_code
@assignment.route('/addAssignment/<calID>/<courseID>', methods=['POST'])
def add_assignment(calID, courseID):
try:
request_body = json.loads(request.get_data())
res = DbAssignmentUtils.add_Assignment_slot(calID, courseID, request_body)
return jsonify(res), 201
except (NotFound, BadRequest, ValidationFailed) as e:
return jsonify(e.body), e.status_code
'''Test method, keep just in case. Will probably be moved to a separate API designed to
interact with just the MySQL database that the data pipeline will drop stuff into'''
@assignment.route('/getAssignmentTest/<courseID>', methods=['GET'])
def get_session_assignment(courseID):
try:
result = getAssignmentData(courseID)
return jsonify(result)
except (NotFound) as e:
return jsonify(e.body), e.status_code
| 2.328125 | 2 |
python/patterns/slidingwindow/longest_substring_no_repeating_char.py | dharmik-thakkar/dsapatterns | 0 | 5678 | #######################################################################################################################
# Given a string, find the length of the longest substring which has no repeating characters.
#
# Input: String="aabccbb"
# Output: 3
# Explanation: The longest substring without any repeating characters is "abc".
#
# Input: String="abbbb"
# Output: 2
# Explanation: The longest substring without any repeating characters is "ab".
#
# Input: String="abccde"
# Output: 3
# Explanation: Longest substrings without any repeating characters are "abc" & "cde".
#######################################################################################################################
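# The sliding-window solution below only ever moves window_start forward, so each
# character is examined at most twice: O(n) time and O(1) extra space (a fixed
# 26-entry table of last-seen indices, assuming lowercase ASCII input).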
def longest_substring_no_repeating_char(input_str: str) -> int:
window_start = 0
is_present = [None for i in range(26)]
max_window = 0
for i in range(len(input_str)):
char_ord = ord(input_str[i]) - 97
if is_present[char_ord] is not None:
window_start = max(window_start, is_present[char_ord] + 1)
is_present[char_ord] = i
max_window = max(max_window, i - window_start + 1)
return max_window
print(longest_substring_no_repeating_char('aabccbb'))
print(longest_substring_no_repeating_char('abbbb'))
print(longest_substring_no_repeating_char('abccde'))
print(longest_substring_no_repeating_char('abcabcbb'))
print(longest_substring_no_repeating_char('bbbbb'))
print(longest_substring_no_repeating_char('pwwkew'))
| 4.09375 | 4 |
Apache Spark with Python - Big Data with PySpark and Spark/6-PairRDD/filter/AirportsNotInUsa.py | jrderek/Big_Data_Engineering_Portfolio | 0 | 5679 | <filename>Apache Spark with Python - Big Data with PySpark and Spark/6-PairRDD/filter/AirportsNotInUsa.py
import sys
sys.path.insert(0, '.')
from pyspark import SparkContext, SparkConf
from commons.Utils import Utils
if __name__ == "__main__":
'''
Create a Spark program to read the airport data from in/airports.text;
generate a pair RDD with airport name being the key and country name being the value.
Then remove all the airports which are located in United States and output the pair RDD to out/airports_not_in_usa_pair_rdd.text
Each row of the input file contains the following columns:
Airport ID, Name of airport, Main city served by airport, Country where airport is located,
IATA/FAA code, ICAO Code, Latitude, Longitude, Altitude, Timezone, DST, Timezone in Olson format
Sample output:
("Kamloops", "Canada")
("Wewak Intl", "Papua New Guinea")
...
'''
conf = SparkConf().setAppName("airports").setMaster("local[*]")
sc = SparkContext(conf=conf)
airportsRDD = sc.textFile("inputs/airports.text")
airportPairRDD = airportsRDD.map(lambda line:
(Utils.COMMA_DELIMITER.split(line)[1],
Utils.COMMA_DELIMITER.split(line)[3]))
airportsNotInUSA = airportPairRDD.filter(
lambda keyValue: keyValue[1] != "\"United States\"")
airportsNotInUSA.saveAsTextFile(
"outputs/airports_not_in_usa_pair_rdd.text")
| 3.40625 | 3 |
linux/keyman-config/keyman_config/keyboard_details.py | srl295/keyman | 0 | 5680 | <reponame>srl295/keyman
#!/usr/bin/python3
# Keyboard details window
import logging
import json
from os import path
import qrcode
import tempfile
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from keyman_config import KeymanComUrl, _, secure_lookup
from keyman_config.accelerators import init_accel
from keyman_config.kmpmetadata import parsemetadata
# basics: keyboard name, package version, description
# other things: filename (of kmx), ,
# OSK availability, documentation availability, package copyright
# also: supported languages, fonts
# from kmx?: keyboard version, encoding, layout type
# there is data in kmp.inf/kmp.json
# there is possibly data in kbid.json (downloaded from api)
class KeyboardDetailsView(Gtk.Dialog):
# TODO Display all the information that is available
# especially what is displayed for Keyman on Windows
# TODO clean up file once have what we want
def __init__(self, parent, kmp):
# kmp has name, version, packageID, area
if "keyboard" in kmp["name"].lower():
wintitle = kmp["name"]
else:
wintitle = _("{name} keyboard").format(name=kmp["name"])
Gtk.Dialog.__init__(self, wintitle, parent)
init_accel(self)
self.set_border_width(6)
packageDir = path.join(kmp['areapath'], kmp['packageID'])
kmp_json = path.join(packageDir, "kmp.json")
info, system, options, keyboards, files = parsemetadata(kmp_json)
if info is None:
# Dialog when invalid metadata
self.add_button(_("_Close"), Gtk.ResponseType.CLOSE)
grid = Gtk.Grid()
self.get_content_area().pack_start(grid, True, True, 12)
lbl_invalid_metadata = Gtk.Label()
lbl_invalid_metadata.set_text(_("ERROR: Keyboard metadata is damaged.\nPlease \"Uninstall\" and then \"Install\" the keyboard."))
lbl_invalid_metadata.set_halign(Gtk.Align.END)
grid.add(lbl_invalid_metadata)
self.resize(700, 200)
self.show_all()
return
kbdata = None
jsonfile = path.join(packageDir, kmp['packageID'] + ".json")
if path.isfile(jsonfile):
try:
with open(jsonfile, "r") as read_file:
kbdata = json.load(read_file)
except Exception as e:
logging.warning('Exception %s reading %s %s', type(e), jsonfile, e.args)
grid = Gtk.Grid()
# grid.set_column_homogeneous(True)
# kbdatapath = path.join("/usr/local/share/keyman", kmp["id"], kmp["id"] + ".json")
# Package info
lbl_pkg_name = Gtk.Label()
lbl_pkg_name.set_text(_("Package name: "))
lbl_pkg_name.set_halign(Gtk.Align.END)
grid.add(lbl_pkg_name)
prevlabel = lbl_pkg_name
label = Gtk.Label()
if secure_lookup(info, 'name', 'description'):
label.set_text(secure_lookup(info, 'name', 'description'))
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_pkg_name, Gtk.PositionType.RIGHT, 1, 1)
lbl_pkg_id = Gtk.Label()
lbl_pkg_id.set_text(_("Package id: "))
lbl_pkg_id.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pkg_id, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_pkg_id
label = Gtk.Label()
if secure_lookup(kmp, 'packageID'):
label.set_text(kmp['packageID'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_pkg_id, Gtk.PositionType.RIGHT, 1, 1)
lbl_pkg_vrs = Gtk.Label()
lbl_pkg_vrs.set_text(_("Package version: "))
lbl_pkg_vrs.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pkg_vrs, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_pkg_vrs
label = Gtk.Label()
if secure_lookup(info, 'version', 'description'):
label.set_text(secure_lookup(info, 'version', 'description'))
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_pkg_vrs, Gtk.PositionType.RIGHT, 1, 1)
if secure_lookup(kbdata, 'description'):
lbl_pkg_desc = Gtk.Label()
lbl_pkg_desc.set_text(_("Package description: "))
lbl_pkg_desc.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pkg_desc, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_pkg_desc
label = Gtk.Label()
label.set_text(kbdata['description'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
label.set_line_wrap(80)
grid.attach_next_to(label, lbl_pkg_desc, Gtk.PositionType.RIGHT, 1, 1)
if secure_lookup(info, "author"):
lbl_pkg_auth = Gtk.Label()
lbl_pkg_auth.set_text(_("Package author: "))
lbl_pkg_auth.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pkg_auth, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_pkg_auth
label = Gtk.Label()
if secure_lookup(info, 'author', 'description'):
label.set_text(secure_lookup(info, 'author', 'description'))
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_pkg_auth, Gtk.PositionType.RIGHT, 1, 1)
if secure_lookup(info, "copyright"):
lbl_pkg_cpy = Gtk.Label()
lbl_pkg_cpy.set_text(_("Package copyright: "))
lbl_pkg_cpy.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pkg_cpy, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_pkg_cpy
label = Gtk.Label()
if secure_lookup(info, 'copyright', 'description'):
label.set_text(secure_lookup(info, 'copyright', 'description'))
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_pkg_cpy, Gtk.PositionType.RIGHT, 1, 1)
# Padding and full width horizontal divider
lbl_pad = Gtk.Label()
lbl_pad.set_text("")
lbl_pad.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pad, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = lbl_pad
divider_pkg = Gtk.HSeparator()
grid.attach_next_to(divider_pkg, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = divider_pkg
# Keyboard info for each keyboard
if keyboards:
for kbd in keyboards:
kbdata = None
jsonfile = path.join(packageDir, kbd['id'] + ".json")
if path.isfile(jsonfile):
try:
with open(jsonfile, "r") as read_file:
kbdata = json.load(read_file)
except Exception as e:
logging.warning('Exception %s reading %s %s', type(e), jsonfile, e.args)
# start with padding
lbl_pad = Gtk.Label()
lbl_pad.set_text("")
lbl_pad.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pad, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = lbl_pad
# show the icon somewhere
lbl_kbd_file = Gtk.Label()
lbl_kbd_file.set_text(_("Keyboard filename: "))
lbl_kbd_file.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_file, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_file
label = Gtk.Label()
label.set_text(path.join(packageDir, kbd['id'] + ".kmx"))
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_file, Gtk.PositionType.RIGHT, 1, 1)
if kbdata and secure_lookup(kbdata, 'id') != secure_lookup(kmp, 'packageID'):
lbl_kbd_name = Gtk.Label()
lbl_kbd_name.set_text(_("Keyboard name: "))
lbl_kbd_name.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_name, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_name
label = Gtk.Label()
if secure_lookup(kbdata, 'name'):
label.set_text(kbdata['name'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_name, Gtk.PositionType.RIGHT, 1, 1)
lbl_kbd_id = Gtk.Label()
lbl_kbd_id.set_text(_("Keyboard id: "))
lbl_kbd_id.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_id, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_id
label = Gtk.Label()
if secure_lookup(kbdata, 'id'):
label.set_text(kbdata['id'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_id, Gtk.PositionType.RIGHT, 1, 1)
lbl_kbd_vrs = Gtk.Label()
lbl_kbd_vrs.set_text(_("Keyboard version: "))
lbl_kbd_vrs.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_vrs, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_vrs
label = Gtk.Label()
if secure_lookup(kbdata, 'version'):
label.set_text(kbdata['version'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_vrs, Gtk.PositionType.RIGHT, 1, 1)
if secure_lookup(info, "author"):
lbl_kbd_auth = Gtk.Label()
lbl_kbd_auth.set_text(_("Keyboard author: "))
lbl_kbd_auth.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_auth, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_auth
label = Gtk.Label()
if secure_lookup(kbdata, 'authorName'):
label.set_text(kbdata['authorName'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_auth, Gtk.PositionType.RIGHT, 1, 1)
lbl_kbd_lic = Gtk.Label()
lbl_kbd_lic.set_text(_("Keyboard license: "))
lbl_kbd_lic.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_lic, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_lic
label = Gtk.Label()
if secure_lookup(kbdata, 'license'):
label.set_text(kbdata['license'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
grid.attach_next_to(label, lbl_kbd_lic, Gtk.PositionType.RIGHT, 1, 1)
lbl_kbd_desc = Gtk.Label()
lbl_kbd_desc.set_text(_("Keyboard description: "))
lbl_kbd_desc.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_kbd_desc, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
prevlabel = lbl_kbd_desc
label = Gtk.Label()
if secure_lookup(kbdata, 'description'):
label.set_text(kbdata['description'])
label.set_halign(Gtk.Align.START)
label.set_selectable(True)
label.set_line_wrap(80)
grid.attach_next_to(label, lbl_kbd_desc, Gtk.PositionType.RIGHT, 1, 1)
# Padding and full width horizontal divider
lbl_pad = Gtk.Label()
lbl_pad.set_text("")
lbl_pad.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pad, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = lbl_pad
divider_pkg = Gtk.HSeparator()
grid.attach_next_to(divider_pkg, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
# label7 = Gtk.Label()
# label7.set_text(_("On Screen Keyboard: "))
# label7.set_halign(Gtk.Align.END)
# grid.attach_next_to(label7, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
# prevlabel = label7
# # label = Gtk.Label()
# # label.set_text(secure_lookup(info, 'version', 'description'))
# # label.set_halign(Gtk.Align.START)
# # label.set_selectable(True)
# # grid.attach_next_to(label, label7, Gtk.PositionType.RIGHT, 1, 1)
# label8 = Gtk.Label()
# label8.set_text(_("Documentation: "))
# label8.set_halign(Gtk.Align.END)
# grid.attach_next_to(label8, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
# prevlabel = label8
# #TODO need to know which area keyboard is installed in to show this
# # label = Gtk.Label()
# # welcome_file = path.join("/usr/local/share/doc/keyman", kmp["id"], "welcome.htm")
# # if path.isfile(welcome_file):
# # label.set_text(_("Installed"))
# # else:
# # label.set_text(_("Not installed"))
# # label.set_halign(Gtk.Align.START)
# # label.set_selectable(True)
# # grid.attach_next_to(label, label8, Gtk.PositionType.RIGHT, 1, 1)
# label9 = Gtk.Label()
# # stored in kmx
# label9.set_text(_("Message: "))
# label9.set_halign(Gtk.Align.END)
# grid.attach_next_to(label9, prevlabel, Gtk.PositionType.BOTTOM, 1, 1)
# prevlabel = label9
# label = Gtk.Label()
# label.set_line_wrap(True)
# label.set_text(
# "This keyboard is distributed under the MIT license (MIT) as described somewhere")
# #label.set_text(kmp["description"])
# label.set_halign(Gtk.Align.START)
# label.set_selectable(True)
# grid.attach_next_to(label, label9, Gtk.PositionType.RIGHT, 1, 1)
# Add an entire row of padding
lbl_pad = Gtk.Label()
lbl_pad.set_text("")
lbl_pad.set_halign(Gtk.Align.END)
grid.attach_next_to(lbl_pad, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = lbl_pad
# If it doesn't exist, generate QR code to share keyboard package
path_qr = path.join(tempfile.gettempdir(), kmp['packageID'] + '_qrcode.png')
url = KeymanComUrl + "/go/keyboard/" + kmp['packageID'] + "/share"
if not path.isfile(path_qr):
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=4,
border=4)
qr.add_data(url)
qr.make(fit=True)
img = qr.make_image()
img.save(path_qr)
# Display QR Code, spanning 2 columns so it will be centered
image = Gtk.Image()
image.set_from_file(path_qr)
grid.attach_next_to(image, prevlabel, Gtk.PositionType.BOTTOM, 2, 1)
lbl_share_kbd = Gtk.Label()
lbl_share_kbd.set_markup(_("Scan this code to load this keyboard\non another device or <a href='{uri}'>share online</a>").format(uri=url))
lbl_share_kbd.set_halign(Gtk.Align.CENTER)
lbl_share_kbd.set_line_wrap(True)
grid.attach_next_to(lbl_share_kbd, image, Gtk.PositionType.BOTTOM, 2, 1)
prevlabel = lbl_share_kbd
self.add_button(_("_Close"), Gtk.ResponseType.CLOSE)
self.get_content_area().pack_start(grid, True, True, 12)
self.resize(800, 450)
self.show_all()
| 2.28125 | 2 |
build_osx/copy_runtime.py | ozsolarwind/SAM | 0 | 5681 | import os
import shutil
SOURCE_DIR = '../deploy/runtime'
TARGET_DIR = 'SAM.app/Contents/runtime'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
SOURCE_DIR = '../deploy/solar_resource'
TARGET_DIR = 'SAM.app/Contents/solar_resource'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
SOURCE_DIR = '../deploy/wind_resource'
TARGET_DIR = 'SAM.app/Contents/wind_resource'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
SOURCE_DIR = '../deploy/libraries'
TARGET_DIR = 'SAM.app/Contents/libraries'
if os.path.exists(TARGET_DIR):
shutil.rmtree(TARGET_DIR)
shutil.copytree(SOURCE_DIR, TARGET_DIR, ignore=shutil.ignore_patterns('.git'))
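# The four copy blocks above repeat the same pattern; an equivalent loop-based
# sketch (illustrative only, same behaviour) would be:
#     for name in ('runtime', 'solar_resource', 'wind_resource', 'libraries'):
#         src, dst = '../deploy/' + name, 'SAM.app/Contents/' + name
#         if os.path.exists(dst):
#             shutil.rmtree(dst)
#         shutil.copytree(src, dst, ignore=shutil.ignore_patterns('.git'))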
| 2.125 | 2 |
codalab/lib/path_util.py | kl-chou/codalab-worksheets | 236 | 5682 | <filename>codalab/lib/path_util.py
"""
path_util contains helpers for working with local filesystem paths.
There are a few classes of methods provided here:
Functions to normalize paths and check that they are in normal form:
normalize, check_isvalid, check_isdir, check_isfile, path_is_url
Functions to list directories and to deal with subpaths of paths:
safe_join, get_relative_path, ls, recursive_ls
Functions to read files to compute hashes, write results to stdout, etc:
getmtime, get_size, hash_directory, hash_file_contents
  Functions that modify the filesystem in controlled ways:
copy, make_directory, set_write_permissions, rename, remove
"""
import errno
import hashlib
import itertools
import os
import shutil
import subprocess
import sys
from typing import Optional
from codalab.common import precondition, UsageError, parse_linked_bundle_url
from codalab.lib import file_util
from codalab.worker.file_util import get_path_size
# Block sizes and canonical strings used when hashing files.
BLOCK_SIZE = 0x40000
FILE_PREFIX = 'file'
LINK_PREFIX = 'link'
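# Typical usage of these helpers (illustrative only): paths are normalized and
# validated before anything touches the filesystem, e.g.
#     path = normalize('~/bundles/data')
#     check_isdir(path, 'example')
#     dirs_and_files = recursive_ls(path)
#     digest = hash_directory(path, dirs_and_files)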
def path_error(message, path):
"""
Raised when a user-supplied path causes an exception.
"""
return UsageError(message + ': ' + path)
################################################################################
# Functions to normalize paths and check that they are in normal form.
################################################################################
def normalize(path):
"""
Return the absolute path of the location specified by the given path.
This path is returned in a "canonical form", without ~'s, .'s, ..'s.
"""
if path == '-':
return '/dev/stdin'
elif path_is_url(path):
return path
else:
return os.path.abspath(os.path.expanduser(path))
def check_isvalid(path, fn_name):
"""
Raise a PreconditionViolation if the path is not absolute or normalized.
Raise a UsageError if the file at that path does not exist.
"""
precondition(os.path.isabs(path), '%s got relative path: %s' % (fn_name, path))
# Broken symbolic links are valid paths, so we use lexists instead of exists.
if not os.path.lexists(path):
raise path_error('%s got non-existent path:' % (fn_name,), path)
def check_isdir(path, fn_name):
"""
    Check that the path is valid, then raise UsageError if the path is not a directory.
"""
check_isvalid(path, fn_name)
if not os.path.isdir(path):
raise path_error('%s got non-directory:' % (fn_name,), path)
def check_isfile(path, fn_name):
"""
    Check that the path is valid, then raise UsageError if the path is a directory.
"""
check_isvalid(path, fn_name)
if os.path.isdir(path):
raise path_error('%s got directory:' % (fn_name,), path)
def path_is_url(path):
if isinstance(path, str):
for prefix in ['http', 'https', 'ftp']:
if path.startswith(prefix + '://'):
return True
return False
################################################################################
# Functions to list directories and to deal with subpaths of paths.
################################################################################
def safe_join(*paths):
"""
Join a sequence of paths but filter out any that are empty. Used for targets.
Note that os.path.join has this functionality EXCEPT at the end of the list,
which causes problems when a target subpath is empty.
"""
return os.path.join(*[_f for _f in paths if _f])
def get_relative_path(root, path):
"""
Return the relative path from root to path, which should be nested under root.
"""
precondition(path.startswith(root), '%s is not under %s' % (path, root))
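    # e.g. get_relative_path('/tmp/a', '/tmp/a/b/c') -> '/b/c' (illustrative)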
return path[len(root) :]
def ls(path):
"""
Return a (list of directories, list of files) in the given directory.
"""
check_isdir(path, 'ls')
(directories, files) = ([], [])
for file_name in os.listdir(path):
if os.path.isfile(os.path.join(path, file_name)):
files.append(file_name)
else:
directories.append(file_name)
return (directories, files)
def recursive_ls(path):
"""
Return a (list of directories, list of files) in the given directory and
all of its nested subdirectories. All paths returned are absolute.
Symlinks are returned in the list of files, even if they point to directories.
This makes it possible to distinguish between real and symlinked directories
when computing the hash of a directory. This function will NOT descend into
symlinked directories.
"""
check_isdir(path, 'recursive_ls')
(directories, files) = ([], [])
for (root, _, file_names) in os.walk(path):
assert os.path.isabs(root), 'Got relative root in os.walk: %s' % (root,)
directories.append(root)
for file_name in file_names:
files.append(os.path.join(root, file_name))
# os.walk ignores symlinks to directories, but we should count them as files.
    # However, we can't use the followlinks parameter, because a) we don't want
# to descend into directories and b) we could end up in an infinite loop if
# we were to pass that flag. Instead, we handle symlinks here:
for subpath in os.listdir(root):
full_subpath = os.path.join(root, subpath)
if os.path.islink(full_subpath) and os.path.isdir(full_subpath):
files.append(full_subpath)
return (directories, files)
################################################################################
# Functions to read files to compute hashes, write results to stdout, etc.
################################################################################
def getmtime(path):
"""
Like os.path.getmtime, but does not follow symlinks.
"""
return os.lstat(path).st_mtime
def get_size(path, dirs_and_files=None):
"""
Get the size (in bytes) of the file or directory at or under the given path.
Does not include symlinked files and directories.
"""
if parse_linked_bundle_url(path).uses_beam:
return get_path_size(path)
if os.path.islink(path) or not os.path.isdir(path):
return os.lstat(path).st_size
dirs_and_files = dirs_and_files or recursive_ls(path)
return sum(os.lstat(path).st_size for path in itertools.chain(*dirs_and_files))
def hash_directory(path, dirs_and_files=None):
"""
Return the hash of the contents of the folder at the given path.
This hash is independent of the path itself - if you were to move the
directory and call get_hash again, you would get the same result.
"""
if parse_linked_bundle_url(path).uses_beam:
# On Azure Blob Storage, we just use the directory size for the hashed contents.
return get_size(path)
(directories, files) = dirs_and_files or recursive_ls(path)
# Sort and then hash all directories and then compute a hash of the hashes.
# This two-level hash is necessary so that the overall hash is unambiguous -
# if we updated directory_hash with the directory names themselves, then
# we'd be hashing the concatenation of these names, which could be generated
# in multiple ways.
directory_hash = hashlib.sha1()
for directory in sorted(directories):
relative_path = get_relative_path(path, directory)
directory_hash.update(hashlib.sha1(relative_path.encode()).hexdigest().encode())
# Use a similar two-level hashing scheme for all files, but incorporate a
# hash of both the file name and contents.
file_hash = hashlib.sha1()
for file_name in sorted(files):
relative_path = get_relative_path(path, file_name)
file_hash.update(hashlib.sha1(relative_path.encode()).hexdigest().encode())
file_hash.update(hash_file_contents(file_name).encode())
# Return a hash of the two hashes.
overall_hash = hashlib.sha1(directory_hash.hexdigest().encode())
overall_hash.update(file_hash.hexdigest().encode())
return overall_hash.hexdigest()
def hash_file_contents(path):
"""
Return the hash of the file's contents, read in blocks of size BLOCK_SIZE.
"""
    message = 'hash_file_contents called with relative path: %s' % (path,)
precondition(os.path.isabs(path), message)
if os.path.islink(path):
contents_hash = hashlib.sha1(LINK_PREFIX.encode())
contents_hash.update(os.readlink(path).encode())
else:
contents_hash = hashlib.sha1(FILE_PREFIX.encode())
with open(path, 'rb') as file_handle:
while True:
data = file_handle.read(BLOCK_SIZE)
if not data:
break
contents_hash.update(data)
return contents_hash.hexdigest()
################################################################################
# Functions that modify the filesystem in controlled ways.
################################################################################
def copy(source_path: str, dest_path: str, follow_symlinks: Optional[bool] = False):
"""
Copy |source_path| to |dest_path|.
Assume dest_path doesn't exist.
|follow_symlinks|: whether to follow symlinks
Note: this only works in Linux.
"""
if os.path.exists(dest_path):
raise path_error('already exists', dest_path)
if source_path == '/dev/stdin':
with open(dest_path, 'wb') as dest:
file_util.copy(
sys.stdin,
dest,
autoflush=False,
print_status='Copying %s to %s' % (source_path, dest_path),
)
else:
if not follow_symlinks and os.path.islink(source_path):
raise path_error('not following symlinks', source_path)
if not os.path.exists(source_path):
raise path_error('does not exist', source_path)
command = [
'rsync',
'-pr%s' % ('L' if follow_symlinks else 'l'),
source_path
+ ('/' if not os.path.islink(source_path) and os.path.isdir(source_path) else ''),
dest_path,
]
if subprocess.call(command) != 0:
raise path_error('Unable to copy %s to' % source_path, dest_path)
def make_directory(path):
"""
Create the directory at the given path.
"""
try:
os.mkdir(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
check_isdir(path, 'make_directory')
def set_write_permissions(path):
    # Recursively give write permissions to |path|, so that we can operate
# on it.
if not os.path.islink(path): # Don't need write permissions if symlink
subprocess.call(['chmod', '-R', 'u+w', path])
def rename(old_path, new_path):
# Allow write permissions, or else the move will fail.
set_write_permissions(old_path)
subprocess.call(['mv', old_path, new_path])
def remove(path):
"""
Remove the given path, whether it is a directory, file, or link.
"""
if parse_linked_bundle_url(path).uses_beam:
from apache_beam.io.filesystems import FileSystems
        if FileSystems.exists(path):
FileSystems.delete([path])
return
check_isvalid(path, 'remove')
set_write_permissions(path) # Allow permissions
if os.path.islink(path):
os.unlink(path)
elif os.path.isdir(path):
try:
shutil.rmtree(path)
except shutil.Error:
pass
else:
os.remove(path)
if os.path.exists(path):
print('Failed to remove %s' % path)
def soft_link(source, path):
"""
Create a symbolic link to source at path. This is basically the same as doing "ln -s $source $path"
"""
check_isvalid(source, 'soft_link')
os.symlink(source, path)
| 2.78125 | 3 |
statsmodels/regression/tests/test_glsar_gretl.py | aliavni/statsmodels | 1 | 5683 | # -*- coding: utf-8 -*-
"""Tests of GLSAR and diagnostics against Gretl
Created on Thu Feb 02 21:15:47 2012
Author: <NAME>
License: BSD-3
"""
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_allclose, assert_array_less)
from statsmodels.regression.linear_model import OLS, GLSAR
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
import statsmodels.stats.sandwich_covariance as sw
import statsmodels.stats.diagnostic as smsdia
import statsmodels.stats.outliers_influence as oi
def compare_ftest(contrast_res, other, decimal=(5,4)):
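    # 'other' is a (fvalue, pvalue, df_num, df_denom, "f") sequence taken from the
    # Gretl reference output quoted further below.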
assert_almost_equal(contrast_res.fvalue, other[0], decimal=decimal[0])
assert_almost_equal(contrast_res.pvalue, other[1], decimal=decimal[1])
assert_equal(contrast_res.df_num, other[2])
assert_equal(contrast_res.df_denom, other[3])
assert_equal("f", other[4])
class TestGLSARGretl:
def test_all(self):
d = macrodata.load_pandas().data
#import datasetswsm.greene as g
#d = g.load('5-1')
#growth rates
gs_l_realinv = 400 * np.diff(np.log(d['realinv'].values))
gs_l_realgdp = 400 * np.diff(np.log(d['realgdp'].values))
#simple diff, not growthrate, I want heteroscedasticity later for testing
endogd = np.diff(d['realinv'])
exogd = add_constant(np.c_[np.diff(d['realgdp'].values), d['realint'][:-1].values])
endogg = gs_l_realinv
exogg = add_constant(np.c_[gs_l_realgdp, d['realint'][:-1].values])
res_ols = OLS(endogg, exogg).fit()
#print res_ols.params
mod_g1 = GLSAR(endogg, exogg, rho=-0.108136)
res_g1 = mod_g1.fit()
#print res_g1.params
mod_g2 = GLSAR(endogg, exogg, rho=-0.108136) #-0.1335859) from R
res_g2 = mod_g2.iterative_fit(maxiter=5)
#print res_g2.params
rho = -0.108136
# coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
partable = np.array([
[-9.50990, 0.990456, -9.602, 3.65e-018, -11.4631, -7.55670], # ***
[ 4.37040, 0.208146, 21.00, 2.93e-052, 3.95993, 4.78086], # ***
[-0.579253, 0.268009, -2.161, 0.0319, -1.10777, -0.0507346]]) # **
#Statistics based on the rho-differenced data:
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.113973),
endog_std = ("S.D. dependent var", 18.67447),
ssr = ("Sum squared resid", 22530.90),
mse_resid_sqrt = ("S.E. of regression", 10.66735),
rsquared = ("R-squared", 0.676973),
rsquared_adj = ("Adjusted R-squared", 0.673710),
fvalue = ("F(2, 198)", 221.0475),
f_pvalue = ("P-value(F)", 3.56e-51),
resid_acf1 = ("rho", -0.003481),
dw = ("Durbin-Watson", 1.993858))
#fstatistic, p-value, df1, df2
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
reset_3 = [5.248951, 0.023, 1, 198, "f"]
#LM-statistic, p-value, df
arch_4 = [7.30776, 0.120491, 4, "chi2"]
#multicollinearity
vif = [1.002, 1.002]
cond_1norm = 6862.0664
determinant = 1.0296049e+009
reciprocal_condition_number = 0.013819244
#Chi-square(2): test-statistic, pvalue, df
normality = [20.2792, 3.94837e-005, 2]
#tests
res = res_g1 #with rho from Gretl
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 6)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=4)
assert_allclose(res.f_pvalue,
result_gretl_g1['f_pvalue'][1],
rtol=1e-2)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=4)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
#tests
res = res_g2 #with estimated rho
#estimated lag coefficient
assert_almost_equal(res.model.rho, rho, decimal=3)
#basic
assert_almost_equal(res.params, partable[:,0], 4)
assert_almost_equal(res.bse, partable[:,1], 3)
assert_almost_equal(res.tvalues, partable[:,2], 2)
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
#assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
#assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
#assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=0)
assert_almost_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], decimal=6)
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(2,4))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(2,4))
#arch
#sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.wresid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=1)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=2)
'''
Performing iterative calculation of rho...
ITER RHO ESS
1 -0.10734 22530.9
2 -0.10814 22530.9
Model 4: Cochrane-Orcutt, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
rho = -0.108136
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.50990 0.990456 -9.602 3.65e-018 ***
ds_l_realgdp 4.37040 0.208146 21.00 2.93e-052 ***
realint_1 -0.579253 0.268009 -2.161 0.0319 **
Statistics based on the rho-differenced data:
Mean dependent var 3.113973 S.D. dependent var 18.67447
Sum squared resid 22530.90 S.E. of regression 10.66735
R-squared 0.676973 Adjusted R-squared 0.673710
F(2, 198) 221.0475 P-value(F) 3.56e-51
rho -0.003481 Durbin-Watson 1.993858
'''
'''
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023:
'''
'''
Test for ARCH of order 4
coefficient std. error t-ratio p-value
--------------------------------------------------------
alpha(0) 97.0386 20.3234 4.775 3.56e-06 ***
alpha(1) 0.176114 0.0714698 2.464 0.0146 **
alpha(2) -0.0488339 0.0724981 -0.6736 0.5014
alpha(3) -0.0705413 0.0737058 -0.9571 0.3397
alpha(4) 0.0384531 0.0725763 0.5298 0.5968
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491:
'''
'''
Variance Inflation Factors
Minimum possible value = 1.0
Values > 10.0 may indicate a collinearity problem
ds_l_realgdp 1.002
realint_1 1.002
VIF(j) = 1/(1 - R(j)^2), where R(j) is the multiple correlation coefficient
between variable j and the other independent variables
Properties of matrix X'X:
1-norm = 6862.0664
Determinant = 1.0296049e+009
Reciprocal condition number = 0.013819244
'''
'''
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 7.30776
with p-value = P(Chi-square(4) > 7.30776) = 0.120491
Test of common factor restriction -
Null hypothesis: restriction is acceptable
Test statistic: F(2, 195) = 0.426391
with p-value = P(F(2, 195) > 0.426391) = 0.653468
Test for normality of residual -
Null hypothesis: error is normally distributed
Test statistic: Chi-square(2) = 20.2792
with p-value = 3.94837e-005:
'''
#no idea what this is
'''
Augmented regression for common factor test
OLS, using observations 1959:3-2009:3 (T = 201)
Dependent variable: ds_l_realinv
coefficient std. error t-ratio p-value
---------------------------------------------------------------
const -10.9481 1.35807 -8.062 7.44e-014 ***
ds_l_realgdp 4.28893 0.229459 18.69 2.40e-045 ***
realint_1 -0.662644 0.334872 -1.979 0.0492 **
ds_l_realinv_1 -0.108892 0.0715042 -1.523 0.1294
ds_l_realgdp_1 0.660443 0.390372 1.692 0.0923 *
realint_2 0.0769695 0.341527 0.2254 0.8219
Sum of squared residuals = 22432.8
Test of common factor restriction
Test statistic: F(2, 195) = 0.426391, with p-value = 0.653468
'''
################ with OLS, HAC errors
#Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
#Dependent variable: ds_l_realinv
#HAC standard errors, bandwidth 4 (Bartlett kernel)
#coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
#for confidence interval t(199, 0.025) = 1.972
partable = np.array([
[-9.48167, 1.17709, -8.055, 7.17e-014, -11.8029, -7.16049], # ***
[4.37422, 0.328787, 13.30, 2.62e-029, 3.72587, 5.02258], #***
[-0.613997, 0.293619, -2.091, 0.0378, -1.19300, -0.0349939]]) # **
result_gretl_g1 = dict(
endog_mean = ("Mean dependent var", 3.257395),
endog_std = ("S.D. dependent var", 18.73915),
ssr = ("Sum squared resid", 22799.68),
mse_resid_sqrt = ("S.E. of regression", 10.70380),
rsquared = ("R-squared", 0.676978),
rsquared_adj = ("Adjusted R-squared", 0.673731),
fvalue = ("F(2, 199)", 90.79971),
f_pvalue = ("P-value(F)", 9.53e-29),
llf = ("Log-likelihood", -763.9752),
aic = ("Akaike criterion", 1533.950),
bic = ("Schwarz criterion", 1543.875),
hqic = ("Hannan-Quinn", 1537.966),
resid_acf1 = ("rho", -0.107341),
dw = ("Durbin-Watson", 2.213805))
linear_logs = [1.68351, 0.430953, 2, "chi2"]
#for logs: dropping 70 nan or incomplete observations, T=133
#(res_ols.model.exog <=0).any(1).sum() = 69 ?not 70
linear_squares = [7.52477, 0.0232283, 2, "chi2"]
#Autocorrelation, Breusch-Godfrey test for autocorrelation up to order 4
lm_acorr4 = [1.17928, 0.321197, 4, 195, "F"]
lm2_acorr4 = [4.771043, 0.312, 4, "chi2"]
acorr_ljungbox4 = [5.23587, 0.264, 4, "chi2"]
#break
cusum_Harvey_Collier = [0.494432, 0.621549, 198, "t"] #stats.t.sf(0.494432, 198)*2
#see cusum results in files
break_qlr = [3.01985, 0.1, 3, 196, "maxF"] #TODO check this, max at 2001:4
break_chow = [13.1897, 0.00424384, 3, "chi2"] # break at 1984:1
arch_4 = [3.43473, 0.487871, 4, "chi2"]
normality = [23.962, 0.00001, 2, "chi2"]
het_white = [33.503723, 0.000003, 5, "chi2"]
het_breusch_pagan = [1.302014, 0.521520, 2, "chi2"] #TODO: not available
het_breusch_pagan_konker = [0.709924, 0.701200, 2, "chi2"]
reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
reset_2 = [7.268492, 0.00762, 1, 198, "f"]
reset_3 = [5.248951, 0.023, 1, 198, "f"] #not available
cond_1norm = 5984.0525
determinant = 7.1087467e+008
reciprocal_condition_number = 0.013826504
vif = [1.001, 1.001]
names = 'date residual leverage influence DFFITS'.split()
cur_dir = os.path.abspath(os.path.dirname(__file__))
fpath = os.path.join(cur_dir, 'results/leverage_influence_ols_nostars.txt')
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=1,
converters={0:lambda s: s})
#either numpy 1.6 or python 3.2 changed behavior
if np.isnan(lev[-1]['f1']):
lev = np.genfromtxt(fpath, skip_header=3, skip_footer=2,
converters={0:lambda s: s})
lev.dtype.names = names
res = res_ols #for easier copying
cov_hac = sw.cov_hac_simple(res, nlags=4, use_correction=False)
bse_hac = sw.se_cov(cov_hac)
assert_almost_equal(res.params, partable[:,0], 5)
assert_almost_equal(bse_hac, partable[:,1], 5)
#TODO
assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=4) #not in gretl
assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=6) #FAIL
assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=6) #FAIL
assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
#f-value is based on cov_hac I guess
#res2 = res.get_robustcov_results(cov_type='HC1')
# TODO: fvalue differs from Gretl, trying any of the HCx
#assert_almost_equal(res2.fvalue, result_gretl_g1['fvalue'][1], decimal=0) #FAIL
#assert_approx_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], significant=1) #FAIL
#assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO
c = oi.reset_ramsey(res, degree=2)
compare_ftest(c, reset_2, decimal=(6,5))
c = oi.reset_ramsey(res, degree=3)
compare_ftest(c, reset_2_3, decimal=(6,5))
linear_sq = smsdia.linear_lm(res.resid, res.model.exog)
assert_almost_equal(linear_sq[0], linear_squares[0], decimal=6)
assert_almost_equal(linear_sq[1], linear_squares[1], decimal=7)
hbpk = smsdia.het_breuschpagan(res.resid, res.model.exog)
assert_almost_equal(hbpk[0], het_breusch_pagan_konker[0], decimal=6)
assert_almost_equal(hbpk[1], het_breusch_pagan_konker[1], decimal=6)
hw = smsdia.het_white(res.resid, res.model.exog)
assert_almost_equal(hw[:2], het_white[:2], 6)
#arch
#sm_arch = smsdia.acorr_lm(res.resid**2, maxlag=4, autolag=None)
sm_arch = smsdia.het_arch(res.resid, nlags=4)
assert_almost_equal(sm_arch[0], arch_4[0], decimal=5)
assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)
vif2 = [oi.variance_inflation_factor(res.model.exog, k) for k in [1,2]]
infl = oi.OLSInfluence(res_ols)
#print np.max(np.abs(lev['DFFITS'] - infl.dffits[0]))
#print np.max(np.abs(lev['leverage'] - infl.hat_matrix_diag))
#print np.max(np.abs(lev['influence'] - infl.influence)) #just added this based on Gretl
#just rough test, low decimal in Gretl output,
assert_almost_equal(lev['residual'], res.resid, decimal=3)
assert_almost_equal(lev['DFFITS'], infl.dffits[0], decimal=3)
assert_almost_equal(lev['leverage'], infl.hat_matrix_diag, decimal=3)
assert_almost_equal(lev['influence'], infl.influence, decimal=4)
def test_GLSARlag():
#test that results for lag>1 is close to lag=1, and smaller ssr
from statsmodels.datasets import macrodata
d2 = macrodata.load_pandas().data
g_gdp = 400*np.diff(np.log(d2['realgdp'].values))
g_inv = 400*np.diff(np.log(d2['realinv'].values))
exogg = add_constant(np.c_[g_gdp, d2['realint'][:-1].values], prepend=False)
mod1 = GLSAR(g_inv, exogg, 1)
res1 = mod1.iterative_fit(5)
mod4 = GLSAR(g_inv, exogg, 4)
res4 = mod4.iterative_fit(10)
assert_array_less(np.abs(res1.params / res4.params - 1), 0.03)
assert_array_less(res4.ssr, res1.ssr)
assert_array_less(np.abs(res4.bse / res1.bse) - 1, 0.015)
assert_array_less(np.abs((res4.fittedvalues / res1.fittedvalues - 1).mean()),
0.015)
assert_equal(len(mod4.rho), 4)
if __name__ == '__main__':
t = TestGLSARGretl()
t.test_all()
'''
Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: ds_l_realinv
HAC standard errors, bandwidth 4 (Bartlett kernel)
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.48167 1.17709 -8.055 7.17e-014 ***
ds_l_realgdp 4.37422 0.328787 13.30 2.62e-029 ***
realint_1 -0.613997 0.293619 -2.091 0.0378 **
Mean dependent var 3.257395 S.D. dependent var 18.73915
Sum squared resid 22799.68 S.E. of regression 10.70380
R-squared 0.676978 Adjusted R-squared 0.673731
F(2, 199) 90.79971 P-value(F) 9.53e-29
Log-likelihood -763.9752 Akaike criterion 1533.950
Schwarz criterion 1543.875 Hannan-Quinn 1537.966
rho -0.107341 Durbin-Watson 2.213805
QLR test for structural break -
Null hypothesis: no structural break
Test statistic: max F(3, 196) = 3.01985 at observation 2001:4
(10 percent critical value = 4.09)
Non-linearity test (logs) -
Null hypothesis: relationship is linear
Test statistic: LM = 1.68351
with p-value = P(Chi-square(2) > 1.68351) = 0.430953
Non-linearity test (squares) -
Null hypothesis: relationship is linear
Test statistic: LM = 7.52477
with p-value = P(Chi-square(2) > 7.52477) = 0.0232283
LM test for autocorrelation up to order 4 -
Null hypothesis: no autocorrelation
Test statistic: LMF = 1.17928
with p-value = P(F(4,195) > 1.17928) = 0.321197
CUSUM test for parameter stability -
Null hypothesis: no change in parameters
Test statistic: Harvey-Collier t(198) = 0.494432
with p-value = P(t(198) > 0.494432) = 0.621549
Chow test for structural break at observation 1984:1 -
Null hypothesis: no structural break
Asymptotic test statistic: Chi-square(3) = 13.1897
with p-value = 0.00424384
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 3.43473
with p-value = P(Chi-square(4) > 3.43473) = 0.487871:
#ANOVA
Analysis of Variance:
Sum of squares df Mean square
Regression 47782.7 2 23891.3
Residual 22799.7 199 114.571
Total 70582.3 201 351.156
R^2 = 47782.7 / 70582.3 = 0.676978
F(2, 199) = 23891.3 / 114.571 = 208.528 [p-value 1.47e-049]
#LM-test autocorrelation
Breusch-Godfrey test for autocorrelation up to order 4
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 0.0640964 1.06719 0.06006 0.9522
ds_l_realgdp -0.0456010 0.217377 -0.2098 0.8341
realint_1 0.0511769 0.293136 0.1746 0.8616
uhat_1 -0.104707 0.0719948 -1.454 0.1475
uhat_2 -0.00898483 0.0742817 -0.1210 0.9039
uhat_3 0.0837332 0.0735015 1.139 0.2560
uhat_4 -0.0636242 0.0737363 -0.8629 0.3893
Unadjusted R-squared = 0.023619
Test statistic: LMF = 1.179281,
with p-value = P(F(4,195) > 1.17928) = 0.321
Alternative statistic: TR^2 = 4.771043,
with p-value = P(Chi-square(4) > 4.77104) = 0.312
Ljung-Box Q' = 5.23587,
with p-value = P(Chi-square(4) > 5.23587) = 0.264:
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023
#heteroscedasticity White
White's test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 104.920 21.5848 4.861 2.39e-06 ***
ds_l_realgdp -29.7040 6.24983 -4.753 3.88e-06 ***
realint_1 -6.93102 6.95607 -0.9964 0.3203
sq_ds_l_realg 4.12054 0.684920 6.016 8.62e-09 ***
X2_X3 2.89685 1.38571 2.091 0.0379 **
sq_realint_1 0.662135 1.10919 0.5970 0.5512
Unadjusted R-squared = 0.165860
Test statistic: TR^2 = 33.503723,
with p-value = P(Chi-square(5) > 33.503723) = 0.000003:
#heteroscedasticity Breusch-Pagan (original)
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 1.09468 0.192281 5.693 4.43e-08 ***
ds_l_realgdp -0.0323119 0.0386353 -0.8363 0.4040
realint_1 0.00410778 0.0512274 0.08019 0.9362
Explained sum of squares = 2.60403
Test statistic: LM = 1.302014,
with p-value = P(Chi-square(2) > 1.302014) = 0.521520
#heteroscedasticity Breusch-Pagan Koenker
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2 (Koenker robust variant)
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 10.6870 21.7027 0.4924 0.6230
ds_l_realgdp -3.64704 4.36075 -0.8363 0.4040
realint_1 0.463643 5.78202 0.08019 0.9362
Explained sum of squares = 33174.2
Test statistic: LM = 0.709924,
with p-value = P(Chi-square(2) > 0.709924) = 0.701200
########## forecast
#forecast mean y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 2.946312 -22.987904 - -11.367905
2008:4 -27.665860 -36.294434 3.036851 -42.282972 - -30.305896
2009:1 -70.239280 -44.018178 4.007017 -51.919841 - -36.116516
2009:2 -27.024588 -12.284842 1.427414 -15.099640 - -9.470044
2009:3 8.078897 4.483669 1.315876 1.888819 - 7.078520
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
#forecast actual y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 11.101892 -39.070353 - 4.714544
2008:4 -27.665860 -36.294434 11.126262 -58.234939 - -14.353928
2009:1 -70.239280 -44.018178 11.429236 -66.556135 - -21.480222
2009:2 -27.024588 -12.284842 10.798554 -33.579120 - 9.009436
2009:3 8.078897 4.483669 10.784377 -16.782652 - 25.749991
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
'''
| 1.820313 | 2 |
core/views.py | tweeprint/api.tweeprint.com | 1 | 5684 | <gh_stars>1-10
import requests
import django.contrib.auth as auth
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, JsonResponse, Http404
from django.contrib.auth.decorators import login_required
from django.core.serializers import serialize
from core.serializers import *
from core.models import *
from core.secrets import API_TOKEN, STRIPE_API_KEY
import json
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import get_object_or_404
def get_category(request, category):
category = serialize('json', Tweeprint.objects.filter(category_slug=category), fields=('id', 'date_added', 'link', 'tweet_id', 'tweet_json', 'score', 'category', 'category_slug'))
return HttpResponse(category, content_type="application/json")
def get_categories(request):
categories = [t[0] for t in Tweeprint.CHOICES]
if request.method == 'GET':
return JsonResponse(categories, safe=False)
def get_used_categories(request):
used_categories = {t.category_slug: {'category': t.category, 'slug': t.category_slug} for t in Tweeprint.objects.all()}.values()
if request.method == 'GET':
return JsonResponse(list(used_categories), safe=False)
def get_tweeprints(request):
if request.method == 'GET':
tweeprints = serialize('json', Tweeprint.objects.all(), fields=('id', 'date_added', 'link', 'tweet_id', 'tweet_json', 'score', 'category', 'category_slug'))
return HttpResponse(tweeprints, content_type="application/json")
def get_most_recent(request):
if request.method == 'GET':
tweeprints = serialize('json', Tweeprint.objects.all().order_by('-date_added'), fields=('id', 'date_added', 'link', 'tweet_id', 'tweet_json', 'score', 'category', 'category_slug'))
return HttpResponse(tweeprints, content_type="application/json")
def get_most_popular(request):
if request.method == 'GET':
tweeprints = serialize('json', Tweeprint.objects.all().order_by('-score'), fields=('id', 'date_added', 'link', 'tweet_id', 'tweet_json', 'score', 'category', 'category_slug'))
return HttpResponse(tweeprints, content_type="application/json")
@csrf_exempt
def submit(request):
if request.method == 'POST':
form = request.body
json_data = json.loads(request.body)
try:
tweeprint = Tweeprint.objects.create(link=str(json_data['link']), category=json_data['category'])
except Exception as e:
print(e)
return HttpResponse('Submitted!')
return HttpResponse("POST not made") | 2.03125 | 2 |
src/framed/bioreactor/__init__.py | cdanielmachado/framed | 25 | 5685 | <gh_stars>10-100
from __future__ import absolute_import
__author__ = 'kaizhuang'
"""
Package implementing features for simulating bioreactor operation.
"""
from .base import Organism, Bioreactor
from .bioreactors import ANAEROBIC, AEROBIC, MICROAEROBIC
from .bioreactors import Bioreactor_ox, IdealBatch, IdealFedbatch
from framed.bioreactor.dfba import *
| 1.15625 | 1 |
shared/templates/coreos_kernel_option/template.py | deperrone/content | 1,138 | 5686 | <reponame>deperrone/content<filename>shared/templates/coreos_kernel_option/template.py
from ssg.utils import parse_template_boolean_value
def preprocess(data, lang):
data["arg_negate"] = parse_template_boolean_value(data, parameter="arg_negate", default_value=False)
data["arg_is_regex"] = parse_template_boolean_value(data, parameter="arg_is_regex", default_value=False)
return data
| 1.929688 | 2 |
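A self-contained sketch of the preprocess hook above. parse_template_boolean_value is stubbed here with the behaviour its call signature suggests (fall back to default_value when the key is missing, normalise string booleans); the real implementation lives in ssg.utils and may differ.

# Stub standing in for ssg.utils.parse_template_boolean_value (assumed behaviour).
def parse_template_boolean_value(data, parameter, default_value):
    value = data.get(parameter, default_value)
    if isinstance(value, str):
        return value.strip().lower() in ("true", "yes", "1")
    return bool(value)

def preprocess(data, lang):
    data["arg_negate"] = parse_template_boolean_value(data, parameter="arg_negate", default_value=False)
    data["arg_is_regex"] = parse_template_boolean_value(data, parameter="arg_is_regex", default_value=False)
    return data

# Template booleans often arrive as strings and get normalised to real bools.
print(preprocess({"arg_name": "audit", "arg_negate": "true"}, lang="oval"))
# -> {'arg_name': 'audit', 'arg_negate': True, 'arg_is_regex': False}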
pondus/backends/__init__.py | enicklas/pondus | 1 | 5687 | # -*- coding: UTF-8 -*-
"""
This file is part of Pondus, a personal weight manager.
Copyright (C) 2011 <NAME> <<EMAIL>>
This program is free software licensed under the MIT license. For details
see LICENSE or http://www.opensource.org/licenses/mit-license.php
"""
__all__ = ['csv_backend', 'sportstracker_backend', 'xml_backend',
'xml_backend_old']
| 0.84375 | 1 |
setup.py | specialprocedures/chpy | 0 | 5688 | <filename>setup.py
import pathlib
from setuptools import find_packages, setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setup(
name="chpy",
version="0.1.1",
description="Build networks from the Companies House API",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/specialprocedures/chpy",
author="<NAME>",
# author_email="<EMAIL>",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
packages=find_packages(exclude=["collections", "time", "math", "re", "os"]),
include_package_data=True,
# install_requires=["networkx", "pandas", "progressbar", "fuzzywuzzy",
# "os", "requests", "math", "time", "collections", "re"]
)
| 1.640625 | 2 |
src/sentry/eventtypes/error.py | boblail/sentry | 0 | 5689 | <gh_stars>0
from __future__ import absolute_import
import six
from sentry.utils.safe import get_path, trim
from sentry.utils.strings import truncatechars
from .base import BaseEvent
def get_crash_location(exception, platform=None):
default = None
for frame in reversed(get_path(exception, 'stacktrace', 'frames', filter=True) or ()):
fn = frame.get('filename') or frame.get('abs_path')
if fn:
func = frame.get('function')
if func is not None:
from sentry.interfaces.stacktrace import trim_function_name
func = trim_function_name(func, frame.get('platform') or platform)
if frame.get('in_app'):
return fn, func
if default is None:
default = fn, func
return default
class ErrorEvent(BaseEvent):
key = 'error'
def has_metadata(self, data):
exception = get_path(data, 'exception', 'values', -1)
return exception and any(v is not None for v in six.itervalues(exception))
def get_metadata(self, data):
exception = get_path(data, 'exception', 'values', -1)
if not exception:
return {}
loc = get_crash_location(exception, data.get('platform'))
rv = {
'value': trim(get_path(exception, 'value', default=''), 1024),
}
# If the exception mechanism indicates a synthetic exception we do not
# want to record the type and value into the metadata.
if not get_path(exception, 'mechanism', 'synthetic'):
rv['type'] = trim(get_path(exception, 'type', default='Error'), 128)
# Attach crash location if available
if loc is not None:
fn, func = loc
if fn:
rv['filename'] = fn
if func:
rv['function'] = func
return rv
def get_title(self, metadata):
ty = metadata.get('type')
if ty is None:
return metadata.get('function') or '<unknown>'
if not metadata.get('value'):
return ty
return u'{}: {}'.format(
ty,
truncatechars(metadata['value'].splitlines()[0], 100),
)
def get_location(self, metadata):
return metadata.get('filename')
| 2.09375 | 2 |
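A standalone illustration of the metadata and title logic above. The event payload is invented, and the helper only approximates ErrorEvent.get_title (type plus the first line of the value, truncated) without the sentry utilities.

# Invented metadata in the shape ErrorEvent.get_metadata produces for an
# in-app crash frame.
metadata = {
    "type": "ZeroDivisionError",
    "value": "division by zero\n(raised while computing a rate)",
    "filename": "app/billing.py",
    "function": "compute_rate",
}

def title_from_metadata(md, max_len=100):
    # Approximates ErrorEvent.get_title: fall back to the function name,
    # then the bare type, otherwise "Type: first line of value".
    ty = md.get("type")
    if ty is None:
        return md.get("function") or "<unknown>"
    if not md.get("value"):
        return ty
    first_line = md["value"].splitlines()[0]
    return "{}: {}".format(ty, first_line[:max_len])

print(title_from_metadata(metadata))  # ZeroDivisionError: division by zero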
keras_en_parser_and_analyzer/library/tests/test_detect_date.py | Sultan91/keras-english-resume-parser-and-analyzer | 0 | 5690 | from unittest import TestCase
from datetime import date
from keras_en_parser_and_analyzer.library.pipmp_my_cv_classify import detect_date
class DetectDate(TestCase):
def test_detect_date(self):
dates_to_test = ['10-1990', '09/12/2020', 'jan 1990', 'feb 2012', '9-12-2020']
res = detect_date(dates_to_test[0])
self.assertEqual(10, res.month)
self.assertEqual(1990, res.year)
res = detect_date(dates_to_test[1])
self.assertEqual(9, res.month)
self.assertEqual(2020, res.year)
res = detect_date(dates_to_test[2])
self.assertEqual(1, res.month)
self.assertEqual(1990, res.year)
res = detect_date(dates_to_test[3])
self.assertEqual(2, res.month)
self.assertEqual(2012, res.year)
res = detect_date(dates_to_test[4])
self.assertEqual(9, res.month)
self.assertEqual(2020, res.year)
| 3 | 3 |
capirca/lib/ipset.py | google-admin/capirca | 604 | 5691 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Ipset iptables generator. This is a subclass of Iptables generator.
ipset is a system inside the Linux kernel, which can very efficiently store
and match IPv4 and IPv6 addresses. This can be used to dramatically increase
performance of an iptables firewall.
"""
import string
from capirca.lib import iptables
from capirca.lib import nacaddr
class Error(Exception):
"""Base error class."""
class Term(iptables.Term):
"""Single Ipset term representation."""
_PLATFORM = 'ipset'
_SET_MAX_LENGTH = 31
_POSTJUMP_FORMAT = None
_PREJUMP_FORMAT = None
_TERM_FORMAT = None
_COMMENT_FORMAT = string.Template(
'-A $filter -m comment --comment "$comment"')
_FILTER_TOP_FORMAT = string.Template('-A $filter')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# This stores tuples of set name and set contents, keyed by direction.
# For example:
# { 'src': ('set_name', [ipaddr object, ipaddr object]),
# 'dst': ('set_name', [ipaddr object, ipaddr object]) }
self.addr_sets = {}
def _CalculateAddresses(self, src_addr_list, src_addr_exclude_list,
dst_addr_list, dst_addr_exclude_list):
"""Calculates source and destination address list for a term.
Since ipset is very efficient at matching large number of
addresses, we never return any exclude addresses. Instead
least positive match is calculated for both source and destination
addresses.
For source and destination address list, three cases are possible.
First case is when there are no addresses. In that case we return
_all_ips.
Second case is when there is strictly one address. In that case,
we optimize by not generating a set, and it's then the only
element of returned set.
Third case is when there are more than one address in a set.
In that case we generate a set and also return _all_ips. Note the
difference to the first case where no set is actually generated.
Args:
src_addr_list: source address list of the term.
src_addr_exclude_list: source address exclude list of the term.
dst_addr_list: destination address list of the term.
dst_addr_exclude_list: destination address exclude list of the term.
Returns:
tuple containing source address list, source address exclude list,
destination address list, destination address exclude list in
that order.
"""
target_af = self.AF_MAP[self.af]
src_addr_list = self._CalculateAddrList(src_addr_list,
src_addr_exclude_list, target_af,
'src')
dst_addr_list = self._CalculateAddrList(dst_addr_list,
dst_addr_exclude_list, target_af,
'dst')
return (src_addr_list, [], dst_addr_list, [])
def _CalculateAddrList(self, addr_list, addr_exclude_list,
target_af, direction):
"""Calculates and stores address list for target AF and direction.
Args:
addr_list: address list.
addr_exclude_list: address exclude list of the term.
target_af: target address family.
direction: direction in which address list will be used.
Returns:
calculated address list.
"""
if not addr_list:
addr_list = [self._all_ips]
addr_list = [addr for addr in addr_list if addr.version == target_af]
if addr_exclude_list:
addr_exclude_list = [addr_exclude for addr_exclude in addr_exclude_list if
addr_exclude.version == target_af]
addr_list = nacaddr.ExcludeAddrs(addr_list, addr_exclude_list)
if len(addr_list) > 1:
set_name = self._GenerateSetName(self.term.name, direction)
self.addr_sets[direction] = (set_name, addr_list)
addr_list = [self._all_ips]
return addr_list
def _GenerateAddressStatement(self, src_addr, dst_addr):
"""Returns the address section of an individual iptables rule.
See _CalculateAddresses documentation. Three cases are possible here,
and they map directly to cases in _CalculateAddresses.
First, there can be no addresses for a direction (value is _all_ips then)
In that case we return an empty string.
Second, there can be strictly one address. In that case we return a single
address match (-s or -d).
The third case is when the value is _all_ips but also the set for the
particular direction is present. That's when we return a set match.
Args:
src_addr: ipaddr address or network object with source
address of the rule.
dst_addr: ipaddr address or network object with destination
address of the rule.
Returns:
tuple containing source and destination address statement, in
that order.
"""
src_addr_stmt = ''
dst_addr_stmt = ''
if src_addr and dst_addr:
if src_addr == self._all_ips:
if 'src' in self.addr_sets:
src_addr_stmt = ('-m set --match-set %s src' %
self.addr_sets['src'][0])
else:
src_addr_stmt = '-s %s/%d' % (src_addr.network_address,
src_addr.prefixlen)
if dst_addr == self._all_ips:
if 'dst' in self.addr_sets:
dst_addr_stmt = ('-m set --match-set %s dst' %
self.addr_sets['dst'][0])
else:
dst_addr_stmt = '-d %s/%d' % (dst_addr.network_address,
dst_addr.prefixlen)
return (src_addr_stmt, dst_addr_stmt)
def _GenerateSetName(self, term_name, suffix):
if self.af == 'inet6':
suffix += '-v6'
if len(term_name) + len(suffix) + 1 > self._SET_MAX_LENGTH:
set_name_max_length = self._SET_MAX_LENGTH - len(suffix) - 1
term_name = term_name[:set_name_max_length]
return '%s-%s' % (term_name, suffix)
class Ipset(iptables.Iptables):
"""Ipset generator."""
_PLATFORM = 'ipset'
_SET_TYPE = 'hash:net'
SUFFIX = '.ips'
_TERM = Term
_MARKER_BEGIN = '# begin:ipset-rules'
_MARKER_END = '# end:ipset-rules'
_GOOD_OPTIONS = ['nostate', 'abbreviateterms', 'truncateterms', 'noverbose',
'exists']
# TODO(vklimovs): some not trivial processing is happening inside this
# __str__, replace with explicit method
def __str__(self):
# Actual rendering happens in __str__, so it has to be called
# before we do set specific part.
iptables_output = super().__str__()
output = []
output.append(self._MARKER_BEGIN)
for (_, _, _, _, terms) in self.iptables_policies:
for term in terms:
output.extend(self._GenerateSetConfig(term))
output.append(self._MARKER_END)
output.append(iptables_output)
return '\n'.join(output)
def _GenerateSetConfig(self, term):
"""Generates set configuration for supplied term.
Args:
term: input term.
Returns:
string that is configuration of supplied term.
"""
output = []
c_str = 'create'
a_str = 'add'
if 'exists' in self.filter_options:
c_str = c_str + ' -exist'
a_str = a_str + ' -exist'
for direction in sorted(term.addr_sets, reverse=True):
set_name, addr_list = term.addr_sets[direction]
set_hashsize = 1 << len(addr_list).bit_length()
set_maxelem = set_hashsize
output.append('%s %s %s family %s hashsize %i maxelem %i' %
(c_str,
set_name,
self._SET_TYPE,
term.af,
set_hashsize,
set_maxelem))
for address in addr_list:
output.append('%s %s %s' % (a_str, set_name, address))
return output
| 2.09375 | 2 |
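A standalone sketch of the three-way address handling described in _CalculateAddresses and of the set output from _GenerateSetConfig, using the stdlib ipaddress module instead of capirca's nacaddr so it runs without the policy framework; the set naming and hashsize rule mirror the code above, but this is an illustration, not the real generator.

import ipaddress

ALL_IPS = ipaddress.ip_network("0.0.0.0/0")
SET_MAX_LENGTH = 31

def calculate_addr_list(term_name, direction, addr_list, addr_sets):
    # Case 1: no addresses -> match everything, no set generated.
    if not addr_list:
        return [ALL_IPS]
    # Case 2: exactly one address -> plain -s/-d match, no set generated.
    if len(addr_list) == 1:
        return addr_list
    # Case 3: several addresses -> remember an ipset and return ALL_IPS so the
    # iptables rule carries "-m set --match-set <name> <direction>" instead.
    set_name = "%s-%s" % (term_name[:SET_MAX_LENGTH - len(direction) - 1], direction)
    addr_sets[direction] = (set_name, addr_list)
    return [ALL_IPS]

addr_sets = {}
addrs = [ipaddress.ip_network(a) for a in ("10.0.0.0/8", "192.168.0.0/16")]
calculate_addr_list("allow-internal-traffic", "src", addrs, addr_sets)

for direction, (set_name, members) in addr_sets.items():
    hashsize = 1 << len(members).bit_length()  # as in _GenerateSetConfig
    print("create %s hash:net family inet hashsize %d maxelem %d"
          % (set_name, hashsize, hashsize))
    for addr in members:
        print("add %s %s" % (set_name, addr))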
straxen/analyses/records_matrix.py | zhut19/straxen | 14 | 5692 | import warnings
import numba
import numpy as np
import strax
import straxen
DEFAULT_MAX_SAMPLES = 20_000
@straxen.mini_analysis(requires=('records',),
warn_beyond_sec=10,
default_time_selection='touching')
def records_matrix(records, time_range, seconds_range, config, to_pe,
max_samples=DEFAULT_MAX_SAMPLES,
ignore_max_sample_warning=False):
"""Return (wv_matrix, times, pms)
- wv_matrix: (n_samples, n_pmt) array with per-PMT waveform intensity in PE/ns
- times: time labels in seconds (corr. to rows)
- pmts: PMT numbers (corr. to columns)
Both times and pmts have one extra element.
:param max_samples: Maximum number of time samples. If window and dt
conspire to exceed this, waveforms will be downsampled.
:param ignore_max_sample_warning: If True, suppress warning when this happens.
Example:
wvm, ts, ys = st.records_matrix(run_id, seconds_range=(1., 1.00001))
plt.pcolormesh(ts, ys, wvm.T,
norm=matplotlib.colors.LogNorm())
plt.colorbar(label='Intensity [PE / ns]')
"""
if len(records):
dt = records[0]['dt']
samples_per_record = len(records[0]['data'])
else:
# Defaults here do not matter, nothing will be plotted anyway
dt, samples_per_record = 10, 110
record_duration = samples_per_record * dt
window = time_range[1] - time_range[0]
if window / dt > max_samples:
with np.errstate(divide='ignore', invalid='ignore'):
# Downsample. New dt must be
# a) multiple of old dt
dts = np.arange(0, record_duration + dt, dt).astype(np.int)
# b) divisor of record duration
dts = dts[record_duration / dts % 1 == 0]
# c) total samples < max_samples
dts = dts[window / dts < max_samples]
if len(dts):
# Pick lowest dt that satisfies criteria
dt = dts.min()
else:
# Records will be downsampled to single points
dt = max(record_duration, window // max_samples)
if not ignore_max_sample_warning:
warnings.warn(f"Matrix would exceed max_samples {max_samples}, "
f"downsampling to dt = {dt} ns.")
wvm = _records_to_matrix(
records,
t0=time_range[0],
n_channels=config['n_tpc_pmts'],
dt=dt,
window=window)
wvm = wvm.astype(np.float32) * to_pe.reshape(1, -1) / dt
# Note + 1, so data for sample 0 will range from 0-1 in plot
ts = (np.arange(wvm.shape[0] + 1) * dt / int(1e9) + seconds_range[0])
ys = np.arange(wvm.shape[1] + 1)
return wvm, ts, ys
@straxen.mini_analysis(requires=('raw_records',),
warn_beyond_sec=3e-3,
default_time_selection='touching')
def raw_records_matrix(context, run_id, raw_records, time_range,
ignore_max_sample_warning=False,
max_samples=DEFAULT_MAX_SAMPLES,
**kwargs):
# Convert raw to records. We may not be able to baseline correctly
# at the start of the range due to missing zeroth fragments
records = strax.raw_to_records(raw_records)
strax.baseline(records, allow_sloppy_chunking=True)
strax.zero_out_of_bounds(records)
return context.records_matrix(run_id=run_id,
records=records,
time_range=time_range,
max_samples=max_samples,
ignore_max_sample_warning=ignore_max_sample_warning,
**kwargs)
@numba.njit
def _records_to_matrix(records, t0, window, n_channels, dt=10):
n_samples = (window // dt) + 1
# Use 32-bit integers, so downsampling saturated samples doesn't
# cause wraparounds
# TODO: amplitude bit shift!
y = np.zeros((n_samples, n_channels),
dtype=np.int32)
if not len(records):
return y
samples_per_record = len(records[0]['data'])
for r in records:
if r['channel'] > n_channels:
continue
if dt >= samples_per_record * r['dt']:
# Downsample to single sample -> store area
idx = (r['time'] - t0) // dt
if idx >= len(y):
print(len(y), idx)
raise IndexError('Despite n_samples = window // dt + 1, our '
'idx is too high?!')
y[idx, r['channel']] += r['area']
continue
# Assume out-of-bounds data has been zeroed, so we do not
# need to do r['data'][:r['length']] here.
# This simplifies downsampling.
w = r['data'].astype(np.int32)
if dt > r['dt']:
# Downsample
duration = samples_per_record * r['dt']
assert duration % dt == 0, "Cannot downsample fractionally"
# .astype here keeps numba happy ... ??
w = w.reshape(duration // dt, -1).sum(axis=1).astype(np.int32)
elif dt < r['dt']:
raise ValueError("Upsampling not yet implemented")
(r_start, r_end), (y_start, y_end) = strax.overlap_indices(
r['time'] // dt, len(w),
t0 // dt, n_samples)
# += is paranoid, data in individual channels should not overlap
# but... https://github.com/AxFoundation/strax/issues/119
y[y_start:y_end, r['channel']] += w[r_start:r_end]
return y
| 2.421875 | 2 |
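A small standalone sketch of the downsampling rule used in records_matrix above: the coarser dt must be a multiple of the record dt, a divisor of the record duration, and large enough to keep the sample count under max_samples. The numbers are illustrative.

import numpy as np

def pick_downsampled_dt(dt, samples_per_record, window, max_samples):
    record_duration = samples_per_record * dt
    if window / dt <= max_samples:
        return dt  # no downsampling needed
    # a) multiples of the original dt
    dts = np.arange(dt, record_duration + dt, dt)
    # b) divisors of the record duration
    dts = dts[record_duration % dts == 0]
    # c) coarse enough to stay below max_samples
    dts = dts[window / dts < max_samples]
    if len(dts):
        return int(dts.min())
    # otherwise collapse whole records (or more) into single samples
    return int(max(record_duration, window // max_samples))

# 10 ns samples, 110-sample records, a 1 ms window, at most 20000 samples:
print(pick_downsampled_dt(10, 110, int(1e6), 20000))  # -> 100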
bdbc/lib/python3.5/site-packages/bigchaindb_driver/crypto.py | entropyx/fiduchain-blockchain-interface | 0 | 5693 | from collections import namedtuple
from cryptoconditions import crypto
CryptoKeypair = namedtuple('CryptoKeypair', ('signing_key', 'verifying_key'))
def generate_keypair():
"""Generates a cryptographic key pair.
Returns:
:class:`~bigchaindb_driver.crypto.CryptoKeypair`: A
:obj:`collections.namedtuple` with named fields
:attr:`~bigchaindb_driver.crypto.CryptoKeypair.signing_key` and
:attr:`~bigchaindb_driver.crypto.CryptoKeypair.verifying_key`.
"""
return CryptoKeypair(
*(k.decode() for k in crypto.ed25519_generate_key_pair()))
| 3.03125 | 3 |
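Typical use of generate_keypair above; it only requires the cryptoconditions dependency, and the two fields are the named-tuple attributes defined in the module.

from bigchaindb_driver.crypto import generate_keypair

keypair = generate_keypair()
# Keys come back as str (the bytes from cryptoconditions are .decode()d above).
print("signing key:  ", keypair.signing_key)
print("verifying key:", keypair.verifying_key)

# Being a namedtuple, it also unpacks positionally in (signing, verifying) order.
signing_key, verifying_key = generate_keypair()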
reviewboard/webapi/resources/change.py | mnoorenberghe/reviewboard | 0 | 5694 | <reponame>mnoorenberghe/reviewboard
from __future__ import unicode_literals
from django.utils import six
from djblets.util.decorators import augment_method_from
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.reviews.fields import get_review_request_field
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.mixins import MarkdownFieldsMixin
from reviewboard.webapi.resources import resources
class ChangeResource(MarkdownFieldsMixin, WebAPIResource):
"""Provides information on a change made to a public review request.
A change includes, optionally, text entered by the user describing the
change, and also includes a list of fields that were changed on the
review request.
The list of fields changed are in ``fields_changed``. The keys are the
names of the fields, and the values are details on that particular
change to the field.
For ``summary``, ``description``, ``testing_done`` and ``branch`` fields,
the following detail keys will be available:
* ``old``: The old value of the field.
* ``new``: The new value of the field.
For ``diff`` fields:
* ``added``: The diff that was added.
For ``bugs_closed`` fields:
* ``old``: A list of old bugs.
* ``new``: A list of new bugs.
* ``removed``: A list of bugs that were removed, if any.
* ``added``: A list of bugs that were added, if any.
For ``file_attachments``, ``screenshots``, ``target_people`` and
``target_groups`` fields:
* ``old``: A list of old items.
* ``new``: A list of new items.
* ``removed``: A list of items that were removed, if any.
* ``added``: A list of items that were added, if any.
For ``screenshot_captions`` and ``file_captions`` fields:
* ``old``: The old caption.
* ``new``: The new caption.
* ``screenshot``: The screenshot that was updated.
"""
added_in = '1.6'
model = ChangeDescription
name = 'change'
fields = {
'id': {
'type': int,
'description': 'The numeric ID of the change description.',
},
'fields_changed': {
'type': dict,
'description': 'The fields that were changed.',
},
'text': {
'type': six.text_type,
'description': 'The description of the change written by the '
'submitter.',
'supports_text_types': True,
},
'text_type': {
'type': MarkdownFieldsMixin.TEXT_TYPES,
'description': 'The mode for the text field.',
'added_in': '2.0',
},
'timestamp': {
'type': six.text_type,
'description': 'The date and time that the change was made '
'(in YYYY-MM-DD HH:MM:SS format).',
},
}
uri_object_key = 'change_id'
model_parent_key = 'review_request'
allowed_methods = ('GET',)
mimetype_list_resource_name = 'review-request-changes'
mimetype_item_resource_name = 'review-request-change'
def serialize_fields_changed_field(self, obj, **kwargs):
review_request = obj.review_request.get()
fields_changed = {}
for field_name, data in six.iteritems(obj.fields_changed):
field_cls = get_review_request_field(field_name)
field = field_cls(review_request)
fields_changed[field.field_id] = field.serialize_change_entry(obj)
return fields_changed
def has_access_permissions(self, request, obj, *args, **kwargs):
return obj.review_request.get().is_accessible_by(request.user)
def get_queryset(self, request, *args, **kwargs):
review_request = resources.review_request.get_object(
request, *args, **kwargs)
return review_request.changedescs.filter(public=True)
@webapi_check_local_site
@augment_method_from(WebAPIResource)
def get_list(self, *args, **kwargs):
"""Returns a list of changes made on a review request."""
pass
@webapi_check_local_site
@augment_method_from(WebAPIResource)
def get(self, *args, **kwargs):
"""Returns the information on a change to a review request."""
pass
change_resource = ChangeResource()
| 1.796875 | 2 |
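An illustrative Python literal showing the fields_changed structure documented in the ChangeResource docstring above; the keys per field type follow that description, while the concrete values are invented.

# Invented example of the "fields_changed" portion of a change payload.
fields_changed = {
    "summary": {
        "old": "Fix login crash",
        "new": "Fix login crash on expired sessions",
    },
    "bugs_closed": {
        "old": ["1234"],
        "new": ["1234", "1290"],
        "added": ["1290"],
        "removed": [],
    },
    "diff": {
        "added": "Diff revision 2",
    },
}

for field, details in fields_changed.items():
    print(field, "->", ", ".join(sorted(details)))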
controllers/notes/NewNote.py | heminsatya/free_notes | 0 | 5695 | # Dependencies
from aurora import Controller, View, Forms
from models import Users, Notes
from aurora.security import login_required, get_session
from flask import request
from datetime import datetime
# The controller class
class NewNote(Controller):
# POST Method
@login_required(app='users')
def post(self):
# The required models
user = Users().read(where={'username':get_session('user')}).first()
notes = Notes()
# Form data
data = request.form
form = Forms(data)
# Valid form data
if form.validate():
# Collect form inputs
title = data.get('title')
content = data.get('content')
# Required fields
if not title or not content:
return {
'error': '<i class="fas fa-exclamation-triangle mr-1"></i> Form data is invalid!',
}, 400
# Everything is fine
# Insert new note into the database
data = {
'user_id': user['id'],
'title': title,
'content': content,
# 'date': datetime.now().strftime("%m-%d-%Y")
}
notes.create(data=data)
# Return the result
return {
'success': '<i class="fas fa-check-circle mr-1"></i> The new note created successfully!',
}, 200
# Invalid form data
else:
# Return the result
return {
'error': '<i class="fas fa-exclamation-triangle mr-1"></i> Form data is invalid!',
}, 400
# GET Method
@login_required(app='users')
def get(self):
# The required models
user = Users().read(where={'username':get_session('user')}).first()
notes = Notes().read(where={'user_id':user['id']}, order_by={'id':'DESC'}).all()
form = Forms()
return View('create', user=user, form=form)
| 2.78125 | 3 |
EDA-&-Data-Preprocessing/code.py | udayraj-gupta/ga-learner-dsmp-repo | 0 | 5696 | # --------------
#Importing header files
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#Code starts here
data = pd.read_csv(path)
data['Rating'].hist()
data = data[data['Rating']<=5]
data['Rating'].hist()
#Code ends here
# --------------
# code starts here
total_null = data.isnull().sum()
percent_null = (total_null/data.isnull().count())*100
missing_data = pd.concat([total_null,percent_null],axis=1,keys=['Total','Percentage'])
print(missing_data)
data = data.dropna()
total_null_1 = data.isnull().sum()
percent_null_1 = (total_null_1/data.isnull().count())*100
missing_data_1 = pd.concat([total_null_1,percent_null_1],axis=1,keys=['Total','Percentage'])
print(missing_data_1)
# code ends here
# --------------
#Code starts here
a = sns.catplot(x='Category',y='Rating',data=data, kind="box", height = 10)
a.set_xticklabels(rotation=90)
a.set_titles('Rating vs Category [BoxPlot]')
#Code ends here
# --------------
#Importing header files
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
#Code starts here
le = LabelEncoder()
#data['Installs'] = data['Installs'].str.replace(',','').str.replace('+','')
data['Installs'] = data['Installs'].apply(lambda x : x.replace(',','')).apply(lambda x : x.replace('+',''))
data['Installs'] =data['Installs'].astype(int)
print(data['Installs'])
data['Installs'] = le.fit_transform(data['Installs'])
a = sns.regplot(x="Installs", y="Rating" , data=data)
a.set_title('Rating vs Installs [RegPlot]')
#Code ends here
# --------------
#Code starts here
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
import seaborn as sns
#Code starts here
d=data['Price'].value_counts()
print(d)
data['Price']=data['Price'].apply(lambda x : x.replace('$',''))
d=data['Price'].value_counts()
print(d)
data['Price']=data['Price'].astype(float)
#le=LabelEncoder()
#data['Installs'] = le.fit_transform(data['Installs'])
y=sns.regplot(data=data,x='Price',y='Rating')
y.set_title('Rating vs Installs [RegPlot]')
#Code ends here
# --------------
#Code starts here
data['Genres']=data['Genres'].str.split(';').str[0]
#print(data['Genres'])
df=data[['Genres','Rating']]
gr_mean=df.groupby(['Genres'],as_index=False).mean()
gr_mean=gr_mean.sort_values(by=['Rating'])
gr_mean=pd.DataFrame(gr_mean)
print(gr_mean)#,gr_mean[-1,:])
#Code ends here
# --------------
#Code starts here
import seaborn as sns
data['Last Updated'] = pd.to_datetime(data['Last Updated'])
print(data['Last Updated'].max())
max_date=data['Last Updated'].max()
data['Last Updated Days']=max_date-data['Last Updated']
data['Last Updated Days']=data['Last Updated Days'].dt.days
sns.regplot(data=data,x='Last Updated Days',y='Rating').set_title('Rating vs Last Updated [RegPlot]')
#Code ends here
| 2.96875 | 3 |
openpnm/algorithms/ChargeConservation.py | rguan-uoft/OpenPNM | 1 | 5697 | import numpy as np
from openpnm.algorithms import ReactiveTransport
from openpnm.models.physics import generic_source_term as gst
from openpnm.utils import logging
logger = logging.getLogger(__name__)
class ChargeConservation(ReactiveTransport):
r"""
A class to enforce charge conservation in ionic transport simulations.
Parameters
----------
network : OpenPNM Network object
The network on which this algorithm operates
project : OpenPNM Project object
Either a network or a project must be specified
name : string, optional
A unique name to give the object for easier identification. If not
given, one is generated.
"""
def __init__(self, settings={}, phase=None, **kwargs):
def_set = {'phase': None,
'quantity': 'pore.potential',
'conductance': 'throat.ionic_conductance',
'charge_conservation': 'electroneutrality',
'gui': {'setup': {'phase': None,
'quantity': '',
'conductance': '',
'charge_conservation': ''},
'set_rate_BC': {'pores': None,
'values': None},
'set_value_BC': {'pores': None,
'values': None},
'set_source': {'pores': None,
'propname': ''}
}
}
super().__init__(**kwargs)
self.settings.update(def_set)
self.settings.update(settings)
if phase is not None:
self.setup(phase=phase)
def setup(self, phase=None, quantity='', conductance='',
charge_conservation=None, **kwargs):
r"""
This method takes several arguments that are essential to running the
algorithm and adds them to the settings.
Parameters
----------
phase : OpenPNM Phase object
The phase on which the algorithm is to be run.
quantity : string
(default is ``'pore.mole_fraction'``) The name of the physical
quantity to be calculated.
conductance : string
(default is ``'throat.diffusive_conductance'``) The name of the
pore-scale transport conductance values. These are typically
calculated by a model attached to a *Physics* object associated
with the given *Phase*.
charge_conservation : string
The assumption adopted to enforce charge conservation when
performing ions transport simulations (default is
"electroneutrality").
Notes
-----
Any additional arguments are added to the ``settings`` dictionary of
the object.
"""
if phase:
self.settings['phase'] = phase.name
if quantity:
self.settings['quantity'] = quantity
if conductance:
self.settings['conductance'] = conductance
if charge_conservation:
self.settings['charge_conservation'] = charge_conservation
super().setup(**kwargs)
def _charge_conservation_eq_source_term(self, e_alg):
# Source term for Poisson or charge conservation (electroneutrality) eq
phase = self.project.phases()[self.settings['phase']]
Ps = (self['pore.all'] * np.isnan(self['pore.bc_value']) *
np.isnan(self['pore.bc_rate']))
mod = gst.charge_conservation
phys = self.project.find_physics(phase=phase)
phys[0].add_model(propname='pore.charge_conservation', model=mod,
phase=phase, p_alg=self, e_alg=e_alg,
assumption=self.settings['charge_conservation'])
self.set_source(propname='pore.charge_conservation', pores=Ps)
| 2.65625 | 3 |
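A minimal usage sketch, assuming the OpenPNM 2.x API this class is written against. The ionic conductance is filled with a constant placeholder instead of a physics model, and the charge-conservation source term (which normally couples to ion-transport algorithms) is left out, so this only shows wiring up and running the algorithm.

import openpnm as op
from openpnm.algorithms import ChargeConservation  # the class defined above

pn = op.network.Cubic(shape=[5, 5, 5], spacing=1e-4)
geo = op.geometry.StickAndBall(network=pn, pores=pn.Ps, throats=pn.Ts)
phase = op.phases.GenericPhase(network=pn)
phys = op.physics.GenericPhysics(network=pn, phase=phase, geometry=geo)

# Placeholder so 'throat.ionic_conductance' exists; a real run would compute
# this from an ionic-conductance model on the physics object.
phys['throat.ionic_conductance'] = 1e-15

alg = ChargeConservation(network=pn, phase=phase)
alg.set_value_BC(pores=pn.pores('left'), values=0.05)
alg.set_value_BC(pores=pn.pores('right'), values=0.0)
alg.run()
print(alg['pore.potential'].mean())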
jno/commands/upload.py | Kosinkadink/jno | 1 | 5698 | from jno.util import interpret_configs
from jno.util import run_arduino_process
from jno.util import create_build_directory
from jno.util import get_common_parameters
from jno.util import verify_arduino_dir
from jno.util import verify_and_get_port
from jno.util import JnoException
from jno.commands.command import Command
import getopt
from colorama import Fore
class Upload(Command):
help_name = "Upload"
help_usage = "jno upload [-b, --board=] boardname [-p, --ports=] port [-v, --verbose]"
help_description = "Runs build and uploads to board. Without arguments, uses board/port defined locally/globally. " \
"If port is not defined, uses first available port. With -v, more info will be displayed during upload."
def run(self,argv,location):
jno_dict = interpret_configs()
verify_arduino_dir(jno_dict)
create_build_directory(jno_dict)
arg_list = self.perform_upload(argv,jno_dict)
run_arduino_process(arg_list)
# Create argument list for arduino build
def perform_upload(self,argv,jno_dict):
# assemble command query
# GOAL: <arduino exec> --upload <script> --board <board> --port <serial>
arg_list = [jno_dict["EXEC_SCRIPT"]]
# add common params - set pref
arg_list.extend(get_common_parameters(jno_dict))
# add upload params
arg_list.append("--upload")
arg_list.append(jno_dict["SKETCH_INO"])
try:
opts,args = getopt.getopt(argv, 'b:p:v',['board=','port=','verbose'])
except getopt.GetoptError as e:
raise JnoException(str(e))
for opt, arg in opts:
if opt in ("-b","--board"):
jno_dict["board"] = arg.strip()
elif opt in ("-p","--port"):
jno_dict["port"] = arg.strip()
elif opt in ("-v","--verbose"):
arg_list.append("--verbose")
# verify port or get first available
port = verify_and_get_port(jno_dict["port"])
if not port:
if jno_dict["port"] == "DEFAULT":
raise JnoException("no ports available")
raise JnoException("port does not exist: {}".format(jno_dict["port"]))
else:
if jno_dict["port"] == "DEFAULT":
print("{1}No port provided, using available port {0}{2}".format(port,Fore.YELLOW,Fore.RESET))
# add board params
arg_list.append("--board")
arg_list.append(self.formatBoard(jno_dict["board"],jno_dict))
# add port params
arg_list.append("--port")
arg_list.append(port)
return arg_list
| 2.484375 | 2 |
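For reference, a sketch of the argument list perform_upload assembles. The executable path, sketch path, board name and port are hypothetical, and get_common_parameters is not shown in this file, so its contribution is represented by a placeholder list.

# Hypothetical values; the real ones come from jno's local/global configs.
exec_script = "/home/user/arduino-1.8.19/arduino"
common_params = ["--pref", "sketchbook.path=/home/user/sketchbook"]  # placeholder for get_common_parameters()

arg_list = [exec_script]
arg_list.extend(common_params)
arg_list += ["--upload", "/home/user/sketchbook/blink/blink.ino",
             "--board", "arduino:avr:uno",
             "--port", "/dev/ttyACM0"]
print(" ".join(arg_list))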
modelling/inference_multi_attribute.py | rizwan09/hydra-sum | 5 | 5699 | import argparse
import json
import logging
import os
import torch
from transformers.file_utils import ModelOutput
from typing import Dict, Optional, Tuple
from torch.utils.data import DataLoader, SequentialSampler
from transformers.modeling_outputs import Seq2SeqLMOutput
import train_seq2seq_utils
import single_head_utils
import multi_head_utils
from torch import nn
from generation_utils_multi_attribute import GenerationMixinCustomCombined
from transformers import (
PreTrainedModel,
PreTrainedTokenizer,
BartConfig,
BartTokenizer
)
logger = logging.getLogger(__name__)
MODEL_CLASSES = {"bart_mult_heads_2": (BartConfig,
multi_head_utils.ConditionalGenerationCustomBartMultHeads,
BartTokenizer),
}
class Seq2SeqLMOutput(ModelOutput):
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
past_key_values_1: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
past_key_values_2: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
class BartModelCombined(GenerationMixinCustomCombined, nn.Module):
def __init__(self, model1, model2, config: BartConfig):
super().__init__()
self.model1 = model1
self.model2 = model2
self.config = config
self.device = model2.device
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs_1=None,
encoder_outputs_2=None,
past_key_values_1=None,
past_key_values_2=None,
inputs_embeds=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=None,
use_mixed=False,
use_head_1=0,
use_head_2=0,
gate_prob=0.5,
):
args1 = {'input_ids': input_ids,
'attention_mask': attention_mask,
'decoder_input_ids': decoder_input_ids,
'decoder_attention_mask': decoder_attention_mask,
'head_mask': head_mask,
'decoder_head_mask': decoder_head_mask,
'cross_attn_head_mask': cross_attn_head_mask,
'encoder_outputs': encoder_outputs_1,
'past_key_values': past_key_values_1,
'inputs_embeds': inputs_embeds,
'use_cache': use_cache,
'output_attentions': False,
'output_hidden_states': False,
'return_dict': None,
'use_mixed': False,
'use_head': use_head_1,
}
out1 = self.model1(**args1)
softmax_0 = torch.exp(out1.logits)
args2 = {'input_ids': input_ids,
'attention_mask': attention_mask,
'decoder_input_ids': decoder_input_ids,
'decoder_attention_mask': decoder_attention_mask,
'head_mask': head_mask,
'decoder_head_mask': decoder_head_mask,
'cross_attn_head_mask': cross_attn_head_mask,
'encoder_outputs': encoder_outputs_2,
'past_key_values': past_key_values_2,
'inputs_embeds': inputs_embeds,
'use_cache': use_cache,
'output_attentions': output_attentions,
'output_hidden_states': output_hidden_states,
'return_dict': None,
'use_mixed': False,
'use_head': use_head_2,
}
out2 = self.model2(**args2)
softmax_1 = torch.exp(out2.logits)
softmax_0 = softmax_0 * gate_prob
softmax_1 = softmax_1 * (1 - gate_prob)
lm_logits = torch.log(softmax_0 + softmax_1)
return_output = Seq2SeqLMOutput(
logits=lm_logits,
past_key_values_1=out1.past_key_values,
past_key_values_2=out2.past_key_values)
return return_output
# unchanged
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past_1=None,
past_2=None,
attention_mask=None,
head_mask=None,
use_cache=None,
encoder_outputs_1=None,
encoder_outputs_2=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past_1 is not None and past_2 is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs_1": encoder_outputs_1,
"encoder_outputs_2": encoder_outputs_2,
"past_key_values_1": past_1,
"past_key_values_2": past_2,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
def load_model(path):
args = json.load(open(path))
config_class, model_class = BartConfig, multi_head_utils.ConditionalGenerationCustomBartMultHeads
config = config_class.from_pretrained(args['path'])
model = model_class.from_pretrained(
args['path'],
from_tf=bool(".ckpt" in args['path']),
config=config)
return model, args, config
def evaluate(args, eval_dataset, model: PreTrainedModel, args1, args2, tokenizer: PreTrainedTokenizer,
suffix="") -> Dict:
eval_output_dir = args.output_dir
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
if args.generate:
f_out = open(os.path.join(eval_output_dir, 'test_out%s.txt' % suffix), 'w')
print(eval_output_dir)
k = 0
with torch.no_grad():
model.eval()
for batch in eval_dataloader:
batch = tuple(t.to(args.device) for t in batch)
input_ids, input_attention_mask, decoder_ids = batch[0], batch[1], batch[2]
for j in range(input_ids.shape[0]):
gold = tokenizer.decode(decoder_ids[j], skip_special_tokens=True)
input = tokenizer.decode(input_ids[j], skip_special_tokens=True)
input_args = {'input_ids': input_ids[j].unsqueeze(0),
'attention_mask': input_attention_mask[j].unsqueeze(0), 'num_beams': 6,
'length_penalty': 2, 'no_repeat_ngram_size': 3, 'max_length': 200, 'min_length': 12,
'top_k': 30, 'top_p': 0.5, 'do_sample': True,
'decoder_start_token_id': tokenizer.bos_token_id, 'num_return_sequences': 1,
'gate_prob': args.gate_probability, 'use_head_1': args1['use_head'],
'use_head_2': args2['use_head']}
gen = model.generate(**input_args)
gen = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=True) for g in
gen]
# gen = gen[0]
print(gen[0].strip())
f_out.write(input + '\n')
f_out.write(gold + '\n')
for g in gen:
f_out.write(g.strip() + '\n')
f_out.write('\n')
k += 1
if k > 1000:
break
f_out.close()
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_type",
default=None,
type=str,
help="base model, used to load tokenizer",
)
parser.add_argument(
"--model_1_config",
default=None,
type=str,
help="Path to model 1 config",
)
parser.add_argument(
"--model_2_config",
default=None,
type=str,
required=True,
help="Path to model 2 config",
)
parser.add_argument(
"--test_data_file",
default=None,
type=str,
required=True,
help="Evaluation data file to evaluate the perplexity on (a text file).",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--max_seq_length",
default=1024,
type=int,
help="The maximum total input sequence length after tokenization.",
)
parser.add_argument(
"--max_decoder_length",
default=128,
type=int,
help="The maximum total decoder sequence length after tokenization.",
)
parser.add_argument("--per_gpu_eval_batch_size", default=32, type=int, help="Batch size evaluation.", )
parser.add_argument("--gpu_device", type=int, default=0, help="gpu device")
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached data sets", )
# custom flags
parser.add_argument("--generate", action="store_true", help="Generate summaries for dev set", )
parser.add_argument("--dump_posteriors", action="store_true", help="Dump posterior probs at intermediate steps", )
parser.add_argument("--gate_probability", type=float, default=None, help="gate prob")
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
args.n_gpu = 1
device = torch.device("cuda", args.gpu_device)
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
filename=os.path.join(args.output_dir, 'model.log')
)
# Set seed
model1, args1, config = load_model(args.model_1_config)
model1.to(args.device)
model2, args2, _ = load_model(args.model_2_config)
model2.to(args.device)
f_out = open(os.path.join(args.output_dir, 'model_configs.json'), 'w')
json.dump(args1, f_out)
f_out.write('\n')
json.dump(args2, f_out)
f_out.write('\n')
json.dump({'gate_prob': args.gate_probability}, f_out)
f_out.write('\n')
f_out.close()
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
model = BartModelCombined(model1, model2, config)
eval_dataset = train_seq2seq_utils.load_and_cache_examples(args, tokenizer, 'test')
evaluate(args, eval_dataset, model, args1, args2, tokenizer, 'final')
logger.info("Training/evaluation parameters %s", args)
if __name__ == "__main__":
main()
| 2.046875 | 2 |
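The heart of BartModelCombined.forward above is a probability-space mixture of the two decoders' next-token distributions, controlled by gate_prob. This tiny sketch reproduces just that mixing rule on made-up log-probabilities.

import torch

# Made-up per-token log-probabilities from two decoder heads (vocab size 5).
logits_1 = torch.log_softmax(torch.randn(1, 5), dim=-1)
logits_2 = torch.log_softmax(torch.randn(1, 5), dim=-1)

gate_prob = 0.5  # weight on model 1, as passed to forward() above

# Exponentiate to probabilities, mix, and return to log space.
mixed = gate_prob * torch.exp(logits_1) + (1 - gate_prob) * torch.exp(logits_2)
lm_logits = torch.log(mixed)

print(lm_logits)                   # combined log-probabilities
print(torch.exp(lm_logits).sum())  # ~1.0: still a normalised distribution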