#!/usr/bin/env python3
'''Test WAF Access settings'''
# TODO: make it so waflz_server only runs once and tests can then post to it
# ------------------------------------------------------------------------------
# Imports
# ------------------------------------------------------------------------------
import pytest
import subprocess
import os
import sys
import json
import time
import requests
# ------------------------------------------------------------------------------
# Constants
# ------------------------------------------------------------------------------
G_TEST_HOST = 'http://127.0.0.1:12345/'
# ------------------------------------------------------------------------------
# globals
# ------------------------------------------------------------------------------
g_server_pid = -1
# ------------------------------------------------------------------------------
#
# ------------------------------------------------------------------------------
def run_command(command):
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return (p.returncode, stdout, stderr)
# ------------------------------------------------------------------------------
# fixture
# ------------------------------------------------------------------------------
@pytest.fixture(scope='module')
def setup_waflz_server():
# ------------------------------------------------------
# setup
# ------------------------------------------------------
l_cwd = os.getcwd()
l_file_path = os.path.dirname(os.path.abspath(__file__))
l_acl_path = os.path.realpath(os.path.join(l_file_path, 'test_bb_acl_settings.acl.json'))
l_waflz_server_path = os.path.abspath(os.path.join(l_file_path, '../../../build/util/waflz_server/waflz_server'))
l_subproc = subprocess.Popen([l_waflz_server_path,
'-a', l_acl_path])
print('cmd: \n{}\n'.format(' '.join([l_waflz_server_path,
'-a', l_acl_path])))
time.sleep(1)
# ------------------------------------------------------
# yield...
# ------------------------------------------------------
yield setup_waflz_server
# ------------------------------------------------------
# tear down
# ------------------------------------------------------
l_code, l_out, l_err = run_command('kill -9 %d'%(l_subproc.pid))
time.sleep(0.5)
# ------------------------------------------------------------------------------
# test_bb_acl_accesslists_01_interactions
# ------------------------------------------------------------------------------
def test_bb_acl_accesslists_01_interactions(setup_waflz_server):
# ------------------------------------------------------
# whitelist
# ------------------------------------------------------
l_uri = G_TEST_HOST
l_headers = {'host': 'myhost.com',
'User-Agent': 'dogs are cool'
}
l_r = requests.get(l_uri, headers=l_headers)
assert l_r.status_code == 200
l_r_json = l_r.json()
assert 'status' in l_r_json
assert l_r_json['status'] == 'ok'
# ------------------------------------------------------
# accesslist allow
# ------------------------------------------------------
l_uri = G_TEST_HOST
l_headers = {'host': 'myhost.com',
'User-Agent': 'monkeys are cool'
}
l_r = requests.get(l_uri, headers=l_headers)
assert l_r.status_code == 200
l_r_json = l_r.json()
assert 'status' in l_r_json
assert l_r_json['status'] == 'ok'
# ------------------------------------------------------
# accesslist block
# ------------------------------------------------------
l_uri = G_TEST_HOST
l_headers = {'host': 'myhost.com',
'User-Agent': 'monkeys are bad'
}
l_r = requests.get(l_uri, headers=l_headers)
assert l_r.status_code == 200
l_r_json = l_r.json()
assert len(l_r_json) > 0
assert 'Accesslist deny' in l_r_json['rule_msg']
# ------------------------------------------------------
# blacklist block
# ------------------------------------------------------
l_uri = G_TEST_HOST
l_headers = {'host': 'myhost.com',
'User-Agent': 'cats are cool'
}
l_r = requests.get(l_uri, headers=l_headers)
assert l_r.status_code == 200
l_r_json = l_r.json()
assert len(l_r_json) > 0
assert 'Blacklist User-Agent match' in l_r_json['rule_msg']
|
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.urls import reverse_lazy
from django.contrib.auth import login
from django.contrib.auth.views import LoginView
from django.views.generic.edit import FormView
from .forms import UserUpdateForm, UserRegisterForm, ProfileUpdateForm
@login_required
def profile(request):
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST,
request.FILES,
instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(request, f'Your account has been updated!')
return redirect('accounts:profile')
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
context = {
'u_form': u_form,
'p_form': p_form
}
return render(request, 'account/profile.html', context)
class UserRegisterView(FormView):
template_name = 'account/signup.html'
form_class = UserRegisterForm
success_url = reverse_lazy('notes:notes')
def form_valid(self, form):
user = form.save()
if user is not None:
login(self.request, user,
backend='django.contrib.auth.backends.ModelBackend')
return super(UserRegisterView, self).form_valid(form)
def get(self, *args, **kwargs):
if self.request.user.is_authenticated:
return redirect('notes:notes')
else:
return super(UserRegisterView, self).get(*args, **kwargs)
class UserLoginView(LoginView):
template_name = 'account/login.html'
fields = "__all__"
redirect_authenticated_user = True
def get_success_url(self):
return reverse_lazy('notes:notes')
|
import unittest
from Solutions import Lesson04
class FrogRiverOneTests(unittest.TestCase):
def test_frog_river_one_example_test_01(self):
x = 5
a = [1, 3, 1, 4, 2, 3, 5, 4]
res = Lesson04.frog_river_one(x, a)
self.assertEqual(6, res)
|
# -*- coding: utf-8 -*-
import logging
from os.path import join as pjoin
import numpy as np
from africanus.constants import minus_two_pi_over_c
from africanus.util.jinja2 import jinja_env
from africanus.rime.phase import PHASE_DELAY_DOCS
from africanus.util.code import memoize_on_key, format_code
from africanus.util.cuda import cuda_function, grids
from africanus.util.requirements import requires_optional
try:
import cupy as cp
from cupy._core._scalar import get_typename as _get_typename
from cupy.cuda.compiler import CompileException
except ImportError:
pass
log = logging.getLogger(__name__)
def _key_fn(lm, uvw, frequency):
return (lm.dtype, uvw.dtype, frequency.dtype)
_TEMPLATE_PATH = pjoin("rime", "cuda", "phase.cu.j2")
@memoize_on_key(_key_fn)
def _generate_kernel(lm, uvw, frequency):
# Floating point output type
out_dtype = np.result_type(lm, uvw, frequency)
# Block sizes
blockdimx = 32 if frequency.dtype == np.float32 else 16
blockdimy = 32 if uvw.dtype == np.float32 else 16
block = (blockdimx, blockdimy, 1)
# Create template
render = jinja_env.get_template(_TEMPLATE_PATH).render
name = "phase_delay"
code = render(kernel_name=name,
lm_type=_get_typename(lm.dtype),
uvw_type=_get_typename(uvw.dtype),
freq_type=_get_typename(frequency.dtype),
out_type=_get_typename(out_dtype),
sqrt_fn=cuda_function('sqrt', lm.dtype),
sincos_fn=cuda_function('sincos', out_dtype),
minus_two_pi_over_c=minus_two_pi_over_c,
blockdimx=blockdimx,
blockdimy=blockdimy)
# Complex output type
out_dtype = np.result_type(out_dtype, np.complex64)
return cp.RawKernel(code, name), block, out_dtype
@requires_optional("cupy")
def phase_delay(lm, uvw, frequency):
kernel, block, out_dtype = _generate_kernel(lm, uvw, frequency)
grid = grids((frequency.shape[0], uvw.shape[0], 1), block)
out = cp.empty(shape=(lm.shape[0], uvw.shape[0], frequency.shape[0]),
dtype=out_dtype)
try:
kernel(grid, block, (lm, uvw, frequency, out))
except CompileException:
log.exception(format_code(kernel.code))
raise
return out
try:
phase_delay.__doc__ = PHASE_DELAY_DOCS.substitute(
array_type=':class:`cupy.ndarray`')
except AttributeError:
pass
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: fish.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='fish.proto',
package='fishmodel',
syntax='proto3',
serialized_pb=_b('\n\nfish.proto\x12\tfishmodel\"\x8c\x02\n\x04\x46ish\x12\n\n\x02id\x18\x01 \x01(\x04\x12\x11\n\tdate_time\x18\x02 \x01(\t\x12&\n\x04type\x18\x03 \x01(\x0e\x32\x18.fishmodel.Fish.FishType\x12(\n\x05state\x18\x04 \x01(\x0e\x32\x19.fishmodel.Fish.FishState\x12\x0c\n\x04tags\x18\x05 \x01(\t\x12\x0f\n\x07\x64\x65tails\x18\x06 \x01(\t\"/\n\x08\x46ishType\x12\r\n\tTYPE_NONE\x10\x00\x12\n\n\x06SALMON\x10\x01\x12\x08\n\x04TUNA\x10\x02\"C\n\tFishState\x12\x0e\n\nSTATE_NONE\x10\x00\x12\x07\n\x03NEW\x10\x01\x12\x0e\n\nPROCESSING\x10\x02\x12\r\n\tCOMPLETED\x10\x03\x62\x06proto3')
)
_FISH_FISHTYPE = _descriptor.EnumDescriptor(
name='FishType',
full_name='fishmodel.Fish.FishType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='TYPE_NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SALMON', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TUNA', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=178,
serialized_end=225,
)
_sym_db.RegisterEnumDescriptor(_FISH_FISHTYPE)
_FISH_FISHSTATE = _descriptor.EnumDescriptor(
name='FishState',
full_name='fishmodel.Fish.FishState',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATE_NONE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NEW', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROCESSING', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='COMPLETED', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=227,
serialized_end=294,
)
_sym_db.RegisterEnumDescriptor(_FISH_FISHSTATE)
_FISH = _descriptor.Descriptor(
name='Fish',
full_name='fishmodel.Fish',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='fishmodel.Fish.id', index=0,
number=1, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='date_time', full_name='fishmodel.Fish.date_time', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='fishmodel.Fish.type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='state', full_name='fishmodel.Fish.state', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tags', full_name='fishmodel.Fish.tags', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='details', full_name='fishmodel.Fish.details', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_FISH_FISHTYPE,
_FISH_FISHSTATE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=26,
serialized_end=294,
)
_FISH.fields_by_name['type'].enum_type = _FISH_FISHTYPE
_FISH.fields_by_name['state'].enum_type = _FISH_FISHSTATE
_FISH_FISHTYPE.containing_type = _FISH
_FISH_FISHSTATE.containing_type = _FISH
DESCRIPTOR.message_types_by_name['Fish'] = _FISH
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Fish = _reflection.GeneratedProtocolMessageType('Fish', (_message.Message,), dict(
DESCRIPTOR = _FISH,
__module__ = 'fish_pb2'
# @@protoc_insertion_point(class_scope:fishmodel.Fish)
))
_sym_db.RegisterMessage(Fish)
# @@protoc_insertion_point(module_scope)
|
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from Metadata.models import Metadata
import json
User = get_user_model()
class TestViews(TestCase):
def setUp(self):
self.register_url = reverse('register')
self.login_url = reverse('login')
self.account_url = reverse('account')
user_a = User.objects.create_user('[email protected]', 'some_strong_123_pass')
user_a.save()
self.user_a = user_a
def test_register_view(self):
response = self.client.get(self.register_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'account/register.html')
def test_login_view(self):
response = self.client.get(self.login_url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'account/login.html')
def test_account_view(self):
data = {"email": "[email protected]", "password": "some_strong_123_pass"}
response = self.client.post(self.account_url, data, follow=True)
redirect_path = response.request.get("PATH_INFO")
self.assertEqual(response.status_code, 200)
self.assertEqual(redirect_path, '/login/')
|
# from api.serializers import UserSerializer
import functools
import operator
from datetime import datetime, timedelta
# Facebook
from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter
# Twitter
from allauth.socialaccount.providers.twitter.views import TwitterOAuthAdapter
from api.models import Accommodation, AccommodationImage, AccommodationHosting, Booking, Review, UserInfo, Search, \
ReviewCount, BookRequest
from api.serializers import AccommodationSerializer, AccommodationImageSerializer, AccommodationHostingSerializer, \
BookingSerializer, ReviewSerializer, UserInfoSerializer, SearchSerializer, ReviewCountSerializer, \
BookRequestSerializer
from django.db.models import Q
from django.http import Http404
from rest_auth.registration.views import SocialLoginView
from rest_auth.social_serializers import TwitterLoginSerializer
from rest_framework import status
from rest_framework import viewsets
from rest_framework.authentication import TokenAuthentication
from rest_framework.authtoken.models import Token
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.decorators import action, api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from .functions import compareDate
class FacebookLogin(SocialLoginView):
adapter_class = FacebookOAuth2Adapter
class TwitterLogin(SocialLoginView):
serializer_class = TwitterLoginSerializer
adapter_class = TwitterOAuthAdapter
class AccommodationView(viewsets.ModelViewSet):
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
queryset = Accommodation.objects.all()
# queryset = Accomodation.objects.filter(user__username__exact="sean")
serializer_class = AccommodationSerializer
def get_queryset(self):
""" allow rest api to filter by submissions """
queryset = Accommodation.objects.all()
user = self.request.query_params.get('user', None)
if user is not None:
queryset = queryset.filter(user=user)
return queryset
""" Get accomodation review """
class AccommodationView(viewsets.ModelViewSet):
queryset = Accommodation.objects.all()
# queryset = Accomodation.objects.filter(user__username__exact="sean")
serializer_class = AccommodationSerializer
@action(methods=['get'], detail=False)
def review(self, request, pk=True):
print('Go to review')
def get_queryset(self):
""" allow rest api to filter by submissions """
queryset = Accommodation.objects.all()
user = self.request.query_params.get('user', None)
id = self.request.query_params.get('id', None)
if user is not None:
queryset = queryset.filter(user=user)
if id is not None:
queryset = queryset.filter(id=id)
return queryset
class AccommodationImageView(viewsets.ModelViewSet):
queryset = AccommodationImage.objects.all()
# queryset = Accomodation.objects.filter(user__username__exact="sean")
serializer_class = AccommodationImageSerializer
def get_queryset(self):
""" allow rest api to filter by submissions """
queryset = AccommodationImage.objects.all()
accommodation = self.request.query_params.get('accommodation', None)
if accommodation is not None:
queryset = queryset.filter(accommodation=accommodation)
return queryset
class AccommodationHostingView(viewsets.ModelViewSet):
queryset = AccommodationHosting.objects.all()
serializer_class = AccommodationHostingSerializer
def get(self, request):
pk = request.GET.get('pk')
myHostObject = AccommodationHosting.objects.get(pk=pk)
serializer = self.serializer_class(myHostObject)
return Response(serializer.data)
""" handling PUT request and backend validation"""
def update(self, request, pk, format=None):
new_date_start = request.data['date_start']
new_date_end = request.data['date_end']
new_price = request.data['price']
new_description = request.data['description']
myHostObject = AccommodationHosting.objects.get(pk=pk) # self.get_object(pk)
myHostObject.date_start = new_date_start
myHostObject.date_end = new_date_end
myHostObject.price = new_price
myHostObject.description = new_description
myHostObject.save()
return Response(request.data, status=status.HTTP_200_OK)
""" handling POST request backend validation"""
def create(self, request, format=None):
serializer = self.serializer_class(data=request.data)
date_start = request.data['date_start']
date_end = request.data['date_end']
check_valid = compareDate(date_start, date_end)
if serializer.is_valid():
if check_valid <= 0:
return Response({"date": "start date must be before end date"}, status=status.HTTP_400_BAD_REQUEST)
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def get_queryset(self):
""" allow rest api to filter by submissions """
queryset_1 = Accommodation.objects.all()
queryset_2 = AccommodationHosting.objects.all()
user = self.request.query_params.get('user', None)
accomm = self.request.query_params.get('accomm', None)
if user is not None:
ids = queryset_1.values_list('id', flat=True).filter(user=user)
queryset_2 = queryset_2.filter(accommodation__in=set(ids))
if accomm is not None:
queryset_2 = queryset_2.filter(accommodation=accomm)
return queryset_2
class BookingView(viewsets.ModelViewSet):
queryset = Booking.objects.all()
serializer_class = BookingSerializer
def get_queryset(self):
""" allow rest api to filter by submissions """
queryset = Booking.objects.all()
booker = self.request.query_params.get('booker', None)
host = self.request.query_params.get('host', None)
if booker is not None:
queryset = queryset.filter(booker=booker)
if host is not None:
queryset = queryset.filter(hosting=host)
return queryset.order_by('date_start')
""" get all the reviews """
class GetReviews(viewsets.ModelViewSet):
queryset = Review.objects.all()
serializer_class = ReviewSerializer
def get_queryset(self):
queryset = Review.objects.all()
return queryset
""" GET the reviews made by an user """
""" GET /users/{user_id}/reviews """
class UserReviews(viewsets.ModelViewSet):
queryset = Review.objects.all()
serializer_class = ReviewSerializer
def get_queryset(self):
queryset = Review.objects.all()
user_pk = self.kwargs['user_pk']
if user_pk is not None:
queryset = queryset.filter(user=user_pk)
if not queryset:
raise Http404('Review does not exist for this accommodation')
return queryset
""" Get reviews for a specific accommodation """
""" GET accommodation/{accomodation_pk}/reviews/ """
class AccomodationReviews(viewsets.ModelViewSet):
queryset = Review.objects.all()
serializer_class = ReviewSerializer
def get_queryset(self):
queryset = Review.objects.all() # initialise queryset
accommodation_pk = self.kwargs['accommodation_pk']
if accommodation_pk is not None:
queryset = queryset.filter(accommodation=accommodation_pk)
if not queryset:
raise Http404('Review does not exist for this accommodation')
return queryset
""" GET all current users """
""" /users/ """
class Users(viewsets.ModelViewSet):
queryset = UserInfo.objects.all()
serializer_class = UserInfoSerializer
""" This would get all users """
def get_queryset(self):
queryset = UserInfo.objects.all()
return queryset
""" Custom authentication - return Token, username and email """
class CustomAuthToken(ObtainAuthToken):
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data,
context={'request': request})
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
token, created = Token.objects.get_or_create(user=user)
return Response({
'token': token.key,
'user_id': user.pk,
'email': user.email,
'username': user.username
})
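# Hypothetical URL wiring for the token view above (not part of this file);
# it assumes an app-level urls.py and a conventional endpoint name.
#
#   from django.urls import path
#   urlpatterns = [
#       path('api-token-auth/', CustomAuthToken.as_view()),
#   ]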
class SearchViews(viewsets.ModelViewSet):
queryset = Search.objects.all()
serializer_class = SearchSerializer
def get_queryset(self):
queryset = Search.objects.all()
return queryset
class SearchHostingViews(viewsets.ModelViewSet):
queryset = Search.objects.all()
serializer_class = AccommodationHostingSerializer
def get_queryset(self):
""" allow rest api to filter by submissions """
queryset = Search.objects.all()
queryset_accommodation = AccommodationHosting.objects.all()
date_start = self.request.query_params.get('start', None)
date_end = self.request.query_params.get('end', None)
price_upper = self.request.query_params.get('price_upper', None)
price_lower = self.request.query_params.get('price_lower', None)
guest = self.request.query_params.get('guest', None)
location = self.request.query_params.get('location', None)
Accommodation_Type = self.request.query_params.get('Accommodation_Type', None)
if guest is not None:
queryset = queryset.filter(guest__gte=guest)
if location is not None:
queryset = queryset.filter(location__icontains=location)
if Accommodation_Type is not None:
queryset = queryset.filter(Accomodation_Type__icontains=Accommodation_Type)
if price_upper is not None:
queryset = queryset.filter(price__lte=price_upper)
if price_lower is not None:
queryset = queryset.filter(price__gte=price_lower)
if date_start is not None and date_end is not None:
begin = datetime.strptime(date_start, '%Y-%m-%d')
endin = datetime.strptime(date_end, '%Y-%m-%d')
delta = endin - begin # timedelta
y = []
for i in range(delta.days + 1):
x = begin + timedelta(i)
y.append(str(x.date()) + ",")
condition = functools.reduce(operator.and_, [Q(date_free__icontains=day) for day in y])
queryset = queryset.filter(condition)
newQ = list(queryset.values_list('accommodation', flat=True))
queryset_accommodation = queryset_accommodation.filter(accommodation__in=set(newQ))
return queryset_accommodation
class ReviewCountViews(viewsets.ModelViewSet):
queryset = ReviewCount.objects.all()
serializer_class = ReviewCountSerializer
def get_queryset(self):
queryset = ReviewCount.objects.all()
return queryset
class BookRequestViews(viewsets.ModelViewSet):
queryset = BookRequest.objects.all()
serializer_class = BookRequestSerializer
def get_queryset(self):
queryset = BookRequest.objects.all()
hasReply = self.request.query_params.get('hasReply', None)
toHost = self.request.query_params.get('toHost', None)
        if toHost is not None:
            queryset = queryset.filter(toHost=toHost)
        if hasReply is not None:
            queryset = queryset.filter(hasReply=hasReply)
return queryset
|
from flask import Flask
from flask_cors import CORS
import os
from nakiri.routes import index
from nakiri.routes import user
from nakiri.models.db import db, migrate
def create_app():
app = Flask(__name__)
CORS(app)
app.register_blueprint(user.blueprint)
app.register_blueprint(index.blueprint)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['NAKIRI_DB']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db.init_app(app)
migrate.init_app(app, db)
return app
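# Minimal local entry point sketch (not part of the original module). It
# assumes the NAKIRI_DB environment variable holds a valid SQLAlchemy URI,
# e.g. 'sqlite:////tmp/nakiri.db'.
if __name__ == '__main__':
    create_app().run(debug=True)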
|
# Sorting an array using Selection Sort Technique
from array import array
def selection_sort(input_list):
for idx in range(len(input_list)):
min_idx = idx
for j in range( idx +1, len(input_list)):
if input_list[min_idx] > input_list[j]:
min_idx = j
# Swap the minimum value with the compared value
input_list[idx], input_list[min_idx] = input_list[min_idx], input_list[idx]
# Create the array
ar = array('i',[])
n = int(input("Enter the number of elements : "))
print("Enter the values:")
for i in range(n):
ar.append(int(input()))
print("Original Array :",ar)
selection_sort(ar)
print("Sorted Array :", ar)
|
import tweepy
import pandas
from collections import Counter
from TwitterSecrets import *
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
auth_api = tweepy.API(auth)
account_list = ["aguialabs", "wehlersdesign", "LeReveduDogon", "SkagerakDanmark","vanwijheverf","BulbUK","GenerationFndt","Danone","NetD_news","HovioneGroup","simplybusiness","EllasKitchenUK","AlfaAccountants","theCOOKkitchen","MacphieUK","Pukkaherbs","Ingeus_uk","ApivitaSA","les2vaches","lilyskitchen","Camif_","BatesWellsTweet","Ecotone_","divinechocolate","pixelis_agency","HerbatintItalia","BigIssue","eqinvestors","ARP_Astrance","Perlagewinery","ruchequiditoui","iglooRegen","nativeadv","ietp_","AgenceSidiese","resourcefutures","Treedom","WHEBsustainable","BridgesFundMgmt","ekodev","CitizenCapital","hcpgroup","microdon_fr","Patte_Blanche","FluidITuk","stonesoupchat","CuentoDeLuz","TrusteesUnltd","MSeedImpact","GreenApesJungle","NuovaVista","clipit_officiel","Investing4Good","Green_Element","wehlersdesign","TribeCapital","LumoFrance","Authenticitys","getyoti","bestreetwize","reviveNV","SkagerakDanmark","vanwijheverf","Fairphone","maseco_pw","PurProjet","BubbleChamber1","squizgourde","Birdeo","FARADGroup","aguialabs","lygofairtrade","actimpact","LeReveduDogon","OnPurposeUK"]
try:
for target in account_list:
print("Searching for", target)
tweets = auth_api.user_timeline(screen_name=target,
count=200,
include_rts = True,
tweet_mode = 'extended'
)
all_tweets = []
all_tweets.extend(tweets)
oldest_id = tweets[-1].id
while True:
tweets = auth_api.user_timeline(screen_name=target,
count=200,
include_rts = True,
max_id = oldest_id - 1,
tweet_mode = 'extended'
)
if len(tweets) == 0:
break
oldest_id = tweets[-1].id
all_tweets.extend(tweets)
print('N of tweets downloaded till now {}'.format(len(all_tweets)))
outtweets = [[tweet.id_str,
tweet.created_at,
tweet.favorite_count,
tweet.retweet_count,
tweet.full_text.encode("utf-8").decode("utf-8")]
for idx,tweet in enumerate(all_tweets)]
df = pandas.DataFrame(outtweets,columns=["id","created_at","favorite_count","retweet_count", "text"])
df.to_csv(r'./csv/%s_tweets.csv' % target,index=False)
df.head(3)
except BaseException as e:
    print('There seems to be an error:', str(e))
|
# Run the cell below to set up the notebook; you need TensorFlow installed for this exercise.
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from tensorflow import keras
import numpy as np
from sklearn.datasets import load_digits
import matplotlib.pyplot as plt
## Step 1: Load dataset
num_classes = 10
digits = load_digits(n_class=num_classes)
x = digits.data
y = digits.target
n_samples, n_features = x.shape
shape_image = x.shape[1]
# plt.imshow(x[0])
print("data shape: ", x.shape)
print("class shape: ", y.shape)
# Split the data into training and testing sets
N_train = int(0.8 * x.shape[0])
x_train = x[:N_train, :]
y_train = y[:N_train]
x_test = x[N_train:, :]
y_test = y[N_train:]
# Convert labels to one-hot vector
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(y_train)
print(integer_encoded)
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
y_train_onehot = onehot_encoder.fit_transform(integer_encoded)
print(y_train_onehot)
print("Finish onehot encode")
## Step 2: Build a neural network model to classify the digits
# One of the simplest ways to build a neural network with TensorFlow is to use the high-level Keras interface
# First, define the based sequential model
model = Sequential()
# Add the first fully connected layer with 100 hidden units, with ReLU activation.
# As this is the first layer in your model, don't forget to include the 'input_shape' argument
model.add(Dense(100, activation='relu', input_shape=(shape_image,)))
model.add(Dense(10, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'],
)
## Step 3: Train the model
model.fit(x_train, y_train_onehot, epochs=10, batch_size=32)
## Step 4: Evaluate the model
# Call 'predict' function from the model to get the predicted class probabilities
y_predict = model.predict(x_test)
# Find the prediction (as the classes with highest probabilities)
y_predict_max = np.array(list(map(lambda row: np.argmax(row), y_predict)))
# Calculate the prediction accuracy
accuracy = accuracy_score(y_test, y_predict_max)
print("Accuracy={:.2f}".format(accuracy))
## Step 5: Visualize the classification results
for selected_class in range(0,10):
x_visualize = x_test[y_predict_max == selected_class]
# plot some images of the digits
n_img_per_row = 10
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
if i * n_img_per_row + j < len(x_visualize):
img[ix:ix + 8, iy:iy + 8] = x_visualize[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('Test images predicted as "{:}"'.format(selected_class))
plt.show()
|
#!/usr/bin/env python
from .pure_market_making import PureMarketMakingStrategy
from .asset_price_delegate import AssetPriceDelegate
from .order_book_asset_price_delegate import OrderBookAssetPriceDelegate
from .api_asset_price_delegate import APIAssetPriceDelegate
__all__ = [
    'PureMarketMakingStrategy',
    'AssetPriceDelegate',
    'OrderBookAssetPriceDelegate',
    'APIAssetPriceDelegate',
]
|
# ------------------------------------------------------------------------------
# Program: The LDAR Simulator (LDAR-Sim)
# File: LDAR-Sim main
# Purpose: Interface for parameterizing and running LDAR-Sim.
#
# Copyright (C) 2018-2020 Thomas Fox, Mozhou Gao, Thomas Barchyn, Chris Hugenholtz
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the MIT License as published
# by the Free Software Foundation, version 3.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
# You should have received a copy of the MIT License
# along with this program. If not, see <https://opensource.org/licenses/MIT>.
#
# ------------------------------------------------------------------------------
def aggregate(site, leaks):
leaks_present = []
equipment_rates = []
site_rate = 0
# Make list of all leaks and add up all emissions at site
for leak in leaks:
if leak['facility_ID'] == site['facility_ID']:
if leak['status'] == 'active':
leaks_present.append(leak)
site_rate += leak['rate']
# Sum emissions by equipment group
for group in range(int(site['equipment_groups'])):
group_emissions = 0
for leak in leaks_present:
if leak['equipment_group'] == (group + 1):
group_emissions += leak['rate']
equipment_rates.append(group_emissions)
    return leaks_present, equipment_rates, site_rate
|
from .sfd_detector import SFDDetector as FaceDetector
|
from django import forms
class CartForm(forms.Form):
product_id = forms.IntegerField(required=True, widget=forms.HiddenInput())
quantity = forms.IntegerField(
min_value=1, required=True, widget=forms.NumberInput({'class': 'form-control', 'value': 1}))
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
__all__ = [
'IdentityArgs',
'SkuArgs',
]
@pulumi.input_type
class IdentityArgs:
def __init__(__self__, *,
type: Optional[pulumi.Input['ResourceIdentityType']] = None,
user_assigned_identities: Optional[pulumi.Input[Mapping[str, Any]]] = None):
"""
Identity for the resource.
:param pulumi.Input['ResourceIdentityType'] type: The identity type. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the Azure Health Bot
:param pulumi.Input[Mapping[str, Any]] user_assigned_identities: The list of user identities associated with the resource. The user identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
"""
if type is not None:
pulumi.set(__self__, "type", type)
if user_assigned_identities is not None:
pulumi.set(__self__, "user_assigned_identities", user_assigned_identities)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input['ResourceIdentityType']]:
"""
The identity type. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities. The type 'None' will remove any identities from the Azure Health Bot
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input['ResourceIdentityType']]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="userAssignedIdentities")
def user_assigned_identities(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
"""
The list of user identities associated with the resource. The user identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
"""
return pulumi.get(self, "user_assigned_identities")
@user_assigned_identities.setter
def user_assigned_identities(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
pulumi.set(self, "user_assigned_identities", value)
@pulumi.input_type
class SkuArgs:
def __init__(__self__, *,
name: pulumi.Input['SkuName']):
"""
The resource model definition representing SKU
:param pulumi.Input['SkuName'] name: The name of the Azure Health Bot SKU
"""
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> pulumi.Input['SkuName']:
"""
The name of the Azure Health Bot SKU
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input['SkuName']):
pulumi.set(self, "name", value)
|
import torch
from torch.utils.data import DataLoader
from torch.utils.data import TensorDataset
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
from sklearn.model_selection import train_test_split
from .dataset.data_loader import load_dataset
from .models.deepconv import DeepConv
from .models.mlp import MLP
from .models.cnn import CNN
from .models.lstm import LSTM
from .models.deepconvlstm import DeepConvLSTM
from .utils.runmanager import RunManager
from collections import namedtuple
Arg = namedtuple("Arg", ["batch_size", "lr", "device", "epoch"])
args = Arg(16, 0.001, "cuda", 2)
def get_dataset_loader(X, y, batch_size, shuffle=False):
X = torch.tensor(X, dtype=torch.float32)
y = torch.tensor(y, dtype=torch.long)
dataset = TensorDataset(X, y)
dataset_loader = DataLoader(
dataset, batch_size=batch_size, shuffle=shuffle, drop_last=True
)
return dataset_loader
def trainapi(subjects, locations, model, channel_policy, result_dir, instance_name):
# channel policy
ch_start, ch_end = 0, 0
    if channel_policy == "acc" or channel_policy == "acc_norm":
ch_start, ch_end = 0, 3
elif channel_policy == "gyr":
ch_start, ch_end = 3, 6
elif channel_policy == "mag":
ch_start, ch_end = 6, 9
elif channel_policy == "acc+gyr":
ch_start, ch_end = 0, 6
elif channel_policy == "acc+gyr+mag":
ch_start, ch_end = 0, 9
X, y = load_dataset(subjects, locations, ch_start, ch_end)
label_number = len(np.unique(y))
if channel_policy == "acc_norm":
X = np.linalg.norm(X, axis=1)
X = np.expand_dims(X, axis=1)
classifier = nn.Module()
# model dispatcher
if model == "cnn":
X = np.squeeze(X, axis=2)
_, channel_num, width = X.shape
classifier = CNN(channel_num, width, label_number)
elif model == "mlp":
window_nums, *_ = X.shape
X = X.reshape(window_nums, -1)
_, input_size = X.shape
classifier = MLP(input_size, label_number)
elif model == "deepconv":
_, channel_num, _, width = X.shape
classifier = DeepConv(channel_num, width, label_number)
elif model == "lstm":
X = np.squeeze(X, axis=2)
X = np.transpose(X, (0, 2, 1))
_, _, channel_num = X.shape
classifier = LSTM(n_features=channel_num, n_classes=label_number)
elif model == "deepconvlstm":
X = np.squeeze(X, axis=2)
X = np.transpose(X, (0, 2, 1))
_, _, channel_num = X.shape
classifier = DeepConvLSTM(n_channels=channel_num, n_classes=label_number)
classifier = classifier.to(args.device)
print(f"X.shape: {X.shape}, y.shape: {y.shape}")
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=0
)
train_set_loader = get_dataset_loader(X_train, y_train, args.batch_size)
test_set_loader = get_dataset_loader(X_test, y_test, args.batch_size)
optimizer = optim.Adam(classifier.parameters(), lr=args.lr)
run_manager = RunManager(result_dir, instance_name, args)
run_manager.begin_run(classifier, optimizer, train_set_loader, test_set_loader)
for _ in range(args.epoch):
run_manager.begin_epoch()
for batch in train_set_loader:
X, y = batch
X = X.to(args.device)
y = y.to(args.device)
preds = classifier(X)
loss = F.cross_entropy(preds, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
run_manager.track_loss(loss, X.shape[0])
run_manager.track_num_correct(preds, y)
run_manager.track_label_and_pred(y, preds)
run_manager.end_epoch()
run_manager.end_run()
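# Illustrative call sketch (argument values are assumptions, not from the
# source): trains a CNN on accelerometer channels and writes results under
# result_dir/instance_name.
#
#   trainapi(subjects=[1, 2, 3], locations=["wrist"], model="cnn",
#            channel_policy="acc", result_dir="./results",
#            instance_name="cnn_acc_demo")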
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 15:00:21 2017
Adapted from
https://github.com/SanPen/GridCal/blob/master/UnderDevelopment/GridCal/
gui/GuiFunctions.py
Ing.,Mgr. (MSc.) Jan Cimbálník, PhD.
Biomedical engineering
International Clinical Research Center
St. Anne's University Hospital in Brno
Czech Republic
&
Mayo systems electrophysiology lab
Mayo Clinic
200 1st St SW
Rochester, MN
United States
"""
# Std imports
# Third party imports
from PyQt5.QtCore import QAbstractTableModel, QModelIndex, Qt, pyqtSignal
from PyQt5.QtWidgets import QTableView, QAbstractScrollArea
import numpy as np
# Local imports
class DataFrameView(QTableView):
row_selected = pyqtSignal(int, name='row_selected')
data_changed = pyqtSignal(name='data_changed')
def __init__(self, df, parent=None):
super().__init__()
self.setSizeAdjustPolicy(QAbstractScrollArea.AdjustToContents)
self.model = DataFrameModel(df, self)
self.setModel(self.model)
self.n_cols = len(df.columns)
self.n_rows = len(df.index)
# enable sorting
self.setSortingEnabled(True)
# signals
self.selectionModel().selectionChanged.connect(self.evaluate_selection)
self.model.dataChanged.connect(self.pass_data_changed_signal)
# sort
self.model.sort(0, Qt.AscendingOrder)
# adjust size
self.resizeColumnsToContents()
# Custom key events
def keyPressEvent (self, e):
super(DataFrameView, self).keyPressEvent(e)
if e.key() == Qt.Key_Delete:
indexes = self.selectionModel().selectedIndexes()
self.model.removeRows(indexes)
def set_selection_mode(self, value):
if value:
self.setSelectionBehavior(self.SelectRows)
else:
self.setSelectionBehavior(self.SelectItems)
def evaluate_selection(self):
indexes = self.selectionModel().selectedIndexes()
# Whole row selected
if (len(indexes) == self.n_cols
and not (sum(np.diff([i.row() for i in indexes])))):
self.row_selected.emit(self.model.df.index[indexes[0].row()])
def pass_data_changed_signal(self, ia, ib):
self.data_changed.emit()
class DataFrameModel(QAbstractTableModel):
"""
Class to populate a table view with a pandas dataframe
"""
def __init__(self, df, parent=None):
QAbstractTableModel.__init__(self, parent)
self.df = df
def rowCount(self, parent=None):
return self.df.shape[0]
def columnCount(self, parent=None):
return self.df.shape[1]
def data(self, index, role=Qt.DisplayRole):
if not index.isValid():
return False
if role == Qt.DisplayRole:
return str(self.df.iloc[index.row(), index.column()])
return None
def setData(self, index, value, role):
if not index.isValid():
return False
if role == Qt.EditRole:
ri = index.row()
ci = index.column()
r = int(self.headerData(ri, Qt.Vertical, Qt.DisplayRole))
c = self.headerData(ci, Qt.Horizontal, Qt.DisplayRole)
self.df.loc[r, c] = value
self.dataChanged.emit(index, index)
return True
def removeRows(self, indexes):
rows = [idx.row() for idx in indexes]
self.beginResetModel()
self.beginRemoveRows(QModelIndex(), min(rows), max(rows))
        self.df.drop(self.df.index[rows], inplace=True)
self.endRemoveRows()
self.endResetModel()
self.dataChanged.emit(QModelIndex(), QModelIndex())
def headerData(self, n, orientation, role):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self.df.columns[n]
if orientation == Qt.Vertical and role == Qt.DisplayRole:
return str(self.df.index[n])
return None
def sort(self, col_n, order):
self.layoutAboutToBeChanged.emit()
col = self.df.columns[col_n]
if order == Qt.DescendingOrder:
self.df.sort_values(by=col, ascending=False, inplace=True)
else:
self.df.sort_values(by=col, ascending=True, inplace=True)
self.layoutChanged.emit()
def flags(self, index):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
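# Minimal usage sketch (not part of the original module): it assumes PyQt5 and
# pandas are installed and simply shows a DataFrameView for a small frame.
if __name__ == '__main__':
    import sys
    import pandas as pd
    from PyQt5.QtWidgets import QApplication

    app = QApplication(sys.argv)
    example_df = pd.DataFrame({'channel': ['C3', 'C4'], 'rate': [256, 512]})
    view = DataFrameView(example_df)
    view.show()
    sys.exit(app.exec_())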
|
import unittest
import cupy
from cupy import testing
import cupyx.scipy.special
import numpy
try:
import scipy.special
_scipy_available = True
except ImportError:
_scipy_available = False
@testing.gpu
@testing.with_requires('scipy')
class TestSpecial(unittest.TestCase):
@testing.for_dtypes(['f', 'd'])
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def check_unary(self, name, xp, scp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return getattr(scp.special, name)(a)
def test_j0(self):
self.check_unary('j0')
def test_j1(self):
self.check_unary('j1')
def test_y0(self):
self.check_unary('y0')
def test_y1(self):
self.check_unary('y1')
def test_i0(self):
self.check_unary('i0')
def test_i1(self):
self.check_unary('i1')
@testing.gpu
@testing.with_requires('scipy')
class TestFusionSpecial(unittest.TestCase):
@testing.for_dtypes(['f', 'd'])
@testing.numpy_cupy_allclose(atol=1e-5, scipy_name='scp')
def check_unary(self, name, xp, scp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
@cupy.fuse()
def f(x):
return getattr(scp.special, name)(x)
return f(a)
def test_j0(self):
self.check_unary('j0')
def test_j1(self):
self.check_unary('j1')
def test_y0(self):
self.check_unary('y0')
def test_y1(self):
self.check_unary('y1')
def test_i0(self):
self.check_unary('i0')
def test_i1(self):
self.check_unary('i1')
class TestFusionCPUSpecial(unittest.TestCase):
@testing.for_dtypes(['f', 'd'])
def check_unary(self, name, dtype):
a = testing.shaped_arange((2, 3), numpy, dtype)
@cupy.fuse()
def f(x):
return getattr(cupyx.scipy.special, name)(x)
if _scipy_available:
x = getattr(scipy.special, name)(a)
numpy.testing.assert_array_equal(f(a), x)
else:
with self.assertRaises(ImportError):
f(a)
def test_j0(self):
self.check_unary('j0')
def test_j1(self):
self.check_unary('j1')
def test_y0(self):
self.check_unary('y0')
def test_y1(self):
self.check_unary('y1')
def test_i0(self):
self.check_unary('i0')
def test_i1(self):
self.check_unary('i1')
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=protected-access
import datetime
import functools
import os
import inspect
import types
import warnings
def Cache(obj):
"""Decorator for caching read-only properties.
Example usage (always returns the same Foo instance):
@Cache
def CreateFoo():
return Foo()
If CreateFoo() accepts parameters, a separate cached value is maintained
for each unique parameter combination.
Cached methods maintain their cache for the lifetime of the /instance/, while
cached functions maintain their cache for the lifetime of the /module/.
"""
@functools.wraps(obj)
def Cacher(*args, **kwargs):
cacher = args[0] if inspect.getargspec(obj).args[:1] == ['self'] else obj
cacher.__cache = cacher.__cache if hasattr(cacher, '__cache') else {}
key = str(obj) + str(args) + str(kwargs)
if key not in cacher.__cache:
cacher.__cache[key] = obj(*args, **kwargs)
return cacher.__cache[key]
return Cacher
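# Illustrative only (not in the original file): each distinct argument
# combination gets its own cache entry, as the docstring above describes.
#
#   @Cache
#   def CreatePort(number):
#     return Port(number)  # Port is a hypothetical class
#
#   CreatePort(80) is CreatePort(80)    # True: same cached instance
#   CreatePort(80) is CreatePort(8080)  # False: different cache key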
class Deprecated(object):
def __init__(self, year, month, day, extra_guidance=''):
self._date_of_support_removal = datetime.date(year, month, day)
self._extra_guidance = extra_guidance
def _DisplayWarningMessage(self, target):
target_str = ''
if isinstance(target, types.FunctionType):
target_str = 'Function %s' % target.__name__
else:
target_str = 'Class %s' % target.__name__
warnings.warn('%s is deprecated. It will no longer be supported on %s. '
'Please remove it or switch to an alternative before '
'that time. %s\n'
% (target_str,
self._date_of_support_removal.strftime('%B %d, %Y'),
self._extra_guidance),
stacklevel=self._ComputeStackLevel())
def _ComputeStackLevel(self):
this_file, _ = os.path.splitext(__file__)
frame = inspect.currentframe()
i = 0
while True:
filename = frame.f_code.co_filename
if not filename.startswith(this_file):
return i
frame = frame.f_back
i += 1
def __call__(self, target):
if isinstance(target, types.FunctionType):
@functools.wraps(target)
def wrapper(*args, **kwargs):
self._DisplayWarningMessage(target)
return target(*args, **kwargs)
return wrapper
elif inspect.isclass(target):
original_ctor = target.__init__
      # We have to handle the case where original_ctor is object.__init__
      # separately, since object.__init__ does not have __module__ defined,
      # which causes functools.wraps() to raise an exception.
if original_ctor == object.__init__:
def new_ctor(*args, **kwargs):
self._DisplayWarningMessage(target)
return original_ctor(*args, **kwargs)
else:
@functools.wraps(original_ctor)
def new_ctor(*args, **kwargs):
self._DisplayWarningMessage(target)
return original_ctor(*args, **kwargs)
target.__init__ = new_ctor
return target
else:
raise TypeError('@Deprecated is only applicable to functions or classes')
def Disabled(*args):
"""Decorator for disabling tests/benchmarks.
If args are given, the test will be disabled if ANY of the args match the
browser type, OS name or OS version:
@Disabled('canary') # Disabled for canary browsers
@Disabled('win') # Disabled on Windows.
@Disabled('win', 'linux') # Disabled on both Windows and Linux.
@Disabled('mavericks') # Disabled on Mac Mavericks (10.9) only.
@Disabled('all') # Unconditionally disabled.
"""
def _Disabled(func):
if not hasattr(func, '_disabled_strings'):
func._disabled_strings = set()
func._disabled_strings.update(disabled_strings)
return func
assert args, (
"@Disabled(...) requires arguments. Use @Disabled('all') if you want to "
'unconditionally disable the test.')
assert not callable(args[0]), 'Please use @Disabled(..).'
disabled_strings = list(args)
for disabled_string in disabled_strings:
# TODO(tonyg): Validate that these strings are recognized.
assert isinstance(disabled_string, str), '@Disabled accepts a list of strs'
return _Disabled
def Enabled(*args):
"""Decorator for enabling tests/benchmarks.
The test will be enabled if ANY of the args match the browser type, OS name
or OS version:
@Enabled('canary') # Enabled only for canary browsers
@Enabled('win') # Enabled only on Windows.
@Enabled('win', 'linux') # Enabled only on Windows or Linux.
@Enabled('mavericks') # Enabled only on Mac Mavericks (10.9).
"""
def _Enabled(func):
if not hasattr(func, '_enabled_strings'):
func._enabled_strings = set()
func._enabled_strings.update(enabled_strings)
return func
assert args, '@Enabled(..) requires arguments'
assert not callable(args[0]), 'Please use @Enabled(..).'
enabled_strings = list(args)
for enabled_string in enabled_strings:
# TODO(tonyg): Validate that these strings are recognized.
assert isinstance(enabled_string, str), '@Enabled accepts a list of strs'
return _Enabled
# TODO(dpranke): Remove if we don't need this.
def Isolated(*args):
"""Decorator for noting that tests must be run in isolation.
The test will be run by itself (not concurrently with any other tests)
if ANY of the args match the browser type, OS name, or OS version."""
def _Isolated(func):
if not isinstance(func, types.FunctionType):
func._isolated_strings = isolated_strings
return func
@functools.wraps(func)
def wrapper(*args, **kwargs):
func(*args, **kwargs)
wrapper._isolated_strings = isolated_strings
return wrapper
if len(args) == 1 and callable(args[0]):
isolated_strings = []
return _Isolated(args[0])
isolated_strings = list(args)
for isolated_string in isolated_strings:
# TODO(tonyg): Validate that these strings are recognized.
assert isinstance(isolated_string, str), 'Isolated accepts a list of strs'
return _Isolated
# TODO(nednguyen): Remove this and have call site just use ShouldSkip directly.
def IsEnabled(test, possible_browser):
"""Returns True iff |test| is enabled given the |possible_browser|.
Use to respect the @Enabled / @Disabled decorators.
Args:
test: A function or class that may contain _disabled_strings and/or
_enabled_strings attributes.
possible_browser: A PossibleBrowser to check whether |test| may run against.
"""
should_skip, msg = ShouldSkip(test, possible_browser)
return (not should_skip, msg)
def ShouldSkip(test, possible_browser):
"""Returns whether the test should be skipped and the reason for it."""
platform_attributes = _PlatformAttributes(possible_browser)
if hasattr(test, '__name__'):
name = test.__name__
elif hasattr(test, '__class__'):
name = test.__class__.__name__
else:
name = str(test)
skip = 'Skipping %s (%s) because' % (name, str(test))
running = 'You are running %r.' % platform_attributes
if hasattr(test, '_disabled_strings'):
if 'all' in test._disabled_strings:
return (True, '%s it is unconditionally disabled.' % skip)
if set(test._disabled_strings) & set(platform_attributes):
return (True, '%s it is disabled for %s. %s' %
(skip, ' and '.join(test._disabled_strings), running))
if hasattr(test, '_enabled_strings'):
if 'all' in test._enabled_strings:
return False, None # No arguments to @Enabled means always enable.
if not set(test._enabled_strings) & set(platform_attributes):
return (True, '%s it is only enabled for %s. %s' %
(skip, ' or '.join(test._enabled_strings), running))
return False, None
def ShouldBeIsolated(test, possible_browser):
platform_attributes = _PlatformAttributes(possible_browser)
if hasattr(test, '_isolated_strings'):
isolated_strings = test._isolated_strings
if not isolated_strings:
return True # No arguments to @Isolated means always isolate.
for isolated_string in isolated_strings:
if isolated_string in platform_attributes:
return True
return False
return False
def _PlatformAttributes(possible_browser):
"""Returns a list of platform attribute strings."""
attributes = [a.lower() for a in [
possible_browser.browser_type,
possible_browser.platform.GetOSName(),
possible_browser.platform.GetOSVersionName(),
]]
if possible_browser.supports_tab_control:
attributes.append('has tabs')
if 'content-shell' in possible_browser.browser_type:
attributes.append('content-shell')
if 'mandoline' in possible_browser.browser_type:
attributes.append('mandoline')
return attributes
|
### BEGIN LICENSE ###
### Use of the triage tools and related source code is subject to the terms
### of the license below.
###
### ------------------------------------------------------------------------
### Copyright (C) 2011 Carnegie Mellon University. All Rights Reserved.
### ------------------------------------------------------------------------
### Redistribution and use in source and binary forms, with or without
### modification, are permitted provided that the following conditions are
### met:
###
### 1. Redistributions of source code must retain the above copyright
### notice, this list of conditions and the following acknowledgments
### and disclaimers.
###
### 2. Redistributions in binary form must reproduce the above copyright
### notice, this list of conditions and the following disclaimer in the
### documentation and/or other materials provided with the distribution.
###
### 3. All advertising materials for third-party software mentioning
### features or use of this software must display the following
### disclaimer:
###
### "Neither Carnegie Mellon University nor its Software Engineering
### Institute have reviewed or endorsed this software"
###
### 4. The names "Department of Homeland Security," "Carnegie Mellon
### University," "CERT" and/or "Software Engineering Institute" shall
### not be used to endorse or promote products derived from this software
### without prior written permission. For written permission, please
### contact [email protected].
###
### 5. Products derived from this software may not be called "CERT" nor
### may "CERT" appear in their names without prior written permission of
### [email protected].
###
### 6. Redistributions of any form whatsoever must retain the following
### acknowledgment:
###
### "This product includes software developed by CERT with funding
### and support from the Department of Homeland Security under
### Contract No. FA 8721-05-C-0003."
###
### THIS SOFTWARE IS PROVIDED BY CARNEGIE MELLON UNIVERSITY ``AS IS'' AND
### CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER
### EXPRESS OR IMPLIED, AS TO ANY MATTER, AND ALL SUCH WARRANTIES, INCLUDING
### WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
### EXPRESSLY DISCLAIMED. WITHOUT LIMITING THE GENERALITY OF THE FOREGOING,
### CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND
### RELATING TO EXCLUSIVITY, INFORMATIONAL CONTENT, ERROR-FREE OPERATION,
### RESULTS TO BE OBTAINED FROM USE, FREEDOM FROM PATENT, TRADEMARK AND
### COPYRIGHT INFRINGEMENT AND/OR FREEDOM FROM THEFT OF TRADE SECRETS.
### END LICENSE ###
'''
Some unit tests for select objects in lib.
Because these objects expect to be instantiated in GDB's Python interpreter,
the Python-supplied unittest module is not used (it assumes access to
argv). This script is meant to be invoked like this:
exploitable$ gdb -ex "source lib/gdb_wrapper/tests/x86_unit_tests.py" -ex "quit"
'''
import sys, os
sys.path.append(os.getcwd())
import lib.gdb_wrapper.x86 as gdb_wrapper
def assertEqual(val1, val2, fmt="\"%s\""):
assert type(val1) == type(val2), "%s != %s" % (type(val1), type(val2))
assert val1 == val2, ("%s != %s" % (fmt, fmt)) % (val1, val2)
def testInstruction():
'''
Tests that the gdb_wrapper.Instruction string parsing works as expected.
'''
# single arg test
gdbstr = "=> 0xb97126 <gtk_main+6>:call 0xab9247"
i = gdb_wrapper.x86Instruction(gdbstr)
assertEqual(i.addr, 0xb97126, "0x%x")
assertEqual(str(i.operands[0]), "0xab9247")
assertEqual(i.mnemonic, "call")
# trailing symbol test
gdbstr = "=> 0xb97126 <gtk_main+6>:call 0xab9247 <g_list_remove_link@plt>"
i = gdb_wrapper.x86Instruction(gdbstr)
assertEqual(i.addr, 0xb97126, "0x%x")
assertEqual(str(i.operands[0]), "0xab9247")
assertEqual(i.mnemonic, "call")
# no args
gdbstr = " 0x005ab337 <+535>: ret"
i = gdb_wrapper.x86Instruction(gdbstr)
assertEqual(i.addr, 0x005ab337, "0x%x")
assertEqual(len(i.operands), 0)
assertEqual(i.mnemonic, "ret")
# prefix, multiple args
gdbstr = " 0x0011098c: repz xor 0xDEADBEEF,0x23"
i = gdb_wrapper.x86Instruction(gdbstr)
assertEqual(i.addr, 0x0011098c, "0x%x")
assertEqual(str(i.operands[0]), "0xDEADBEEF")
assertEqual(str(i.operands[1]), "0x23")
assertEqual(i.mnemonic, "repz xor")
# segment register
gdbstr = "=> 0x4004bf <main+11>: mov DWORD PTR ds:0x0,eax"
i = gdb_wrapper.x86Instruction(gdbstr)
assertEqual(i.addr, 0x4004bf, "0x%x")
assertEqual(str(i.operands[0]), "DWORD PTR ds:0x0")
assertEqual(str(i.operands[1]), "eax")
assertEqual(i.mnemonic, "mov")
# C++ template class
gdbstr = "=> 0x4007c9 <Test::MyTemplate<5, Test::MyTemplateClass<5, 6, 7, 8> >()+4>: mov DWORD PTR [eax+ebx*4+0x8],eax"
i = gdb_wrapper.x86Instruction(gdbstr)
assertEqual(i.addr, 0x4007c9, "0x%x")
assertEqual(str(i.operands[0]), "DWORD PTR [eax+ebx*4+0x8]")
assertEqual(str(i.operands[1]), "eax")
assertEqual(i.mnemonic, "mov")
# C++ test
gdbstr = "=> 0x4211d6 <AvlTree<address_space::memory_page_t*, address_space::lessbyHost, nil<address_space::memory_page_t*> >::rotateWithLeftChild(AvlNode<address_space::memory_page_t*, address_space::lessbyHost, nil<address_space::memory_page_t*> >*&) const+50>:\tmov rdx,QWORD PTR [rax+0x18]"
i = gdb_wrapper.x86Instruction(gdbstr)
assertEqual(i.addr, 0x4211d6, "0x%x")
assertEqual(str(i.operands[0]), "rdx")
assertEqual(str(i.operands[1]), "QWORD PTR [rax+0x18]")
assertEqual(i.mnemonic, "mov")
def testOperand():
'''
Tests that the gdb_wrapper.Operand string parsing works as expected.
Does not test expression evaluation -- would need active registers to
do that.
'''
# eiz, pointer test
# 0x005ab184 <+100>: lea esi,[esi+eiz*1+0x0]
gdbstr = "[esi+eiz*1+0x0]"
o = gdb_wrapper.Operand(gdbstr)
assertEqual(o.is_pointer, True)
assertEqual(o.expr, "$esi+0*1+0x0")
# complex, other-pointer-style test
# 0x00110057: add BYTE PTR [edx+edx*8-0x1d43fffe],bh
gdbstr = "BYTE PTR [edx+edx*8-0x1d43fffe]"
o = gdb_wrapper.Operand(gdbstr)
assertEqual(o.is_pointer, True)
assertEqual(o.expr, "$edx+$edx*8-0x1d43fffe")
# less-common register test
# 0x00110057: add BYTE PTR [edx+edx*8-0x1d43fffe],bh
gdbstr = "bh"
o = gdb_wrapper.Operand(gdbstr)
assertEqual(o.is_pointer, False)
assertEqual(o.expr, "$bh")
# yet-another-pointer-style test
# 0x001102ab: add BYTE PTR ds:0x2880000,ch
gdbstr = "BYTE PTR ds:0x2880000"
o = gdb_wrapper.Operand(gdbstr)
assertEqual(o.is_pointer, True)
assertEqual(o.expr, "0x2880000")
# floating point stack test
# 0xb68dc5: fucomi st,st(1)
gdbstr = "st(1)"
o = gdb_wrapper.Operand(gdbstr)
assertEqual(o.is_pointer, False)
assertEqual(o.expr, "st(1)")
# spacing test
gdbstr = "edi * xmm5 +1"
o = gdb_wrapper.Operand(gdbstr)
assertEqual(o.is_pointer, False)
assertEqual(o.expr.replace(" ",""), "$edi*$xmm5+1")
# 64-bit registers
gdbstr = "r16 + r8 + r8b"
o = gdb_wrapper.Operand(gdbstr)
assertEqual(o.is_pointer, False)
assertEqual(o.expr.replace(" ",""), "$r16+$r8+$r8b")
# more 64-bit registers
gdbstr = "[r12w + 0x50]"
o = gdb_wrapper.Operand(gdbstr)
assertEqual(o.is_pointer, True)
assertEqual(o.expr.replace(" ",""), "$r12w+0x50")
if __name__ == "__main__":
testInstruction()
testOperand()
print("passed all tests")
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import models
from django.contrib.auth.models import User
from googaccount.models import AppCreds
from main.support import animations_list, font_effects
class Meta(models.Model):
appcreds = models.ForeignKey(AppCreds)
counter = models.IntegerField(default=0)
type = models.CharField(max_length=50,
choices=(
('youtubesubs', 'YouTube Subscribers'),
('youtubeviewers', 'YouTube Concurrents'),
)
)
next_update = models.DateTimeField()
last_update = models.DateTimeField()
running = models.BooleanField(default=False)
pre_text = models.CharField(blank=True, null=True, max_length=255, help_text="e.g 'Sub Goal:'")
post_text = models.CharField(blank=True, null=True, max_length=255, help_text = "e.g. / 3000")
font = models.CharField(blank=True, null=True, max_length=255)
font_size = models.CharField(blank=True, null=True, max_length=255)
font_color = models.CharField(blank=True, null=True, max_length=255)
font_effect = models.CharField(blank=True, null=True, max_length=255, choices=font_effects())
font_weight = models.CharField(blank=True, null=True, max_length=255, default="normal")
outline_color = models.CharField(blank=True, null=True, max_length=255)
user = models.ForeignKey(User)
|
#!/usr/bin/env python
#Takes as input schtats output files,
#one per SMRTcell, and graphs them.
#File names are expected to come from the
#deplex script.
import sys
from itertools import chain, imap, cycle
from operator import itemgetter
import math
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from numpy import arange
from args import parseArgs, getHelpStr, CLArgument
from nucio import fileIterator, lineItemIterator
from args import argflag
description = ("Usage: graphCellStats.py [options] title in.schtats [in2.schtats ...]\n\n"
"Graph SMRTCell Stats")
argument_list = [["lengreater", "lengreater", int, 10000,
("The y-axis will be of reads greater than this "
"argument. Make sure that all schatats outputs "
"have this increment: ex: #>10000 "
"Default: 10000 ")],
["counts", "counts", argflag, False,
("Graph counts instead of bases ie. number of reads")],
["out","out", str, "cellstats.pdf",
("Output filename. Default: 'cellstats.pdf'")]]
arguments = map(CLArgument._make, argument_list)
if not len(sys.argv) > 1:
sys.exit(getHelpStr(description, arguments) + "\n")
(p_arg_map, args_remaining) = parseArgs(sys.argv[1:], arguments)
if not len(args_remaining) >= 1:
sys.exit(getHelpStr(description, arguments) + "\n")
title = args_remaining[0]
infiles = args_remaining[1:]
cellnames = map(lambda f : "_".join(f.split(".")[0].split("_")[:2]), infiles)
fit_gen = lambda filename : fileIterator(filename, lineItemIterator)
file_iterators = map(fit_gen, infiles)
def getBasesFromLineArr(arr):
if not bool(arr):
return
if arr[0].startswith("n="):
return arr[6].split("=")[1]
if arr[0].startswith("#>%d" % p_arg_map["lengreater"]):
return arr[1].split("=")[1]
def getCountsFromLineArr(arr):
if not bool(arr):
return
if arr[0].startswith("n="):
return arr[0].split("=")[1]
if arr[0].startswith("#>%d" % p_arg_map["lengreater"]):
return arr[0].split("=")[1]
intlog = lambda x : math.log(int(x))
data = []
dgetter = getCountsFromLineArr if p_arg_map["counts"] else getBasesFromLineArr
for cellname, it in zip(cellnames,file_iterators):
d = map(intlog,filter(bool,imap(dgetter, it)))
d.append(cellname)
data.append(d)
mpl.rc('xtick', labelsize=6)
mpl.rc('ytick', labelsize=6)
pp = PdfPages(p_arg_map["out"])
colors = cycle("bgrcmyk")
markers = "oooooooxxxxxxxx++++++++********ssssssssvvvvvvvv"
cellset = sorted(list(set(cellnames)))
cmap = dict(zip(cellset, zip(colors,markers)))
h = []
for cellgroup in cellset:
groupdata = filter(lambda x : x[2] == cellgroup, data)
(alld, dgreater, cells) = zip(*groupdata)
h.append(plt.scatter(alld, dgreater, marker=cmap[cellgroup][1], c=cmap[cellgroup][0]))
plt.legend(h,cellset, loc='upper left', fontsize=4, scatterpoints=1)
if p_arg_map["counts"]:
plt.xlabel("Log (Total Number of Reads)")
plt.ylabel("Log (Total Number of Reads > %d)" % p_arg_map["lengreater"])
else:
plt.xlabel("Log (Total Cell Bases)")
plt.ylabel("Log (Bases > %d )" % p_arg_map["lengreater"])
plt.suptitle(title)
plt.savefig(pp, format="pdf")
pp.close()
|
from __future__ import annotations
from cmath import exp, polar, sqrt
from types import FunctionType
from itertools import dropwhile, zip_longest
from math import comb, factorial, inf, pi
from operator import not_
from typing import Literal, Iterable, Iterator, Protocol, TypeVar, Union
__all__ = ("Polynom",)
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)
Scalar = Union[float, complex]
j = exp(2j * pi / 3)
class PolynomError(Exception):
...
class RootNotFound(PolynomError):
...
class TooManyKnownRoots(PolynomError):
...
class NotARoot(PolynomError):
...
def hide_ones(x: Scalar) -> str:
if x == 1:
return ""
return str(x)
def factor_negative(x: Scalar) -> tuple[Literal["+", "-"], Scalar]:
if isinstance(x, complex):
if max(x.imag, x.real) < 0:
return ("-", -x)
return ("+", x)
if x < 0:
return ("-", -x)
return ("+", x)
class SupportsAdd(Protocol[T]):
def __add__(self, __other: T) -> T:
...
class SupportsSub(Protocol[T]):
def __sub__(self, __other: T) -> T:
...
class SupportsTrueDiv(Protocol[T]):
def __truediv__(self, __other: T) -> T:
...
class SupportsMul(Protocol[T]):
def __mul__(self, __other: T) -> T:
...
class SupportsPow(Protocol[T_co]):
def __pow__(self, __other: int) -> T_co:
...
class SupportsCall(Protocol[T_co]):
def __call__(self, *__args, **kwargs) -> T_co:
...
class ScalarLike(
SupportsAdd[T], SupportsSub[T], SupportsMul[T], SupportsPow[T], Protocol[T]
):
pass
class DivScalarLike(ScalarLike[T], SupportsTrueDiv[T], Protocol[T]):
pass
class Polynom:
__slots__ = ("_coefficients",)
def __init__(self, coefficients: Iterable[Scalar], /):
coeffs = tuple(coefficients)
rev = dropwhile(not_, coeffs[::-1])
self._coefficients = tuple(rev)[::-1]
@classmethod
def zero(cls):
"""Construct a polybom that constantly evalutates to 0"""
return cls([])
@classmethod
def one(cls):
"""Constructs a polynom that constantly evaluates to 1"""
return cls([1])
@classmethod
def constant(cls, a: Scalar, /):
"""
Construct a polynom that constantly evaluates to "a"
Args:
a: constant that this polynom evaluates to
"""
return cls([a])
@classmethod
def identity(cls):
"""Constructs a polynom that always returns the provided argument"""
return cls([0, 1])
@classmethod
def Xpow(cls, n: int, /):
"""
Constructs a polynom that returns the provided argument raised to the power n
Args:
n: power at which X is raised to
"""
return cls([0 for _ in range(n)] + [1])
@classmethod
def from_roots(cls, roots_: Iterable[Scalar], /) -> Polynom:
"""
Given a tuple (x_1, ..., x_n), constructs a polynom P
        such that P(x_i) = 0 for all 1 <= i <= n
Args:
roots_: Iterable of points where this polynom should evaluate to 0
"""
roots = tuple(roots_)
prod = cls.one()
for root in roots:
prod *= Polynom([-root, 1])
return prod
@classmethod
def lagrange(cls, points_: Iterable[tuple[Scalar, Scalar]], /) -> Polynom:
"""
Given a set of points ((x_1, y_1), ... (x_n, y_n)), constructs a polynom
        P such that P(x_i) = y_i for all 1 <= i <= n
Args:
            points_: Iterable of points where the polynom's curve should go through
"""
points = tuple(points_)
n = len(points)
sum_ = cls.zero()
for j in range(n):
prod = cls([points[j][1]])
for i in range(n):
if i == j:
continue
prod *= cls([-points[i][0], 1]) / (points[j][0] - points[i][0])
sum_ += prod
return sum_
@classmethod
def approximate_function(
cls, f: SupportsCall, a: float, b: float, deg: int
) -> Polynom:
"""Approximates function "f" on [a, b] using a polynom of degree "deg"."""
if deg == 0:
return Polynom.constant(f((a + b) / 2))
step = (b - a) / deg
anchor_points = [a + k * step for k in range(deg + 1)]
return cls.lagrange([(x, f(x)) for x in anchor_points])
@classmethod
def chebyshev(cls, kind: Literal[1, 2] = 1, n: int = 1) -> Polynom:
"""
Chebyshev's polynoms
        Both kinds are defined by the recurrence:
        T_{n+2} = 2X T_{n+1} - T_n
For the first kind:
T_0 = 1
T_1 = X
For the second kind:
T_0 = 1
T_1 = 2X
Args:
kind: Which kind to choose
n: Index of the required polynom
"""
upper = int(n / 2)
sum_ = cls.zero()
if kind == 1:
cst = cls([-1, 0, 1])
for k in range(upper + 1):
sum_ += comb(n, 2 * k) * (cst ** k) * cls.Xpow(n - 2 * k)
return sum_
if kind == 2:
cst = cls([-1, 0, 1])
for k in range(upper + 1):
sum_ += comb(n + 1, 2 * k + 1) * (cst ** k) * cls.Xpow(n - 2 * k)
return sum_
raise TypeError(f"Expected 1 or 2 for parameter kind, got {kind}")
@classmethod
def hilbert(cls, n: int) -> Polynom:
"""
Hilbert's polynoms
Defined by:
H_0 = 1
for all non-zero natural integer n:
H_n = X(X-1) ... (X - n + 1) / n!
Args:
n: Index of the desired polynom
"""
prod_ = Polynom([1 / factorial(n)])
for k in range(n):
prod_ *= Polynom([-k, 1])
return prod_
@classmethod
def taylor(cls, f: FunctionType, a: float, n: int, h: float = 1e-5) -> Polynom:
"""
Taylor series of function f around a of order n using h to approximate the n-th derivative
"""
sum_ = Polynom.zero()
def df(f):
return lambda x: (f(x + h) - f(x - h)) / (2 * h)
for k in range(n):
sum_ += f(a) * Polynom.Xpow(k) / factorial(k)
f = df(f) # type: ignore
return sum_
@classmethod
def mclaurin(cls, f: FunctionType, n: int, h: float = 1e-5) -> Polynom:
"""
Mclaurin series of function f of order n using h to approximate the n-th derivative
"""
return cls.taylor(f, 0, n, h)
@property
def coefficients(self) -> tuple[Scalar, ...]:
"""
Since P = a_0 + a_1 X + a_2 X^2 + ... + a_n X^n
Returns the tuple (a_0, ..., a_n)
"""
return self._coefficients
@property
def degree(self) -> int | float:
"""
Returns this polynom's degree i.e. highest power.
        Note that if P(X) = 0, then its degree is -infinity.
"""
return len(self.coefficients) - 1 if self.coefficients else -inf
@property
def is_even(self) -> bool:
"""
        Checks whether this polynom is even (contains only even powers of X)
        """
        return all(a == 0 for a in self.coefficients[1::2])
@property
def is_odd(self) -> bool:
"""
        Checks whether this polynom is odd (contains only odd powers of X)
        """
        return all(a == 0 for a in self.coefficients[::2])
@property
def is_real(self) -> bool:
"""
Checks whether this polynom is real
"""
        return all(isinstance(a, (int, float)) for a in self.coefficients)
@property
def is_complex(self) -> bool:
"""
Checks whether this polynom is complex
"""
return any(isinstance(a, complex) for a in self.coefficients)
def __repr__(self) -> str:
return "{0.__class__.__name__}([{0.coefficients}])".format(self)
def __str__(self) -> str:
if self.degree < 0:
return "0X^0"
if self.degree == 0:
return hide_ones(self.coefficients[0]) + "X^0"
disp = []
if self.coefficients[0] != 0:
disp.append(str(self.coefficients[0]))
if self.coefficients[1] != 0:
sign, value = factor_negative(self[1])
if value == 1:
disp.append(f"{sign} X")
else:
disp.append(f"{sign} {value}X")
for index, coeff in enumerate(self.coefficients[2:], start=2):
if coeff == 0:
continue
sign, value = factor_negative(coeff)
if value == 1:
disp.append(f"{sign} X^{index}")
else:
disp.append(f"{sign} {value}X^{index}")
return " ".join(disp)
def __eq__(self, other: object) -> bool:
return type(self) == type(other) and self.coefficients == other.coefficients # type: ignore
def __hash__(self) -> int:
return hash(self.coefficients)
def __bool__(self) -> bool:
return self.degree >= 0
def __len__(self) -> int | float:
return self.degree
def __iter__(self) -> Iterator[Scalar]:
return iter(self.coefficients)
def __call__(self, x: ScalarLike):
sum_ = x - x
for index, coeff in enumerate(self.coefficients):
right = x ** index
sum_ += coeff * right
return sum_
def __add__(self, other: Polynom) -> Polynom:
coeffs = [
x + y
for x, y in zip_longest(self.coefficients, other.coefficients, fillvalue=0)
]
return Polynom(coeffs)
def __neg__(self) -> Polynom:
return -1 * self
def __sub__(self, other: Polynom) -> Polynom:
return self + (-other)
def __mul__(self, other: Scalar | Polynom) -> Polynom:
if isinstance(other, (int, float, complex)):
return Polynom([a * other for a in self.coefficients])
# one of our polynomials is null
if min(self.degree, other.degree) < 0:
return Polynom.zero()
coeffs = []
for k in range(int(self.degree + other.degree) + 1):
sum_ = 0
for i in range(k + 1):
try:
sum_ += self[i] * other[k - i]
except IndexError:
continue
coeffs.append(sum_)
return Polynom(coeffs)
__rmul__ = __mul__
def __truediv__(self, other: Scalar) -> Polynom:
return (1 / other) * self
def __floordiv__(self, other: Polynom) -> Polynom:
quotient, _ = self.__divmod__(other)
return quotient
def __mod__(self, other: Polynom) -> Polynom:
_, remainder = self.__divmod__(other)
return remainder
def __matmul__(self, other: Polynom):
return self(other)
def __pos__(self):
return self
def __pow__(self, n: int):
if n == 0:
return Polynom.one()
if self.degree == 0:
return Polynom.constant(self[0] ** n)
if self.degree == 1:
b, a = self
return Polynom([comb(n, k) * a**k * b**(n-k) for k in range(n + 1)])
p = self
for _ in range(n - 1):
p *= self
return p
def __getitem__(self, index: int):
return self.coefficients[index]
def __divmod__(self, other: Polynom) -> tuple[Polynom, Polynom]:
if self.degree < other.degree:
return Polynom.zero(), self
A = self
B = other
Q = Polynom.zero()
while A.degree >= B.degree:
P = (A[-1] / B[-1]) * Polynom.Xpow(int(A.degree - B.degree))
A -= P * B
Q += P
return Q, A
def monic(self) -> Polynom:
"""
Returns the monic polynomial associated to ours.
"""
if self[-1] == 1:
return self
return self / self[-1]
def derivative(self, n: int = 1) -> Polynom:
"""
n-th derivative
"""
if n > self.degree:
return Polynom.zero()
coeffs = list(self.coefficients)
for _ in range(n):
coeffs = [index * a for index, a in enumerate(coeffs)][1:]
return Polynom(coeffs)
def antiderivative(self, n: int = 1, constant: float | complex = 0, /):
"""
n-th antiderivative using the provided constant
"""
coeffs = list(self.coefficients)
for _ in range(n):
coeffs = [constant] + [a / index for index, a in enumerate(coeffs, start=1)]
return Polynom(coeffs)
def integrate(self, a: float, b: float, /) -> float:
"""
Integral [a, b]
"""
antiderivative = self.antiderivative()
return antiderivative(b) - antiderivative(a)
@classmethod
def approximate_integral(
cls, f: FunctionType, a: float, b: float, n: int, deg: int
) -> float:
"""
Approximates the integral of "f" between "a" and "b" using "n" polynoms of degree "deg".
"""
step = (b - a) / n
integral = float(0)
        for k in range(n):
            c = a + k * step
            d = a + (k + 1) * step
P = cls.approximate_function(f, c, d, deg)
integral += P.integrate(c, d)
return integral
def gcd(self, other: Polynom) -> Polynom:
"""
Returns the greatest common divisor between this polynom and another one
"""
P = self
Q = other
while Q:
P, Q = Q, P % Q
return P.monic()
def lcm(self, other: Polynom) -> Polynom:
"""
        Returns the least common multiple between this polynom and another one
"""
if not (self and other):
return Polynom.zero()
P = (self * other) // self.gcd(other)
return P.monic()
def _newton(self, x: Scalar, /) -> Scalar | None:
derivative = self.derivative()
for _ in range(10):
try:
x -= self(x) / derivative(x)
except ZeroDivisionError:
# Can't guarantee that it converged
return None
return x
def _orig_newton(self, x: Scalar, /) -> Scalar:
"""
Newton, but returns the original argument if it didn't converge
"""
res = self._newton(x)
if res is None:
return x
return res
def _root_cubic(self, epsilon: float) -> set[Scalar]:
"""
        Finds the roots of this polynom if its degree is 3.
"""
d, c, b, a = self
# yay
delta = (
18 * a * b * c * d
- 4 * b ** 3 * d
+ (b * c) ** 2
- 4 * a * c ** 3
- 27 * (a * d) ** 2
)
delta_0 = b ** 2 - 3 * a * c
delta_1 = 2 * b ** 3 - 9 * a * b * c + 27 * a ** 2 * d
if max(abs(delta), abs(delta_0)) <= epsilon:
return {self._orig_newton(-b / (3 * a))}
if abs(delta) <= epsilon and abs(delta_0) > epsilon:
r_1 = self._orig_newton((9 * a * d - b * c) / (2 * delta_0))
r_2 = self._orig_newton(
(4 * a * b * c - 9 * a ** 2 * d - b ** 3) / (a * delta_0)
)
return {r_1, r_2}
        # general case: C = cbrt((delta_1 + sqrt(delta_1^2 - 4*delta_0^3)) / 2)
        C = ((delta_1 + sqrt(delta_1 ** 2 - 4 * delta_0 ** 3)) / 2) ** (1 / 3)
close_roots = set()
for k in range(3):
jc = (j ** k) * C
close_roots.add((-1 / (3 * a)) * (b + jc + delta_0 / jc))
if (
isinstance(delta, float) and delta > 0
): # we know that all roots are real in that case
close_roots = {z.real for z in close_roots}
# Since it isn't as accurate when roots are bigger, we apply Newton's method
return {self._orig_newton(root) for root in close_roots}
def roots(
self, known_roots: Iterable[Scalar] = (), epsilon: float = 10e-2
) -> set[Scalar] | bool:
"""
If this polynom's degree is lower or equal to 3, returns all roots.
Args:
            known_roots: Used to factor this polynom, pass at least n - 3 of them
so the remaining ones can be found.
(n designates this polynom's degree)
            epsilon: Used to compute how close two floats need to be
before they are considered equal.
Pass 0 to avoid approximating anything.
Returns:
True: this polynom is null, everything is a root
None: No roots
set(...): A set containing all of this polynom's roots
Raises:
NotARoot: One element provided in known_roots is not a root
                (or does not evaluate close enough to 0 according
to the provided epsilon)
TooManyKnownRoots: Too many provided known_roots, can't have more than
this polynom's degree
RootNotFound: Couldn't find remaining roots because not enough known_roots
were provided
Note:
Apparently, numpy has a method to find the roots of any polynom
Will switch to it if I ever understand how it works
"""
known_roots = set(known_roots)
for maybe_root in known_roots:
if abs(self(maybe_root)) > epsilon:
raise NotARoot(f"{maybe_root} is not a root of this polynom")
if self.degree < 0: # P(X) = 0
return True
if self.degree == 0: # a = 0
return set()
if self.degree == 1: # az + b = 0
return {-self[0] / self[1]}
if self.degree == 2: # c + bz + az^2 = 0
c, b, a = self
s_delta = sqrt(b ** 2 - 4 * a * c)
return {(-b - s_delta) / (2 * a), (-b + s_delta) / (2 * a)}
if set(self.coefficients[1:-1]) == {0}: # z**n + a = 0
if self[0] == 0: # z**n = 0
return {0}
r, phi = polar(-self[0])
n = int(self.degree)
return {r ** (1 / n) * exp((1j / n) * (phi + 2 * k * pi)) for k in range(n)}
if self.degree == 3: # az^3 + bz^2 + cz + d = 0
return self._root_cubic(epsilon)
diff = int(self.degree) - len(known_roots)
if diff < 0:
raise TooManyKnownRoots("Too many known roots provided")
if diff == 0:
return known_roots
# Try some basic stuff to get more roots
guessed_roots: set[complex] = set()
for known_root in known_roots:
if self.is_real:
                # if we have a root, then its conjugate is a root too
conj = known_root.conjugate()
if abs(known_root - conj) > epsilon:
guessed_roots.add(conj)
neg = -known_root
if self.is_even or self.is_odd:
                # if we have a root, then its opposite is a root too
if abs(known_root - neg) > epsilon:
guessed_roots.add(neg)
known_roots |= guessed_roots
if diff <= 3: # ensure that the new polynom has roots
poly = self // Polynom.from_roots(known_roots)
new_roots = poly.roots()
if isinstance(new_roots, set):
if not any(isinstance(x, bool) for x in new_roots): # type checker ...
return known_roots | new_roots
raise RootNotFound("No roots found for this polynom")
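if __name__ == "__main__":
    # Small usage sketch / self-check exercising a few of the constructors and
    # methods defined above (not part of the library itself).
    P = Polynom.from_roots([1, 2])                 # (X - 1)(X - 2) = 2 - 3X + X^2
    print(P)
    print(P(1), P(2))                              # both evaluate to 0
    print(P.roots())                               # {1, 2} (as complex numbers)
    L = Polynom.lagrange([(0, 1), (1, 3), (2, 7)])
    print(L(0), L(1), L(2))                        # 1 3 7 (up to float error)
    print(P.derivative())                          # -3 + 2X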
|
def mergingLetters(s, t):
    # Merge the two strings by alternating their characters; any leftover
    # characters from the longer string are appended at the end.
    mergedStr = ""
    firstChar = list(s)
    secondChar = list(t)
    for i in range(max(len(firstChar), len(secondChar))):
        if i < len(firstChar):
            mergedStr = mergedStr + firstChar[i]
        if i < len(secondChar):
            mergedStr = mergedStr + secondChar[i]
    return mergedStr
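# Quick sanity check: characters alternate and the unmatched tail of the
# longer string ('jjj' here) is kept at the end.
assert mergingLetters('abcd', 'jjjjjjj') == 'ajbjcjdjjjj'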
print(mergingLetters('abcd', 'jjjjjjj')) |
"""
API implementation for chart.client.utils.
"""
from __future__ import annotations
from typing import Any, Dict, Optional, Union
from . import objects, sessions
async def _request(type, uri, headers, data, output):
session = sessions.Session()
return await session.request(type, uri, headers, data, output)
async def _ws_connection(uri, msg):
ws_conn = sessions.WebSocket()
ws_conn.start_connection(uri, msg)
async def get(uri: str,
headers: Union[Dict[str, Any], Any, None] = None,
data: Union[Dict[str, Any], Any, None] = None,
output: Optional[str] = None) -> objects.Response:
return await _request(uri=uri,
headers=headers,
data=data,
type='GET',
output=output)
async def post(uri: str,
headers: Union[Dict[str, Any], Any, None] = None,
data: Union[Dict[str, Any], Any, None] = None,
output: Optional[str] = None) -> objects.Response:
return await _request(uri=uri,
headers=headers,
data=data,
type='POST',
output=output)
async def connect(uri: str, msg: Any):
    return await _ws_connection(uri, msg)
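if __name__ == "__main__":
    # Usage sketch (assumptions: the module is executed with `python -m` so the
    # relative imports above resolve, and the placeholder URL answers GET requests).
    import asyncio
    async def _demo():
        resp = await get("https://example.com/data", headers={"Accept": "application/json"})
        print(resp)
    asyncio.run(_demo())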
|
#!/usr/bin/env python
from networks.imagenet.imagenet_model import ImagenetModel
from tensorflow.keras import applications
class ResNet50(ImagenetModel):
def __init__(self, args):
self.name = f"ResNet-50"
self.model_class = applications.ResNet50
ImagenetModel.__init__(self, args)
class ResNet101(ImagenetModel):
def __init__(self, args):
self.name = f"ResNet-101"
self.model_class = applications.ResNet101
ImagenetModel.__init__(self, args)
class ResNet152(ImagenetModel):
def __init__(self, args):
self.name = f"ResNet-152"
self.model_class = applications.ResNet152
ImagenetModel.__init__(self, args)
class ResNetV250(ImagenetModel):
def __init__(self, args):
self.name = f"ResNetV2-50"
self.model_class = applications.ResNet50V2
ImagenetModel.__init__(self, args)
class ResNetV2101(ImagenetModel):
def __init__(self, args):
self.name = f"ResNetV2-101"
self.model_class = applications.ResNet101V2
ImagenetModel.__init__(self, args)
class ResNetV2152(ImagenetModel):
def __init__(self, args):
self.name = f"ResNetV2-152"
self.model_class = applications.ResNet152V2
ImagenetModel.__init__(self, args)
class InceptionV3(ImagenetModel):
def __init__(self, args):
self.name = 'InceptionV3'
self.model_class = applications.InceptionV3
ImagenetModel.__init__(self, args)
class InceptionResNetV2(ImagenetModel):
def __init__(self, args):
self.name = 'InceptionResnetV2'
self.model_class = applications.InceptionResNetV2
ImagenetModel.__init__(self, args)
class Xception(ImagenetModel):
def __init__(self, args):
self.name = 'Xception'
self.model_class = applications.Xception
ImagenetModel.__init__(self, args)
class DenseNet121(ImagenetModel):
def __init__(self, args):
self.name = f"DenseNet-121"
self.model_class = applications.DenseNet121
ImagenetModel.__init__(self, args)
class DenseNet169(ImagenetModel):
def __init__(self, args):
self.name = f"DenseNet-169"
self.model_class = applications.DenseNet169
ImagenetModel.__init__(self, args)
class DenseNet201(ImagenetModel):
def __init__(self, args):
self.name = f"DenseNet-201"
self.model_class = applications.DenseNet201
ImagenetModel.__init__(self, args)
class VGG16(ImagenetModel):
def __init__(self, args):
self.name = 'VGG-16'
self.model_class = applications.VGG16
ImagenetModel.__init__(self, args)
class VGG_19(ImagenetModel):
def __init__(self, args):
self.name = 'VGG-19'
self.model_class = applications.VGG19
ImagenetModel.__init__(self, args)
class MobileNet(ImagenetModel):
def __init__(self, args):
self.name = 'MobileNet'
self.model_class = applications.MobileNet
ImagenetModel.__init__(self, args)
class MobileNetV2(ImagenetModel):
def __init__(self, args):
self.name = 'MobileNetV2'
self.model_class = applications.MobileNetV2
ImagenetModel.__init__(self, args)
class NasNet(ImagenetModel):
    # Generic NasNet entry: no model_class is assigned here, so prefer the
    # concrete NASNetMobile / NASNetLarge wrappers below.
    def __init__(self, args):
        self.name = "NasNet"
        ImagenetModel.__init__(self, args)
class NASNetMobile(ImagenetModel):
def __init__(self, args):
self.name = f"NasNet-Mobile"
self.model_class = applications.NASNetMobile
ImagenetModel.__init__(self, args)
class NASNetLarge(ImagenetModel):
def __init__(self, args):
self.name = f"NasNet-Large"
self.model_class = applications.NASNetLarge
ImagenetModel.__init__(self, args) |
import io
import os
import json
class ConfigurationFile(object):
def __init__(self, filepath, mod="fill"):
self.filepath = filepath
self.mod = mod
self.content = {}
        # create the file if it does not exist
if(not os.path.exists(self.filepath)):
print("Creating the file at ", self.filepath)
os.makedirs(os.path.dirname(self.filepath), exist_ok=True)
self.clear()
        # update the object by reading the file
self.update()
    # clear the entire config file
def clear(self):
if os.path.exists(self.filepath):
os.remove(self.filepath)
self.content = {}
ConfigurationFile._writing_json(self.filepath, {}, mod="w+")
    # return a list containing all the keys existing in the config file
def keys(self):
return [k for k in self.content]
# update the config file
def update(self):
file_content = ConfigurationFile._reading_json(self.filepath)
self.content = dict(file_content, **self.content)
ConfigurationFile._writing_json(self.filepath, self.content)
def set_mod(self, mod=""):
self.mod = mod
def __getitem__(self, index ):
if(index not in self.content and self.mod=="fill"):
value = input("A value is necessary for variable "+str(index)+"\n value : ")
self.content[index] = value
self.update()
return self.content[index] if(index in self.content) else None
def __setitem__(self, index, value):
self.content[index] = value
self.update()
def __str__(self):
_cf_str = ""
max_len = max([0]+[len(k) for k in self.content])
for k, v in self.content.items():
_cf_str += (k+(" "*(max_len - len(k)))+" : "+str(v)) + '\n'
return _cf_str
def __repr__(self):
return self.__str__()
def __contains__(self, b):
return b in self.content
@staticmethod
def _reading_json(filepath):
with io.open(filepath, 'r') as file_sconf:
return json.load(file_sconf)
@staticmethod
def _writing_json(filepath, dictionary, mod='w'):
with io.open(filepath, mod) as file_sconf:
file_sconf.write(json.dumps(dictionary, sort_keys=True, indent=4))
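if __name__ == "__main__":
    # Usage sketch: the path below is only an example. Keys assigned here are
    # written straight to disk; reading a missing key in the default "fill"
    # mode would prompt interactively, so the demo sets the key first.
    demo = ConfigurationFile("/tmp/demo_config/settings.json")
    demo["data_dir"] = "/tmp/data"
    print("known keys:", demo.keys())
    print(demo)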
|
from unittest.mock import patch
from django.contrib.auth.models import AnonymousUser
from django.test import RequestFactory
from comment.tests.test_api.test_views import BaseAPITest
from comment.api.permissions import (
IsOwnerOrReadOnly, FlagEnabledPermission, CanChangeFlaggedCommentState, SubscriptionEnabled,
CanGetSubscribers, UserPermittedOrReadOnly, CanCreatePermission, CanBlockUsers
)
from comment.api.views import CommentList
from comment.models import FlagInstanceManager
from comment.conf import settings
class BaseAPIPermissionsTest(BaseAPITest):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.factory = RequestFactory()
def setUp(self):
super().setUp()
self.view = CommentList()
class OwnerPermissionTest(BaseAPIPermissionsTest):
def setUp(self):
super().setUp()
self.permission = IsOwnerOrReadOnly()
def test_get_request(self):
request = self.factory.get('/')
self.assertTrue(self.permission.has_object_permission(request, self.view, self.comment_1))
def test_put_method_from_different_user(self):
request = self.factory.put('/')
request.user = self.user_2
self.assertEqual(self.comment_1.user, self.user_1)
self.assertFalse(self.permission.has_object_permission(request, self.view, self.comment_1))
def test_put_method_from_admin(self):
request = self.factory.put('/')
request.user = self.admin
self.assertEqual(self.comment_1.user, self.user_1)
self.assertFalse(self.permission.has_object_permission(request, self.view, self.comment_1))
def test_put_method_from_same_user(self):
request = self.factory.put('/')
request.user = self.user_1
self.assertEqual(self.comment_1.user, self.user_1)
self.assertTrue(self.permission.has_object_permission(request, self.view, self.comment_1))
class FlagEnabledPermissionTest(BaseAPIPermissionsTest):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
cls.request = cls.factory.get('/')
def setUp(self):
super().setUp()
self.permission = FlagEnabledPermission()
@patch.object(settings, 'COMMENT_FLAGS_ALLOWED', 0)
def test_flagging_disabled(self):
self.assertIs(False, self.permission.has_permission(self.request, self.view))
@patch.object(settings, 'COMMENT_FLAGS_ALLOWED', 1)
def test_flagging_enabled(self):
self.assertIs(True, self.permission.has_permission(self.request, self.view))
class CanChangeFlaggedCommentStateTest(BaseAPIPermissionsTest):
@classmethod
@patch.object(settings, 'COMMENT_FLAGS_ALLOWED', 1)
def setUpTestData(cls):
super().setUpTestData()
cls.flag_data = {
'reason': FlagInstanceManager.reason_values[0],
'info': '',
}
cls.create_flag_instance(cls.user_1, cls.comment_1, **cls.flag_data)
cls.create_flag_instance(cls.user_2, cls.comment_1, **cls.flag_data)
cls.comment_1.flag.refresh_from_db()
cls.flagged_comment = cls.comment_1
cls.unflagged_comment = cls.comment_2
def setUp(self):
super().setUp()
self.permission = CanChangeFlaggedCommentState()
self.request = self.factory.get('/')
self.request.user = self.user_1
@patch.object(settings, 'COMMENT_FLAGS_ALLOWED', True)
def test_normal_user(self):
self.assertFalse(self.permission.has_permission(self.request, self.view))
@patch.object(settings, 'COMMENT_FLAGS_ALLOWED', True)
def test_moderator(self):
self.request.user = self.moderator
self.assertTrue(self.permission.has_permission(self.request, self.view))
@patch.object(settings, 'COMMENT_FLAGS_ALLOWED', False)
def test_flagging_system_disabled(self):
self.request.user = self.moderator
self.assertFalse(self.permission.has_permission(self.request, self.view))
def test_can_change_for_flagged_comment(self):
self.assertIs(
True,
self.permission.has_object_permission(self.request, self.view, self.flagged_comment)
)
def test_cannot_change_for_unflagged_comment(self):
self.assertIs(
False,
self.permission.has_object_permission(self.request, self.view, self.unflagged_comment)
)
class CanGetSubscribersTest(BaseAPIPermissionsTest):
def setUp(self):
super().setUp()
self.permission = CanGetSubscribers()
self.request = self.factory.get('/')
@patch.object(settings, 'COMMENT_ALLOW_SUBSCRIPTION', True)
def test_normal_users_cannot_retrieve_subscribers(self):
self.request.user = self.user_1
self.assertFalse(self.permission.has_permission(self.request, self.view))
@patch.object(settings, 'COMMENT_ALLOW_SUBSCRIPTION', True)
def test_only_moderators_can_retrieve_subscribers(self):
self.request.user = self.moderator
self.assertTrue(self.permission.has_permission(self.request, self.view))
@patch.object(settings, 'COMMENT_ALLOW_SUBSCRIPTION', False)
def test_cannot_retrieve_subscribers_when_system_disabled(self):
self.request.user = self.moderator
self.assertFalse(self.permission.has_permission(self.request, self.view))
class SubscriptionEnabledTest(BaseAPIPermissionsTest):
def setUp(self):
super().setUp()
self.request = self.factory.post('/')
self.permission = SubscriptionEnabled()
@patch.object(settings, 'COMMENT_ALLOW_SUBSCRIPTION', False)
def test_when_subscription_disabled(self):
self.assertFalse(self.permission.has_permission(self.request, self.view))
@patch.object(settings, 'COMMENT_ALLOW_SUBSCRIPTION', True)
def test_when_permission(self):
self.assertTrue(self.permission.has_permission(self.request, self.view))
class UserPermittedOrReadOnlyTest(BaseAPIPermissionsTest):
def setUp(self):
super().setUp()
self.permission = UserPermittedOrReadOnly()
@patch.object(settings, 'COMMENT_ALLOW_BLOCKING_USERS', True)
def test_user_has_permission_for_safe_method(self, *arg):
request = self.factory.get('/')
request.user = AnonymousUser()
self.assertTrue(self.permission.has_permission(request, self.view))
@patch.object(settings, 'COMMENT_ALLOW_BLOCKING_USERS', False)
def test_user_has_permission_when_blocking_system_not_enabled(self, *arg):
request = self.factory.post('/')
request.user = AnonymousUser()
self.assertTrue(self.permission.has_permission(request, self.view))
@patch('comment.managers.BlockedUserManager.is_user_blocked', return_value=True)
def test_blocked_user_has_no_permission(self, *arg):
request = self.factory.post('/')
request.user = AnonymousUser()
self.assertFalse(self.permission.has_permission(request, self.view))
class CanCreatePermissionTest(BaseAPIPermissionsTest):
def setUp(self):
super().setUp()
self.request = self.factory.get('/')
self.permission = CanCreatePermission()
@patch.object(settings, 'COMMENT_ALLOW_ANONYMOUS', False)
def test_unauthenticated_user_cannot_create_comment(self):
self.request.user = AnonymousUser()
self.assertFalse(self.permission.has_permission(self.request, self.view))
@patch.object(settings, 'COMMENT_ALLOW_ANONYMOUS', False)
def test_authenticated_user_can_create_comment(self):
self.request.user = self.user_1
self.assertTrue(self.request.user.is_authenticated)
self.assertTrue(self.permission.has_permission(self.request, self.view))
@patch.object(settings, 'COMMENT_ALLOW_ANONYMOUS', True)
def test_anonymous_can_create_comment_when_anonymity_system_enabled(self):
self.request.user = AnonymousUser()
self.assertFalse(self.request.user.is_authenticated)
self.assertTrue(self.permission.has_permission(self.request, self.view))
class CanBlockUsersTest(BaseAPIPermissionsTest):
def setUp(self):
super().setUp()
self.request = self.factory.post('/')
self.permission = CanBlockUsers()
@patch.object(settings, 'COMMENT_ALLOW_BLOCKING_USERS', False)
def test_can_block_user_when_blocking_system_disabled(self):
self.request.user = self.admin
self.assertFalse(self.permission.has_permission(self.request, self.view))
@patch.object(settings, 'COMMENT_ALLOW_BLOCKING_USERS', True)
def test_admin_can_block_user(self):
self.request.user = self.admin
self.assertTrue(self.permission.has_permission(self.request, self.view))
@patch.object(settings, 'COMMENT_ALLOW_BLOCKING_USERS', True)
@patch.object(settings, 'COMMENT_ALLOW_MODERATOR_TO_BLOCK', False)
def test_moderator_cannot_block_user_when_moderation_system_disabled(self):
self.request.user = self.moderator
self.assertFalse(self.permission.has_permission(self.request, self.view))
@patch.object(settings, 'COMMENT_ALLOW_BLOCKING_USERS', True)
@patch.object(settings, 'COMMENT_ALLOW_MODERATOR_TO_BLOCK', True)
def test_moderator_can_block_user_when_moderation_system_enabled(self):
self.request.user = self.moderator
self.assertTrue(self.permission.has_permission(self.request, self.view))
|
""" Provide a framework for querying the CAST source data
"""
# Generic/Built-in
import os
import logging
# Computation
import pickle
import numpy as np
import pandas as pd
# BAYOTA
from bayota_settings.base import get_source_pickles_dir, get_source_csvs_dir, get_metadata_csvs_dir
from castjeeves.sqltables import SourceData
from castjeeves.sqltables import Metadata as sqlMetaData
from castjeeves.sourcehooks import Agency
from castjeeves.sourcehooks import Animal
from castjeeves.sourcehooks import Bmp
from castjeeves.sourcehooks import County
from castjeeves.sourcehooks import Geo
from castjeeves.sourcehooks import LoadSource
from castjeeves.sourcehooks import Lrseg
from castjeeves.sourcehooks import Meta
from castjeeves.sourcehooks import Scenario
from castjeeves.sourcehooks import Sector
from castjeeves.sourcehooks import Translator
logger = logging.getLogger(__name__)
class Jeeves:
""" This class provides a framework for querying the CAST source data files.
Access to parts of the source data is split among hopefully-intuitive groupings.
Attributes:
agency ():
animal ():
bmp ():
county ():
geo ():
loadsource ():
lrseg ():
sector ():
meta ():
translator ():
        source (SourceData): in-memory copy of the CAST source data tables.
        metadata_tables (Metadata): in-memory copy of the CAST metadata tables.
"""
def __init__(self):
self.source = self.loadInSourceDataFromSQL()
self.metadata_tables = self.loadInMetaDataFromSQL()
self.agency = Agency(sourcedata=self.source)
self.animal = Animal(sourcedata=self.source)
self.bmp = Bmp(sourcedata=self.source)
self.county = County(sourcedata=self.source)
self.geo = Geo(sourcedata=self.source)
self.loadsource = LoadSource(sourcedata=self.source)
self.lrseg = Lrseg(sourcedata=self.source)
self.meta = Meta(sourcedata=self.source, metadata=self.metadata_tables)
self.scenario = Scenario(sourcedata=self.source)
self.sector = Sector(sourcedata=self.source)
self.translator = Translator(sourcedata=self.source)
@classmethod
def loadInSourceDataFromSQL(cls):
"""Loads in the source data from a pickle object,
or if it doesn't exist, makes a pickle file from the csv files.
Returns:
a SourceData object
"""
savename = os.path.join(get_source_pickles_dir(), 'SourceData.obj')
if os.path.exists(savename):
with open(savename, 'rb') as f:
sourcedata = pickle.load(f)
else:
logger.info('<%s object does not exist yet. Generating...>' % SourceData.__name__)
# Source tables are loaded.
sourcedata = SourceData()
tbllist = sourcedata.getTblList()
for tblName in tbllist:
# for tblName in tbllist:
# print("loading source:", tblName)
df = cls.loadDataframe(tblName, get_source_csvs_dir())
sourcedata.addTable(tblName, df)
with open(savename, 'wb') as f:
pickle.dump(sourcedata, f)
return sourcedata
@classmethod
def loadInMetaDataFromSQL(cls):
"""Loads in the metadata from a pickle object,
or if it doesn't exist, makes a pickle file from the csv files.
Returns:
a sqlMetaData object
"""
savename = os.path.join(get_source_pickles_dir(), 'MetaData.obj')
if os.path.exists(savename):
with open(savename, 'rb') as f:
metadata = pickle.load(f)
else:
logger.info('<%s object does not exist yet. Generating...>' % sqlMetaData.__name__)
# Source tables are loaded.
metadata = sqlMetaData()
tbllist = metadata.getTblList()
for tblName in tbllist:
# for tblName in tbllist:
# print("loading source:", tblName)
df = cls.loadDataframe(tblName, get_metadata_csvs_dir())
metadata.addTable(tblName, df)
with open(savename, 'wb') as f:
pickle.dump(metadata, f)
return metadata
@staticmethod
def loadDataframe(tblName, loc):
""" """
dtype_dict = {}
if tblName == "ImpBmpSubmittedManureTransport":
dtype_dict["fipsfrom"] = np.str
fileLocation = os.path.join(loc, tblName + ".csv")
df = pd.read_csv(fileLocation, dtype=dtype_dict, encoding="utf-8")
# Added by DEKAUFMAN to read csv in chunks instead of all at once
# tp = pd.read_csv(fileLocation, header=None, encoding="utf-8", chunksize=500000)
# df = pd.concat(tp, ignore_index=True)
df = df.rename(columns={column: column.lower() for column in df.columns})
if tblName == "TblBmpGroup":
df["ruleset"] = df["ruleset"].astype(str).str.lower()
return df
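if __name__ == '__main__':
    # Usage sketch (assumes the CAST source CSVs/pickles configured through
    # bayota_settings are present on this machine): build the query object and
    # list the source tables that were loaded.
    jeeves = Jeeves()
    print(jeeves.source.getTblList())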
|
## VM based authorization for docker volumes
# Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
""" Define string constant for column name for table in authorization DB"""
# column name in tenants table
COL_ID = 'id'
COL_NAME = 'name'
COL_DESCRIPTION = 'description'
COL_DEFAULT_DATASTORE_URL = 'default_datastore_url'
# column name in vms table
COL_VM_ID = 'vm_id'
COL_TENANT_ID = 'tenant_id'
# column name in privileges table
COL_DATASTORE_URL = 'datastore_url'
COL_ALLOW_CREATE = 'allow_create'
COL_MAX_VOLUME_SIZE = 'max_volume_size'
COL_USAGE_QUOTA = 'usage_quota'
# column name in volume table
COL_VOLUME_NAME = 'volume_name'
COL_VOLUME_SIZE = 'volume_size'
# default tenant constants
DEFAULT_TENANT = '_DEFAULT'
DEFAULT_TENANT_UUID = '11111111-1111-1111-1111-111111111111'
DEFAULT_TENANT_DESCR = "This is a default vmgroup"
DEFAULT_DS = '_DEFAULT'
DEFAULT_DS_URL = DEFAULT_DS + "_URL"
ORPHAN_TENANT = "_ORPHAN"
VM_DS = '_VM_DS'
VM_DS_URL = VM_DS + "://"
ALL_DS = '_ALL_DS'
ALL_DS_URL = ALL_DS + "://"
|
try:
f = open('curruptfile.txt')
# if f.name == 'currupt_file.txt':
# raise Exception
except IOError as e:
print('First!')
except Exception as e:
print('Second')
else:
print(f.read())
f.close()
finally:
print("Executing Finally...")
print('End of program') |
# -*- coding: utf-8 -*-
#
# "Fuel" documentation build configuration file, created by
# sphinx-quickstart on Tue Sep 25 14:02:29 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# -- Default Settings -----------------------------------------------------
execfile('./common_conf.py')
exclude_patterns = ['_*', 'pages', 'contents', 'pdf', '.tox', '*egg']
|
SAVE_DIR="data/"
PORT="/dev/ttyUSB0"
COMPRESS=70
# leave these default
BAUD_START=115200
BAUD_PEAK=1500000 #1000000
SLEEP_TIME=0.00
WAIT_TIME=20
BOOT_TIME=3 #wait for booting to settle before injecting our kernel
MCU_CODE="mcu-kernel.py"
PROMPT=b'\r\n>>> '
LOG_READ=True
LOG_WRITE=True
|
import json
class UserNotFoundError(Exception):
pass
class InvalidTaskIdError(Exception):
pass
def fetch_task_id(event):
    if 'pathParameters' in event and 'task_id' in event['pathParameters']:
        return event['pathParameters']['task_id']
    else:
        raise InvalidTaskIdError
def fetch_user_id_from_event(event):
if 'requestContext' in event and 'authorizer' in event['requestContext'] and 'claims' in event['requestContext']['authorizer'] and 'cognito:username' in event['requestContext']['authorizer']['claims']:
return event['requestContext']['authorizer']['claims']['cognito:username']
else:
raise UserNotFoundError
def convert_return_object(status: int = 200, body=None, is_base64_encoded: bool = False):
    # Serialize dict/list bodies to JSON; pass strings through; anything else becomes ''.
    if isinstance(body, (dict, list)):
        return_body = json.dumps(body)
    elif isinstance(body, str):
        return_body = body
    else:
        return_body = ''
    return {
        'statusCode': status,
        'body': return_body,
        'isBase64Encoded': is_base64_encoded
    }
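# Sketch of a Lambda handler wiring the helpers above together (assumes the
# event comes from API Gateway with a Cognito authorizer attached).
def example_handler(event, context):
    try:
        user_id = fetch_user_id_from_event(event)
        task_id = fetch_task_id(event)
    except (UserNotFoundError, InvalidTaskIdError):
        return convert_return_object(400, {'message': 'bad request'})
    return convert_return_object(200, {'user_id': user_id, 'task_id': task_id})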
|
import os
import subprocess
from backend.blueprints.spa_api.errors.errors import AuthorizationException
from backend.server_constants import BASE_FOLDER
from backend.utils.checks import is_admin, is_local_dev
try:
import config
update_code = config.update_code
if update_code is None:
update_code = 1234
except:
update_code = 1234
def update_self(code):
if code != update_code or not is_admin():
raise AuthorizationException()
script = os.path.join(BASE_FOLDER, 'update_run.sh')
if update_code == 1234 or is_local_dev():
subprocess.call([script, 'test'])
else:
subprocess.call([script])
|
#!/usr/bin/env python
# encoding:utf8
"""
Flask async demo
"""
|
"""
Created on Sun Apr 5 2020
@author: Ruksana Kabealo, Pranay Methuku, Abirami Senthilvelan, Malay Shah
Class: CSE 5915 - Information Systems
Section: 6pm TR, Spring 2020
Prof: Prof. Jayanti
A Python 3 script to perform the following tasks in order:
1) look at source directory,
2) extract xml annotations
3) save its corresponding compilation into a csv file
Assumptions:
Annotation files all correspond to .jpg images
Usage:
python3 xml_to_csv.py --source=path/to/source --csv-file=path/to/csv/file
Examples:
    python3 xml_to_csv.py -s=./tier1/test -c=../tier1/test_labels.csv
"""
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
import argparse
def retrieve_df(directory_path):
"""
helper function to take in a directory
and compile a DataFrame using them
"""
    xml_list = []
    column_names = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
    # iterate through all the xml files in directory
    for xml_file in glob.glob(directory_path + '/*.xml'):
        tree = ET.parse(xml_file)
        root = tree.getroot()
# get xml tags corresponding to column_names from file and create a row
for member in root.findall('object'):
value = (root.find('filename').text, # filename
int(root.find('size')[0].text), # width
int(root.find('size')[1].text), # height
member[0].text, # class
int(member[4][0].text), # xmin
int(member[4][1].text), # ymin
int(member[4][2].text), # xmax
int(member[4][3].text) # ymax
)
xml_list.append(value)
return pd.DataFrame(xml_list, columns=column_names)
if __name__ == "__main__":
# set up command line
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--source", type=str, default="train",
help="Path to the source folder to look from, train folder by default")
parser.add_argument("-c", "--csv-file", type=str, default="train_labels.csv",
help="Path to a CSV file to output the annotations into")
args = parser.parse_args()
xml_df = retrieve_df(os.path.join(os.getcwd(), args.source))
xml_df.to_csv(args.csv_file, index=False)
print('Successfully converted the annotations in {} to a file {}.'.format(args.source, args.csv_file))
|
List1 = []
List2 = []
List3 = []
List4 = []
List5 = [13,31,2,19,96]
statusList1 = 0
while statusList1 < 13:
    inputList1 = str(input("Enter a winged animal : "))
statusList1 += 1
List1.append(inputList1)
print()
statusList2 = 0
while statusList2 < 5:
    inputList2 = str(input("Enter a two-legged animal : "))
statusList2 += 1
List2.append(inputList2)
print()
statusList3 = 0
while statusList3 < 5:
    inputList3 = str(input("Enter the name of one of your closest friends : "))
statusList3 += 1
List3.append(inputList3)
print()
statusList4 = 0
while statusList4 < 5:
    inputList4 = int(input("Enter that friend's date of birth : "))
statusList4 += 1
List4.append(inputList4)
print(List1[0:5]+List4,'\n')
List3[4] = 'Rio'
print(List3,'\n')
tempList5 = List5.copy()
del tempList5[4]
del tempList5[2]
print(tempList5,'\n')
print(List4 + List5)
print("Nilai Max dari List 4 dan List 5 adalah",max(List4 + List5))
print("Nilai Min dari List 4 dan List 5 adalah",min(List4 + List5)) |
#!/usr/bin/env python
# coding=utf-8
"""TrainingPreparator engine action.
Use this module to add the project main code.
"""
from .._compatibility import six
from .._logging import get_logger
from marvin_python_toolbox.engine_base import EngineBaseDataHandler
__all__ = ['TrainingPreparator']
logger = get_logger('training_preparator')
class TrainingPreparator(EngineBaseDataHandler):
def __init__(self, **kwargs):
super(TrainingPreparator, self).__init__(**kwargs)
def execute(self, params, **kwargs):
from sklearn.model_selection import train_test_split
from sklearn import model_selection
X_train, X_test = train_test_split(self.marvin_initial_dataset, random_state=1, test_size=0.3)
self.marvin_dataset = {'train_X': X_train, 'test_X': X_test}
|
from __init__ import *
import optparse
import sys
sys.path.insert(0, ROOT+'apps/python/')
from pipe_options import *
def parse_args():
help_str = \
'"new" : from scratch | "existing" : compile and run | "ready" : just run'
parser.add_option('-m', '--mode',
type='choice',
action='store',
dest='mode',
choices=['new', 'existing', 'ready', 'tune'],
                      default='new',
help=help_str)
parser.add_option('-i', '--img',
action='store',
dest='img_file',
help='input image file path',)
parser.add_option('-k', '--colour_temp',
action='store',
dest='colour_temp',
default=3700,
help='colour temperature',)
parser.add_option('-c', '--contrast',
action='store',
dest='contrast',
default=50,
help='colour contrast',)
parser.add_option('-g', '--gamma',
action='store',
dest='gamma',
default=2.0,
help='gamma value',)
parser.add_option('-n', '--runs',
action='store',
dest='runs',
default=1,
help='number of runs',)
parser.add_option('-t', '--timer',
action='store_true',
dest='timer',
default=False,
help='True : report execution time, \
False: do not collect timing info',)
parser.add_option('-d', '--display',
action='store_true',
dest='display',
default=False,
help='True : display output image, \
False: do not display output image',)
parser.add_option('--cxx',
action='store',
dest='cxx',
choices=['g++', 'icpc'],
                      default='g++',
help='CXX Compiler')
parser.add_option('--cxx_flags',
action='store',
dest='cxx_flags',
default=['-O3'],
help='CXX Compiler flags')
parser.add_option('--graph-gen',
action='store_true',
dest='graph_gen',
default=False,
help='True : generate .dot file of pipeline graph, \
False: don\'t',)
(options, args) = parser.parse_args()
return options
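if __name__ == '__main__':
    # Illustrative only: dump the parsed settings (assumes the flags above are
    # supplied on the command line, e.g. `-i input.png -m ready`).
    opts = parse_args()
    print('mode=%s, img=%s, runs=%s' % (opts.mode, opts.img_file, opts.runs))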
|
# Day 12 Project: Reading List
# Project details: https://teclado.com/30-days-of-python/python-30-day-12-project/
# Users should be able to add a book to their reading list by providing a book title, an author's name, and a year of publication. The program should store information about all of these books
# in a Python list. Users should be able to display all the books in their reading list, and these books should be printed out in a user-friendly format. Users should be able to select these
# options from a text menu, and they should be able to perform multiple operations without restarting the program. You can see an example of a working menu in the post on while loops (day 8).
booklist = [
("The Fellowship of the Ring", "J. R. R. Tolkien", "1954"),
("The Two Towers", "J. R. R. Tolkien", "1954"),
("The Return of the King", "J. R. R. Tolkien", "1955")
]
def userinterface():
print("--- Reading list ---\n\n1. Add books to list\n2. View books in list\n3. Quit")
userchoice = input("Enter: ")
if userchoice == "1":
title = input("Enter book title: ")
author = input("Enter author: ")
year = input("Enter year of publication: ")
        booklist.append((title, author, year))
        return
elif userchoice == "2":
print("\nSelection confirmed, printing books in your list:\n")
for i in booklist:
print(f"{i[0]} ({i[2]}) by {i[1]}")
elifchoice = input("\nReturn to main menu? [Y/N] ").lower()
if elifchoice == "y":
return
elif elifchoice == "n":
quit()
elif userchoice == "3":
quit()
    else:
        print("Something went wrong.")
def main():
while True:
userinterface()
main() |
from .base import BaseTestCase
from elevate.signals import grant, revoke
from elevate.utils import has_elevated_privileges, grant_elevated_privileges
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_logged_in, user_logged_out
class SignalsTestCase(BaseTestCase):
def test_grant(self):
self.login()
grant(User, self.request)
self.assertTrue(has_elevated_privileges(self.request))
def test_revoke(self):
self.login()
grant(User, self.request)
revoke(User, self.request)
self.assertFalse(has_elevated_privileges(self.request))
def test_user_logged_in(self):
self.login()
user_logged_in.send_robust(sender=User, request=self.request)
self.assertTrue(has_elevated_privileges(self.request))
def test_user_logged_out(self):
self.login()
grant_elevated_privileges(self.request)
self.assertTrue(has_elevated_privileges(self.request))
user_logged_out.send_robust(sender=User, request=self.request)
self.assertFalse(has_elevated_privileges(self.request))
|
#!python3
import json
import requests
from pprint import pprint
r = requests.get(
"https://us.api.battle.net/wow/character/Cenarion%20Circle/Ardy?fields=mounts&locale=en_US&apikey="
)
data = json.loads(r.text)
for item in data.items():
print(item)
# Hard to read
for item in data.items():
pprint(item)
# easier to read
|
from .dispatcher import Dispatcher
from .bot import VkBot
from .updater import Updater
from .objects.vk_obj import Message, Geo, Attachment, Update, VkEventType, VkEvent
from .objects.keyboard import Keyboard, Action
from .handler import Handler, CommandHandler, MessageHandler, State, FsmHandler, BaseHandler
__all__ = ['Dispatcher', 'Handler', 'VkBot', 'Updater', 'Message', 'Geo', 'Keyboard', 'Attachment', 'Action',
           'Update', 'CommandHandler', 'MessageHandler', 'State', 'VkEventType', 'VkEvent', 'FsmHandler', 'BaseHandler']
|
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.components.pad import pad as pad_function
from gdsfactory.port import select_ports_electrical
from gdsfactory.routing.route_quad import route_quad
from gdsfactory.types import ComponentOrFactory
@gf.cell
def add_electrical_pads_shortest(
component: Component,
pad: ComponentOrFactory = pad_function,
pad_port_spacing: float = 50.0,
select_ports=select_ports_electrical,
port_orientation: int = 90,
layer: gf.types.Layer = (31, 0),
**kwargs,
) -> Component:
"""Add pad to each closest electrical port.
Args:
component:
pad: pad element or function
pad_port_spacing: between pad and port
select_ports: function
port_orientation
layer: for the routing
**kwargs: pad_settings
"""
c = Component()
c.component = component
ref = c << component
ports = select_ports(ref.ports)
ports = list(ports.values())
pad = pad(**kwargs) if callable(pad) else pad
pad_port_spacing += pad.info_child.full["size"][0] / 2
for port in ports:
p = c << pad
if port_orientation == 0:
p.x = port.x + pad_port_spacing
p.y = port.y
c.add(route_quad(port, p.ports["e1"], layer=layer))
elif port_orientation == 180:
p.x = port.x - pad_port_spacing
p.y = port.y
c.add(route_quad(port, p.ports["e3"], layer=layer))
elif port_orientation == 90:
p.y = port.y + pad_port_spacing
p.x = port.x
c.add(route_quad(port, p.ports["e4"], layer=layer))
elif port_orientation == 270:
p.y = port.y - pad_port_spacing
p.x = port.x
c.add(route_quad(port, p.ports["e2"], layer=layer))
c.add_ports(ref.ports)
for port in ports:
c.ports.pop(port.name)
c.copy_child_info(component)
return c
if __name__ == "__main__":
import gdsfactory as gf
c = gf.components.cross(length=100, layer=gf.LAYER.M3)
c = gf.components.mzi_phase_shifter()
c = gf.components.straight_heater_metal()
cc = add_electrical_pads_shortest(component=c)
cc.show()
|
import logging
from json import dumps, loads
from random import random
import matplotlib.pyplot as plt
import psycopg2
import seaborn as sns
from django.http import HttpResponse
from django.shortcuts import render
from kafka import KafkaConsumer, KafkaProducer
from .utils import plot_graph as plot_graph_util
# Create your views here.
# creating the kafka producer to push the data into the kafka stream
logger = logging.getLogger(__name__)
def genarate_data(request):
poc_producer = KafkaProducer(
bootstrap_servers=["localhost:9092"],
value_serializer=lambda x: dumps(x).encode("utf-8"),
)
    # generate random power consumption values between 0.0 and 1.0
while True:
power_consumption_data = {"consumption_value": f"{random():.2f}"}
poc_producer.send("poc_power_consumption", value=power_consumption_data)
return HttpResponse("Data inserted into kafka stream")
def get_db_connection():
conn = psycopg2.connect(
host="postgres",
database="power_stats",
user="debug",
password="debug",
port="5432",
)
return conn
def store_data(request):
consumption_value = KafkaConsumer(
'poc_power_consumption',
bootstrap_servers = ['localhost:9092'],
auto_offset_reset = 'earliest',
enable_auto_commit = True,
group_id = 'poc-group',
value_deserializer = lambda x:loads(x.decode('utf-8'))
)
poc_table = "CREATE TABLE IF NOT EXISTS power_consumption (time TIMESTAMPTZ NOT NULL, consumption_value DOUBLE PRECISION);"
pc_hypertable = "SELECT create_hypertable('power_consumption', 'time');"
conn = get_db_connection()
cursor = conn.cursor()
cursor.execute(poc_table)
cursor.execute(pc_hypertable)
conn.commit()
for consumption in consumption_value:
try:
value = consumption.value["consumption_value"]
cursor.execute(
f"INSERT INTO power_consumption (time, consumption_value) VALUES (NOW(), {value});"
)
conn.commit()
except Exception as e:
logger.info(f"database error - {e}")
query = "SELECT time_bucket('5 minutes', time) AS five_min, max(consumption_value) FROM power_consumption;"
cursor.execute(query)
conn.close()
def plot_graph(request):
conn = get_db_connection()
cursor = conn.cursor()
query = "SELECT time_bucket('5 minutes', time) AS five_min, avg(consumption_value) FROM power_consumption GROUP BY five_min ORDER BY five_min DESC;"
cursor.execute(query)
query_set = cursor.fetchall()
x = []
y = []
for i in range(len(query_set)):
x.append(query_set[i][0])
y.append(query_set[i][1])
# ax = sns.stripplot(x=x, y=y)
# ax.set(xlabel ='time', ylabel ='power consumption')
# plt.title('power')
# plt.savefig('./power_stats/power_stats/static/images/readings.png')
    chart = plot_graph_util(x, y)
return render(request, )
buffer = BytesIO()
|
import unittest
from datetime import datetime
from decimal import Decimal
from botocore.exceptions import ClientError
import pytest
from _pytest.monkeypatch import MonkeyPatch
from falcano.model import Model
from falcano.attributes import (
UnicodeAttribute,
UTCDateTimeAttribute,
NumberAttribute,
ListAttribute,
UnicodeSetAttribute
)
from falcano.indexes import GlobalSecondaryIndex, AllProjection
class TypeIndex(GlobalSecondaryIndex):
class Meta:
index_name = 'Type'
billing_mode = 'PAY_PER_REQUEST'
projection = AllProjection()
Type = UnicodeAttribute(default='project', hash_key=True)
SK = UnicodeAttribute(range_key=True)
class BaseModel(Model):
'''Base model with meta'''
class Meta(Model.Meta):
table_name = 'falcano-e2e'
billing_mode = 'PAY_PER_REQUEST'
PK = UnicodeAttribute(hash_key=True)
SK = UnicodeAttribute(range_key=True)
TypeIndex = TypeIndex()
class FriendModel(BaseModel):
"""
A model for testing
"""
Type = UnicodeAttribute(default='friend')
Name = UnicodeAttribute(null=True)
Description = UnicodeAttribute(null=True)
CreatedAt = UTCDateTimeAttribute(default=datetime.utcnow())
class FriendGroup(BaseModel):
'''
A model for a friendgroup
'''
Type = UnicodeAttribute(default='friend_group')
Name = UnicodeAttribute(null=True)
class FriendToUpdate(BaseModel):
'''
A model for a friend that has lots of fun things to update
'''
Type = UnicodeAttribute(default='update_friend')
NumberAttr = NumberAttribute(null=True)
SetAttr = UnicodeSetAttribute(null=True)
ListAttr = ListAttribute(null=True)
StringAttr = UnicodeAttribute(null=True)
class TestModel(unittest.TestCase):
def setUp(self):
self.monkeypatch = MonkeyPatch()
self.monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'fakeMyKeyId')
self.monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'fakeMySecret')
if not BaseModel.exists():
print('Creating table')
FriendModel.create_table(wait=True)
for item in BaseModel.scan():
# clean up all items in db
item.delete()
self.friend1 = FriendModel('friend#drue', 'friend#meta',
Name='Dan Rue', CreatedAt=datetime(2014, 5, 12, 23, 30))
self.friend1.save()
self.friend2 = FriendModel('friend#jberk', 'friend#meta', Name='Justin Berk')
self.friend2.save()
self.friend3 = FriendModel('friend#fbladilsh', 'friend#meta', Name='Frank Bladilsh')
self.friend3.save()
self.group1 = FriendGroup('group#group1', 'group#meta', Name='Friendship Squad')
self.group1.save()
self.friend_to_update = FriendToUpdate(
'update#first', 'update#meta', NumberAttr=2,
SetAttr={'A', 'B'}, ListAttr=['One', 'Two'], StringAttr='First')
self.friend_to_update.save()
def tearDown(self):
# clean up all items in db
for item in BaseModel.scan():
item.delete()
def test_existence(self):
friend_group1 = FriendGroup(
self.group1.PK,
self.friend1.PK,
Name="Boston"
)
friend_group1.save(FriendGroup.SK.does_not_exist())
with pytest.raises(ClientError) as err:
friend_group1.save(FriendGroup.SK.does_not_exist())
assert err.typename == 'ConditionalCheckFailedException'
friend_group1.Name = 'Seattle'
res = friend_group1.save(FriendGroup.SK.exists())
res = FriendGroup.query(
self.group1.PK,
FriendGroup.SK.startswith(
self.friend1.PK
)
)
assert list(res)[0].Name == 'Seattle'
assert [friend.PK for friend in list(FriendModel.TypeIndex.query('friend'))] == \
['friend#drue', 'friend#fbladilsh', 'friend#jberk']
for group in FriendGroup.query('group#group1', FriendGroup.SK.startswith('group#meta')):
assert group.SK == 'group#meta'
def test_time_storage(self):
assert self.friend1.CreatedAt == datetime(2014, 5, 12, 23, 30)
def test_get(self):
res = BaseModel.get(self.friend1.PK, self.friend1.SK)
assert res.PK == self.friend1.PK
assert res.SK == self.friend1.SK
def test_batch_get(self):
items = [
(self.friend1.PK, self.friend1.SK),
(self.group1.PK, self.group1.SK)
]
records = BaseModel.batch_get(items)
records = records.records
assert records[0].PK == self.group1.PK
assert records[0].SK == self.group1.SK
assert records[1].PK == self.friend1.PK
assert records[1].SK == self.friend1.SK
def test_update(self):
expected = {
'ID': 'first',
'ListAttr': ['One', 'Two', 'three', 'four'],
'NumberAttr': Decimal('-3'),
'PK': 'update#first',
'SK': 'update#meta',
'SetAttr': {'Alphabet', 'A', 'B'},
'Type': 'update_friend'
}
self.friend_to_update.update(actions=[
FriendToUpdate.NumberAttr.set(FriendToUpdate.NumberAttr - 5),
FriendToUpdate.SetAttr.add({'Alphabet'}),
FriendToUpdate.StringAttr.remove(),
FriendToUpdate.ListAttr.set(FriendToUpdate.ListAttr.append(['three', 'four']))
])
got = BaseModel.get(
self.friend_to_update.PK,
self.friend_to_update.SK).to_dict()
assert expected == got
def test_transact_write(self):
new_friend = FriendModel('friend#new', 'friend#meta', Name='New Friend')
with BaseModel.transact_write() as writer:
writer.condition_check(FriendModel, 'friend#drue', 'friend#meta',
FriendModel.Name.eq('Dan Rue'))
writer.delete(self.friend2)
writer.save(new_friend)
action = FriendToUpdate.NumberAttr.add(5)
writer.update(self.friend_to_update, [
action
], condition=FriendToUpdate.NumberAttr.eq(2))
with pytest.raises(Exception):
BaseModel.get(self.friend2.PK, self.friend2.SK)
BaseModel.get(new_friend.PK, new_friend.SK)
assert self.friend_to_update.NumberAttr + \
5 == BaseModel.get(self.friend_to_update.PK, self.friend_to_update.SK).NumberAttr
def test_transact_get(self):
want = self.friend1.to_dict()
del want['CreatedAt']
attributes_to_get = [
FriendModel.PK.attr_name,
FriendModel.SK.attr_name,
FriendModel.Description.attr_name,
FriendModel.Name.attr_name,
FriendModel.Type.attr_name
]
with BaseModel.transact_get() as getter:
got_friend = getter.get(FriendModel, 'friend#drue', 'friend#meta', attributes_to_get)
got = got_friend.get().to_dict()
assert want == got
def test_save_lists(self):
thingy = FriendToUpdate(
'update#first',
'update#2',
NumberAttr=2,
SetAttr={'A', 'B'},
ListAttr=['One', 2],
StringAttr='First'
)
thingy.save()
thingy = FriendToUpdate.get(
'update#first',
'update#2'
)
thingy.NumberAttr = 5
thingy.save()
thingy = FriendToUpdate.get(
'update#first',
'update#2'
)
assert thingy.ListAttr == ['One', 2]
|
# Copyright (c) 2019 The Felicia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import felicia_py as fel
import felicia_py.command_line_interface as cli
from felicia.core.protobuf.channel_pb2 import ChannelDef
from felicia.core.protobuf.ui_pb2 import PixelFormat
from felicia.drivers.camera.camera_frame_message_pb2 import CameraFrameMessage
from felicia.drivers.camera.camera_settings_message_pb2 import CameraSettingsInfoMessage
class CameraPublishingNode(fel.NodeLifecycle):
def __init__(self, camera_flag, camera_descriptor):
super().__init__()
self.camera_flag = camera_flag
self.topic = camera_flag.topic_flag.value
self.camera_descriptor = camera_descriptor
self.publisher = fel.communication.Publisher(CameraFrameMessage)
def on_init(self):
self.camera = fel.drivers.CameraFactory.new_camera(
self.camera_descriptor)
s = self.camera.init()
if not s.ok():
fel.log(fel.ERROR, s.error_message())
sys.exit(1)
# You can set camera settings here.
camera_settings = fel.drivers.CameraSettings()
s = self.camera.set_camera_settings(camera_settings)
fel.log_if(fel.ERROR, not s.ok(), s.error_message())
message = CameraSettingsInfoMessage()
s = self.camera.get_camera_settings_info(message)
if s.ok():
print(message)
else:
fel.log(fel.ERROR, s.error_message())
def on_did_create(self, node_info):
self.node_info = node_info
self.request_publish()
def on_request_publish(self, status):
if status.ok():
fel.main_thread.post_task(self.start_camera)
else:
fel.log(fel.ERROR, status.error_message())
def on_request_unpublish(self, status):
if status.ok():
fel.main_thread.post_task(self.stop_camera)
else:
fel.log(fel.ERROR, status.error_message())
def request_publish(self):
settings = fel.communication.Settings()
settings.queue_size = 1
settings.is_dynamic_buffer = True
self.publisher.request_publish(self.node_info, self.topic,
ChannelDef.CHANNEL_TYPE_TCP |
ChannelDef.CHANNEL_TYPE_SHM |
ChannelDef.CHANNEL_TYPE_WS,
settings, self.on_request_publish)
def request_unpublish(self):
self.publisher.request_unpublish(self.node_info, self.topic,
self.on_request_unpublish)
def start_camera(self):
pixel_format = PixelFormat.Value(
self.camera_flag.pixel_format_flag.value)
s = self.camera.start(
fel.drivers.CameraFormat(self.camera_flag.width_flag.value,
self.camera_flag.height_flag.value,
pixel_format,
self.camera_flag.fps_flag.value),
self.on_camera_frame, self.on_camera_error)
if s.ok():
print("Camera format: {}".format(self.camera.camera_format()))
# fel.main_thread.post_delayed_task(
# self.request_unpublish, fel.TimeDelta.from_seconds(10))
pass
else:
fel.log(fel.ERROR, s.error_message())
def stop_camera(self):
s = self.camera.stop()
fel.log_if(fel.ERROR, not s.ok(), s.error_message())
def on_camera_frame(self, camera_frame):
if self.publisher.is_unregistered():
return
self.publisher.publish(camera_frame.to_camera_frame_message(False))
def on_camera_error(self, status):
fel.log_if(fel.ERROR, not status.ok(), status.error_message())
|
num1 = float(input("Number 1: "))
num2 = float(input("Number 2: "))
num3 = float(input("Number 3: "))
numeros = [num1, num2, num3]
lista = sorted(numeros, reverse=True)
print(lista) |
#! python3
# Chapter 14 Project Fetching Current Weather Data
# Prints the weather data for a location from the command line.
import json
import requests
import sys
if len(sys.argv) < 2:
print('Usage: script.py location')
sys.exit()
location = ' '.join(sys.argv[1:])
url = 'http://api.openweathermap.org/data/2.5/forecast/daily?q=%s&cnt=3' % (location)
response = requests.get(url)
response.raise_for_status()
weatherData = json.loads(response.text)
w = weatherData['list']
print('Current weather in %s:' % (location))
print(w[0]['weather'][0]['main'], '-', w[0]['weather'][0]['description'])
print()
print('Tomorrow:')
print(w[1]['weather'][0]['main'], '-', w[1]['weather'][0]['description'])
print()
print('Day after tomorrow:')
print(w[2]['weather'][0]['main'], '-', w[2]['weather'][0]['description'])
# 09/2016 - DOES NOT WORK! This script no longer works due to changes made by
# OpenWeatherMap.org and their API access.
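# A possible adaptation (an untested sketch, not part of the original script):
# OpenWeatherMap now requires an API key passed as the 'APPID' query
# parameter, e.g.
#     url = ('http://api.openweathermap.org/data/2.5/forecast/daily'
#            '?q=%s&cnt=3&APPID=%s' % (location, 'YOUR_API_KEY'))
# 'YOUR_API_KEY' is a placeholder; the daily-forecast endpoint itself may also
# need a paid plan, so a key alone may not be enough to revive this script.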
|
import pytest
import numpy as np
import toppra.cpp as tac
pytestmark = pytest.mark.skipif(
not tac.bindings_loaded(), reason="c++ bindings not built"
)
@pytest.fixture
def path():
c = np.array([
[-0.500000, -0.500000, 1.500000, 0.500000, 0.000000, 3.000000, 0.000000, 0.000000],
[-0.500000, -0.500000, 0.000000, -1.000000, 1.500000, 2.500000, 1.000000, 3.000000],
[-0.500000, -0.500000, -1.500000, -2.500000, 0.000000, -1.000000, 2.000000, 4.000000]
])
c = c.reshape((3, 4, 2))
p = tac.PiecewisePolyPath(c, [0, 1, 2, 3])
yield p
def test_solve_toppra(path):
cv = tac.LinearJointVelocity([-1, -1], [1, 1])
ca = tac.LinearJointAcceleration([-0.2, -0.2], [0.2, 0.2])
prob = tac.TOPPRA([cv, ca], path)
prob.setN(50)
ret = prob.computePathParametrization()
assert ret == tac.ReturnCode.OK
data = prob.parametrizationData
sd_expected = [0., 0.00799999, 0.01559927, 0.02295854, 0.03021812,
0.0375065, 0.04494723, 0.05266502, 0.06079176, 0.06947278, 0.07887417, 0.08890758,
0.08734253, 0.08331795, 0.07962036, 0.07621324, 0.0730652, 0.07014912, 0.06744149,
0.06492187, 0.06257243, 0.06037763, 0.05832396, 0.05639983, 0.05459562,
0.05290406, 0.05132157, 0.04985237, 0.04852316, 0.04745693, 0.04761904, 0.0285715,
0.05376003, 0.04275653, 0.04126188, 0.04013804, 0.03912958, 0.03818766,
0.03729606, 0.0364472, 0.03563649, 0.03486069, 0.03411724, 0.03340395, 0.03271895,
0.03206054, 0.02268897, 0.01495547, 0.00883489, 0.00394282, 0.]
np.testing.assert_allclose(data.parametrization, sd_expected, atol=1e-6)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
"""
Counting prime numbers from 0 to 10000000 with the help of Apache Spark map-reduce
+ compare the time of the parallel (Spark-based) vs the sequential calculation
To see the parallelism of Apache Spark you can run the 'top' command in a separate
terminal while running this script
"""
import sys
try:
import pyspark
except ImportError:
print("ERROR: pyspark module not installed. Use 'sudo pip install pyspark'")
sys.exit(1)
NUMBERS_THRESHOLD=10000000
#running Apache Spark
if 'sc' not in globals():
sc = pyspark.SparkContext()
#timeit decorator
def timeit(method):
def timed(*args, **kw):
import time
ts = time.time()
result = method(*args, **kw)
te = time.time()
print("time of execution method {}: {} ms".format(str(method.__name__),int((te-ts)*1000)))
return timed
def is_it_prime(number):
# make sure n is a positive integer
number = abs(int(number))
# simple tests
if number < 2:
return False
# 2 is prime
if number == 2:
return True
# other even numbers aren't
if not number & 1:
return False
    # check odd divisors up to the number's square root
for x in range(3, int(number**0.5)+1, 2):
if number % x == 0:
return False
#if we get this far we are good
return True
@timeit
def parallelSparkBasedCalculation():
    # create an RDD with the candidate numbers
    numbers = sc.parallelize(range(NUMBERS_THRESHOLD))
# count out the number of primes we found
count = numbers.filter(is_it_prime).count()
print("parallelSparkBasedCalculation result: {}".format(count))
@timeit
def sequentialCalculation():
    count = 0
    for i in range(NUMBERS_THRESHOLD):
        if is_it_prime(i):
            count += 1
    print("sequentialCalculation result: {}".format(count))
if __name__ == "__main__":
    sequentialCalculation()
parallelSparkBasedCalculation()
|
from time import sleep
from random import choice
from fastapi.responses import StreamingResponse
from fastemplate.module import MOCK_FRIDGE, MOCK_CELSIUS_TEMPERATURES
def list_fridge():
"""
    List all items in the fridge.
:return: dict with all items and amounts
:rtype: dict
"""
return MOCK_FRIDGE
def get_temperature():
"""
Yields a mock fridge temperature.
    :return: fridge temperature encoded in utf-8
:rtype: bytes
"""
for i in range(3):
yield bytes(choice(MOCK_CELSIUS_TEMPERATURES), encoding='utf-8')
sleep(1)
def stream_temperature():
"""
Streams the fridge temperature.
Encoded in `UTF-8`.
:returns: fridge's temperature.
:rtype: StreamingResponse
"""
return StreamingResponse(get_temperature())
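# A minimal usage sketch (an addition, not part of the original module):
# exposing the functions above through a FastAPI router. The paths and the
# router object are assumptions, not the project's real API surface.
from fastapi import APIRouter

router = APIRouter()
# list_fridge returns a plain dict, which FastAPI serialises to JSON.
router.add_api_route('/fridge', list_fridge, methods=['GET'])
# stream_temperature already wraps the generator in a StreamingResponse.
router.add_api_route('/fridge/temperature', stream_temperature, methods=['GET'])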
|
from tornado.web import HTTPError
from tornado import gen
from biothings.web.api.es.handlers.base_handler import BaseESRequestHandler
from biothings.web.api.es.transform import ScrollIterationDone
from biothings.web.api.es.query import BiothingScrollError, BiothingSearchError
from biothings.web.api.helper import BiothingParameterTypeError
from biothings.utils.web import sum_arg_dicts
import logging
class QueryHandler(BaseESRequestHandler):
''' Request handlers for requests to the query endpoint '''
def initialize(self, web_settings):
''' Tornado handler `.initialize() <http://www.tornadoweb.org/en/stable/web.html#tornado.web.RequestHandler.initialize>`_ function for all requests to the query endpoint.
Here, the allowed arguments are set (depending on the request method) for each kwarg category.'''
super(QueryHandler, self).initialize(web_settings)
self.ga_event_object_ret['action'] = self.request.method
if self.request.method == 'GET':
self.ga_event_object_ret['action'] = self.web_settings.GA_ACTION_QUERY_GET
self.control_kwargs = self.web_settings.QUERY_GET_CONTROL_KWARGS
self.es_kwargs = self.web_settings.QUERY_GET_ES_KWARGS
self.esqb_kwargs = self.web_settings.QUERY_GET_ESQB_KWARGS
self.transform_kwargs = self.web_settings.QUERY_GET_TRANSFORM_KWARGS
elif self.request.method == 'POST':
self.ga_event_object_ret['action'] = self.web_settings.GA_ACTION_QUERY_POST
self.control_kwargs = self.web_settings.QUERY_POST_CONTROL_KWARGS
self.es_kwargs = self.web_settings.QUERY_POST_ES_KWARGS
self.esqb_kwargs = self.web_settings.QUERY_POST_ESQB_KWARGS
self.transform_kwargs = self.web_settings.QUERY_POST_TRANSFORM_KWARGS
else:
# handle other verbs?
pass
self.kwarg_settings = sum_arg_dicts(self.control_kwargs, self.es_kwargs,
self.esqb_kwargs, self.transform_kwargs)
logging.debug("QueryHandler - {}".format(self.request.method))
logging.debug("Google Analytics Base object: {}".format(self.ga_event_object_ret))
logging.debug("Kwarg Settings: {}".format(self.kwarg_settings))
def _pre_scroll_transform_GET_hook(self, options, res):
''' Override me. '''
return res
@gen.coroutine
def get(self):
''' Handle a GET to the query endpoint. '''
###################################################
# Get/type/alias query parameters
###################################################
try:
kwargs = self.get_query_params()
except BiothingParameterTypeError as e:
raise gen.Return(self._return_data_and_track({'success': False, 'error': "{0}".format(e)}, ga_event_data={'total':0}, status_code=400))
#return
#except Exception as e:
# self.log_exceptions("Error in get_query_params")
# self._return_data_and_track({'success': False, 'error': "Error parsing input parameter, check input types"}, ga_event_data={'total': 0})
# return
###################################################
# Split query parameters into categories
###################################################
options = self.get_cleaned_options(kwargs)
logging.debug("Request kwargs: {}".format(kwargs))
logging.debug("Request options: {}".format(options))
if not options.control_kwargs.q and not options.control_kwargs.scroll_id:
raise gen.Return(self._return_data_and_track({'success': False, 'error': "Missing required parameters."},
ga_event_data={'total': 0}, status_code=400))
#return
options = self._pre_query_builder_GET_hook(options)
###################################################
# Instantiate pipeline classes
###################################################
# Instantiate query builder, query, and transform classes
_query_builder = self.web_settings.ES_QUERY_BUILDER(options=options.esqb_kwargs,
index=self._get_es_index(options), doc_type=self._get_es_doc_type(options),
es_options=options.es_kwargs, userquery_dir=self.web_settings.USERQUERY_DIR,
scroll_options={'scroll': self.web_settings.ES_SCROLL_TIME, 'size': self.web_settings.ES_SCROLL_SIZE},
default_scopes=self.web_settings.DEFAULT_SCOPES)
_backend = self.web_settings.ES_QUERY(client=self.web_settings.es_client, options=options.es_kwargs)
_result_transformer = self.web_settings.ES_RESULT_TRANSFORMER(options=options.transform_kwargs,
host=self.request.host, jsonld_context=self.web_settings._jsonld_context,
doc_url_function=self.web_settings.doc_url, output_aliases=self.web_settings.OUTPUT_KEY_ALIASES, source_metadata=self.web_settings.source_metadata())
###################################################
# Scroll request pipeline
###################################################
if options.control_kwargs.scroll_id:
###################################################
# Build scroll query
###################################################
_query = _query_builder.scroll(options.control_kwargs.scroll_id)
#try:
# _query = _query_builder.scroll(options.control_kwargs.scroll_id)
#except Exception as e:
# self.log_exceptions("Error building scroll query")
# self._return_data_and_track({'success': False, 'error': 'Error building scroll query for scroll_id "{}"'.format(options.control_kwargs.scroll_id)}, ga_event_data={'total': 0})
# return
###################################################
# Get scroll results
###################################################
try:
res = _backend.scroll(_query)
except BiothingScrollError as e:
raise gen.Return(self._return_data_and_track({'success': False, 'error': '{}'.format(e)}, ga_event_data={'total': 0}, status_code=400))
#return
#except Exception as e:
# self.log_exceptions("Error getting scroll batch")
# self._return_data_and_track({'success': False, 'error': 'Error retrieving scroll results for scroll_id "{}"'.format(options.control_kwargs.scroll_id)}, ga_event_data={'total': 0})
# return
#logging.debug("Raw scroll query result: {}".format(res))
if options.control_kwargs.raw:
raise gen.Return(self._return_data_and_track(res, ga_event_data={'total': res.get('total', 0)}))
#return
res = self._pre_scroll_transform_GET_hook(options, res)
###################################################
# Transform scroll result
###################################################
try:
res = _result_transformer.clean_scroll_response(res)
except ScrollIterationDone as e:
raise gen.Return(self._return_data_and_track({'success': False, 'error': '{}'.format(e)}, ga_event_data={'total': res.get('total', 0)}, status_code=200))
#return
#except Exception as e:
# self.log_exceptions("Error transforming scroll batch")
# self._return_data_and_track({'success': False, 'error': 'Error transforming scroll results for scroll_id "{}"'.format(options.control_kwargs.scroll_id)})
# return
else:
##### Non-scroll query GET pipeline #############
###################################################
# Build query
###################################################
_query = _query_builder.query_GET_query(q=options.control_kwargs.q)
#try:
# _query = _query_builder.query_GET_query(q=options.control_kwargs.q)
#except Exception as e:
# self.log_exceptions("Error building query")
# self._return_data_and_track({'success': False, 'error': 'Error building query from q="{}"'.format(options.control_kwargs.q)}, ga_event_object={'total': 0})
# return
if options.control_kwargs.rawquery:
raise gen.Return(self._return_data_and_track(_query, ga_event_data={'total': 0}, rawquery=True))
#return
_query = self._pre_query_GET_hook(options, _query)
###################################################
# Get query results
###################################################
try:
res = yield gen.Task(_backend.query_GET_query, _query)
except BiothingSearchError as e:
raise gen.Return(self._return_data_and_track({'success': False, 'error': '{0}'.format(e)}, ga_event_data={'total': 0}, status_code=400))
#return
#except Exception as e:
# self.log_exceptions("Error executing query")
# self._return_data_and_track({'success': False, 'error': 'Error executing query'},
# ga_event_data={'total': 0})
# return
#logging.debug("Raw query result")
#logging.debug("Raw query result: {}".format(res))
# return raw result if requested
if options.control_kwargs.raw:
raise gen.Return(self._return_data_and_track(res, ga_event_data={'total': res.get('total', 0)}))
#return
res = self._pre_transform_GET_hook(options, res)
###################################################
# Transform query results
###################################################
# clean result
res = _result_transformer.clean_query_GET_response(res)
#try:
# res = _result_transformer.clean_query_GET_response(res)
#except Exception as e:
# self.log_exceptions("Error transforming query")
# logging.debug("Return query GET")
# self._return_data_and_track({'success': False, 'error': 'Error transforming query result'},
# ga_event_data={'total': res.get('total', 0)})
# return
res = self._pre_finish_GET_hook(options, res)
logging.debug("In HERE")
# return and track
self.return_json(res)
if options.control_kwargs.fetch_all:
self.ga_event_object_ret['action'] = 'fetch_all'
self.ga_track(event=self.ga_event_object({'total': res.get('total', 0)}))
self.self_track(data=self.ga_event_object_ret)
raise gen.Return()
###########################################################################
@gen.coroutine
def post(self):
''' Handle a POST to the query endpoint.'''
###################################################
# Get/type/alias query parameters
###################################################
try:
kwargs = self.get_query_params()
except BiothingParameterTypeError as e:
raise gen.Return(self._return_data_and_track({'success': False, 'error': "{0}".format(e)}, ga_event_data={'qsize':0}, status_code=400))
#return
#except Exception as e:
# self.log_exceptions("Error in get_query_params")
# self._return_data_and_track({'success': False, 'error': "Error parsing input parameter, check input types"}, ga_event_data={'qsize': 0})
# return
options = self.get_cleaned_options(kwargs)
logging.debug("Request kwargs: {}".format(kwargs))
logging.debug("Request options: {}".format(options))
if not options.control_kwargs.q:
raise gen.Return(self._return_data_and_track({'success': False, 'error': "Missing required parameters."},
ga_event_data={'qsize': 0}, status_code=400))
#return
options = self._pre_query_builder_POST_hook(options)
###################################################
# Instantiate pipeline classes
###################################################
# Instantiate query builder, query, and transform classes
_query_builder = self.web_settings.ES_QUERY_BUILDER(options=options.esqb_kwargs,
index=self._get_es_index(options), doc_type=self._get_es_doc_type(options),
es_options=options.es_kwargs, userquery_dir=self.web_settings.USERQUERY_DIR,
default_scopes=self.web_settings.DEFAULT_SCOPES)
_backend = self.web_settings.ES_QUERY(client=self.web_settings.es_client, options=options.es_kwargs)
_result_transformer = self.web_settings.ES_RESULT_TRANSFORMER(options=options.transform_kwargs, host=self.request.host,
doc_url_function=self.web_settings.doc_url,
jsonld_context=self.web_settings._jsonld_context, output_aliases=self.web_settings.OUTPUT_KEY_ALIASES, source_metadata=self.web_settings.source_metadata())
###################################################
# Build query
###################################################
_query = _query_builder.query_POST_query(qs=options.control_kwargs.q, scopes=options.esqb_kwargs.scopes)
#try:
# _query = _query_builder.query_POST_query(qs=options.control_kwargs.q, scopes=options.esqb_kwargs.scopes)
#except Exception as e:
# self.log_exceptions("Error building POST query")
# logging.debug("Returning query POST")
# self._return_data_and_track({'success': False, 'error': 'Error building query'}, ga_event_data={'qsize': len(options.control_kwargs.q)})
# return
if options.control_kwargs.rawquery:
raise gen.Return(self._return_data_and_track(_query, ga_event_data={'qsize': len(options.control_kwargs.q)}, rawquery=True))
#return
_query = self._pre_query_POST_hook(options, _query)
###################################################
# Execute query
###################################################
try:
res = yield gen.Task(_backend.query_POST_query, _query)
except BiothingSearchError as e:
raise gen.Return(self._return_data_and_track({'success': False, 'error': '{0}'.format(e)}, ga_event_data={'qsize': len(options.control_kwargs.q)}, status_code=400))
#return
#except Exception as e:
# self.log_exceptions("Error executing POST query")
# self._return_data_and_track({'success': False, 'error': 'Error executing query'}, ga_event_data={'qsize': len(options.control_kwargs.q)})
# return
logging.debug("Raw query result: {}".format(res))
# return raw result if requested
if options.control_kwargs.raw:
raise gen.Return(self._return_data_and_track(res, ga_event_data={'qsize': len(options.control_kwargs.q)}))
#return
res = self._pre_transform_POST_hook(options, res)
###################################################
# Transform query results
###################################################
# clean result
res = _result_transformer.clean_query_POST_response(qlist=options.control_kwargs.q, res=res)
#try:
# res = _result_transformer.clean_query_POST_response(qlist=options.control_kwargs.q, res=res)
#except Exception as e:
# self.log_exceptions("Error transforming POST query")
# self._return_data_and_track({'success': False, 'error': 'Error transforming query result'},
# ga_event_data={'qsize': len(options.control_kwargs.q)})
# return
res = self._pre_finish_POST_hook(options, res)
# return and track
self._return_data_and_track(res, ga_event_data={'qsize': len(options.control_kwargs.q)})
raise gen.Return()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
--------------------------------------------------------------------------
OpenMS -- Open-Source Mass Spectrometry
--------------------------------------------------------------------------
Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
ETH Zurich, and Freie Universitaet Berlin 2002-2018.
This software is released under a three-clause BSD license:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of any author or any participating institution
may be used to endorse or promote products derived from this software
without specific prior written permission.
For a full list of authors, refer to the file AUTHORS.
--------------------------------------------------------------------------
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------
$Maintainer: Hannes Roest $
$Authors: Hannes Roest $
--------------------------------------------------------------------------
"""
from __future__ import division
from __future__ import print_function
# Create simulated sonar scans for testing
from pyopenms import *
exp = MSExperiment()
# print(dir(exp))
# Create MS1 spectra
for rt_idx in range(50):
sp = MSSpectrum()
sp.setRT(rt_idx)
sp.setMSLevel(1)
for i in range(100):
p = Peak1D()
p.setMZ(100+i)
p.setIntensity(100+i)
sp.push_back(p)
exp.addSpectrum(sp)
NR_RT_SAMPLES = 50
NR_SONAR_SP = 200
NR_SONAR_SP = 50
NR_PEAKS = 5
# Create MS2 spectra
for rt_idx in range(NR_RT_SAMPLES):
# Base intensity is a single peak at 25 RT with 100 *i intensity spread across 9 SONAR scans
# Second base intensity is a single peak at 10 RT with 100 *i intensity spread across 9 SONAR scans
base_int = 100 - abs(25 - rt_idx)*(100)/25.0
base_int_second = 100 - abs(10 - rt_idx)*(100)/40.0
print("base int", base_int, abs(25 - rt_idx)*(100)/25.0 )
for sonar_idx in range(NR_SONAR_SP):
print("======================================= sonar", sonar_idx)
sp = MSSpectrum()
        p = Precursor()
p.setIsolationWindowLowerOffset(12)
p.setIsolationWindowUpperOffset(12)
target_mz = sonar_idx * 2.5 + 400
p.setMZ(target_mz)
sp.setPrecursors([p])
sp.setRT(rt_idx)
sp.setMSLevel(2)
# peaks of a precursor at 412.5 m/z : 100, 101, 102, .. 100 + NR_PEAKS
# range from window 0 to 10
for i in range(NR_PEAKS):
if 412.5 > target_mz - 12 and 412.5 < target_mz + 12:
p = Peak1D()
p.setMZ(100+i)
p.setIntensity(base_int * (i + 1) + sonar_idx)
sp.push_back(p)
else:
# add noise data (6x less)
p = Peak1D()
p.setMZ(100+i)
p.setIntensity(base_int * (i + 1)/ 6.0)
sp.push_back(p)
# peaks of a precursor at 462.5 m/z : 100, 101, 102, .. 100 + NR_PEAKS
# range from window 20 to 30
for i in range(NR_PEAKS):
if 462.5 > target_mz - 12 and 462.5 < target_mz + 12:
p = Peak1D()
p.setMZ(300+i)
p.setIntensity(base_int_second * (i + 1))
sp.push_back(p)
else:
# add noise data (6x less)
p = Peak1D()
p.setMZ(300+i)
p.setIntensity(base_int_second * (i + 1)/ 6.0)
sp.push_back(p)
exp.addSpectrum(sp)
# For debug:
# break
f = MzMLFile()
pf = f.getOptions()
pf.setCompression(True)
f.setOptions(pf)
f.store('sonar.mzML', exp)
|
from google.appengine.ext import ndb
class Character(ndb.Model):
species = ndb.StringProperty(required = True)
class Born(ndb.Model):
born_1 = ndb.StringProperty(required = True)
born_2 = ndb.StringProperty(required = True)
born_3 = ndb.StringProperty(required = True)
owner = ndb.KeyProperty(Character)
class PB2(ndb.Model):
pb_1 = ndb.StringProperty(required = True)
pb_2 = ndb.StringProperty(required = True)
pb_3 = ndb.StringProperty(required = True)
owner = ndb.KeyProperty(Character)
class PPath(ndb.Model):
PP_1 = ndb.StringProperty(required = True)
PP_2 = ndb.StringProperty(required = True)
owner = ndb.KeyProperty(Character)
class Path(ndb.Model):
p_1a = ndb.StringProperty(repeated = True)
p_1b = ndb.StringProperty(repeated = True)
p_2a = ndb.StringProperty(repeated = True)
p_2b = ndb.StringProperty(repeated = True)
p_3a = ndb.StringProperty(repeated = True)
p_3b = ndb.StringProperty(repeated = True)
owner = ndb.KeyProperty(Character)
class Looper(ndb.Model):
loop_1 = ndb.StringProperty(required = True)
loop_2 = ndb.StringProperty(required = True)
loop_3 = ndb.StringProperty(required = True)
run = ndb.StringProperty(required = True)
owner = ndb.KeyProperty(Character)
class KPath(ndb.Model):
p_2a = ndb.StringProperty(required = True)
p_2b = ndb.StringProperty(required = True)
p_3a = ndb.StringProperty(required = True)
p_3b = ndb.StringProperty(required = True)
owner = ndb.KeyProperty(Character)
|
# Copyright David Abrahams 2005. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
import polymorphism2
polymorphism2.test()
|
import logging
from typing import Dict, Tuple, Type, List
import gym
import numpy as np
from ray.rllib.models import ModelCatalog
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.tf.tf_action_dist import (Categorical)
from ray.rllib.models.torch.torch_action_dist import TorchCategorical
from ray.rllib.models.torch.torch_action_dist import TorchDistributionWrapper
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.torch_policy_template import build_torch_policy
from ray.rllib.utils.error import UnsupportedSpaceException
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.schedules import ConstantSchedule, PiecewiseSchedule
from ray.rllib.utils.typing import TensorType, TrainerConfigDict
import grl
from grl.rllib_tools.modified_policies.safe_set_weights_policy_mixin import SafeSetWeightsPolicyMixin
AVG_POL_SCOPE = "avg_pol"
torch, nn = try_import_torch()
F = None
if nn:
F = nn.functional
logger = logging.getLogger(__name__)
def compute_policy_logits(policy: Policy,
model: ModelV2,
obs: TensorType,
is_training=None) -> TensorType:
model_out, _ = model({
SampleBatch.CUR_OBS: obs,
"is_training": is_training
if is_training is not None else policy._get_is_training_placeholder(),
}, [], None)
return model_out
def get_distribution_inputs_and_class(
policy: Policy,
model: ModelV2,
obs_batch: TensorType,
*,
is_training=True,
**kwargs) -> Tuple[TensorType, type, List[TensorType]]:
"""Build the action distribution"""
logits = compute_policy_logits(policy, model, obs_batch, is_training)
logits = logits[0] if isinstance(logits, tuple) else logits
policy.logits = logits
return policy.logits, (TorchCategorical
if policy.config["framework"] == "torch" else
Categorical), [] # state-outs
def build_avg_model_and_distribution(
policy: Policy, obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: TrainerConfigDict) -> Tuple[ModelV2, Type[TorchDistributionWrapper]]:
if not isinstance(action_space, gym.spaces.Discrete):
raise UnsupportedSpaceException(f"Action space {action_space} is not supported for NFSP.")
policy.avg_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=action_space.n,
model_config=config["model"],
framework=config["framework"],
name=AVG_POL_SCOPE)
policy.avg_func_vars = policy.avg_model.variables()
return policy.avg_model, TorchCategorical
def build_supervised_learning_loss(policy: Policy, model: ModelV2, dist_class: Type[TorchDistributionWrapper],
train_batch: SampleBatch) -> TensorType:
"""Constructs the loss for SimpleQTorchPolicy.
Args:
policy (Policy): The Policy to calculate the loss for.
model (ModelV2): The Model to calculate the loss for.
dist_class (Type[ActionDistribution]): The action distribution class.
train_batch (SampleBatch): The training data.
Returns:
TensorType: A single loss tensor.
"""
logits_t = compute_policy_logits(policy=policy,
model=policy.avg_model,
obs=train_batch[SampleBatch.CUR_OBS],
is_training=True)
action_targets_t = train_batch[SampleBatch.ACTIONS].long()
policy.loss = F.cross_entropy(input=logits_t, target=action_targets_t)
return policy.loss
def behaviour_logits_fetches(
policy: Policy, input_dict: Dict[str, TensorType],
state_batches: List[TensorType], model: ModelV2,
action_dist: TorchDistributionWrapper) -> Dict[str, TensorType]:
"""Defines extra fetches per action computation.
Args:
policy (Policy): The Policy to perform the extra action fetch on.
input_dict (Dict[str, TensorType]): The input dict used for the action
computing forward pass.
state_batches (List[TensorType]): List of state tensors (empty for
non-RNNs).
model (ModelV2): The Model object of the Policy.
action_dist (TorchDistributionWrapper): The instantiated distribution
object, resulting from the model's outputs and the given
distribution class.
Returns:
Dict[str, TensorType]: Dict with extra tf fetches to perform per
action computation.
"""
return {
"action_probs": policy.action_probs,
"behaviour_logits": policy.logits,
}
def action_sampler(policy, model, input_dict, state, explore, timestep):
obs: np.ndarray = input_dict['obs']
is_training = False
logits = compute_policy_logits(policy, model, obs, is_training)
logits = logits[0] if isinstance(logits, tuple) else logits
action_probs_batch = F.softmax(logits, dim=1)
policy.logits = logits
policy.action_probs = action_probs_batch
# print(f"probs: {action_probs_batch}")
actions = []
logps = []
for action_probs in action_probs_batch.cpu().detach().numpy():
action = np.random.choice(range(0, len(action_probs)), p=action_probs)
logp = np.log(action_probs[action])
# print(f"action: {action}, logp: {logp}")
actions.append(action)
logps.append(logp)
state_out = state
return np.asarray(actions, dtype=np.int32), None, state_out
def sgd_optimizer(policy: Policy,
config: TrainerConfigDict) -> "torch.optim.Optimizer":
return torch.optim.SGD(
policy.avg_func_vars, lr=policy.config["lr"])
def build_avg_policy_stats(policy: Policy, batch) -> Dict[str, TensorType]:
return {"loss": policy.loss}
class ManualLearningRateSchedule:
"""Mixin for TFPolicy that adds a learning rate schedule."""
def __init__(self, lr, lr_schedule):
self.cur_lr = lr
if lr_schedule is None:
self.lr_schedule = ConstantSchedule(lr, framework=None)
else:
self.lr_schedule = PiecewiseSchedule(
lr_schedule, outside_value=lr_schedule[-1][-1], framework=None)
# not called automatically by any rllib logic, call this in your training script or a trainer callback
def update_lr(self, timesteps_total):
print(f"cur lr {self.cur_lr}")
self.cur_lr = self.lr_schedule.value(timesteps_total)
for opt in self._optimizers:
for p in opt.param_groups:
p["lr"] = self.cur_lr
def setup_mixins(policy, obs_space, action_space, config):
ManualLearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
NFSPTorchAveragePolicy = build_torch_policy(
name="NFSPAveragePolicy",
extra_action_out_fn=behaviour_logits_fetches,
loss_fn=build_supervised_learning_loss,
get_default_config=lambda: grl.algos.nfsp_rllib.nfsp.DEFAULT_CONFIG,
make_model_and_action_dist=build_avg_model_and_distribution,
action_sampler_fn=action_sampler,
before_init=setup_mixins,
extra_learn_fetches_fn=lambda policy: {"sl_loss": policy.loss},
optimizer_fn=sgd_optimizer,
stats_fn=build_avg_policy_stats,
mixins=[ManualLearningRateSchedule, SafeSetWeightsPolicyMixin],
# action_distribution_fn=get_distribution_inputs_and_class,
)
|
"""
tests.gunicorn.conftest
~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2017 by ConsenSys France.
:license: BSD, see LICENSE for more details.
"""
import os
import pytest
@pytest.fixture(scope='session')
def files_dir(test_dir):
yield os.path.join(test_dir, 'gunicorn', 'files')
@pytest.fixture(scope='session')
def logging_config_file(files_dir):
yield os.path.join(files_dir, 'logging.yml')
|
from tri.delaunay.helpers import ToPointsAndSegments
from grassfire import calc_skel
from simplegeom.wkt import loads
"""Church in Naaldwijk
"""
if True:
import logging
import sys
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
formatter = logging.Formatter('%(asctime)s - %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
# original
# if True:
# ring = [(74029.47599999999511056,445622.80800000001909211),(74022.8169999999954598,445622.11400000000139698),(74023.09900000000197906,445619.97800000000279397),(74021.96000000000640284,445619.86800000001676381),(74022.06500000000232831,445618.78100000001722947),(74023.11100000000442378,445618.88199999998323619),(74024.28500000000349246,445606.70799999998416752),(74024.5,445606.72899999999208376),(74024.98399999999674037,445601.71000000002095476),(74025.26700000000710133,445601.7370000000228174),(74025.43099999999685679,445600.02799999999115244),(74033.13599999999860302,445600.77100000000791624),(74033.26799999999639113,445599.39600000000791624),(74034.29600000000209548,445599.49499999999534339),(74034.16400000000430737,445600.86300000001210719),(74037.91899999999441206,445601.22499999997671694),(74038.05199999999604188,445599.84600000001955777),(74039.09900000000197906,445599.9469999999855645),(74038.96700000000419095,445601.32099999999627471),(74042.68099999999685679,445601.67999999999301508),(74042.8120000000053551,445600.32099999999627471),(74043.87600000000384171,445600.42300000000977889),(74043.74499999999534339,445601.78100000001722947),(74047.73099999999976717,445602.16499999997904524),(74048.09600000000500586,445598.37599999998928979),(74047.09299999999348074,445598.27899999998044223),(74047.19999999999708962,445597.1720000000204891),(74048.21899999999732245,445597.27100000000791624),(74048.31600000000617001,445596.2629999999771826),(74049.39500000000407454,445596.36700000002747402),(74049.29700000000593718,445597.38000000000465661),(74055.42600000000675209,445597.97100000001955777),(74055.52499999999417923,445596.94300000002840534),(74056.61999999999534339,445597.04899999999906868),(74056.52099999999336433,445598.0719999999855645),(74057.59600000000500586,445598.17599999997764826),(74057.4940000000060536,445599.23300000000745058),(74056.38800000000628643,445599.12599999998928979),(74056.05599999999685679,445602.56800000002840534),(74057.1190000000060536,445602.66999999998370185),(74056.9980000000068685,445603.92099999997299165),(74055.94000000000232831,445603.81800000002840534),(74055.66300000000046566,445606.68599999998696148),(74058.68499999999767169,445606.97700000001350418),(74058.76900000000023283,445606.09999999997671694),(74059.74599999999918509,445606.19400000001769513),(74059.65799999999580905,445607.106000000028871),(74062.35899999999674037,445607.36599999997997656),(74062.44800000000395812,445606.4469999999855645),(74063.42299999999522697,445606.54100000002654269),(74063.32499999999708962,445607.5590000000083819),(74066.11000000000058208,445607.9340000000083819),(74066.59100000000034925,445603.26099999999860302),(74065.60700000000360887,445603.15999999997438863),(74065.71199999999953434,445602.1379999999771826),(74066.66800000000512227,445602.2370000000228174),(74066.77300000000104774,445601.21600000001490116),(74067.73299999999289867,445601.31500000000232831),(74067.62900000000081491,445602.31900000001769513),(74070.46400000000721775,445602.61099999997531995),(74070.57499999999708962,445601.54200000001583248),(74071.51300000000628643,445601.63900000002468005),(74071.40799999999580905,445602.65799999999580905),(74072.27300000000104774,445602.74699999997392297),(74072.16599999999743886,445603.78999999997904524),(74071.26700000000710133,445603.6969999999855645),(74071.0059999999939464,445606.231000000028871),(74071.77899999999499414,445607.34999999997671694),(74071.67900000000372529,445608.32900000002700835),(74073.66300000000046566,445609.20000000001164153),(74074.42100
000000209548,445608.67599999997764826),(74074.96000000000640284,445609.45699999999487773),(74074.32899999999790452,445609.93900000001303852),(74075.64800000000104774,445612.22700000001350418),(74076.4940000000060536,445611.94199999998090789),(74076.72800000000279397,445612.91399999998975545),(74075.7519999999931315,445613.26799999998183921),(74075.47599999999511056,445615.94599999999627471),(74076.3690000000060536,445616.33799999998882413),(74076.1889999999984866,445617.23200000001816079),(74075.14699999999720603,445616.83299999998416752),(74073.19599999999627471,445618.96000000002095476),(74073.7519999999931315,445619.76500000001396984),(74072.98699999999371357,445620.29300000000512227),(74072.50999999999476131,445619.60200000001350418),(74069.86000000000058208,445620.56199999997625127),(74069.76700000000710133,445621.46700000000419095),(74068.79700000000593718,445621.36700000002747402),(74068.88599999999860302,445620.50300000002607703),(74065.9330000000045402,445620.19900000002235174),(74065.84299999999348074,445621.07500000001164153),(74064.97999999999592546,445620.98599999997531995),(74065.07099999999627471,445620.09700000000884756),(74062.14100000000325963,445619.79399999999441206),(74062.04799999999522697,445620.69500000000698492),(74061.05299999999988358,445620.59299999999348074),(74061.14500000000407454,445619.69799999997485429),(74059.73399999999674037,445619.55200000002514571),(74059.3120000000053551,445623.64199999999254942),(74060.21499999999650754,445623.73499999998603016),(74060.11699999999837019,445624.68800000002374873),(74059.3129999999946449,445624.60499999998137355),(74059.24099999999452848,445625.31099999998696148),(74058.32000000000698492,445625.21600000001490116),(74058.38999999999941792,445624.54100000002654269),(74053.94599999999627471,445624.08199999999487773),(74053.65700000000651926,445626.89000000001396984),(74054.60099999999511056,445626.98800000001210719),(74054.48200000000360887,445628.143999999971129),(74053.52400000000488944,445628.04499999998370185),(74053.41199999999662396,445629.1379999999771826),(74052.39999999999417923,445629.03399999998509884),(74052.51099999999860302,445627.95400000002700835),(74046.24300000000221189,445627.30800000001909211),(74046.1220000000030268,445628.4870000000228174),(74045.08000000000174623,445628.38000000000465661),(74045.19899999999324791,445627.22100000001955777),(74044.29799999999522697,445627.12800000002607703),(74044.42200000000593718,445625.92599999997764826),(74045.34900000000197906,445626.02199999999720603),(74045.74199999999837019,445622.09999999997671694),(74041.92200000000593718,445621.73200000001816079),(74041.8139999999984866,445622.84999999997671694),(74040.81900000000314321,445622.75400000001536682),(74040.92500000000291038,445621.65500000002793968),(74036.96499999999650754,445621.27299999998649582),(74036.86199999999371357,445622.34499999997206032),(74035.79399999999441206,445622.24200000002747402),(74035.89599999999336433,445621.1840000000083819),(74032.09600000000500586,445620.81800000002840534),(74031.98900000000139698,445621.91800000000512227),(74030.92900000000372529,445621.8159999999916181),(74031.03399999999965075,445620.72499999997671694),(74029.6889999999984866,445620.59499999997206032),(74029.47599999999511056,445622.80800000001909211)]
# conv = ToPointsAndSegments()
# conv.add_polygon([ring])
# skel = calc_skel(conv, pause=True, output=True)
with open("/home/martijn/Documents/work/archive/2016-01_grassfire_for_building_generalization/data/naaldwijk_church/in_out/in.geojson") as fh:
s = fh.read()
import json
x = json.loads(s)
# parse segments from geo-json
segments = []
for y in x['features']:
segments.append(tuple(map(tuple, y['geometry']['coordinates'])))
# convert to triangulation input
conv = ToPointsAndSegments()
for line in segments:
conv.add_point(line[0])
conv.add_point(line[1])
conv.add_segment(*line)
# skeletonize / offset
skel = calc_skel(conv, pause=False, output=True, shrink=False, internal_only=False)
|
def get_provider_info():
return {
"package-name": "airflow_provider_db2",
"name": "DB2 Airflow Provider",
"description": "A custom DB2 provider to implement a conn type that uses ibm_db library, a workaround to use SECURITYMECHANISM parameter to connect to DB2",
"hook-class-names": ["airflow_provider_db2.hooks.db2_hook.DB2Hook"],
'connection-types': [
{
'hook-class-name': 'airflow_provider_db2.hooks.db2_hook.DB2Hook',
'connection-type': 'DB2',
}
],
"versions": ["0.0.1"]
}
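# How a provider like this is typically made discoverable (a hedged sketch,
# not part of this module): Airflow 2 finds get_provider_info() through an
# 'apache_airflow_provider' entry point declared by the package, e.g. in
# setup.py. The module path below is an assumption.
#
#     from setuptools import setup
#     setup(
#         name='airflow-provider-db2',
#         entry_points={
#             'apache_airflow_provider': [
#                 'provider_info=airflow_provider_db2.__init__:get_provider_info'
#             ]
#         },
#     )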
|
import logging
from typing import Optional, Any
from boto3 import client, Session
from b_lambda_layer_common.ssm.fetcher import Fetcher
from b_lambda_layer_common.ssm.refreshable import Refreshable
logger = logging.getLogger(__name__)
class SSMParameter(Refreshable):
"""
SSM Parameter implementation.
"""
def __init__(
self,
param_name: str,
max_age: int = 0,
ssm_client: Optional[client] = None
) -> None:
"""
Constructor.
:param param_name: SSM parameter name for which the value should be fetched.
:param max_age: Max age of the value until refresh is needed.
:param ssm_client: Boto3 SSM client (optional).
"""
self.__ssm_client = ssm_client or Session().client('ssm')
self.__ssm_fetcher = Fetcher(self.__ssm_client)
super().__init__(max_age)
self.__name = param_name
self.__value: Any = None
def update_value(self) -> None:
"""
Force update of the SSM parameter value.
:return: No return.
"""
items, invalid_parameters = self.__ssm_fetcher.get_parameters([self.__name])
if invalid_parameters or self.__name not in items:
raise ValueError(f"{self.__name} is invalid.")
self.__value = items[self.__name]['Value']
@property
def name(self) -> str:
"""
Property for the SSM parameter name.
:return: SSM parameter name.
"""
return self.__name
@property
def value(self) -> Any:
"""
Property for the SSM parameter value.
:return: Value of the SSM parameter.
"""
if self.__value is None:
logger.info('Cached parameter value is none.')
self.refresh()
if self.should_refresh():
logger.info('Cached parameter value should be refreshed.')
self.refresh()
return self.__value
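# A minimal usage sketch (an addition, not part of the original module). It
# assumes an SSM parameter named '/my/app/db-password' exists in the account
# the default boto3 session points at; the name is purely illustrative.
if __name__ == '__main__':
    param = SSMParameter('/my/app/db-password', max_age=300)
    # The first access fetches the value from SSM; later accesses reuse the
    # cached value until `max_age` seconds have elapsed.
    print(param.name, param.value)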
|
# -*- coding: utf-8 -*-
"""
ARRI ALEXA Wide Gamut Colourspace
=================================
Defines the *ARRI ALEXA Wide Gamut* colourspace:
- :attr:`colour.models.ALEXA_WIDE_GAMUT_COLOURSPACE`.
See Also
--------
`RGB Colourspaces Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/models/rgb.ipynb>`_
References
----------
- :cite:`ARRI2012a` : ARRI. (2012). ALEXA - Log C Curve - Usage in VFX.
Retrieved from https://drive.google.com/\
open?id=1t73fAG_QpV7hJxoQPYZDWvOojYkYDgvn
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.colorimetry import ILLUMINANTS
from colour.models.rgb import (RGB_Colourspace, log_encoding_ALEXALogC,
log_decoding_ALEXALogC)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'ALEXA_WIDE_GAMUT_PRIMARIES', 'ALEXA_WIDE_GAMUT_WHITEPOINT_NAME',
'ALEXA_WIDE_GAMUT_WHITEPOINT', 'ALEXA_WIDE_GAMUT_TO_XYZ_MATRIX',
'XYZ_TO_ALEXA_WIDE_GAMUT_MATRIX', 'ALEXA_WIDE_GAMUT_COLOURSPACE'
]
ALEXA_WIDE_GAMUT_PRIMARIES = np.array([
[0.6840, 0.3130],
[0.2210, 0.8480],
[0.0861, -0.1020],
])
"""
*ARRI ALEXA Wide Gamut* colourspace primaries.
ALEXA_WIDE_GAMUT_PRIMARIES : ndarray, (3, 2)
"""
ALEXA_WIDE_GAMUT_WHITEPOINT_NAME = 'D65'
"""
*ARRI ALEXA Wide Gamut* colourspace whitepoint name.
ALEXA_WIDE_GAMUT_WHITEPOINT : unicode
"""
ALEXA_WIDE_GAMUT_WHITEPOINT = (ILLUMINANTS[
'CIE 1931 2 Degree Standard Observer'][ALEXA_WIDE_GAMUT_WHITEPOINT_NAME])
"""
*ARRI ALEXA Wide Gamut* colourspace whitepoint.
ALEXA_WIDE_GAMUT_WHITEPOINT : ndarray
"""
ALEXA_WIDE_GAMUT_TO_XYZ_MATRIX = np.array([
[0.638008, 0.214704, 0.097744],
[0.291954, 0.823841, -0.115795],
[0.002798, -0.067034, 1.153294],
])
"""
*ARRI ALEXA Wide Gamut* colourspace to *CIE XYZ* tristimulus values matrix.
ALEXA_WIDE_GAMUT_TO_XYZ_MATRIX : array_like, (3, 3)
"""
XYZ_TO_ALEXA_WIDE_GAMUT_MATRIX = np.array([
[1.789066, -0.482534, -0.200076],
[-0.639849, 1.396400, 0.194432],
[-0.041532, 0.082335, 0.878868],
])
"""
*CIE XYZ* tristimulus values to *ARRI ALEXA Wide Gamut* colourspace matrix.
XYZ_TO_ALEXA_WIDE_GAMUT_MATRIX : array_like, (3, 3)
"""
ALEXA_WIDE_GAMUT_COLOURSPACE = RGB_Colourspace(
'ALEXA Wide Gamut',
ALEXA_WIDE_GAMUT_PRIMARIES,
ALEXA_WIDE_GAMUT_WHITEPOINT,
ALEXA_WIDE_GAMUT_WHITEPOINT_NAME,
ALEXA_WIDE_GAMUT_TO_XYZ_MATRIX,
XYZ_TO_ALEXA_WIDE_GAMUT_MATRIX,
log_encoding_ALEXALogC,
log_decoding_ALEXALogC,
)
ALEXA_WIDE_GAMUT_COLOURSPACE.__doc__ = """
*ARRI ALEXA Wide Gamut* colourspace.
References
----------
:cite:`ARRI2012a`
ALEXA_WIDE_GAMUT_COLOURSPACE : RGB_Colourspace
"""
|
import pytest
# Flask app factory
from controller.controller_sql.factory import create_app
# Repo.py Factory used to populate the test database
from repository.repository_sql.models.db_model import db as _db
from repository.repository_sql.repo import Factory
# We also import the model
from repository.repository_sql.models.items import Items
# For the test DB connection, it imports Flask's g object
from repository.repository_sql.db_connection import init_app
class TestConfig(object):
"""TestConfig Class: Class with the configuration constants of Testing Envronment of APP Flask"""
# DEBUG debe ser Falso para que no haya error del método SETUP al intentar ejecutar el APP Flask, por ahora es mejor dejarlo en False
DEBUG = False
TESTING = True
SQLALCHEMY_TRACK_MODIFICATIONS = False
ENV = "test"
TESTING = True
@pytest.fixture
def app():
"""Get the APP Flask from the create_app() method from Factory of Controller Package
Yields:
Flask Intance: Yields a APP Flask
"""
app = create_app()
yield app
@pytest.fixture
def client(app):
"""Get the test_client of APP Flask, where before of Yield sentence, it adds the basic items into database and models with the main goal of each test use this data/items to test it and after each test the data of models and database restart by default. This help us to isolate each test from another test
Args:
app (Flask instance): APP Flask
Yields:
test_client: Yields a test_client from APP Flask to test our test cases
"""
with app.test_client() as client:
with app.app_context():
_db.init_app(app)
# _db.drop_all()
_db.create_all()
            # Get the list of items
            inventario = Factory.loadInventory()
            # Populate the database with the items
for item in inventario:
add_item = Items(
name=item["name"], sell_in=item["sell_in"], quality=item["quality"]
)
_db.session.add(add_item)
_db.session.commit()
yield client
_db.session.query(Items).delete()
_db.session.commit()
# _db.session.close()
# _db.drop_all()
@pytest.fixture(scope="function")
def db(app):
"""Get the Database Object of SQLAlchemy, where before get the _db it's open the app_context() and init the app in the DB and create all Models, after the Models and database is filled in with the basic items from loadInventory(), after each test the data of models and database restart by default. The scope of this fixture is for each test function
Args:
app (flask instance): App Flask
Yields:
SQLAlchemy Instance: Yields a SQLAlchemy Object with the session init
"""
with app.test_client() as client:
with app.app_context():
_db.init_app(app)
# _db.drop_all()
_db.create_all()
            # Get the list of items
            inventario = Factory.loadInventory()
            # Populate the database with the items
for item in inventario:
add_item = Items(
name=item["name"], sell_in=item["sell_in"], quality=item["quality"]
)
_db.session.add(add_item)
_db.session.commit()
yield _db
_db.session.query(Items).delete()
_db.session.commit()
# It has scope='function' so that it only applies to each individual test
@pytest.fixture(scope="function", autouse=True)
def session(db):
"""Session fixture help us to do a transaction SQL for each test of repository test cases, this means that for each test function of repository package, session fixture opens a transaction in SQLAlchemy and Database MySQL where after finished the test, the transaction finished with a rollback, this means that all changes of each test actually don't affect the models and database at the finished because of Transactions SQL. Due to this fixture is for each test function, then the scope is function and the autouse is True
Args:
db (SQLAlchemy instance): It's a SQLALchemy Object
Yields:
SQLALchemy session: Yields a session of the SQLAlchemy Object for each test function
"""
connection = db.engine.connect()
transaction = connection.begin()
options = dict(bind=connection, binds={})
session_ = db.create_scoped_session(options=options)
db.session = session_
yield session_
transaction.rollback()
# connection.close()
session_.remove()
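# Hedged illustration (not from the original conftest): a test module using these
# fixtures could look like the sketch below; the '/items' route and the expected
# status code are assumptions made only for illustration.
#     def test_list_items(client):
#         response = client.get("/items")
#         assert response.status_code == 200
#     def test_items_seeded(db):
#         assert db.session.query(Items).count() == len(Factory.loadInventory())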
|
import numpy as np
np.set_printoptions(edgeitems=30, linewidth=100000)
def part1(lines: list) -> int:
split_index = lines.index("")
sheet_values = lines[:split_index]
values = lines[split_index+1:]
sheet_values_mapped = list(map(lambda x: x.split(","), sheet_values))
# max_x = int(max(sheet_values_mapped, key=lambda item: int(item[0]))[0])
# max_y = int(max(sheet_values_mapped, key=lambda item: int(item[1]))[1])
cutaxis, rawindex = values[0].split()[2].split("=")
index = int(rawindex)
if cutaxis == "y":
max_y = 2*index+1
max_x = 2*int(values[1].split()[2].split("=")[1])+1
else:
max_x = 2*index+1
max_y = 2*int(values[1].split()[2].split("=")[1])+1
sheet = np.zeros(shape=(max_y, max_x))
for i in sheet_values_mapped:
sheet[int(i[1]), int(i[0])] = True
orders = map(lambda x: x.split()[2].split("="), values)
for o in orders:
cutaxis = o[0]
index = int(o[1])
if cutaxis == "y":
max_y = max_y//2
else:
max_x = max_x//2
# newrows = np.zeroes(shape=(max_x + 1, max_y + 1), dtype=bool)
if cutaxis == "y":
sheet = np.logical_or(sheet[:max_y, :], np.flipud(sheet[max_y+1:, :]))
else:
sheet = np.logical_or(sheet[:, :max_x], np.fliplr(sheet[:, max_x+1:]))
print(sheet.astype(int))
return np.sum(sheet)
if __name__ == "__main__":
with open("day13/input13.txt") as fp:
lines = fp.read().split("\n")
print(part1(lines))
# print(part2(lines)) |
# Implementation for MGMA Module.
import torch.nn as nn
import numpy as np
import torch
import copy
import math
class MGMA(nn.Module):
def __init__(self, input_filters, output_filters, mgma_config, freeze_bn=False, temporal_downsample=False, spatial_downsample=False):
super(MGMA, self).__init__()
self.freeze_bn = freeze_bn
self.input_filters = input_filters
self.output_filters = output_filters
self.mgma_type = mgma_config['TYPE'] if 'TYPE' in mgma_config else "TSA"
self.mgma_num_groups = mgma_config['NUM_GROUPS'] if 'NUM_GROUPS' in mgma_config else 4
layers = []
if temporal_downsample and spatial_downsample:
layers.append(self._make_downsample(kernel_type="U"))
elif spatial_downsample:
layers.append(self._make_downsample(kernel_type="S"))
if self.input_filters % self.mgma_num_groups == 0:
self.split_groups = [self.mgma_num_groups]
self.split_channels = [self.input_filters]
else:
group_channels = math.ceil(self.input_filters / self.mgma_num_groups)
split_groups_1 = self.input_filters // group_channels
split_groups_2 = 1
split_channels_1 = group_channels * split_groups_1
split_channels_2 = self.input_filters - split_channels_1
self.split_groups = [split_groups_1, split_groups_2]
self.split_channels = [split_channels_1, split_channels_2]
for i in range(len(self.split_groups)):
if self.mgma_type in ["TA", "SA", "UA"]:
if i == 0:
self.ma = nn.ModuleDict()
layers_i = copy.deepcopy(layers)
self._make_layers(layers_i, self.split_channels[i], self.split_channels[i], self.mgma_type[0], self.split_groups[i])
self.ma[str(i)] = nn.Sequential(*layers_i)
else:
if i == 0:
self.ta = nn.ModuleDict()
self.sa = nn.ModuleDict()
layers_t_i = copy.deepcopy(layers)
layers_s_i = copy.deepcopy(layers)
self._make_layers(layers_t_i, self.split_channels[i], self.split_channels[i], "T", self.split_groups[i])
self._make_layers(layers_s_i, self.split_channels[i], self.split_channels[i], "S", self.split_groups[i])
self.ta[str(i)] = nn.Sequential(*layers_t_i)
self.sa[str(i)] = nn.Sequential(*layers_s_i)
def _make_layers(self, layers, input_filters, output_filters, mgma_type, num_groups):
layers.append(self._make_downsample(kernel_type=mgma_type))
layers.append(self._make_interpolation(kernel_type=mgma_type))
layers.append(self._make_conv_bn(input_filters, output_filters, kernel_type=mgma_type, groups=num_groups, use_relu=False))
layers.append(self._make_activate())
def _make_conv_bn(self, input_filters, out_filters, kernel_type="T", kernel=3, stride=1, padding=1, groups=1, use_bn=True, use_relu=True):
layers = []
if kernel_type.startswith("T"):
layers.append(nn.Conv3d(input_filters, out_filters, kernel_size=(kernel, 1, 1), stride=(stride, 1, 1), padding=(padding, 0, 0), groups=groups, bias=False))
elif kernel_type.startswith("S"):
layers.append(nn.Conv3d(input_filters, out_filters, kernel_size=(1, kernel, kernel), stride=(1, stride, stride), padding=(0, padding, padding), groups=groups, bias=False))
elif kernel_type.startswith("U"):
layers.append(nn.Conv3d(input_filters, out_filters, kernel_size=(kernel, kernel, kernel), stride=(stride, stride, stride), padding=(padding, padding, padding), groups=groups, bias=False))
if use_bn:
layers.append(nn.BatchNorm3d(out_filters, track_running_stats=(not self.freeze_bn)))
if use_relu:
layers.append(nn.ReLU(inplace=True))
return nn.Sequential(*layers)
def _make_downsample(self, kernel_type="T"):
if kernel_type.startswith("T"):
return nn.MaxPool3d(kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0))
elif kernel_type.startswith("S"):
return nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1))
elif kernel_type.startswith("U"):
return nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(2, 2, 2), padding=(1, 1, 1))
def _make_interpolation(self, kernel_type="T"):
if kernel_type.startswith("T"):
self.upsample_scale_factor = (2, 1, 1)
return nn.Upsample(scale_factor=self.upsample_scale_factor, mode='nearest')
elif kernel_type.startswith("S"):
self.upsample_scale_factor = (1, 2, 2)
return nn.Upsample(scale_factor=self.upsample_scale_factor, mode='nearest')
elif kernel_type.startswith("U"):
self.upsample_scale_factor = (2, 2, 2)
return nn.Upsample(scale_factor=self.upsample_scale_factor, mode='nearest')
def _make_activate(self):
return nn.Sigmoid()
def forward(self, x):
mgma_in = x
if self.mgma_type in ["TA", "SA", "UA"]:
ma_in_list = mgma_in.split(self.split_channels, 1)
ma_out_list = []
for i in range(len(ma_in_list)):
ma_out_list.append(self.ma[str(i)](ma_in_list[i]))
mgma_out = torch.cat(ma_out_list, 1)
return mgma_out
else:
ta_in_list = mgma_in.split(self.split_channels, 1)
ta_out_list = []
for i in range(len(ta_in_list)):
ta_out_list.append(self.ta[str(i)](ta_in_list[i]))
mgma_ta_out = torch.cat(ta_out_list, 1)
sa_in_list = mgma_in.split(self.split_channels, 1)
sa_out_list = []
for i in range(len(sa_in_list)):
sa_out_list.append(self.sa[str(i)](sa_in_list[i]))
mgma_sa_out = torch.cat(sa_out_list, 1)
mgma_out = mgma_ta_out + mgma_sa_out
return mgma_out
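# Hedged usage sketch (not part of the original module): exercises the MGMA block on a
# dummy (batch, channels, frames, height, width) clip. The config keys TYPE/NUM_GROUPS
# mirror the constructor above; the tensor sizes are illustrative assumptions only.
if __name__ == "__main__":
    mgma = MGMA(input_filters=32, output_filters=32,
                mgma_config={"TYPE": "TSA", "NUM_GROUPS": 4})
    clip = torch.randn(2, 32, 8, 56, 56)
    attention = mgma(clip)  # sigmoid attention map with the same shape as the input
    print(attention.shape)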
|
# Time: O(n)
# Space: O(sqrt(n))
# 1265
# You are given an immutable linked list, print out all values of each node in reverse with the help of the following interface:
# - ImmutableListNode: An interface of immutable linked list, you are given the head of the list.
# You need to use the following functions to access the linked list (you can't access the ImmutableListNode directly):
# - ImmutableListNode.printValue(): Print value of the current node.
# - ImmutableListNode.getNext(): Return the next node.
# The input is only given to initialize the linked list internally. You must solve this problem
# without modifying the linked list; in other words, operate on it using only the mentioned APIs.
#
# Follow up:
# Could you solve this problem in:
# 1. Constant space complexity?
# 2. Linear time complexity and less than linear space complexity?
# The length of the linked list is between [1, 1000].
# The value of each node in the linked list is between [-1000, 1000].
import math
class ImmutableListNode:
def __init__(self, x):
self.val = x
self.next = None
def getNext(self):
return self.next
def printValue(self):
print(self.val)
class Solution(object):
    # Solution 1 divides the nodes into sqrt(n) buckets of sqrt(n) nodes each and needs an O(sqrt(n)) stack to print.
    # Solution 2 puts all nodes in one bucket and needs an O(n) stack to print the nodes in the bucket.
def printLinkedListInReverse(self, head):
"""
:type head: ImmutableListNode
:rtype: None
"""
def print_nodes(head, count): # similar to the core function in solution 2, use stack to print
nodes = []
while head and len(nodes) != count:
nodes.append(head)
head = head.getNext()
for node in reversed(nodes):
node.printValue()
# find sqrt(n)
curr, count = head, 0
while curr:
curr = curr.getNext()
count += 1
bucket_count = math.ceil(count**0.5)
curr, count, bucket_heads = head, 0, []
while curr:
if count % bucket_count == 0:
bucket_heads.append(curr)
curr = curr.getNext()
count += 1
for head in reversed(bucket_heads):
print_nodes(head, bucket_count)
# Time: O(n)
# Space: O(n)
class Solution2(object):
def printLinkedListInReverse(self, head):
"""
:type head: ImmutableListNode
:rtype: None
"""
nodes = []
while head:
nodes.append(head)
head = head.getNext()
for node in reversed(nodes):
node.printValue()
# Time: O(n^2)
# Space: O(1)
class Solution3(object):
def printLinkedListInReverse(self, head):
"""
:type head: ImmutableListNode
:rtype: None
"""
tail = None
while head != tail:
curr = head
while curr.getNext() != tail:
curr = curr.getNext()
curr.printValue()
tail = curr
dummy = cur = ImmutableListNode(None)
for x in range(10):
cur.next = ImmutableListNode(x)
cur = cur.next
print(Solution().printLinkedListInReverse(dummy.next)) # |
from snowflake_dir.snowflake_connect import SnowflakeConnection
import yaml
from snowflake_dir.data_objects import Stage, Table, File_Format, Pipe, View, Integration
import logging
logger = logging.getLogger(__name__)
class DBModel():
def __init__(self, object_properties):
self.conn = SnowflakeConnection(**self.get_db_credentials()).get_conn()
self.object_properties = object_properties
logger.info("DBModel class initialised, Snowflake connection set")
def get_db_credentials(self):
file_loc = 'config/snowflake_credentials.yml'
try:
with open(file_loc) as db_credentials:
credentials = yaml.load(db_credentials, Loader=yaml.FullLoader)
logger.info("snowflake credentials obtained")
return credentials
except FileNotFoundError:
logger.error("snowflake_credentials.yml file does not exist")
def create_integration(self):
try:
integration_properties = (self.object_properties.get('integration'),
self.object_properties.get('stage').get('bucket'))
Integration(self.conn).create_object(integration_properties)
logger.info(f'Storage Integration created')
return Integration(self.conn).get_integration_props(self.object_properties.get('integration'))
except ValueError:
logger.error(f'Storage Integration not created successfully: check properties in object_details')
def execute_sql(self):
self.create_stage()
self.create_file_format()
self.create_tables_and_pipes()
self.create_views()
def create_stage(self):
try:
stage_properties = self.object_properties.get('stage')
Stage(self.conn).create_object(stage_properties)
logger.info(f'Stage created within Snowflake')
except ValueError:
logger.error(f'Stage not created successfully: check properties in object_details')
def create_file_format(self):
for file_format, file_format_properties in self.object_properties.get('file_format').items():
try:
File_Format(self.conn).create_object(file_format_properties)
logger.info(f'{file_format} File Format created')
except ValueError:
logger.error(f'{file_format} File Format not created successfully: check properties in object_details')
def create_tables_and_pipes(self):
for table, table_properties in self.object_properties.get('table').items():
try:
Table(self.conn).create_object(table_properties)
logger.info(f'{table} created')
except ValueError:
logger.error(f'{table} table not created successfully: check properties in object_details')
table_file_format = table_properties.get('file_format')
pipe_properties = (table_properties,
self.object_properties.get('file_format').get(table_file_format).get('name'),
self.object_properties.get('stage').get('name'))
try:
Pipe(self.conn).create_object(pipe_properties)
logger.info(f'Pipe for table {table} created within Snowflake')
except ValueError:
logger.error(f'Pipe for table {table} not created successfully: check properties in object_details')
def create_views(self):
for view, view_properties in self.object_properties.get('view').items():
try:
View(self.conn).create_object(view_properties)
logger.info(f'{view} created within Snowflake')
except ValueError:
logger.error(f'View {view} not created successfully: check properties in object_details')
def get_pipe_arn(self):
try:
arn = Pipe(self.conn).get_arn(self.get_db_credentials())
logger.info(f'Pipe arn obtained from snowflake SHOW command')
return arn
except ValueError:
logger.error(f'Pipe arn not obtained; check all db_credentials are being passed through')
def drop_objects(self):
self.drop_views()
self.drop_table_pipes()
self.drop_file_format()
self.drop_stage()
self.drop_integration()
def drop_views(self):
for view, view_properties in self.object_properties.get('view').items():
view_name = view_properties.get('name')
View(self.conn).drop_object(name = view_name)
def drop_table_pipes(self):
for table, table_properties in self.object_properties.get('table').items():
Table(self.conn).drop_object(table_properties.get('name'))
Pipe(self.conn).drop_object(table_properties.get('name')+'_PIPE')
def drop_file_format(self):
for file_format, file_format_properties in self.object_properties.get('file_format').items():
File_Format(self.conn).drop_object(file_format_properties.get('name'))
def drop_stage(self):
stage_name = self.object_properties.get('stage').get('name')
Stage(self.conn).drop_object(stage_name)
def drop_integration(self):
integration_name = self.object_properties.get('integration').get('name')
Integration(self.conn).drop_object(integration_name)
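# Hedged usage sketch (not part of the original module): the deployment flow implied by
# the methods above is roughly the following; `object_properties` is assumed to be the
# parsed object_details configuration referenced in the error messages.
#     model = DBModel(object_properties)
#     integration_props = model.create_integration()   # storage integration first
#     model.execute_sql()                               # stage -> file formats -> tables + pipes -> views
#     pipe_arn = model.get_pipe_arn()                   # ARN reported by Snowflake's SHOW command
#     model.drop_objects()                              # tears everything down in reverse order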
|
from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from post.models import Follow
# Create your models here.
def user_directory_path(instance, filename):
# file will be uploaded to MEDIA_ROOT/user_<id>/<filename>
return 'user_{0}/{1}'.format(instance.user.id, filename)
class Story(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='story_user')
content = models.FileField(upload_to=user_directory_path)
caption = models.TextField(max_length=50)
expired = models.BooleanField(default=False)
posted = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.user.username
class StoryStream(models.Model):
following = models.ForeignKey(User, on_delete=models.CASCADE, related_name='story_following')
user = models.ForeignKey(User, on_delete=models.CASCADE)
story = models.ManyToManyField(Story, related_name='storiess')
date = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.following.username + ' - ' + str(self.date)
def add_post(sender, instance, *args, **kwargs):
new_story = instance
user = new_story.user
followers = Follow.objects.all().filter(following=user)
for follower in followers:
try:
s = StoryStream.objects.get(user=follower.follower, following=user)
except StoryStream.DoesNotExist:
s = StoryStream.objects.create(user=follower.follower, date=new_story.posted, following=user)
s.story.add(new_story)
s.save()
# Story Stream
post_save.connect(StoryStream.add_post, sender=Story) |
# Generated by Django 3.0.5 on 2020-11-28 15:06
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cole', '0008_alumne_to_many'),
]
operations = [
migrations.AlterModelOptions(
name='alumne',
options={'ordering': ['num_llista', 'cognom1', 'cognom2']},
),
migrations.RemoveIndex(
model_name='alumne',
name='cole_alumne_num_lli_ce2b14_idx',
),
migrations.AlterUniqueTogether(
name='alumne',
unique_together=set(),
),
migrations.RemoveField(
model_name='alumne',
name='classe',
),
]
|
from .wth_file import WTHFile
|
#PROBLEM NUMBER 03
import math
from sympy.ntheory import factorint
def isprime(n):
if n == 2:
return True
if n % 2 == 0 or n <= 1:
return False
sqr = int(math.sqrt(n)) + 1
for divisor in range(3, sqr, 2):
if n % divisor == 0:
return False
return True
def trial_division(n):
for i in range(3,n+1,2):
if n % i == 0:
print("{}".format(i))
def factorize(n): # trial division, pollard rho, p-1 algorithm
return factorint(n,verbose=True)
def prime_factors(n):
    # Trial division over odd candidates up to sqrt(n); prints the prime factors found
    for i in range(3, int(math.sqrt(n)) + 1, 2):
        if n % i == 0:
            if isprime(i):
                print(i)
# trial_division(int(input()))
print(factorize(int(input())))
#prime_factors(int(input())) |
"""fixtures for ecommerce tests"""
from collections import namedtuple
from decimal import Decimal
from types import SimpleNamespace
import pytest
# pylint:disable=redefined-outer-name
from ecommerce.api import ValidatedBasket
from ecommerce.factories import (
BasketItemFactory,
CouponEligibilityFactory,
CouponFactory,
CouponPaymentFactory,
CouponPaymentVersionFactory,
CouponVersionFactory,
CouponSelectionFactory,
CompanyFactory,
DataConsentUserFactory,
ProductVersionFactory,
DataConsentAgreementFactory,
)
from ecommerce.models import CourseRunSelection
CouponGroup = namedtuple(
"CouponGroup", ["coupon", "coupon_version", "payment", "payment_version"]
)
@pytest.fixture()
def basket_and_coupons():
"""
Sample basket and coupon
"""
basket_item = BasketItemFactory()
# Some prices for the basket item product
ProductVersionFactory(product=basket_item.product, price=Decimal("15.00"))
product_version = ProductVersionFactory(
product=basket_item.product, price=Decimal("25.00")
)
run = basket_item.product.content_object
CourseRunSelection.objects.create(run=run, basket=basket_item.basket)
payment_worst = CouponPaymentFactory()
payment_best = CouponPaymentFactory()
coupon_worst = CouponFactory(payment=payment_worst, coupon_code="WORST")
coupon_best = CouponFactory(payment=payment_best, coupon_code="BEST")
# Coupon payment for worst coupon, with lowest discount
civ_worst = CouponPaymentVersionFactory(
payment=payment_worst, amount=Decimal("0.10000"), automatic=True
)
# Coupon payment for best coupon, with highest discount
civ_best_old = CouponPaymentVersionFactory(
payment=payment_best, amount=Decimal("0.50000")
)
# Coupon payment for best coupon, more recent than previous so takes precedence
civ_best = CouponPaymentVersionFactory(
payment=payment_best, amount=Decimal("1.00000")
)
# Coupon version for worst coupon
cv_worst = CouponVersionFactory(payment_version=civ_worst, coupon=coupon_worst)
# Coupon version for best coupon
CouponVersionFactory(payment_version=civ_best_old, coupon=coupon_best)
# Most recent coupon version for best coupon
cv_best = CouponVersionFactory(payment_version=civ_best, coupon=coupon_best)
# Both best and worst coupons eligible for the product
CouponEligibilityFactory(coupon=coupon_best, product=basket_item.product)
CouponEligibilityFactory(coupon=coupon_worst, product=basket_item.product)
# Apply one of the coupons to the basket
CouponSelectionFactory.create(basket=basket_item.basket, coupon=coupon_best)
coupongroup_worst = CouponGroup(coupon_worst, cv_worst, payment_worst, civ_worst)
coupongroup_best = CouponGroup(coupon_best, cv_best, payment_best, civ_best)
return SimpleNamespace(
basket=basket_item.basket,
basket_item=basket_item,
product_version=product_version,
coupongroup_best=coupongroup_best,
coupongroup_worst=coupongroup_worst,
run=run,
)
@pytest.fixture
def validated_basket(basket_and_coupons):
"""Fixture for a ValidatedBasket object"""
return ValidatedBasket(
basket=basket_and_coupons.basket,
basket_item=basket_and_coupons.basket_item,
product_version=basket_and_coupons.product_version,
coupon_version=basket_and_coupons.coupongroup_best.coupon_version,
run_selection_ids={basket_and_coupons.run.id},
data_consent_users=[],
)
@pytest.fixture
def basket_and_agreement(basket_and_coupons):
"""
Sample basket and data consent agreement
"""
basket_item = basket_and_coupons.basket_item
product = basket_and_coupons.product_version.product
coupon = basket_and_coupons.coupongroup_best.coupon
company = coupon.payment.latest_version.company
agreement = DataConsentAgreementFactory(
courses=[basket_and_coupons.run.course], company=company
)
DataConsentUserFactory.create(
user=basket_item.basket.user, agreement=agreement, coupon=coupon
)
return SimpleNamespace(
agreement=agreement, basket=basket_item.basket, product=product, coupon=coupon
)
@pytest.fixture
def coupon_product_ids():
""" Product ids for creating coupons """
product_versions = ProductVersionFactory.create_batch(3)
return [product_version.product.id for product_version in product_versions]
@pytest.fixture
def promo_coupon_json(coupon_product_ids):
""" JSON for creating a promo coupon """
return {
"tag": None,
"name": "TEST NAME 2",
"automatic": True,
"activation_date": "2018-01-01T00:00:00Z",
"expiration_date": "2019-12-31T00:00:00Z",
"amount": 0.75,
"coupon_code": "TESTPROMOCODE",
"coupon_type": "promo",
"company": CompanyFactory.create().id,
"payment_type": "purchase_order",
"payment_transaction": "fake_transaction_num",
"product_ids": coupon_product_ids,
"include_future_runs": False,
}
@pytest.fixture
def single_use_coupon_json(coupon_product_ids):
"""JSON for creating a batch of single-use coupons"""
return {
"tag": "TEST TAG 1",
"name": "TEST NAME 1",
"automatic": True,
"activation_date": "2018-01-01T00:00:00Z",
"expiration_date": "2019-12-31T00:00:00Z",
"amount": 0.75,
"num_coupon_codes": 5,
"coupon_type": "single-use",
"company": CompanyFactory.create().id,
"payment_type": "credit_card",
"payment_transaction": "fake_transaction_num",
"product_ids": coupon_product_ids,
"include_future_runs": False,
}
@pytest.fixture
def mock_hubspot_syncs(mocker):
"""Mock the sync_deal_with_hubspot task"""
return SimpleNamespace(
order=mocker.patch("hubspot.tasks.sync_deal_with_hubspot.delay"),
line=mocker.patch("hubspot.tasks.sync_line_item_with_hubspot.delay"),
product=mocker.patch("hubspot.tasks.sync_product_with_hubspot.delay"),
)
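# Hedged illustration (not part of the original fixtures): a test consuming these
# fixtures could look like the sketch below; the assertions simply restate the "best
# coupon" values defined above and are included for illustration only.
#     def test_best_coupon_selected(basket_and_coupons):
#         best = basket_and_coupons.coupongroup_best
#         assert best.payment_version.amount == Decimal("1.00000")
#     def test_validated_basket_uses_best_coupon(validated_basket, basket_and_coupons):
#         assert validated_basket.coupon_version == basket_and_coupons.coupongroup_best.coupon_version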
|
"""
iaBot
This is the setup.py file that can be used
to build or install the project.
"""
import setuptools
with open("requirements.txt", "r") as fh:
requirements = fh.read().split("\n")
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="iaBot",
version="0.0.7",
author="Satheesh Kumar",
author_email="[email protected]",
description="A mini framework for building bots using python and can be served using different ways.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/satheesh1997/iaBot",
packages=["iabot"],
install_requires=requirements,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.7.0",
)
|
from itertools import product
from base64 import b32encode
from io import BytesIO
from typing import List
from lor_deckcodes.utils import write_varint
from lor_deckcodes.constants import CURRENT_FORMAT_VERSION, faction_mapping
def _encode_card_block(data_stream: BytesIO, cards: List[object]) -> None:
set_faction_combinations = list(product(
set([card.set for card in cards]),
set([card.faction for card in cards])))
write_varint(data_stream, len(set_faction_combinations))
set_faction_combinations = sorted(set_faction_combinations,
key=lambda l: len([card for card in cards if card.faction == l[1]]))
for card_set, faction in set_faction_combinations:
faction_cards = [card for card in cards if card.faction == faction]
write_varint(data_stream, len(faction_cards))
write_varint(data_stream, card_set)
write_varint(data_stream, faction_mapping.get(faction))
for faction_card in faction_cards:
write_varint(data_stream, faction_card.card_id)
def encode_deck(cards: List[object]) -> str:
data = BytesIO()
write_varint(data, CURRENT_FORMAT_VERSION)
# 3 card copies
three_copies = list(filter(lambda x: x.count == 3, cards))
_encode_card_block(data, three_copies)
# 2 card copies
two_copies = list(filter(lambda x: x.count == 2, cards))
_encode_card_block(data, two_copies)
# 1 card copies
one_copies = list(filter(lambda x: x.count == 1, cards))
_encode_card_block(data, one_copies)
data.seek(0)
return b32encode(data.read()).decode().replace('=', '')
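# Hedged usage sketch (not part of the original module): any object exposing the
# attributes read above (count, set, faction, card_id) can be encoded, e.g. with a
# SimpleNamespace stand-in; the card values and the "DE" faction key are illustrative
# assumptions only.
#     from types import SimpleNamespace
#     cards = [SimpleNamespace(count=3, set=1, faction="DE", card_id=19)]
#     deck_code = encode_deck(cards)  # base32 string with '=' padding stripped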
|
from base.service import Service
from abc import abstractmethod
from base.option_descriptor import OptionDescriptor
from base.input_program import InputProgram
from base.output import Output
import subprocess
import time
from threading import Thread
class DesktopService(Service):
"""Is a specialization for a Desktop platform"""
def __init__(self, exe_path):
self._exe_path = exe_path # Stores solver's executable path
        self._load_from_stdin_option = ""  # Stores the option string that enables the solver to read from standard input
def getExePath(self):
"""Return a execution path of DesktopService"""
return self._exe_path
@abstractmethod
def _get_output(self, output, error):
pass
def setExePath(self, exe_path):
"""Set _exe_path to a new path
The parameter exe_path is a string representing the path for the new solver
"""
self._exe_path = exe_path
def start_async(self, callback, programs, options):
"""Start a new process for the _exe_path and starts solving in Asyncronously way"""
class myThread(Thread):
def __init__(self, start_sync):
Thread.__init__(self)
self.start_sync = start_sync
def run(self):
callback.callback(self.start_sync(programs, options))
th = myThread(self.start_sync)
th.start()
def start_sync(self, programs, options):
"""Start a new process for the _exe_path and starts solving in Syncronously way"""
option = ""
for o in options:
if(o != None):
option += o.get_options()
option += o.get_separator()
else:
print("Warning : wrong " + str(OptionDescriptor().__class__.__name__))
files_paths = ""
final_program = ""
for p in programs:
if (p != None):
final_program += p.get_programs()
program_file = p.get_string_of_files_paths()
if (program_file != None):
files_paths += program_file
else:
print("Warning : wrong " + str(InputProgram().__class__.__name__))
        if self._exe_path is None:
            return Output("", "Error: executable not found")
exep = str(self._exe_path)
opt = str(option)
lis = list()
lis.append(exep)
if opt != "":
lis.append(opt)
lis.append(files_paths[:-1])
if self._load_from_stdin_option != "":
lis.append(self._load_from_stdin_option)
print(exep + " " + opt + " " + files_paths + self._load_from_stdin_option)
start = int(time.time()*1e+9)
proc = subprocess.Popen(lis, universal_newlines=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, )
output, error = proc.communicate(final_program)
end = int(time.time()*1e+9)
print("Total time : " + str(end - start))
print("")
return self._get_output(output, error)
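# Hedged illustration (not part of the original module): a concrete service only has to
# provide the executable path and implement _get_output(); the "--stdin" flag and the
# class name below are assumptions made for illustration.
#     class MySolverService(DesktopService):
#         def __init__(self, exe_path):
#             super().__init__(exe_path)
#             self._load_from_stdin_option = "--stdin"
#         def _get_output(self, output, error):
#             return Output(output, error)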
|
import numpy as np
from copy import deepcopy
from astropy.convolution import Kernel2D, convolve_fft
from astropy.convolution.kernels import _round_up_to_odd_integer
__all__ = ["Pk", "gen_pkfield"]
def Pk(k, alpha=-11.0 / 3, fknee=1):
"""Simple power law formula"""
return (k / fknee) ** alpha
def gen_pkfield(npix=32, alpha=-11.0 / 3, fknee=1, res=1):
"""Generate a gaussian field with (k/k_0)^alpha law.
Parameters
----------
npix : int, optional
number of pixels for the map, by default 32
alpha : [float], optional
the power law index, by default -11.0/3
fknee : float or ~astropy.units.Quantity, optional
the knee frequency in 1/res unit, where P(k) = 1, by default 1
res : float or ~astropy.units.Quantity, optional
size of a pixel, by default 1
Returns
-------
data : ndarray, shape(n_pix, n_pix)
the requested gaussian field
"""
ufreq = np.fft.fftfreq(npix, d=res)
kfreq = np.sqrt(ufreq[:, np.newaxis] ** 2 + ufreq ** 2)
with np.errstate(divide="ignore"):
psd = 2 * Pk(kfreq, alpha=alpha, fknee=fknee)
psd[0, 0] = 0
pha = np.random.uniform(low=-np.pi, high=np.pi, size=(npix, npix))
fft_img = np.sqrt(psd) * (np.cos(pha) + 1j * np.sin(pha))
return np.real(np.fft.ifft2(fft_img)) * npix / res ** 2
def gen_psffield(positions, fluxes=None, shape=(32, 32), kernel=None, factor=None):
"""Generate a point spread function field given a catalog of point source.
Fourier method
Parameters
----------
positions : array_like, shape (2, M)
x, y positions in pixel coordinates
fluxes : array_like, shape (M,)
corresponding peak fluxes
shape : int or [int, int], optional
the output image shape
kernel : ~astropy.convolution.Kernel2D, optional
the 2D kernel to be used for the PSF
factor : [int], optional
a overpixelization factor used for the projection before smoothing, by default None
Returns
-------
array : ndarray, shape(nx, ny)
The corresponding map
"""
if factor is None:
factor = 1
if fluxes is None:
fluxes = np.ones(positions.shape[1])
    if isinstance(shape, (int, np.integer)):  # np.int is no longer available in recent NumPy releases
shape = [shape, shape]
_shape = np.array(shape) * factor
_positions = (np.asarray(positions) + 0.5) * factor - 0.5
if kernel is not None:
# Upscale the kernel with factor
kernel = deepcopy(kernel)
for param in ["x_stddev", "y_stddev", "width", "radius", "radius_in"]:
if param in kernel._model.param_names:
getattr(kernel._model, param).value *= factor
Kernel2D.__init__(
kernel,
x_size=_round_up_to_odd_integer(kernel.shape[1] * factor),
y_size=_round_up_to_odd_integer(kernel.shape[0] * factor),
        )
# Range are maximum bins edges
hist2d_kwd = {"bins": _shape, "range": ((-0.5, _shape[0] - 0.5), (-0.5, _shape[1] - 0.5))}
# reverse the _positions because it needs to be y x
array = np.histogram2d(*_positions[::-1], weights=fluxes, **hist2d_kwd)[0]
# Remove nan if present
array[np.isnan(array)] = 0
if kernel is not None:
kernel.normalize("peak")
array = convolve_fft(array, kernel, normalize_kernel=False, boundary="wrap") / factor ** 2
# Average rebinning onto the input shape
array = array.reshape((shape[0], factor, shape[1], factor)).sum(-1).sum(1)
return array
def gen_psffield_direct(positions, fluxes=None, shape=(32, 32), kernel=None, factor=None):
"""Generate a point spread function field given a catalog of point source.
Direct method
Parameters
----------
positions : array_like, shape (2, M)
x, y positions in pixel coordinates
fluxes : array_like, shape (M,)
corresponding peak fluxes
shape : int or [int, int], optional
the output image shape
kernel : ~astropy.convolution.Kernel2D, optional
the 2D kernel to be used for the PSF
factor : [int], optional
a overpixelization factor used for the projection before smoothing, by default None
Returns
-------
array : ndarray, shape(nx, ny)
The corresponding map
"""
if factor is None:
factor = 1
if fluxes is None:
fluxes = np.ones(positions.shape[1])
    if isinstance(shape, (int, np.integer)):  # np.int is no longer available in recent NumPy releases
shape = [shape, shape]
_shape = np.array(shape) * factor
# _positions = (np.asarray(positions) + 0.5) * factor - 0.5
if kernel is not None:
# Upscale the kernel with factor
kernel = deepcopy(kernel)
for param in ["x_stddev", "y_stddev", "width", "radius", "radius_in"]:
if param in kernel._model.param_names:
getattr(kernel._model, param).value *= factor
Kernel2D.__init__(
kernel,
x_size=_round_up_to_odd_integer(kernel.shape[1] * factor),
y_size=_round_up_to_odd_integer(kernel.shape[0] * factor),
        )
xx, yy = np.meshgrid(np.arange(_shape[1]), np.arange(_shape[0]))
array = np.zeros(_shape)
for position, flux in zip(positions.T, fluxes):
kernel._model.x_mean = position[0]
kernel._model.y_mean = position[1]
kernel._model.amplitude = flux
array += kernel._model(xx, yy)
array = array.reshape((shape[0], factor, shape[1], factor)).sum(-1).sum(1)
return array
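# Hedged usage sketch (not part of the original module): generates a power-law noise
# field and a small point-source map with the helpers above. The pixel counts, source
# positions and Gaussian kernel width are illustrative assumptions only.
if __name__ == "__main__":
    from astropy.convolution import Gaussian2DKernel
    noise = gen_pkfield(npix=64, alpha=-11.0 / 3, fknee=1, res=1)
    positions = np.array([[10.0, 32.0, 50.0], [12.0, 32.0, 40.0]])  # (2, M): x row, then y row
    sources = gen_psffield(positions, fluxes=np.array([1.0, 2.0, 0.5]),
                           shape=(64, 64), kernel=Gaussian2DKernel(2), factor=2)
    print(noise.shape, sources.shape)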
|
# Generated by Django 3.0.11 on 2021-03-24 20:04
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('common', '__first__'),
('home', '0005_homepage_quotations'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='body',
field=wagtail.core.fields.StreamField([('heading_block', wagtail.core.blocks.StructBlock([('heading_text', wagtail.core.blocks.CharBlock(form_classname='title', required=True)), ('size', wagtail.core.blocks.ChoiceBlock(blank=True, choices=[('', 'Select a header size'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4')], required=False))])), ('paragraph_block', wagtail.core.blocks.RichTextBlock(icon='fa-paragraph', template='common/blocks/paragraph_block.html')), ('image_block', wagtail.core.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock(required=True)), ('caption', wagtail.core.blocks.CharBlock(required=False)), ('attribution', wagtail.core.blocks.CharBlock(required=False))])), ('block_quote', wagtail.core.blocks.StructBlock([('text', wagtail.core.blocks.TextBlock()), ('attribute_name', wagtail.core.blocks.CharBlock(blank=True, label='e.g. Mary Berry', required=False))])), ('embed_block', wagtail.embeds.blocks.EmbedBlock(help_text='Insert an embed URL e.g https://www.youtube.com/embed/SGJFWirQ3ks', icon='fa-s15', template='blocks/embed_block.html'))], blank=True, verbose_name='Page body'),
),
migrations.CreateModel(
name='CounterPageAdvertPlacement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('counter', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='common.Counter')),
('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='counter_placements', to='home.HomePage')),
],
options={
'verbose_name': 'counter placement',
'verbose_name_plural': 'counter placements',
'ordering': ['sort_order'],
'abstract': False,
},
),
]
|
# Always prefer setuptools over distutils
from os import path
from setuptools import setup, find_packages
# io.open is needed for projects that support Python 2.7
# It ensures open() defaults to text mode with universal newlines,
# and accepts an argument to specify the text encoding
# Python 3 only projects can skip this import
from io import open
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='crypto-dom', # Required
version='0.0.7', # Required
description='', # Optional
long_description=long_description, # Optional
long_description_content_type='text/markdown', # Optional
url='https://github.com/maxima-us/crypto-dom', # Optional
author='maximaus', # Optional
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[ # Optional
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'
],
package_dir={'': 'src'}, # Optional
packages=find_packages(where='src'), # Required
python_requires='>=3.8, <4',
install_requires=[
"aiohttp",
"click",
"httpx",
"mypy",
"nox",
"pydantic",
"python-dotenv",
"returns",
"stackprinter",
"typing-extensions"
],
entry_points={ # Optional
'console_scripts': [
'crypto-dom-tests=tests.run:run_tests',
'crypto-dom-update=crypto_dom.update_symbols:run'
],
},
) |
print("HIde module 2") |
# Generated by Django 3.2.3 on 2021-06-22 09:29
from django.db import migrations, models
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('wanted', '0005_auto_20210622_1821'),
]
operations = [
migrations.AddField(
model_name='quest',
name='code',
field=models.TextField(default=''),
),
migrations.AlterField(
model_name='quest',
name='tags',
field=multiselectfield.db.fields.MultiSelectField(choices=[('PYTHON', 'PYTHON'), ('HTML', 'HTML'), ('CSS', 'CSS'), ('JS', 'JS')], default=[], max_length=18),
),
]
|
#!/usr/bin/env python
import argparse
import multiprocessing
import os
import tempfile
from ferrit import ssh
from ferrit import rest
def main():
key = ssh.KEY
logpath = ssh.LOG_PATH
parser = argparse.ArgumentParser()
parser.add_argument('-k', '--key', default=key,
help='Path to the keypair for the SSH server. Path '
'should contain private key. Default is %s' % key)
parser.add_argument('-l', '--log-path', default=logpath,
help='Path to the directory, where log for ssh and '
'http will be stored. Directory must exists. Logs '
'will be named ferrit-ssh.log and ferrit-http.log '
'restively. Logs will be appended to the files if '
'exists. Default location is current directory.')
args = parser.parse_args()
ssh.KEY = args.key
rest.LOG_PATH = ssh.LOG_PATH = args.log_path
fd, fifo = tempfile.mkstemp(suffix='.fifo', prefix='ferrit.')
os.close(fd)
os.unlink(fifo)
os.mkfifo(fifo)
ssh.FIFO = rest.FIFO = fifo
try:
p1 = multiprocessing.Process(target=ssh.main)
p1.daemon = True
p1.start()
p2 = multiprocessing.Process(target=rest.main)
p2.daemon = False
p2.start()
p1.join()
p2.join()
finally:
os.unlink(fifo)
if __name__ == "__main__":
main()
|
## @package net_builder
# Module caffe2.python.net_builder
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, context
from caffe2.python.task import Task, TaskGroup
from caffe2.python.control_ops_util import add_if_op, add_while_op
@context.define_context()
class NetBuilder(object):
"""
Scope-driven mechanism for building nets, loops and conditional blocks.
Arguments:
name: NetBuilder's name
initial_scope: list of blobs that are available for reading/writing
Example:
from caffe2.python.net_builder import NetBuilder, ops
with NetBuilder() as nb:
c = ops.Const(5)
d = ops.Const(0)
with ops.loop():
ops.stop_if(ops.LE([c, ops.Const(0)]))
ops.Add([c, ops.Const(-1)], [c])
with ops.If(ops.GE([c, ops.Const(3)])):
ops.Add([d, ops.Const(10)])
ops.Print(c, [])
ops.Print(d, [])
step = core.to_execution_step(nb)
"""
def __init__(self, name=None, initial_scope=None, _stop_blob_required=False,
_stop_blob=None, _fullname=None, _use_control_ops=False):
parent = NetBuilder.current(required=False)
assert not _fullname or not name, 'Cannot set both _fullname and name'
assert not _use_control_ops or \
(not _stop_blob_required and not _stop_blob), \
'Stop blobs are not used with control operators'
self.name = _fullname or '/'.join(
n for n in (parent.name if parent else None, name) if n
)
self._frozen = False
self._current_net = None
self._children = []
if parent:
# make sure parent has an up to date lexical scope computed
parent._update_lexical_scope()
self._init_lexical_scope = set(parent._lexical_scope) if parent else set()
if initial_scope:
self._init_lexical_scope |= set([str(b) for b in initial_scope])
self._lexical_scope = set(self._init_lexical_scope)
self._stop_blob = _stop_blob
self._stop_blob_required = _stop_blob_required
self._use_control_ops = _use_control_ops
def stop_blob(self):
"""
Returns the BlobReference to the stop_blob of this NetBuilder.
If one is not yet available, creates one.
        This function assumes that the stop_blob() will be used immediately
in the current net, so it doesn't initialize it if the current net is
the first of the builder.
"""
assert not self._use_control_ops, \
'Stop blobs are not used with control operators'
if self._stop_blob is None:
net = self.current_net()
self._stop_blob = core.BlobReference(
net.NextName('stop_blob'), net=net)
if self._current_net != self._children[0]:
self._children.insert(0, core.Net('stop_blob_init'))
self._children[0].Const(False, blob_out=self._stop_blob)
return self._stop_blob
def stop_if(self, blob):
assert not self._use_control_ops, \
'Stop blobs are not used with control operators'
ops.Copy(blob, self.stop_blob())
self._current_net = None
def _assert_mutable(self):
assert not self._frozen, (
'This NetBuilder (%s) has been built already.' % self.name)
def _update_lexical_scope(self):
"""
Updates lexical scope based on the current list of children.
Lexical scope contains names of blobs that are currently available
and were introduced in the net builder
"""
self._lexical_scope = set(self._init_lexical_scope)
for child in self._children:
if isinstance(child, core.Net):
self._lexical_scope |= child.UsedBlobNames()
elif isinstance(child, NetBuilder) and child._use_control_ops:
self._lexical_scope |= child._lexical_scope
def _reset_children(self):
self._current_net = None
self._children = []
self._lexical_scope = set(self._init_lexical_scope)
def add(self, child):
self._assert_mutable()
if self._use_control_ops:
assert isinstance(child, core.Net) or (
isinstance(child, NetBuilder) and child._use_control_ops), \
"Expected Net or NetBuilder with control ops"
self._current_net = None
self._children.append(child)
        # TODO: check it's not a DAG net
if isinstance(child, core.Net):
self._current_net = child
self._update_lexical_scope()
return child
def current_net(self, name=None):
self._assert_mutable()
if self._current_net is None or name is not None:
self.add(core.Net(name))
return self._current_net
def freeze(self):
for child in self._children:
if hasattr(child, 'freeze'):
child.freeze()
self._current_net = None
self._frozen = True
def get(self):
self.freeze()
return self._children
def __exit__(self, etype, *args):
if self._use_control_ops and len(self._children) > 0:
_children = self._children
self._reset_children()
merged_net = NetBuilder.merge_nets(
_children, self._lexical_scope)
assert merged_net, "Expected a non-empty merge of children"
self._children = [merged_net]
self.freeze()
if etype is not None:
return
assert (not self._stop_blob_required) or self._stop_blob is not None, (
'This NetBuilder (%s) requires a stop condition ' % self.name +
'to be set with `stop` or `stop_if`')
@staticmethod
def merge_nets(nets_or_builders, outer_blob_names):
# Only nets or builders with control ops are allowed.
# Need to pay attention to external outputs, e.g.
# ...
# IfNet1 (cond_blob):
# (Net1)
# X = 1
# IfNet2 (...):
# X = X + 1
# ...
        # In this example there are two children in the then-branch of IfNet1:
# a subnet Net1 that creates blob X and sets its value to one, and
# a net builder IfNet2 that (conditionally) increments X.
# From IfNet2's point of view X is an external input
# and output blob, it will be put into IfNet2 net's external_output.
# At the same time, from the point of view of IfNet1 X is purely local.
# Net.AppendNet just merges external outputs of the networks, so
# without checking this the result of Net1.AppendNet(IfNet2's net)
# would have blob X in external_output
net = None
for n in nets_or_builders:
cur = None
if isinstance(n, NetBuilder):
assert n._use_control_ops, \
"Merging of NetBuilder supported only for control ops"
nets = n.get()
assert len(nets) == 1 and isinstance(nets[0], core.Net), \
"Invalid control op net builder"
cur = nets[0]
else:
assert isinstance(n, core.Net)
cur = n
if net:
net.AppendNet(cur)
else:
net = cur
if net:
# correct external output
external_outputs = [o for o in net.Proto().external_output
if o in outer_blob_names]
net.Proto().external_output[:] = external_outputs
return net
def __str__(self):
return self.name or 'Un-named NetBuilder'
class Operations(object):
"""
Operations to be used in the context of a NetBuilder.
"""
def net(self, net=None, name=None):
"""
        Retrieves the current net, or adds a new net to the builder.
Args:
net: If provided, add the given net to the active builder.
Else, returns the current Net or creates a new one as needed.
name: if provided, creates a new Net with given name and makes
it the new current net of the active builder. Cannot
be provided if net is provided.
"""
assert name is None or net is None, (
'Cannot provide both `net` and `name`.')
if net is not None:
NetBuilder.current().add(net)
return net
return NetBuilder.current().current_net(name=name)
def __getattr__(self, op_type):
"""
Adds an operator call to the currently active Net.
"""
if op_type.startswith('__'):
raise AttributeError()
# We want hasattr to work properly even if no context is active.
if NetBuilder.current(required=False) is None:
raise AttributeError('No active NetBuilder.')
return getattr(self.net(), op_type)
def task_group(self):
"""
Creates a local task group which will execute as the next step of
the current NetBuilder.
"""
from caffe2.python import task
group = NetBuilder.current()
with task.Cluster():
with task.Node('local'):
tg = task.TaskGroup()
group.add(tg)
return tg
def stop(self):
"""
Stop execution of the current execution step.
Example:
ops.Print(a, 0)
ops.stop()
ops.Print(b, 0)
In the example, 'b' will never be printed.
"""
return self.stop_if(ops.Const(True))
def stop_if(self, blob):
"""
Stop execution of the current execution step if the
condition `blob` is met.
Example:
ops.Print(a, 0)
ops.stop_if(ops.LE([x, ops.Const(0)]))
ops.Print(b, 0)
In the example, 'b' will only be printed if the value of scalar
        tensor 'x' is less than or equal to 0.
"""
return NetBuilder.current().stop_if(blob)
def loop(self, iters=None, name=None):
"""
Creates a NetBuilder that will execute in a loop as the next step of
the current NetBuilder. If `iters` is provided, the loop will execute
for `iters` iterations and then stop. `iters` can be a constant or a
BlobReference. If `iters` is not provided, the loop will execute
until `ops.stop` or `ops.stop_if` is called.
Examples:
a = ops.Const(5)
with ops.loop():
ops.stop_if(ops.LE([a, ops.Const(0)]))
ops.Print(a, 0)
ops.Add([a, ops.Const(-1)], [a])
Above, 'a' will be printed 5 times, with values 5 to 1.
with ops.loop(10) as loop:
ops.LogInfo(loop.iter())
This will print the numbers from 0 to 9.
x = ops.Add([ops.Const(10), ops.Const(10)])
with ops.loop(x) as loop:
ops.LogInfo(loop.iter())
This will print the numbers from 0 to 19.
"""
return NetBuilder.current().add(_Loop(iters, name=name))
def stop_guard(self, has_stopped_blob=None, name=None):
"""
Creates a NetBuilder that will execute once as the next step of the
current NetBuilder. After execution, a bool tensor will indicate
whether the inner execution was halted with `stop` or `stop_if`.
Example:
a = ops.Const(True)
with ops.stop_guard() as sg1:
ops.stop_if(a)
ops.Print(ops.Const('did not stop'))
b = ops.Const(False)
with ops.stop_guard() as sg2:
ops.stop_if(b)
ops.Print(ops.Const('did not stop'))
ops.Print(sg1.has_stopped(), [])
ops.Print(sg2.has_stopped(), [])
In the example, 'did not stop' will be printed once,
followed by True and False.
"""
return NetBuilder.current().add(
_StopGuard(has_stopped_blob=has_stopped_blob, name=name))
def If(self, cond, name=None):
"""
Creates a NetBuilder that will execute once as the next step of the
current NetBuilder if the blob `cond` is True.
Example:
with ops.If(ops.Const(True)):
ops.Print(ops.Const('Will print'))
with ops.If(ops.Const(False)):
ops.Print(ops.Const('Wont print'))
The example will print 'Will print' once.
"""
return NetBuilder.current().add(_RunIf(cond, name=name))
def IfNet(self, cond, name=None):
"""
Same as If, but uses 'If' operator instead of execution step logic
"""
return NetBuilder.current().add(_RunIfNet(cond, name=name))
def Else(self, name=None):
"""
Else branch of IfNet, has to be specified immediately after IfNet.
Example:
with ops.IfNet(ops.LT([x, y])):
...
with ops.Else():
...
"""
return _RunElseNet(name=name)
def WhileNet(self, name=None):
"""
NetBuilder for 'While' control operator
"""
return NetBuilder.current().add(_RunWhileNet(name=name))
def Condition(self, name=None):
"""
Loop's condition, executed within WhileNet context
"""
assert isinstance(NetBuilder.current(), _RunWhileNet), \
"Use of Condition outside of WhileNet"
return _RunWhileCondition(name=name)
def task_init(self):
"""
Defines operations that will be executed once at task startup.
        Useful when implementing processors that don't have access to the Task
top-level structure.
This setup will be run only once, even if multiple instances of the task
will run in parallel. For instance-local initialization, use
`task_instance_init` instead.
Example:
def my_processor(rec):
with ops.task_init():
one = ops.Const(1)
                    two = ops.Const(2)
                return Tuple(
                    ops.Add(rec[0](), one), ops.Add(rec[1](), two))
"""
setup = _SetupBuilder(_SetupBuilder.INIT)
self.net().add_attribute(Task.TASK_SETUP, setup)
return setup
def task_exit(self):
"""
Define operations to be executed once at task shutdown.
        Useful when implementing processors that don't have access to the Task
top-level structure.
This shutdown will be run only once, after all concurrent instances of
the task have already finished. For instance-local shutdown,
use `task_instance_exit` instead.
Example:
def read_queue(queue):
with ops.task_exit():
queue.close(ops.net())
return queue.read(ops.net())
"""
setup = _SetupBuilder(_SetupBuilder.EXIT)
self.net().add_attribute(Task.TASK_SETUP, setup)
return setup
def task_instance_init(self):
"""
Defines operations that will be executed once at startup of each
instance of a task. This can be seen as "thread_local" initialization.
It is guaranteed to run only after all `task_init` logic finishes.
This setup will be run concurrently for each instance of a task.
For global task initialization, use `task_init` instead.
"""
setup = _SetupBuilder(_SetupBuilder.INIT)
self.net().add_attribute(Task.TASK_INSTANCE_SETUP, setup)
return setup
def task_instance_exit(self):
"""
Defines operations that will be executed once at shutdown of each
instance of a task. This can be seen as "thread_local" finalization.
This shutdown will be run concurrently for each instance of a task.
For global task shutdown, use `task_exit` instead.
"""
setup = _SetupBuilder(_SetupBuilder.EXIT)
self.net().add_attribute(Task.TASK_INSTANCE_SETUP, setup)
return setup
def local_init(self):
"""
Similar to `task_init`, but executes at TaskGroup's startup instead,
before any task of the group starts executing. This will run only
once on each node, before initialization of any task, so it can be
used e.g. to initialize blobs shared across tasks.
"""
setup = _SetupBuilder(_SetupBuilder.INIT)
self.net().add_attribute(TaskGroup.LOCAL_SETUP, setup)
return setup
def local_exit(self, name=None):
"""
Similar to `task_exit`, but executes at TaskGroup's exit instead,
after all tasks of the group finished execution.
This will run only once on each node.
"""
setup = _SetupBuilder(_SetupBuilder.EXIT, name)
self.net().add_attribute(TaskGroup.LOCAL_SETUP, setup)
return setup
def task_reporter(self, interval_ms=1000, name=None):
"""
Define operations to be executed at every time interval from
task start-up to finish. These operations are guaranteed to
execute at least once after all other operations of the task are
finished.
Example:
with ops.task_reporter(interval_ms=10000):
ops.LogInfo('10s elapsed')
"""
return _ReporterBuilder(interval_ms, net=self.net(), name=name)
def local_reporter(self, interval_ms=1000, name=None):
"""
        Similar to task_reporter, but operations defined within this block
will run repeatedly for as long as any of the tasks in the current
TaskGroup have not finished.
"""
return _ReporterBuilder(interval_ms, name=name)
ops = Operations()
class _ReporterBuilder(NetBuilder):
def __init__(self, interval_ms, net=None, name=None):
NetBuilder.__init__(self, name)
self._net = net
self.interval_ms = interval_ms
def __exit__(self, etype, *args):
if etype is None:
step = core.to_execution_step(self)
step.RunEveryMillis(self.interval_ms)
if self._net:
self._net.add_attribute(Task.REPORT_STEP, step)
else:
TaskGroup.current().report_step(
step, interval_ms=self.interval_ms)
NetBuilder.__exit__(self, etype, *args)
class _SetupBuilder(NetBuilder):
INIT = 'init'
EXIT = 'exit'
def __init__(self, type, name=None):
NetBuilder.__init__(self, name)
self.type = type
def setup(self, net):
if self.type == _SetupBuilder.INIT:
return core.to_execution_step(self)
def exit(self, net):
if self.type == _SetupBuilder.EXIT:
return core.to_execution_step(self)
class _RunOnce(NetBuilder):
def __init__(self, name=None):
NetBuilder.__init__(self, name)
def __exit__(self, etype, *args):
if etype is None and self._stop_blob is not None:
ops.stop()
NetBuilder.__exit__(self, etype, *args)
class _StopGuard(_RunOnce):
def __init__(self, has_stopped_blob=None, name=None):
_RunOnce.__init__(self, name)
self._stopped = has_stopped_blob
self._ran = False
def __enter__(self):
r = _RunOnce.__enter__(self)
self._stopped = ops.Const(True, blob_out=self._stopped)
return r
def __exit__(self, etype, *args):
if etype is None:
self._ran = True
ops.Const(False, blob_out=self._stopped)
_RunOnce.__exit__(self, etype, *args)
def has_stopped(self):
"""
Return a blob that will be set to scalar bool `True` after
this net builder ran, iff it was halted early.
"""
assert self._ran, 'Context not used yet.'
return self._stopped
class _Loop(NetBuilder):
def __init__(self, iters=None, name=None):
NetBuilder.__init__(self, name, _stop_blob_required=True)
if iters is not None:
self._inc = ops.Const(1)
self._iter = ops.Const(0)
self._num_iters = (
iters if isinstance(iters, core.BlobReference)
else ops.Const(iters))
else:
self._num_iters = None
def iter(self):
assert self._num_iters is not None, (
'This loop does not have a number of iterations.')
assert self._iter is not None, (
'iter() must be called from inside the loop context')
return self._iter
def __enter__(self):
builder = NetBuilder.__enter__(self)
if self._num_iters is not None:
ops.stop_if(ops.GE([self._iter, self._num_iters]))
return builder
def __exit__(self, type, *args):
if type is None and self._num_iters is not None:
self.current_net().Add([self._iter, self._inc], [self._iter])
NetBuilder.__exit__(self, type, *args)
class _RunIf(_RunOnce):
def __init__(self, cond_blob=None, name=None, _already_ran=None):
_RunOnce.__init__(self, name)
assert cond_blob or _already_ran
self._is_else = cond_blob is None
if _already_ran is None:
self._else_blob = ops.Not(cond_blob)
self._already_ran = ops.Const(False)
else:
self._already_ran = _already_ran
self._else_blob = _already_ran if cond_blob is None else (
ops.Or([_already_ran, ops.Not(cond_blob)]))
def __enter__(self):
r = _RunOnce.__enter__(self)
ops.stop_if(self._else_blob)
ops.Const(True, blob_out=self._already_ran)
return r
def Elif(self, cond, name=None):
        assert not self._is_else, 'Elif not allowed for an Else.'
return NetBuilder.current().add(_RunIf(
cond, name=name or self.name, _already_ran=self._already_ran))
def Else(self, name=None):
        assert not self._is_else, 'Else not allowed for an Else.'
return NetBuilder.current().add(
_RunIf(name=name or self.name, _already_ran=self._already_ran))
class _RunIfNet(NetBuilder):
"""
Generates a single net that uses If operator
"""
def __init__(self, cond_blob, name=None):
NetBuilder.__init__(self, name=name, _use_control_ops=True)
assert cond_blob, 'Conditional blob is not specified for an If net'
self._cond_blob = cond_blob
self._then_net = None
self._else_net = None
def add(self, child):
return NetBuilder.add(self, child)
def __exit__(self, type, *args):
if type is None:
_then_nets = self._children
self._reset_children()
self._then_net = NetBuilder.merge_nets(
_then_nets, self._lexical_scope)
if not self._then_net:
self._then_net = core.Net('empty_then_net')
if_net = core.Net(self.name + '/if_net')
add_if_op(if_net, self._cond_blob, self._lexical_scope,
self._then_net, self._else_net)
self._current_net = if_net
self._children = [if_net]
NetBuilder.__exit__(self, type, *args)
class _RunElseNet(NetBuilder):
"""
Else branch for _RunIfNet builder
"""
def __init__(self, name=None):
NetBuilder.__init__(self, name=name, _use_control_ops=True)
parent = NetBuilder.current(required=False)
assert parent and len(parent._children) > 0 and \
isinstance(parent._children[-1], _RunIfNet), \
'Invalid use of Else builder'
self._if_builder = parent._children[-1]
def __exit__(self, type, *args):
if type is None:
_else_nets = self._children
self._reset_children()
self._if_builder._else_net = NetBuilder.merge_nets(
_else_nets, self._lexical_scope)
if self._if_builder._else_net:
if_else_net = core.Net(self.name + '/if_else_net')
add_if_op(
if_else_net,
self._if_builder._cond_blob,
self._lexical_scope,
self._if_builder._then_net,
self._if_builder._else_net)
self._if_builder._current_net = if_else_net
self._if_builder._children = [if_else_net]
NetBuilder.__exit__(self, type, *args)
class _RunWhileNet(NetBuilder):
"""
Generates a single net that uses While operator
"""
def __init__(self, name=None):
NetBuilder.__init__(self, name=name, _use_control_ops=True)
self._cond_builder = None
def __exit__(self, type, *args):
if type is None:
assert self._cond_builder, \
'Condition builder must be specified in While op'
_cond_blob = self._cond_builder._cond_blob
_cond_net = self._cond_builder._cond_net
loop_body = self._children
self._reset_children()
loop_body_net = NetBuilder.merge_nets(
loop_body, self._lexical_scope)
if not loop_body_net:
loop_body_net = core.Net('empty_loop_body_net')
while_net = core.Net(self.name + '/while_net')
add_while_op(while_net, _cond_blob, self._lexical_scope,
loop_body_net, _cond_net)
self._current_net = while_net
self._children = [while_net]
NetBuilder.__exit__(self, type, *args)
class _RunWhileCondition(NetBuilder):
"""
    Computes the loop's condition; used in the context of WhileNet.
    The last operator must have a single scalar boolean output that will be
    used as the condition value; no other blobs created in the condition net
    are visible outside of it.
"""
def __init__(self, name=None):
NetBuilder.__init__(self, name=name, _use_control_ops=True)
parent = NetBuilder.current(required=False)
assert parent and isinstance(parent, _RunWhileNet), \
'Invalid use of loop condition builder'
assert not parent._cond_builder, \
'Multiple loop condition builders specified'
assert len(parent._children) == 0, \
'Condition definition must be specified before the loop\'s body'
parent._cond_builder = self
self._cond_blob = None
self._cond_net = None
def __exit__(self, type, *args):
if type is None:
condition_body = self._children
self._reset_children()
self._cond_net = NetBuilder.merge_nets(
condition_body, self._lexical_scope)
assert self._cond_net, 'Invalid loop condition specified'
assert len(self._cond_net.Proto().op) > 0, 'Invalid condition net'
last_op = self._cond_net.Proto().op[-1]
assert len(last_op.output) == 1, 'Invalid condition net'
self._cond_blob = core.BlobReference(name=last_op.output[0], net=None)
self._current_net = self._cond_net
self._children = [self._cond_net]
NetBuilder.__exit__(self, type, *args)
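# Usage notes for the control-op builders above (summarizing the code, no new behavior):
# * _RunIfNet/_RunElseNet emit a single If operator; an Else builder must be added
#   immediately after its If builder, since it attaches to the parent's last child
#   (see the assertion in _RunElseNet.__init__).
# * _RunWhileNet requires exactly one _RunWhileCondition declared before any loop-body
#   ops; the last op of the condition net supplies the scalar bool driving the While op.
# * _Loop stops via stop_if(GE(iter, num_iters)) and increments its counter on clean
#   exit, so iter() is only meaningful inside the loop context.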
|
from generate import list
def bubble_sort(l):
    for i in range(len(l)):
        is_sorted = True  # renamed from `sorted` to avoid shadowing the built-in
        for j in range(len(l) - i - 1):
            if l[j] > l[j + 1]:
                is_sorted = False
                l[j], l[j + 1] = l[j + 1], l[j]
        if is_sorted:
            break  # no swaps in this pass, so the list is already sorted
    return l
for i in range(100):
test = list.generate_list(i)
assert bubble_sort(test) == sorted(test)
|
#! /usr/bin/env python
from __future__ import print_function
import csv
import argparse
import sys
import collections
import numpy
# Parse command line
parser = argparse.ArgumentParser(description='Summarize results')
parser.add_argument('-i', '--infile', metavar='results.tsv', required=True, dest='infile', help='LAMP results (required)')
parser.add_argument('-t', '--threshold', metavar='0.3', type=float, required=False, dest='thres', help='threshold (optional)')
args = parser.parse_args()
# addFailSamples
#afail = set(['VZTBYZ', 'AZGDWT'])
afail = set([])
# qPCR
qpcr = dict()
#qpcr['O2X2HK'] = 'likely_positive'
#qpcr['X3EV9R'] = 'likely_positive'
#qpcr['NH8GFX'] = 'negative'
#qpcr['DX0YDO'] = 'negative'
#qpcr['KU8R64'] = 'negative'
#qpcr['EURFVTE'] = 'negative'
#qpcr['EHUAZEY'] = 'negative'
# Estimate threshold
icval = collections.defaultdict(list)
thres = dict()
if args.infile:
f_reader = csv.DictReader(open(args.infile), delimiter="\t")
for fields in f_reader:
if fields['group'] == "saliva":
icval[fields['plate']].append(float(fields['ic']))
for pl in icval.keys():
thres[pl] = numpy.percentile(numpy.array(icval[pl]), 75) - 0.1
if thres[pl] > 0.3:
thres[pl] = 0.3
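# i.e. per-plate threshold = min(75th percentile of that plate's saliva IC values - 0.1, 0.3),
# unless an explicit -t value overrides it below.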
if args.thres is not None:
for pl in thres.keys():
thres[pl] = float(args.thres)
print("IC and COVID threshold:", thres, file=sys.stderr)
# Classify results
print("BARCODE", "FAIL", "LAMP", "qPCR", "OUTCOME", "IC", "COVID", sep="\t")
if args.infile:
f_reader = csv.DictReader(open(args.infile), delimiter="\t")
for fields in f_reader:
if fields['group'] != "saliva":
continue
fields['lamp'] = "unclear"
if float(fields['covid']) > thres[fields['plate']]:
fields['lamp'] = "likely_positive"
elif float(fields['covid']) <= thres[fields['plate']]:
fields['lamp'] = "negative"
fields['failure'] = "no"
if float(fields['ic']) <= thres[fields['plate']]:
fields['failure'] = "yes"
fields['qPCR'] = "na"
if fields['id'] in qpcr.keys():
fields['qPCR'] = qpcr[fields['id']]
fields['outcome'] = "unclear"
if fields['qPCR'] != "na":
fields['outcome'] = fields['qPCR']
else:
# Lamp
if fields['failure'] == "yes":
fields['outcome'] = "fail"
else:
fields['outcome'] = fields['lamp']
print(fields['id'], fields['failure'], fields['lamp'], fields['qPCR'], fields['outcome'], fields['ic'], fields['covid'], sep="\t")
# Add missing, failed samples
for s in afail:
print(s, 'yes', 'na', 'na', 'fail', 0.0, 0.0, sep="\t")
|
"""
Script that builds whole database.
Set database connection information in file: settings.py
Read about database structure in docs.
Data is scraped from the following two sources:
- http://www.tennis-data.co.uk/alldata.php
- https://github.com/JeffSackmann/tennis_atp
"""
START_YEAR = 2003
END_YEAR = 2015
__author__ = 'riko'
import _mysql_exceptions
import MySQLdb
import xlrd
import settings as stg
################################################################################
# Set-up database connection. #
################################################################################
try:
db_data = stg.MYSQL_SETTINGS
except AttributeError:
print "Could not load the database settings! Make sure you set them in settings.py"
raise
# Establish a MySQL connection.
database = MySQLdb.connect(**db_data)
database.set_character_set('utf8')
# Get the cursor and set UTF8 format.
cursor = database.cursor()
cursor.execute('SET NAMES utf8;')
cursor.execute('SET CHARACTER SET utf8;')
cursor.execute('SET character_set_connection=utf8;')
################################################################################
# Functions. #
################################################################################
def add_id_to_table(table_name):
'''
Add Id column to table.
:param table_name: Name of table we want to add ID to.
:return: void.
'''
cursor.execute("ALTER TABLE %s ADD id MEDIUMINT NOT NULL AUTO_INCREMENT KEY" % table_name)
cursor.execute("ALTER TABLE %s DROP primary key, add primary key(Id)" % table_name)
def drop_table(table_name):
'''
This function tries to drop table with name 'table_name'.
:param table_name: Name of table we are trying to drop.
:return: void.
'''
query = "DROP TABLE %s" % table_name
try:
cursor.execute(query)
except _mysql_exceptions.OperationalError:
print "Table %s doesn't exist yet!" % table_name
def get_short_name(name):
'''
This function is used to find shortened versions of names. We need them
to join data with different name formats.
:param name: Name of player we want to shorten.
:return: Shortened name of a player.
'''
keep = ["De", "Del", "Estrella", "Huta"]
name_split = name.split()
    if len(name_split) > 1 and name_split[-2] in keep:
begin = " ".join(name_split[-2:])
name_split = name_split[:-2]
else:
begin = name_split[-1]
name_split = name_split[:-1]
end = " " + ".".join([x[0] for x in name_split]) + "."
return begin + end
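# Illustrative examples of the shortening rule above (hypothetical inputs):
#   get_short_name("Roger Federer")         -> "Federer R."
#   get_short_name("Juan Martin Del Potro") -> "Del Potro J.M."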
def parse_tennis_data(year):
'''
    Long function that parses two excel files. It first parses each of them
    separately and then combines them together.
    Very ugly, but there is no other way than to hard-code it.
:param year: Parse one year of data from .xls and .csv files.
:return: void.
'''
# FIRST PART - get data from .xls file.
year = str(year)
excel_dir = stg.ROOT_PATH + "data/tennis_betting/" + year + ".xls"
# Open the workbook and define the worksheet.
book = xlrd.open_workbook(excel_dir)
sheet = book.sheet_by_name(year)
query = """
CREATE TABLE temp_a
(
Location VARCHAR (255),
Tournament VARCHAR (255),
Date INT,
Surface VARCHAR (255),
Length INT ,
Winner VARCHAR (255),
Loser VARCHAR (255),
Winner_points INT,
Loser_points INT,
Winner_odds FLOAT,
Loser_odds FLOAT
);
"""
drop_table("temp_a")
cursor.execute(query)
query = """INSERT INTO temp_a (Location, Tournament, Date, Surface, Length, Winner, Loser, Winner_points, Loser_points, Winner_odds, Loser_odds) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
for r in xrange(1, sheet.nrows):
location = sheet.cell(r,1).value
tournament = sheet.cell(r,2).value
date = sheet.cell(r,3).value
surface = sheet.cell(r,6).value
length = sheet.cell(r,8).value
winner = sheet.cell(r,9).value
loser = sheet.cell(r,10).value
winner_points = sheet.cell(r,25).value
loser_points = sheet.cell(r,26).value
winner_odds = 1.0
loser_odds = 1.0
for i in range(5):
try:
other_win = float(str(sheet.cell(r,28+2*i).value))
other_lose = float(str(sheet.cell(r,29+2*i).value))
except ValueError:
other_win = 1.0
other_lose = 1.0
winner_odds = max(winner_odds, other_win)
loser_odds = max(loser_odds, other_lose)
values = (location, tournament, date, surface, length, winner, loser, winner_points, loser_points, winner_odds, loser_odds)
cursor.execute(query, values)
# SECOND PART - get data from .csv file
query = """
CREATE TABLE temp_b
(
Tournament VARCHAR (255),
Surface VARCHAR (255),
Size INT,
Level VARCHAR (255),
Date DATE,
Winner VARCHAR (255),
Winner_short VARCHAR (255),
Winner_hand VARCHAR (255),
Winner_ioc VARCHAR (255),
Winner_rank VARCHAR (255),
Loser VARCHAR (255),
Loser_short VARCHAR (255),
Loser_hand VARCHAR (255),
Loser_ioc VARCHAR (255),
Loser_rank VARCHAR (255),
Score VARCHAR (255),
Best_of INT,
Round VARCHAR (255),
Minutes INT,
W_sv INT,
W_1stIn INT,
W_ace INT,
W_1stWon INT,
W_2ndWon INT,
L_sv INT,
L_1stIn INT,
L_ace INT,
L_1stWon INT,
L_2ndWon INT
);
"""
drop_table("temp_b")
cursor.execute(query)
excel_dir = stg.ROOT_PATH + "data/tennis_atp/atp_matches_" + year + ".csv"
query = """INSERT INTO temp_b (Tournament, Surface, Size, Level, Date, Winner, Winner_short, Winner_hand, Winner_ioc, Winner_rank, Loser, Loser_short, Loser_hand, Loser_ioc, Loser_rank, Score, Best_of, Round, Minutes, W_sv, W_1stIn, W_ace, W_1stWon, W_2ndWon, L_sv, L_1stIn, L_ace, L_1stWon, L_2ndWon) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
with open(excel_dir) as f:
lines = f.readlines()
for line in lines[1:]:
values = line.split(",")
Tournament = values[1]
Surface = values[2]
Size = values[3]
Level = values[4]
Date = values[5]
Winner = values[10]
Winner_short = get_short_name(Winner)
Winner_hand = values[11]
Winner_ioc = values[13]
Winner_rank = values[15]
Loser = values[20]
Loser_short = get_short_name(Loser)
Loser_hand = values[21]
Loser_ioc = values[23]
Loser_rank = values[25]
Score = values[27]
Best_of = values[28]
Round = values[29]
Minutes = values[30]
W_sv = values[33]
W_1stIn = values[34]
W_ace = values[31]
W_1stWon = values[35]
W_2ndWon = values[36]
L_sv = values[42]
L_1stIn = values[43]
L_ace = values[40]
L_1stWon = values[44]
L_2ndWon = values[45]
values = (Tournament, Surface, Size, Level, Date, Winner, Winner_short, Winner_hand, Winner_ioc, Winner_rank, Loser, Loser_short, Loser_hand, Loser_ioc, Loser_rank, Score, Best_of, Round, Minutes, W_sv, W_1stIn, W_ace, W_1stWon, W_2ndWon, L_sv, L_1stIn, L_ace, L_1stWon, L_2ndWon)
cursor.execute(query, values)
# COMBINE BOTH TABLES
query = '''
CREATE TABLE new_table
AS
SELECT b.Location, a.Tournament, a.Level, a.Surface, a.Size, a.Date, a.Winner, a.Winner_hand, a.Winner_ioc, a.Winner_rank, a.Loser, a.Loser_short, a.Loser_hand, a.Loser_ioc, a.Loser_rank, a.Score, a.Best_of, a.Round, a.Minutes, a.W_sv, a.W_1stIn, a.W_ace, a.W_1stWon, a.W_2ndWon, a.L_sv, a.L_1stIn, L_ace, L_1stWon, L_2ndWon, b.Winner_odds, b.Loser_odds
FROM tennis.temp_b a
LEFT JOIN tennis.temp_a b
ON a.Tournament=b.Tournament AND a.Winner_short=b.Winner AND a.Loser_short=b.Loser;
'''
drop_table("new_table")
cursor.execute(query)
################################################################################
# Building database step by step. #
################################################################################
# CREATE TABLE base
print "Starting table 'Base' creation."
for year in xrange(START_YEAR, END_YEAR + 1):
print "Parsing year %s." % year
parse_tennis_data(year)
if year == START_YEAR:
drop_table("Base")
cursor.execute("CREATE TABLE Base LIKE new_table")
cursor.execute("INSERT INTO Base SELECT * FROM new_table")
# Adding ID to base table.
add_id_to_table("Base")
# Clear unneeded tables.
print "Table 'Base' done! Clearing left over tables."
drop_table("temp_a")
drop_table("temp_b")
drop_table("new_table")
# CREATE TABLE players
print "Starting table 'Players' creation."
drop_table("Players")
query = '''
CREATE TABLE Players
SELECT Winner as Name, Winner_hand as Hand, Winner_ioc as Country
FROM Base
GROUP BY Winner, Winner_hand, Winner_ioc
UNION
SELECT Loser, Loser_hand, Loser_ioc
FROM Base
    GROUP BY Loser, Loser_hand, Loser_ioc
'''
cursor.execute(query)
add_id_to_table("Players")
# CREATE TABLE tournaments
print "Starting table 'Tounaments' creation."
query = '''
CREATE TABLE Tournaments
SELECT Tournament, Surface, Size, min(Date) as Date, Best_of
FROM Base
GROUP BY Tournament, Surface, Size, Best_of
'''
drop_table("Tournaments")
cursor.execute(query)
add_id_to_table("Tournaments")
# CREATE TABLE games
print "Starting table 'Games' creation."
"""
query = '''
CREATE TABLE Games
SELECT p1.Id as Winner_ID, p2.Id as Loser_ID, t.Id as Tournament_ID, b.Score, b.Minutes
FROM Base b
LEFT JOIN Players p1
ON b.Winner=p1.Name and b.Winner_hand=p1.Hand and b.Winner_ioc=p1.Country
LEFT JOIN Players p2
ON b.Loser=p2.Name and b.Loser_hand=p2.Hand and b.Loser_ioc=p2.Country
LEFT JOIN Tournaments t
ON b.Tournament=t.Tournament and b.Surface=t.Surface and b.Size=t.Size and b.Date=t.Date and b.Best_of=t.Best_of
'''
drop_table("Games")
cursor.execute(query)
add_id_to_table("Games")
"""
print "ALL DONE. Closing cursor and database."
cursor.close()
database.commit()
database.close() |
import enum
import logging
import re
from nameko.dependency_providers import Config
from nameko.rpc import rpc, RpcProxy
from .models import Loc
from .schemas import ChangeSchema, CommentSchema, FunctionSchema, LocSchema
logger = logging.getLogger(__name__)
NEWLINE_RE = re.compile(r'\r\n?|\n')
class Flag(enum.IntFlag):
COMMENT = 2
BLANK = 1
NONE = 0
def _get_lines(content):
lines = NEWLINE_RE.split(content)
return lines[:-1] if lines[-1] == '' else lines # Handle newline at EOF
def _get_flags(lines, comments):
flags = [Flag.BLANK if not l else Flag.NONE for l in lines]
for comment in comments:
begin, end = comment.span.begin, comment.span.end
if begin.line == end.line:
line = lines[begin.line - 1]
# Line on which the comment exists must not have anything else
if not line.replace(line[begin.column - 1:end.column], '').strip():
flags[begin.line - 1] |= Flag.COMMENT
else:
line = lines[begin.line - 1]
# Line on which a comment begins must not have anything else
if not line.replace(line[begin.column - 1:], '').strip():
flags[begin.line - 1] |= Flag.COMMENT
for index in range(begin.line, end.line - 1):
flags[index] |= Flag.COMMENT
line = lines[end.line - 1]
# Line on which a comment ends must not have anything else
if not line.replace(line[:end.column], '').strip():
flags[end.line - 1] |= Flag.COMMENT
return flags
def _get_functionloc(function, flags):
aggregate, blank, comment = 0, 0, 0
begin, end = function.span.begin, function.span.end
for index in range(begin.line - 1, end.line):
aggregate += 1
blank += 1 if flags[index] == Flag.BLANK else 0
comment += 1 if Flag.COMMENT in flags[index] else 0
source = aggregate - (blank + comment)
return LocSchema().dump(Loc(blank, comment, source, aggregate))
def _get_functionsloc(functions, flags):
loc = dict()
for function in functions:
if function.signature in loc:
logger.warning('Duplicate function `%s`', function.signature)
loc[function.signature] = _get_functionloc(function, flags)
return loc
def _get_fileloc(lines, flags):
aggregate, blank, comment = len(lines), 0, 0
for flag in flags:
blank += 1 if flag == Flag.BLANK else 0
comment += 1 if Flag.COMMENT in flag else 0
source = aggregate - (blank + comment)
return LocSchema().dump(Loc(blank, comment, source, aggregate))
class LocService:
name = 'loc'
config = Config()
parser_rpc = RpcProxy('parser')
repository_rpc = RpcProxy('repository')
@rpc
def collect(self, project, sha, path, **options):
logger.debug('%s %s %s', project, sha, path)
granularity = options.get('granularity', 'file')
if granularity not in {'file', 'function'}:
granularity = 'file'
change = self.repository_rpc.get_change(project, sha, path)
change = ChangeSchema().load(change)
content = self.repository_rpc.get_content(project, change.oids.after)
if content is not None:
lines = _get_lines(content)
comments = self._get_comments(path, content)
if comments is not None:
flags = _get_flags(lines, comments)
if granularity == 'file':
return _get_fileloc(lines, flags)
functions = self._get_functions(path, content)
return _get_functionsloc(functions, flags)
return None
def _get_comments(self, path, content):
comments = self.parser_rpc.get_comments(path, content)
if comments is not None:
return CommentSchema(many=True).load(comments)
return None
def _get_functions(self, path, content):
functions = self.parser_rpc.get_functions(path, content)
if functions is not None:
return FunctionSchema(many=True).load(functions)
return None
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2019-11-08 12:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('userprofile', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='landing_page',
field=models.CharField(choices=[('network', 'Network'), ('services', 'Services'), ('community', 'Community')], default='community', max_length=255),
),
migrations.AddField(
model_name='userprofile',
name='view_community',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='userprofile',
name='view_network',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='userprofile',
name='view_services',
field=models.BooleanField(default=True),
),
]
|
#!/usr/bin/python
""" producer.py
Produces random data samples for the EQ sliders.
Uses the Hookbox REST api for publishing the data
on channel "chan1".
--- License: MIT ---
Copyright (c) 2010 Hookbox
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time, urllib, urllib2, json, random
def main ():
# assume the hookbox server is on localhost:2974
url = "http://127.0.0.1:2974/web/publish"
values = { "security_token" : "altoids",
"channel_name" : "chan1",
"payload" : []
}
# data samples from 0 - 99 inclusive
pop = range(0,100)
while True:
        # generate seven random data points every 0.1 seconds
# and publish them via the HTTP/REST api.
values["payload"] = random.sample(pop, 7)
data = urllib.urlencode(values)
req = urllib2.Request(url, data)
print data
print url
resp = urllib2.urlopen(req)
# Print the server response. This is a demo, after all!
page = resp.read()
print page
print values["payload"]
time.sleep(0.1)
if __name__ == "__main__":
main()
|
#function practice
print("print the last character of my string: ")
def last_char(name):
return name[-1]
print(last_char("Beenash"))
print("function is odd or even: ")
num = int(input("Enter a number: "))
def odd_even(num):
if num%2 == 0:
return "Even Number"
#else:
return "odd NUmber"
print(odd_even(num))
print("short method for odd or even number")
Num = int(input("Enter a number: "))
def is_even(Num):
if Num%2 == 0:
return True
else:
return False
print(is_even(Num))
print("3 very short method for odd or even number")
number = int(input("Enter a number: "))
def is_even(number):
    return number % 2 == 0  # True when the number is even
print(is_even(number))
def song():
return("happy birthday song")
print(song()) |
import numpy as np
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_array, check_consistent_length
class IsotonicRegressionInf(IsotonicRegression):
"""
    Scikit-learn's IsotonicRegression raises an error when input values are Inf or -Inf,
    even though isotonic regression itself can handle infinite values. This wrapper around
    the sklearn implementation prevents that error from being raised when Inf or -Inf
    values are provided.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like of shape (n_samples,)
Training data.
y : array-like of shape (n_samples,)
Training target.
sample_weight : array-like of shape (n_samples,), default=None
Weights. If set to None, all weights will be set to 1 (equal
weights).
Returns
-------
self : object
Returns an instance of self.
Notes
-----
X is stored for future use, as :meth:`transform` needs X to interpolate
new input data.
"""
check_params = dict(accept_sparse=False, ensure_2d=False, force_all_finite=False)
X = check_array(X, dtype=[np.float64, np.float32], **check_params)
y = check_array(y, dtype=X.dtype, **check_params)
check_consistent_length(X, y, sample_weight)
# Transform y by running the isotonic regression algorithm and
# transform X accordingly.
X, y = self._build_y(X, y, sample_weight)
# It is necessary to store the non-redundant part of the training set
# on the model to make it possible to support model persistence via
# the pickle module as the object built by scipy.interp1d is not
# picklable directly.
self._necessary_X_, self._necessary_y_ = X, y
# Build the interpolation function
self._build_f(X, y)
return self
def transform(self, T):
"""Transform new data by linear interpolation
Parameters
----------
T : array-like of shape (n_samples,)
Data to transform.
Returns
-------
T_ : array, shape=(n_samples,)
The transformed data
"""
if hasattr(self, '_necessary_X_'):
dtype = self._necessary_X_.dtype
else:
dtype = np.float64
T = check_array(T, dtype=dtype, ensure_2d=False, force_all_finite=False)
if len(T.shape) != 1:
raise ValueError("Isotonic regression input should be a 1d array")
# Handle the out_of_bounds argument by clipping if needed
if self.out_of_bounds not in ["raise", "nan", "clip"]:
raise ValueError("The argument ``out_of_bounds`` must be in "
"'nan', 'clip', 'raise'; got {0}"
.format(self.out_of_bounds))
if self.out_of_bounds == "clip":
T = np.clip(T, self.X_min_, self.X_max_)
res = self.f_(T)
# on scipy 0.17, interp1d up-casts to float64, so we cast back
res = res.astype(T.dtype)
return res
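# Minimal usage sketch (not part of the original module): fit on x-values that include
# -inf/+inf, which stock IsotonicRegression rejects during validation. The data below is
# an assumption for illustration; exact outputs depend on the scikit-learn/scipy versions.
if __name__ == '__main__':
    X = np.array([-np.inf, 1.0, 2.0, 3.0, np.inf])
    y = np.array([0.0, 0.1, 0.4, 0.8, 1.0])
    model = IsotonicRegressionInf(out_of_bounds='clip')
    model.fit(X, y)
    # Interpolates between the finite knots, e.g. roughly 0.25 at 1.5 and 0.6 at 2.5.
    print(model.transform(np.array([1.5, 2.5])))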
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import yfinance as yf
yf.pdr_override()
import datetime as dt
# input
symbol = 'AAPL'
start = dt.date.today() - dt.timedelta(days = 365*4)
end = dt.date.today()
# Read data
df = yf.download(symbol,start,end)
n = 25
high_max = lambda xs: np.argmax(xs[::-1])
low_min = lambda xs: np.argmin(xs[::-1])
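# Aroon with an n-day lookback: Aroon Up = 100 * (n - periods since the n-day high) / n,
# Aroon Down = 100 * (n - periods since the n-day low) / n. The reversed argmax/argmin
# above count how many bars ago the extreme occurred within the rolling window.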
df['Days since last High'] = df['High'].rolling(center=False,min_periods=0,window=n).apply(func=high_max).astype(int)
df['Days since last Low'] = df['Low'].rolling(center=False,min_periods=0,window=n).apply(func=low_min).astype(int)
df['Aroon_Up'] = ((25-df['Days since last High'])/25) * 100
df['Aroon_Down'] = ((25-df['Days since last Low'])/25) * 100
df = df.drop(['Days since last High', 'Days since last Low'],axis=1)
fig = plt.figure(figsize=(14,7))
ax1 = plt.subplot(2, 1, 1)
ax1.plot(df['Adj Close'], label='Adj Close')
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')
ax1.legend(loc='best')
ax2 = plt.subplot(2, 1, 2)
ax2.plot(df['Aroon_Up'], label='Aroon UP', color='g')
ax2.plot(df['Aroon_Down'], label='Aroon DOWN', color='r')
ax2.axhline(y=70, color='lightgreen')
ax2.axhline(y=50, color='darkblue')
ax2.axhline(y=30, color='lightgreen')
ax2.grid()
ax2.legend(loc='best')
ax2.set_ylabel('Aroon')
ax2.set_xlabel('Date')
plt.show()
# ## Candlestick with Aroon
from matplotlib import dates as mdates
dfc = df.copy()
dfc['VolumePositive'] = dfc['Open'] < dfc['Adj Close']
#dfc = dfc.dropna()
dfc = dfc.reset_index()
dfc['Date'] = mdates.date2num(dfc['Date'].tolist())
from mplfinance.original_flavor import candlestick_ohlc
fig = plt.figure(figsize=(14,7))
ax1 = plt.subplot(2, 1, 1)
candlestick_ohlc(ax1,dfc.values, width=0.5, colorup='g', colordown='r', alpha=1.0)
ax1.xaxis_date()
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%d-%m-%Y'))
ax1.grid(True, which='both')
ax1.minorticks_on()
ax1v = ax1.twinx()
colors = dfc.VolumePositive.map({True: 'g', False: 'r'})
ax1v.bar(dfc.Date, dfc['Volume'], color=colors, alpha=0.4)
ax1v.axes.yaxis.set_ticklabels([])
ax1v.set_ylim(0, 3*df.Volume.max())
ax1.set_title('Stock '+ symbol +' Closing Price')
ax1.set_ylabel('Price')
ax2 = plt.subplot(2, 1, 2)
ax2.plot(df['Aroon_Up'], label='Aroon UP', color='g')
ax2.plot(df['Aroon_Down'], label='Aroon DOWN', color='r')
ax2.axhline(y=70, color='lightgreen')
ax2.axhline(y=50, color='darkblue')
ax2.axhline(y=30, color='lightgreen')
ax2.grid()
ax2.legend(loc='best')
ax2.set_ylabel('Aroon')
ax2.set_xlabel('Date')
plt.show() |
def vat_faktura(lista):
suma = sum(lista)
return suma * 0.23
def vat_paragon(lista):
lista_vat = [item * 0.23 for item in lista]
return sum(lista_vat)
zakupy = [0.2, 0.5, 4.59, 6]
print(vat_paragon(zakupy))
print(vat_faktura(zakupy))
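# Mathematically the two methods agree (0.23 * sum == sum of 0.23 * each item), but
# floating-point rounding can make the totals differ in the last bits, so the
# comparison below may print False.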
print(vat_faktura(zakupy) == vat_paragon(zakupy)) |
from plenum.test import waits
from plenum.test.helper import sdk_send_random_and_check, assertExp
from plenum.test.node_catchup.helper import waitNodeDataEquality
from plenum.test.pool_transactions.helper import sdk_add_new_steward_and_node
from plenum.test.test_node import checkNodesConnected
from stp_core.loop.eventually import eventually
CHK_FREQ = 5
LOG_SIZE = 3 * CHK_FREQ
def test_second_checkpoint_after_catchup_can_be_stabilized(
chkFreqPatched, looper, txnPoolNodeSet, sdk_wallet_steward,
sdk_wallet_client, sdk_pool_handle, tdir, tconf,
allPluginsPath):
_, new_node = sdk_add_new_steward_and_node(
looper, sdk_pool_handle, sdk_wallet_steward,
'EpsilonSteward', 'Epsilon', tdir, tconf,
allPluginsPath=allPluginsPath)
txnPoolNodeSet.append(new_node)
looper.run(checkNodesConnected(txnPoolNodeSet))
waitNodeDataEquality(looper, new_node, *txnPoolNodeSet[:-1])
# Epsilon did not participate in ordering of the batch with EpsilonSteward
# NYM transaction and the batch with Epsilon NODE transaction.
# Epsilon got these transactions via catch-up.
master_replica = new_node.replicas._master_replica
assert len(master_replica._checkpointer._checkpoint_state) == 0
assert len(master_replica._checkpointer._stashed_recvd_checkpoints) == 0
assert master_replica.h == 2
assert master_replica.H == 17
sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client, 1)
for replica in new_node.replicas.values():
looper.run(eventually(
lambda r: assertExp(len(r._checkpointer._checkpoint_state) == 1), replica))
assert len(replica._checkpointer._stashed_recvd_checkpoints) == 0
assert replica.h == 2
assert replica.H == 17
sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client, 6)
stabilization_timeout = \
waits.expectedTransactionExecutionTime(len(txnPoolNodeSet))
looper.runFor(stabilization_timeout)
for replica in new_node.replicas.values():
assert len(replica._checkpointer._checkpoint_state) == 2
keys_iter = iter(replica._checkpointer._checkpoint_state)
assert next(keys_iter) == (3, 5)
assert replica._checkpointer._checkpoint_state[3, 5].seqNo == 5
assert replica._checkpointer._checkpoint_state[3, 5].digest is None
assert replica._checkpointer._checkpoint_state[3, 5].isStable is False
assert next(keys_iter) == (6, 10)
assert replica._checkpointer._checkpoint_state[6, 10].seqNo == 9
assert replica._checkpointer._checkpoint_state[6, 10].digest is None
assert replica._checkpointer._checkpoint_state[6, 10].isStable is False
# nothing is stashed since it's ordered during catch-up
assert len(replica._checkpointer._stashed_recvd_checkpoints) == 0
assert replica.h == 2
assert replica.H == 17
sdk_send_random_and_check(looper, txnPoolNodeSet,
sdk_pool_handle, sdk_wallet_client, 1)
looper.runFor(stabilization_timeout)
for replica in new_node.replicas.values():
assert len(replica._checkpointer._checkpoint_state) == 1
keys_iter = iter(replica._checkpointer._checkpoint_state)
assert next(keys_iter) == (6, 10)
assert replica._checkpointer._checkpoint_state[6, 10].seqNo == 10
assert replica._checkpointer._checkpoint_state[6, 10].digest is not None
assert replica._checkpointer._checkpoint_state[6, 10].isStable is True
assert len(replica._checkpointer._stashed_recvd_checkpoints) == 0
assert replica.h == 10
assert replica.H == 25
|
import unittest
from main import remove_duplicate_words
class TestSum(unittest.TestCase):
def test(self):
self.assertEqual(remove_duplicate_words("alpha beta beta gamma gamma gamma delta alpha beta beta gamma gamma gamma delta"), "alpha beta gamma delta")
self.assertEqual(remove_duplicate_words("my cat is my cat fat"), "my cat is fat")
if __name__ == '__main__':
unittest.main()
|
from typing import Dict, List, Optional
import pandas as pd
from code_base.excess_mortality.decode_args import *
from code_base.utils.file_utils import *
from code_base.excess_mortality.url_constants import *
class GetBulkEurostatDataBase(SaveFileMixin):
def __init__(self, eurostat_data: str,
add_current_year: bool = False,
current_year_weeks: Optional[int] = None,
zipped: bool = True):
self.eurostat_data: str = eurostat_data
self.add_current_year: bool = add_current_year
self.current_year_weeks: Optional[int] = current_year_weeks
self.split_columns: Dict = {
'split_from_demo': DECODE_DEMO_COL[self.eurostat_data],
'split_into_demo': DECODE_DEMO_REPL[self.eurostat_data],
'split_from_year_week': 'Year_week',
'split_into_year_week': ['Year', 'Week']
}
self.retain_demo_columns = RETAIN_COLUMNS[self.eurostat_data]
self.replace_location_name = COUNTRY_REPLACE[self.eurostat_data]
if zipped:
self.eurostat_df: pd.DataFrame = pd.read_csv(self.url,
compression='gzip',
sep='\t',
encoding='utf-8-sig',
low_memory=False)
else:
self.eurostat_df: pd.DataFrame = pd.read_csv(self.url,
encoding='utf-8-sig')
super().__init__()
@property
def url(self) -> str:
domain: str = EUROSTAT_DATA['main']
url_path: str = EUROSTAT_DATA['files'][self.eurostat_data]
full_url: str = domain + url_path
return full_url
@property
def generate_year_week_columns(self) -> List:
week_year_columns = generate_past_week_years(2015, 2020)
if self.add_current_year:
week_year_columns.extend(generate_current_year_weeks(self.current_year_weeks))
return week_year_columns
def split_demographic_data(self, split_from, split_into, separator) -> None:
"""
        The Eurostat files pack sex, age and other demographic data into a single column.
        This function separates them into their own columns. The split is performed in place,
        so nothing is returned.
:param split_from: The column header name that needs to be split.
:param split_into: The names of the resulting column headers.
:param separator: The separator used to split the columns, i.e. comma "," or some other symbol.
:return: The function does not return data. It manipulates the existing dataframe within the class instance.
"""
col_ind = self.eurostat_df.columns.get_loc(split_from)
self.eurostat_df[split_into] = self.eurostat_df.iloc[:, col_ind].str.split(separator, expand=True)
self.eurostat_df.drop(split_from, axis=1, inplace=True)
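    # Illustration (hypothetical raw value): a combined entry such as 'F,Y10-14,BG'
    # split on ',' yields Sex='F', Age='Y10-14', Location='BG'; the actual from/into
    # column names come from DECODE_DEMO_COL / DECODE_DEMO_REPL above.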
def filter_cols(self, filt_cols):
filt_columns = self.retain_demo_columns + filt_cols
self.eurostat_df.drop(self.eurostat_df.columns[
~self.eurostat_df.columns.isin(filt_columns)],
axis=1,
inplace=True)
def decode_demo_values(self):
decode_demo_info = {
'Location': self.replace_location_name,
'Sex': EUROSTAT_SEX_CONVERSION,
'Age': EUROSTAT_AGES_CONVERSION
}
for key, val in decode_demo_info.items():
self.eurostat_df[key] = self.eurostat_df.apply(lambda x: val.get(x[key]), axis=1) |
#from django.forms import ModelForm
from .models import *
# ubah bentuk form
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Fieldset, ButtonHolder, Submit
# login and registration form
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class CreateUserForm(UserCreationForm, forms.ModelForm):
first_name = forms.CharField(max_length=100,required=True, widget= forms.TextInput(attrs={'class':'form-control'}))
last_name = forms.CharField(max_length=100, required=True,widget= forms.TextInput(attrs={'class':'form-control'}))
    email = forms.EmailField(max_length=254, help_text='Enter a valid email address.', widget=forms.TextInput(attrs={'class': 'form-control'}))
    username = forms.CharField(max_length=50, help_text='Choose a unique username (max. 50 characters).', widget=forms.TextInput(attrs={'class': 'form-control'}))
password1 = forms.CharField(label='Password', widget=forms.PasswordInput(attrs={'class':'form-control'}))
password2 = forms.CharField(label='Repeat Password', widget=forms.PasswordInput(attrs={'class':'form-control'}))
class Meta:
model = User
#fields = '__all__'
fields = ('username', 'email', 'first_name', 'last_name')
class FileForm(forms.ModelForm):
name = forms.CharField(max_length=30)
email = forms.EmailField(max_length=254)
message = forms.CharField(
max_length=2000,
widget=forms.Textarea(),
help_text='Write here your message!'
)
source = forms.CharField( # A hidden input for internal use
max_length=50, # tell from which page the user sent the message
widget=forms.HiddenInput()
)
    def clean(self):
        cleaned_data = super(FileForm, self).clean()
        name = cleaned_data.get('name')
        email = cleaned_data.get('email')
        message = cleaned_data.get('message')
        if not name and not email and not message:
            raise forms.ValidationError('You have to write something!')
        return cleaned_data
# class Meta:
# model = Image
# fields = ('name', 'desc', 'picture')
# def __init__(self, *args, **kwargs):
# super().__init__(*args, **kwargs)
# self.helper = FormHelper()
# self.helper.form_method = 'post'
# self.helper.add_input(Submit('submit', 'Create'))
class ProfileForm(forms.ModelForm):
first_name = forms.CharField(max_length=100, required=True, widget= forms.TextInput(attrs={'class':'form-control'}))
last_name = forms.CharField(max_length=100, required=True, widget= forms.TextInput(attrs={'class':'form-control'}))
email = forms.EmailField(max_length=254, required=True, widget= forms.TextInput(attrs={'class':'form-control'}))
username = forms.CharField(max_length=50, required=True, widget= forms.TextInput(attrs={'class':'form-control'}))
#picture = forms.ImageField()
class Meta:
model = User
#fields = '__all__'
fields = ('username', 'email', 'first_name', 'last_name')
class UserImageForm(forms.ModelForm):
users = forms.CharField(widget=forms.HiddenInput(), required=False)
imageprofile = forms.ImageField()
class Meta:
model = UserImageProfile
fields = ('users', 'imageprofile',)
class CreatePostForm(forms.ModelForm):
title = forms.CharField(max_length=240, required=True, widget= forms.TextInput(attrs={'class':'form-control'}))
desc = forms.CharField(max_length=1000, required=False, widget= forms.Textarea(attrs={'class':'form-control', 'rows':'5'}))
class Meta:
model = Post
fields = ('title', 'desc',)
class CreateCommentForm(forms.ModelForm):
comment = forms.CharField(max_length=500, required=True, widget= forms.Textarea(attrs={'class':'form-control', 'rows':'5'}))
class Meta:
model = PostComment
fields = ('comment', )
class CreateForumForm(forms.ModelForm):
title = forms.CharField(max_length=240, required=True, widget= forms.TextInput(attrs={'class':'form-control'}))
article = forms.CharField(max_length=3000, required=True, widget= forms.Textarea(attrs={'class':'form-control', 'rows':'10'}))
class Meta:
model = Forum
fields = ('title', 'article') |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any
class DataProviderError(Exception):
"""Base Exception for Ax DataProviders.
The type of the data provider must be included.
The raw error is stored in the data_provider_error section,
and an Ax-friendly message is stored as the actual error message.
"""
def __init__(
self, message: str, data_provider: str, data_provider_error: Any
) -> None:
self.message = message
self.data_provider = data_provider
self.data_provider_error = data_provider_error
def __str__(self) -> str:
return (
"{message}. \n Error thrown by: {dp} data provider \n"
+ "Native {dp} data provider error: {dp_error}"
).format(
dp=self.data_provider,
message=self.message,
dp_error=self.data_provider_error,
)
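# Example of the resulting message (illustrative values, not taken from the library):
#   str(DataProviderError("Fetch failed", "my_dp", TimeoutError("timed out")))
# would render roughly as:
#   "Fetch failed. \n Error thrown by: my_dp data provider \nNative my_dp data provider error: timed out"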
|
import netifaces
interfaces = netifaces.interfaces()
for i in interfaces:
if i == 'lo':
continue
iface = netifaces.ifaddresses(i).get(netifaces.AF_INET)
    if iface is not None:
        for j in iface:
            print(j['addr'])
|