max_stars_repo_path stringlengths 3–269 | max_stars_repo_name stringlengths 4–119 | max_stars_count int64 0–191k | id stringlengths 1–7 | content stringlengths 6–1.05M | score float64 0.23–5.13 | int_score int64 0–5
---|---|---|---|---|---|---|
test.py | ahwhbc/LookIntoPerson | 79 | 12794851 | <gh_stars>10-100
# import the necessary packages
import argparse
import os
import cv2 as cv
import keras.backend as K
import numpy as np
from config import num_classes
from data_generator import random_choice, safe_crop, to_bgr
from model import build_model
if __name__ == '__main__':
# Parse arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", help="path to the image to be processed")
args = vars(ap.parse_args())
filename = args["image"]
img_rows, img_cols = 320, 320
channel = 3
model_weights_path = 'models/model.54-2.2507.hdf5'
model = build_model()
model.load_weights(model_weights_path)
print(model.summary())
image = cv.imread(filename)
image = cv.resize(image, (img_rows, img_cols), interpolation=cv.INTER_CUBIC)
image_size = image.shape[:2]
x, y = random_choice(image_size)
image = safe_crop(image, x, y)
print('Start processing image: {}'.format(filename))
x_test = np.empty((1, img_rows, img_cols, 3), dtype=np.float32)
x_test[0, :, :, 0:3] = image / 255.
out = model.predict(x_test)
out = np.reshape(out, (img_rows, img_cols, num_classes))
out = np.argmax(out, axis=2)
out = to_bgr(out)
ret = image * 0.6 + out * 0.4
ret = ret.astype(np.uint8)
if not os.path.exists('images'):
os.makedirs('images')
cv.imwrite('images/test_image.png', image)
cv.imwrite('images/test_merged.png', ret)
cv.imwrite('images/test_out.png', out)
K.clear_session()
| 2.453125 | 2 |
tests/func_indycmd_test.py | pustotnik/raven | 2 | 12794852 | # coding=utf-8
#
# pylint: disable = wildcard-import, unused-wildcard-import
# pylint: disable = missing-docstring, invalid-name
# pylint: disable = unused-argument, no-member, attribute-defined-outside-init
# pylint: disable = too-many-lines, too-many-branches, too-many-statements
"""
Copyright (c) 2020, <NAME>. All rights reserved.
license: BSD 3-Clause License, see LICENSE for more details.
"""
from zipfile import is_zipfile as iszip
import pytest
from zm import zipapp
from tests.func_utils import *
class TestIndyCmd(object):
@pytest.fixture(params = getZmExecutables(), autouse = True)
def allZmExe(self, request):
self.zmExe = zmExes[request.param]
def teardown():
printErrorOnFailed(self, request)
request.addfinalizer(teardown)
def testZipAppCmd(self, tmpdir):
cmdLine = ['zipapp']
self.cwd = str(tmpdir.realpath())
exitcode = runZm(self, cmdLine)[0]
assert exitcode == 0
zipAppPath = joinpath(self.cwd, zipapp.ZIPAPP_NAME)
assert isfile(zipAppPath)
assert iszip(zipAppPath)
def testVersionCmd(self, tmpdir):
cmdLine = ['version']
self.cwd = str(tmpdir.realpath())
exitcode, stdout, _ = runZm(self, cmdLine)
assert exitcode == 0
assert 'version' in stdout
def testSysInfoCmd(self, tmpdir):
cmdLine = ['sysinfo']
self.cwd = str(tmpdir.realpath())
exitcode, stdout, _ = runZm(self, cmdLine)
assert exitcode == 0
assert 'information' in stdout
| 1.867188 | 2 |
Forum/views.py | Galbar/django-forum | 2 | 12794853 | # -*- coding: utf-8 -*-
import json
from django.http import Http404, HttpResponse
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth.decorators import login_required
from django.contrib.auth import logout as lgout, authenticate, login as lgin
from django.shortcuts import render, redirect
from datetime import datetime
from Forum.models import *
from Forum.settings import *
from Forum.forms import *
from Forum.lib import *
from Forum.getInstanceLib import *
from Forum.modelsLib import *
import Forum.signals as signals
from math import ceil
# Create your views here.
def login(request, forum_id, template="Forum/forms/login.html", template_ajax="Forum/forms/ajax/login.html"):
form = None
if request.method == 'POST':
form = FormUserLogin(request.POST)
if form.is_valid():
user = authenticate(username=form.data['username'], password=form.data['password'])
if user:
lgin(request, user)
forum = get_forum_instance(forum_id)
if forum:
return redirect('base_forum', forum_id=forum.local_id)
else:
raise Http404
if not form:
form = FormUserLogin()
c = {
'forum_id':forum_id,
'form': form,
}
if request.is_ajax():
return render(request, template_ajax, c)
else:
return render(request, template, c)
@login_required
def logout(request, forum_id):
lgout(request)
forum = get_forum_instance(forum_id)
if forum:
return redirect('base_forum', forum_id=forum.local_id)
raise Http404
def forum(request, forum_id, page=1, template=MAIN_FORUM_TEMPLATE):
forum = get_forum_instance(forum_id)
if forum:
subforum_slug = forum.main_forum.slug()
return subforum(request, forum_id, 0, subforum_slug, page, template=template)
raise Http404
def subforum(request, forum_id, subforum_id, subforum_slug, page=1, template=SUBFORUM_TEMPLATE):
forum = get_forum_instance(forum_id)
if forum:
subforum = get_subforum_instance(forum, subforum_id)
if subforum:
if not check_slug(subforum, subforum_slug):
if page == 1:
return redirect('Forum.views.subforum', forum_id=forum_id, subforum_id=subforum_id, subforum_slug=subforum.slug())
else:
return redirect('Forum.views.subforum', forum_id=forum_id, subforum_id=subforum_id, subforum_slug=subforum.slug(), page=page)
if subforum.canView(request.user):
is_mod = subforum.canModerate(request.user)
can_create_thread = subforum.canCreateThread(request.user)
subforum_list = []
for sf in subforum.child_set.order_by('local_id'):
if sf.canView(request.user):
sf.is_visited = sf.isVisited(request.user)
subforum_list.append(sf)
sf_th_set = subforum.thread_set.order_by('-pinned', '-last_publication_datetime', 'name')
if not subforum.canModerate(request.user):
sf_th_set = sf_th_set.exclude(hidden=True)
thread_list = []
for th in sf_th_set:
th.is_visited = th.isVisited(request.user)
thread_list.append(th)
page = int(page) -1
subforum_num_pages = int(ceil(float(len(thread_list))/float(forum.threads_per_page)))
if (subforum_num_pages > page and 0 <= page) or subforum_num_pages == 0:
c = {
'forum_id':forum_id,
'forum': subforum,
'subforum_list':subforum_list,
'thread_list':thread_list[(page*forum.threads_per_page):(page*forum.threads_per_page)+forum.threads_per_page],
'subforum_current_page':page+1,
'subforum_pages':range(max(page, 1), min(page+3, subforum_num_pages+1)),
'is_admin':user_has_permission(forum.admin_permission, request.user),
'is_moderator': is_mod,
'can_create_thread':can_create_thread and request.user.is_authenticated(),
}
return render(request, template, c)
else:
c = {
'forum_id':forum_id,
}
return render(request, CANT_VIEW_CONTENT, c)
raise Http404
@login_required
@csrf_protect
def newSubforum(request, forum_id, subforum_id, subforum_slug, template=FORM_TEMPLATE):
check_user_is_spamming(request.user)
forum = get_forum_instance(forum_id)
if forum:
subforum = get_subforum_instance(forum, subforum_id)
if subforum:
if not check_slug(subforum, subforum_slug):
return redirect('Forum.views.newSubforum', forum_id=forum_id, subforum_id=subforum_id, subforum_slug=subforum.slug())
if forum.canAdministrate(request.user):
if request.method == 'POST':
new_subforum_form = Subforum(forum=forum)
new_subforum_form = FormSubforum(request.POST, instance=new_subforum_form)
if new_subforum_form.is_valid():
new_subforum = new_subforum_form.save(commit=False)
new_subforum.local_id = forum.subforum_set.count()
new_subforum.parent = subforum
new_subforum.forum = forum
new_subforum.creator = request.user
new_subforum.save()
return redirect('subforum', forum_id=forum_id, subforum_id=new_subforum.local_id, subforum_slug=new_subforum.slug())
else:
new_subforum = Subforum(
forum = subforum.forum,
view_permission = subforum.view_permission,
mod_permission = subforum.mod_permission,
create_thread_permission = subforum.create_thread_permission,
reply_thread_permission = subforum.reply_thread_permission,
)
new_subforum_form = FormSubforum(instance=new_subforum)
c = {
'forum_id':forum_id,
'form': new_subforum_form,
'page_title': 'Create Subforum',
'title': 'Create Subforum',
'submit_btn_text': 'Create',
}
return render(request, template, c)
else:
c = {
'forum_id':forum_id,
}
return render(request, CANT_VIEW_CONTENT, c)
raise Http404
def thread(request, forum_id, thread_id, thread_slug, page=1, template=THREAD_TEMPLATE):
forum = get_forum_instance(forum_id)
if forum:
thread = get_thread_instance(forum, thread_id)
if thread:
if not check_slug(thread, thread_slug):
if page == 1:
return redirect('Forum.views.thread', forum_id=forum_id, thread_id=thread_id, thread_slug=thread.slug())
else:
return redirect('Forum.views.thread', forum_id=forum_id, thread_id=thread_id, thread_slug=thread.slug(), page=page)
subforum = thread.parent
is_mod = subforum.canModerate(request.user)
if subforum.canView(request.user) and (not thread.hidden or is_mod):
can_post = subforum.canReplyThread(request.user)
post_list = []
unfiltered_post_list = thread.post_set.order_by('local_id')
if not subforum.canModerate(request.user):
unfiltered_post_list = unfiltered_post_list.exclude(hidden=True)
for pt in unfiltered_post_list:
if request.user.is_authenticated():
pt.is_quoted = get_quote_instance(request.user, pt)
pt.vote = get_vote_instance(request.user, pt)
post_list.append(pt)
if request.user.is_authenticated() and thread.poll_set.count() and thread.poll_set.first().userCanVote(request.user):
poll = thread.poll_set.first()
else:
poll = None
page = int(page) -1
thread_num_pages = int(ceil(float(len(post_list))/float(forum.posts_per_page)))
if thread_num_pages > page and 0 <= page:
set_visit(thread, request.user)
thread.visit_counter += 1
thread.save()
c = {
'forum_id':forum_id,
'thread': thread,
'post_list':post_list[(page*forum.posts_per_page):(page*forum.posts_per_page)+forum.posts_per_page],
'thread_current_page':page+1,
'thread_pages':range(max(page, 1), min(page+3, thread_num_pages+1)),
'is_moderator': is_mod,
'is_admin':forum.canAdministrate(request.user),
'can_post':can_post and request.user.is_authenticated() and (not thread.closed or is_mod),
'poll': poll,
}
return render(request, template, c)
else:
c = {
'forum_id':forum_id,
}
return render(request, CANT_VIEW_CONTENT, c)
raise Http404
def threadLastPage(request, forum_id, thread_id, thread_slug):
forum = get_forum_instance(forum_id)
if forum:
thread = get_thread_instance(forum, thread_id)
if thread:
if not check_slug(thread, thread_slug):
return redirect('Forum.views.threadLastPage', forum_id=forum_id, thread_id=thread_id, thread_slug=thread.slug())
subforum = thread.parent
post_list = []
unfiltered_post_list = thread.post_set.order_by('local_id')
for pt in unfiltered_post_list:
if (not pt.hidden) or subforum.canModerate(request.user):
post_list.append(pt)
thread_num_pages = int(ceil(float(len(post_list))/float(forum.posts_per_page)))
page = thread_num_pages
return redirect('Forum.views.thread', forum_id=forum_id, thread_id=thread.local_id, thread_slug=thread.slug(), page=page)
raise Http404
@csrf_protect
def saveThreadSettings(request, forum_id, thread_id, thread_slug, template="Forum/forms/thread_settings.html"):
forum = get_forum_instance(forum_id)
if forum:
thread = get_thread_instance(forum, thread_id)
if thread:
if not check_slug(thread, thread_slug):
return redirect('Forum.views.saveThreadSettings', forum_id=forum_id, thread_id=thread_id, thread_slug=thread.slug())
if thread.parent.canModerate(request.user):
if (request.method == 'POST'):
form = FormThreadSettings(request.POST, instance=thread)
if form.is_valid():
thread.save()
return redirect('Forum.views.thread', forum_id=forum_id, thread_id=thread_id, thread_slug=thread.slug())
else:
form = FormThreadSettings(instance=thread)
c = {
'forum_id':forum_id,
'form': form,
'thread': thread,
}
return render(request, template, c)
raise Http404
@login_required
def firstPostUnreadThread(request, forum_id, thread_id, thread_slug):
forum = get_forum_instance(forum_id)
if forum:
thread = get_thread_instance(forum, thread_id)
if thread:
if not check_slug(thread, thread_slug):
return redirect('Forum.views.firstPostUnreadThread', forum_id=forum_id, thread_id=thread_id, thread_slug=thread.slug())
last_visit = get_last_visit_instance(request.user, thread)
if last_visit:
last_post = Post.objects.order_by('publication_datetime').filter(thread=thread, publication_datetime__gt=last_visit.datetime).first()
if last_post:
return redirect('Forum.views.post', forum_id=forum_id, post_id=last_post.local_id)
print("shiet")
return redirect('Forum.views.post', forum_id=forum.local_id, post_id=thread.getLastPublishedPost().local_id)
raise Http404
@login_required
@csrf_protect
def newThread(request, forum_id, subforum_id, subforum_slug, template="Forum/forms/thread.html"):
check_user_is_spamming(request.user)
forum = get_forum_instance(forum_id)
if forum:
subforum = get_subforum_instance(forum, subforum_id)
if subforum:
if not check_slug(subforum, subforum_slug):
return redirect('Forum.views.newThread', forum_id=forum_id, subforum_id=subforum_id, subforum_slug=subforum.slug())
if subforum.canCreateThread(request.user):
if request.method == 'POST':
new_post = Post(publisher=request.user)
new_post_form = FormNewThread(request.POST, instance=new_post)
if new_post_form.is_valid():
new_post = new_post_form.save(commit=False)
new_post.local_id = forum.post_set.count()
new_thread = Thread(
local_id=forum.thread_set.count(),
name=new_post.title,
parent=subforum,
forum=forum,
creator=request.user,
last_publication_datetime=datetime.now(),
hidden=new_post.hidden,
)
new_thread.save()
if request.POST.get("add_poll", "False") == "True" and request.POST.get("question", "") != "":
rang = range(0, int(request.POST.get("poll_option_count", "2")))
question = request.POST.get("question")
option_list = []
for i in rang:
opt = request.POST.get("poll-option["+str(i)+"]", "")
if opt != "":
option_list.append(opt)
if len(option_list) >= 2:
new_thread.setPoll(question, option_list)
new_post.hidden=False
new_post.forum=forum
new_post.thread=new_thread
new_post.save()
# Send new thread signal
signals.thread_published.send(sender=forum, thread=new_thread)
return redirect('Forum.views.thread', forum_id=forum_id, thread_id=new_thread.local_id, thread_slug=new_thread.slug())
else:
new_post = Post()
new_post_form = FormNewThread(instance=new_post)
c = {
'forum_id':forum_id,
'form': new_post_form,
'page_title': 'New Thread',
'title': 'New Thread',
'submit_btn_text': 'Create',
}
return render(request, template, c)
else:
c = {
'forum_id':forum_id,
}
return render(request, CANT_VIEW_CONTENT, c)
raise Http404
@login_required
@csrf_protect
def replyThread(request, forum_id, thread_id, thread_slug, template="Forum/forms/post.html", template_ajax="Forum/forms/ajax/post.html"):
check_user_is_spamming(request.user)
forum = get_forum_instance(forum_id)
if forum:
thread = get_thread_instance(forum, thread_id)
if thread and (not thread.closed or thread.parent.canModerate(request.user)):
if not check_slug(thread, thread_slug):
return redirect('Forum.views.replyThread', forum_id=forum_id, thread_id=thread_id, thread_slug=thread.slug())
if thread.parent.canReplyThread(request.user) and request.user.is_authenticated():
if request.method == 'POST':
new_post = Post(publisher=request.user)
if thread.parent.canModerate(request.user):
new_post_form = FormPost_Mod(request.POST, instance=new_post)
else:
new_post_form = FormPost(request.POST, instance=new_post)
if new_post_form.is_valid():
new_post = new_post_form.save(commit=False)
new_post.local_id = forum.post_set.count()
new_post.forum=forum
new_post.thread=thread
new_post.save()
# Send signal new post published
signals.post_published.send(sender=forum, post=new_post)
thread.last_publication_datetime=new_post.publication_datetime
thread.save()
quote_list = Quote.objects.filter(user=request.user, thread=thread)
for quote in quote_list:
quote.delete()
return redirect('Forum.views.post', forum_id=forum_id, post_id=new_post.local_id)
else:
new_post = Post()
quotes_text = ""
quote_list = Quote.objects.filter(user=request.user, thread=thread)
for quote in quote_list:
quotes_text += "[quote="+quote.post.publisher.username+"]"+quote.post.content+"[/quote]\n\n"
new_post.content = quotes_text
if thread.parent.canModerate(request.user):
new_post_form = FormPost_Mod(instance=new_post)
else:
new_post_form = FormPost(instance=new_post)
if request.is_ajax():
template = template_ajax
c = {
'forum_id':forum_id,
'form': new_post_form,
'thread':thread,
}
else:
c = {
'forum_id':forum_id,
'form': new_post_form,
'page_title': 'Reply Thread',
'title': 'Reply Thread',
'submit_btn_text': 'Send',
}
return render(request, template, c)
else:
c = {
'forum_id':forum_id,
}
return render(request, CANT_VIEW_CONTENT, c)
raise Http404
@login_required
@csrf_protect
def voteThreadPoll(request, forum_id, thread_id, thread_slug):
forum = get_forum_instance(forum_id)
if forum:
thread = get_thread_instance(forum, thread_id)
if thread:
if not check_slug(thread, thread_slug):
return redirect('Forum.views.voteThreadPoll', forum_id=forum_id, thread_id=thread_id, thread_slug=thread.slug())
subforum = thread.parent
is_mod = subforum.canModerate(request.user)
if subforum.canView(request.user) and (not thread.hidden or is_mod):
if thread.poll:
if thread.poll.userCanVote(request.user) and request.method == 'POST':
answer = request.POST.get("poll_answer", False)
if answer:
thread.poll.vote(request.user, answer)
return redirect('Forum.views.thread', forum_id=forum_id, thread_id=thread_id, thread_slug=thread.slug())
def post(request, forum_id, post_id):
forum = get_forum_instance(forum_id)
if forum:
post = get_post_instance(forum, post_id)
if post:
thread = post.thread
post_list = thread.post_set.order_by('local_id')
num = 0
found = False
for pt in post_list:
if pt == post:
found = True
break
num += 1
if found:
page = (num/forum.posts_per_page)+1
return redirect('Forum.views.thread', forum_id=forum_id, thread_id=post.thread.local_id, thread_slug=post.thread.slug(), page=page, post_id=post_id)
raise Http404
@login_required
@csrf_protect
def editPost(request, forum_id, post_id, template="Forum/forms/edit_post.html", template_ajax="Forum/forms/ajax/edit_post.html"):
check_user_is_spamming(request.user)
forum = get_forum_instance(forum_id)
if forum:
post = get_post_instance(forum, post_id)
if post and post.thread.parent.canView(request.user):
post_old_title = post.title
post_old_content = post.content
if request.method == 'POST':
if post.thread.parent.canModerate(request.user):
edit_post_form = FormPost_Mod(request.POST, instance=post)
else:
edit_post_form = FormPost(request.POST, instance=post)
if edit_post_form.is_valid():
post_edited = PostEdited(
post=post,
user=request.user,
datetime=datetime.now(),
reason='',
old_title=post_old_title,
old_content=post_old_content,
user_is_moderator = post.thread.parent.canModerate(request.user),
user_is_administrator = forum.canAdministrate(request.user),
)
post = edit_post_form.save(commit=False)
if post.thread.post_set.first() == post:
if post.title == "":
post.title = post_old_title
post.thread.name = post.title
post.thread.save()
post_edited.save()
post.save()
return redirect('Forum.views.post', forum_id=forum_id, post_id=post.local_id)
else:
if post.thread.parent.canModerate(request.user):
edit_post_form = FormPost_Mod(instance=post)
elif post.publisher == request.user:
edit_post_form = FormPost(instance=post)
else:
c = {
'forum_id':forum_id,
}
return render(request, CANT_VIEW_CONTENT, c)
c = {
'forum_id':forum_id,
'form': edit_post_form,
'post':post,
'user_is_mod':user_has_permission(post.thread.parent.mod_permission, request.user),
}
if request.is_ajax():
return render(request, template_ajax, c)
else:
return render(request, template, c)
raise Http404
@login_required
@csrf_protect
def reportPost(request, forum_id, post_id, template="Forum/forms/report_post.html", template_ajax="Forum/forms/ajax/report_post.html"):
check_user_is_spamming(request.user)
forum = get_forum_instance(forum_id)
if forum:
post = get_post_instance(forum, post_id)
if post and post.thread.parent.canView(request.user):
if request.method == 'POST':
report_post_form = FormReportPost(request.POST)
if report_post_form.is_valid():
report_post = report_post_form.save(commit=False)
report_post.user = request.user
report_post.post = post
report_post.save()
return redirect('Forum.views.post', forum_id=forum_id, post_id=post.local_id)
else:
report_post_form = FormReportPost()
c = {
'forum_id':forum_id,
'form': report_post_form,
'post': post,
}
if request.is_ajax():
return render(request, template_ajax, c)
else:
return render(request, template, c)
raise Http404
@login_required
def quotePost(request, forum_id, post_id):
forum = get_forum_instance(forum_id)
if forum:
post = get_post_instance(forum, post_id)
if post and post.thread.parent.canView(request.user):
quote = get_quote_instance(request.user, post)
response_data = {}
if quote:
quote.delete()
response_data['action'] = 'removed'
else:
Quote(user=request.user, post=post, thread=post.thread).save()
response_data['action'] = 'added'
return HttpResponse(json.dumps(response_data), content_type="application/json")
raise Http404
@login_required
def votePostUp(request, forum_id, post_id):
forum = get_forum_instance(forum_id)
if forum and forum.allow_up_votes:
post = get_post_instance(forum, post_id)
if post and post.thread.parent.canView(request.user):
vote = get_vote_instance(request.user, post)
response_data = {}
if vote:
if vote.type == "Up":
vote.delete()
response_data['action'] = 'removed'
else:
vote.type = "Up"
vote.save()
response_data['action'] = 'added'
else:
Vote(user=request.user, post=post, type="Up").save()
response_data['action'] = 'added'
# Send signal
signals.upvote.send(sender=forum, user=request.user, post=post)
if not post.score_event_sent and post.score() >= forum.positive_score_event:
post.score_event_sent = True
post.save()
signals.positive_score_event.send(sender=forum, post=post)
response_data['score'] = post.score()
return HttpResponse(json.dumps(response_data), content_type="application/json")
raise Http404
@login_required
def votePostDown(request, forum_id, post_id):
forum = get_forum_instance(forum_id)
if forum and forum.allow_down_votes:
post = get_post_instance(forum, post_id)
if post and post.thread.parent.canView(request.user):
vote = get_vote_instance(request.user, post)
response_data = {}
if vote:
if vote.type == "Down":
vote.delete()
response_data['action'] = 'removed'
elif vote.type == "Up":
vote.type = "Down"
vote.save()
response_data['action'] = 'added'
else:
Vote(user=request.user, post=post, type="Down").save()
response_data['action'] = 'added'
# Send signal
signals.downvote.send(sender=forum, user=request.user, post=post)
if not post.score_event_sent and post.score() <= forum.negative_score_event:
post.score_event_sent = True
post.save()
signals.negative_score_event.send(sender=forum, post=post)
response_data['score'] = post.score()
return HttpResponse(json.dumps(response_data), content_type="application/json")
raise Http404
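# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original views.py): a minimal urls.py showing one
# way these views might be wired up. The URL names ('base_forum', 'subforum', 'thread',
# 'post', ...) come from the redirect() calls above; the regex patterns themselves are
# assumptions for illustration only (old-style Django 1.x urlconf, consistent with the
# request.user.is_authenticated() calls used throughout this module). Kept commented
# out because it belongs in a separate urls.py, not in views.py.
#
# from django.conf.urls import url
# from Forum import views
#
# urlpatterns = [
#     url(r'^(?P<forum_id>\d+)/$', views.forum, name='base_forum'),
#     url(r'^(?P<forum_id>\d+)/subforum/(?P<subforum_id>\d+)/(?P<subforum_slug>[-\w]+)/$',
#         views.subforum, name='subforum'),
#     url(r'^(?P<forum_id>\d+)/thread/(?P<thread_id>\d+)/(?P<thread_slug>[-\w]+)/$',
#         views.thread, name='thread'),
#     url(r'^(?P<forum_id>\d+)/thread/(?P<thread_id>\d+)/(?P<thread_slug>[-\w]+)/(?P<page>\d+)/$',
#         views.thread, name='thread'),
#     url(r'^(?P<forum_id>\d+)/post/(?P<post_id>\d+)/$', views.post, name='post'),
# ]
# ---------------------------------------------------------------------------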
| 2.03125 | 2 |
spinup/algos/ude_td3_new/uncertainty_on_rnd.py | LinghengMeng/spinningup | 0 | 12794854 | <reponame>LinghengMeng/spinningup
import numpy as np
import numpy.matlib  # required so that np.matlib.repmat (used in the post-sampling step below) is available
import tensorflow as tf
import time
from spinup.algos.ude_td3_new.core import MLP, BeroulliDropoutMLP, BootstrappedEnsemble, get_vars, count_vars
from spinup.algos.ude_td3_new.replay_buffer import ReplayBuffer, RandomNetReplayBuffer
from spinup.utils.logx import EpochLogger, Logger
class UncertaintyOnRandomNetwork(object):
def __init__(self, x_dim, y_dim, hidden_sizes, post_sample_size,
logger_kwargs, loger_file_name='unc_on_random_net.txt'):
"""
:param x_dim: input size
:param y_dim: output size
:param hidden_sizes: hidden layer sizes
"""
self.replay_size = int(1e6)
self.learning_rate = 1e-3
self.mlp_kernel_regularizer = None # tf.keras.regularizers.l2(l=0.01)
self.bernoulli_dropout_weight_regularizer = 1e-6
self.dropout_rate = 0.05
self.ensemble_size = post_sample_size
self.post_sample_size = post_sample_size
self.batch_size = 100
self.bootstrapp_p = 0.75 # probability used to add to replay buffer
self.x_dim = x_dim
self.y_dim = y_dim
self.layer_sizes = hidden_sizes + [y_dim]
self.x_ph = tf.placeholder(dtype=tf.float32, shape=(None, x_dim))
self.y_ph = tf.placeholder(dtype=tf.float32, shape=(None, y_dim))
######################################################################################################
# Define random target network
# Note: initialize RNT weights far away from 0 and keep fixed
random_net = MLP(self.layer_sizes,
kernel_initializer=tf.keras.initializers.random_uniform(minval=-0.8, maxval=0.8),
bias_initializer=tf.keras.initializers.random_uniform(minval=-0.8, maxval=0.8),
hidden_activation=tf.keras.activations.relu, output_activation=tf.keras.activations.sigmoid)
self.random_net_y = random_net(self.x_ph) # target y
######################################################################################################
# Define LazyBernoulliDropout MLP
# 1. Create MLP to learn RTN: which is only used for LazyBernoulliDropoutMLP.
self.mlp_replay_buffer = RandomNetReplayBuffer(self.x_dim, self.y_dim, size=self.replay_size)
with tf.variable_scope('MLP'):
mlp = MLP(self.layer_sizes, kernel_regularizer=self.mlp_kernel_regularizer,
hidden_activation=tf.keras.activations.relu, output_activation=tf.keras.activations.sigmoid)
mlp_y = mlp(self.x_ph)
self.mlp_loss = tf.reduce_mean((self.y_ph - mlp_y) ** 2) # mean-square-error
mlp_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
self.mlp_train_op = mlp_optimizer.minimize(self.mlp_loss, var_list=get_vars('MLP'))
# 2. Create lazy BernoulliDropoutMLP:
# which copys weights from MLP by
# sess.run(lazy_ber_drop_mlp_update)
# , then post sample predictions with dropout masks.
with tf.variable_scope('LazyBernoulliDropoutUncertaintySample'):
# define placeholder for parallel sampling
# batch x n_post x dim
lazy_bernoulli_dropout_mlp = BeroulliDropoutMLP(self.layer_sizes, weight_regularizer=1e-6,
dropout_rate=self.dropout_rate,
hidden_activation=tf.keras.activations.relu,
output_activation=tf.keras.activations.sigmoid)
self.lazy_ber_drop_mlp_y = lazy_bernoulli_dropout_mlp(self.x_ph,
training=True) # Set training=True to sample with dropout masks
self.lazy_ber_drop_mlp_update = tf.group([tf.assign(v_lazy_ber_drop_mlp, v_mlp)
for v_mlp, v_lazy_ber_drop_mlp in
zip(mlp.variables, lazy_bernoulli_dropout_mlp.variables)])
######################################################################################################
# Define BernoulliDropout MLP:
# which is trained with dropout masks and regularization term
with tf.variable_scope('BernoulliDropoutUncertaintyTrain'):
bernoulli_dropout_mlp = BeroulliDropoutMLP(self.layer_sizes,
weight_regularizer=self.bernoulli_dropout_weight_regularizer,
dropout_rate=self.dropout_rate,
hidden_activation=tf.keras.activations.relu,
output_activation=tf.keras.activations.sigmoid)
self.ber_drop_mlp_y = bernoulli_dropout_mlp(self.x_ph,
training=True) # Must set training=True to use dropout mask
ber_drop_mlp_reg_losses = tf.reduce_sum(
tf.losses.get_regularization_losses(scope='BernoulliDropoutUncertaintyTrain'))
self.ber_drop_mlp_loss = tf.reduce_mean(
(self.y_ph - self.ber_drop_mlp_y) ** 2 + ber_drop_mlp_reg_losses) # TODO: heteroscedastic loss
ber_drop_mlp_optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
self.ber_drop_mlp_train_op = ber_drop_mlp_optimizer.minimize(self.ber_drop_mlp_loss,
var_list=get_vars(
'BernoulliDropoutUncertaintyTrain'))
######################################################################################################
# Define BootstrappedEnsemble
# Create BootstrappedEnsembleNN
with tf.variable_scope('BootstrappedEnsembleUncertainty'):
self.boots_ensemble = BootstrappedEnsemble(ensemble_size=self.ensemble_size,
x_dim=self.x_dim, y_dim=self.y_dim,
replay_size=self.replay_size,
x_ph=self.x_ph, y_ph=self.y_ph,
layer_sizes=self.layer_sizes,
kernel_regularizer=self.mlp_kernel_regularizer,
learning_rate=self.learning_rate)
######################################################################################################
# Define logger
self.uncertainty_on_random_net_logger = Logger(output_fname=loger_file_name, **logger_kwargs)
def get_predError_and_uncerEstimate_on_policy_based_input(self, input, sess, t, start_time):
x = input
# Generate target for the selected input
y = sess.run(self.random_net_y, feed_dict={self.x_ph: x.reshape(1, -1)})[0]
# store the (input, target) to replay buffer
self.mlp_replay_buffer.store(x, y)
# add (x, y) to ensemble's replay buffer with probability bootstrapp_p
self.boots_ensemble.add_to_replay_buffer(x, y, self.bootstrapp_p)
if t<self.batch_size:
lazy_ber_drop_mlp_pred_error = 0
ber_drop_mlp_pred_error = 0
boots_ensemble_pred_error = 0
lazy_ber_drop_mlp_postSample_unc = 0
ber_drop_mlp_postSample_unc = 0
boots_ensemble_preds_unc = 0
lazy_ber_drop_mlp_loss = 0
ber_drop_mlp_loss = 0
boots_ensemble_loss = 0
else:
###########################################################################################
# Post Sample and estimate uncertainty
x_postSampling = np.matlib.repmat(x, self.post_sample_size, 1) # repmat x for post sampling
# LazyBernoulliDropoutMLP
lazy_ber_drop_mlp_postSample = sess.run(self.lazy_ber_drop_mlp_y, feed_dict={self.x_ph: x_postSampling})
lazy_ber_drop_mlp_pred = np.mean(lazy_ber_drop_mlp_postSample, axis=0)
lazy_ber_drop_mlp_pred_error = np.linalg.norm((y - lazy_ber_drop_mlp_pred), ord=2)
lazy_ber_drop_mlp_postSample_cov = np.cov(lazy_ber_drop_mlp_postSample, rowvar=False)
lazy_ber_drop_mlp_postSample_unc = np.sum(np.diag(lazy_ber_drop_mlp_postSample_cov))
# BernoulliDropoutMLP
ber_drop_mlp_postSample = sess.run(self.ber_drop_mlp_y, feed_dict={self.x_ph: x_postSampling})
ber_drop_mlp_pred = np.mean(ber_drop_mlp_postSample, axis=0)
ber_drop_mlp_pred_error = np.linalg.norm((y - ber_drop_mlp_pred), ord=2)
ber_drop_mlp_postSample_cov = np.cov(ber_drop_mlp_postSample, rowvar=False)
ber_drop_mlp_postSample_unc = np.sum(np.diag(ber_drop_mlp_postSample_cov))
# BootstrappedEnsemble
boots_ensemble_preds = self.boots_ensemble.prediction(sess, x)
boots_ensemble_preds_pred = np.mean(boots_ensemble_preds, axis=0)
boots_ensemble_pred_error = np.linalg.norm((y - boots_ensemble_preds_pred), ord=2)
boots_ensemble_preds_cov = np.cov(boots_ensemble_preds, rowvar=False)
boots_ensemble_preds_unc = np.sum(np.diag(boots_ensemble_preds_cov))
########################################################################################
# train
lazy_ber_drop_mlp_loss, ber_drop_mlp_loss, boots_ensemble_loss = self._train(sess)
########################################################################################
# log data
self.uncertainty_on_random_net_logger.log_tabular('Step', t)
self.uncertainty_on_random_net_logger.log_tabular('LBDPredError', lazy_ber_drop_mlp_pred_error)
self.uncertainty_on_random_net_logger.log_tabular('BDPredError', ber_drop_mlp_pred_error)
self.uncertainty_on_random_net_logger.log_tabular('BEPredError', boots_ensemble_pred_error)
self.uncertainty_on_random_net_logger.log_tabular('LBDUnc', lazy_ber_drop_mlp_postSample_unc)
self.uncertainty_on_random_net_logger.log_tabular('BDUnc', ber_drop_mlp_postSample_unc)
self.uncertainty_on_random_net_logger.log_tabular('BEUnc', boots_ensemble_preds_unc)
self.uncertainty_on_random_net_logger.log_tabular('LBDLoss', lazy_ber_drop_mlp_loss)
self.uncertainty_on_random_net_logger.log_tabular('BDLoss', ber_drop_mlp_loss)
self.uncertainty_on_random_net_logger.log_tabular('BELoss', boots_ensemble_loss)
self.uncertainty_on_random_net_logger.log_tabular('Time', time.time() - start_time)
self.uncertainty_on_random_net_logger.dump_tabular(print_data=False)
return [lazy_ber_drop_mlp_pred_error, ber_drop_mlp_pred_error, boots_ensemble_pred_error,
lazy_ber_drop_mlp_postSample_unc, ber_drop_mlp_postSample_unc, boots_ensemble_preds_unc,
lazy_ber_drop_mlp_loss, ber_drop_mlp_loss, boots_ensemble_loss]
def _train(self, sess):
# Train MLP
mlp_batch = self.mlp_replay_buffer.sample_batch(self.batch_size)
mlp_outs = sess.run([self.mlp_loss, self.mlp_train_op], feed_dict={self.x_ph: mlp_batch['x'],
self.y_ph: mlp_batch['y']})
sess.run(self.lazy_ber_drop_mlp_update)
# Train BernoulliDropoutMLP on the same batch with MLP
ber_drop_outs = sess.run([self.ber_drop_mlp_loss, self.ber_drop_mlp_train_op],
feed_dict={self.x_ph: mlp_batch['x'], self.y_ph: mlp_batch['y']})
# Train BootstrappedEnsemble
boots_ensemble_loss = self.boots_ensemble.train(sess, self.batch_size)
return mlp_outs[0], ber_drop_outs[0], boots_ensemble_loss.mean()
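# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). It shows how the class above
# might be driven: build it, initialise the TF variables, then feed one input per
# timestep. The dimensions, the random input stream and the logger output_dir are
# made-up assumptions; real code feeds states coming from the TD3 policy.
if __name__ == '__main__':
    demo_logger_kwargs = dict(output_dir='/tmp/unc_on_rnd_demo')  # assumed kwarg accepted by Logger
    unc = UncertaintyOnRandomNetwork(x_dim=8, y_dim=2, hidden_sizes=[64, 64],
                                     post_sample_size=10, logger_kwargs=demo_logger_kwargs)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        start_time = time.time()
        for t in range(300):
            x = np.random.uniform(-1.0, 1.0, size=8)  # stand-in for a policy-based input
            unc.get_predError_and_uncerEstimate_on_policy_based_input(x, sess, t, start_time)
# ---------------------------------------------------------------------------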
| 1.984375 | 2 |
venv/lib/python2.7/dist-packages/landscape/monitor/plugin.py | pengwu/scapy_env | 0 | 12794855 | <reponame>pengwu/scapy_env
from logging import info
from twisted.internet.defer import succeed
from landscape.log import format_object
from landscape.lib.log import log_failure
from landscape.broker.client import BrokerClientPlugin
class MonitorPlugin(BrokerClientPlugin):
"""
@cvar persist_name: If specified as a string, a C{_persist} attribute
will be available after registration.
"""
persist_name = None
scope = None
def register(self, monitor):
super(MonitorPlugin, self).register(monitor)
if self.persist_name is not None:
self._persist = self.monitor.persist.root_at(self.persist_name)
else:
self._persist = None
def _reset(self):
if self.persist_name is not None:
self.registry.persist.remove(self.persist_name)
@property
def persist(self):
"""Return our L{Persist}, if any."""
return self._persist
@property
def monitor(self):
"""An alias for the C{client} attribute."""
return self.client
class DataWatcher(MonitorPlugin):
"""
A utility for plugins which send data to the Landscape server
which does not constantly change. New messages will only be sent
when the result of get_data() has changed since the last time it
was called.
Subclasses should provide a get_data method, and message_type,
message_key, and persist_name class attributes.
"""
message_type = None
message_key = None
def get_message(self):
"""
Construct a message with the latest data, or None, if the data
has not changed since the last call.
"""
data = self.get_data()
if self._persist.get("data") != data:
self._persist.set("data", data)
return {"type": self.message_type, self.message_key: data}
def send_message(self, urgent):
message = self.get_message()
if message is not None:
info("Queueing a message with updated data watcher info "
"for %s.", format_object(self))
result = self.registry.broker.send_message(
message, self._session_id, urgent=urgent)
def persist_data(message_id):
self.persist_data()
result.addCallback(persist_data)
result.addErrback(log_failure)
return result
return succeed(None)
def persist_data(self):
"""
Sub-classes that need to defer the saving of persistent data
should override this method.
"""
pass
def exchange(self, urgent=False):
"""
Conditionally add a message to the message store if new data
is available.
"""
return self.registry.broker.call_if_accepted(self.message_type,
self.send_message, urgent)
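# ---------------------------------------------------------------------------
# Editor's example (not part of the original module): a minimal DataWatcher subclass.
# The message type/key and the run()/run_interval scheduling hook are assumptions for
# illustration; a real plugin must use a message type the Landscape server accepts.
class ExampleCpuCountWatcher(DataWatcher):
    """Send the CPU count, but only when the value actually changes."""

    persist_name = "example-cpu-count"
    message_type = "example-cpu-count"
    message_key = "cpu-count"
    run_interval = 60  # seconds between runs (assumed BrokerClientPlugin attribute)

    def get_data(self):
        # Data that rarely changes, so most exchanges queue nothing.
        with open("/proc/cpuinfo") as fd:
            return str(fd.read().count("processor"))

    def run(self, urgent=False):
        return self.exchange(urgent)
# ---------------------------------------------------------------------------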
| 2.3125 | 2 |
Desafio 4/main.py | nathaliadv/AceleraDevDS_Codenation | 1 | 12794856 | #!/usr/bin/env python
# coding: utf-8
# # Desafio 4
#
# In this challenge we will practice a bit of hypothesis testing. We will use the [2016 Olympics in Rio de Janeiro](https://www.kaggle.com/rio2016/olympic-games/) _data set_, which contains data about the athletes of the 2016 Olympic Games in Rio de Janeiro.
#
# This _data set_ provides general information about 11538 athletes, such as name, nationality, height, weight and sport practised. We are especially interested in the numerical variables height (`height`) and weight (`weight`). The analyses done here are part of an Exploratory Data Analysis (EDA).
#
# > Note: please do not change the names of the answer functions.
# ## General _setup_
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sct
import seaborn as sns
import statsmodels.api as sm
# In[2]:
#%matplotlib inline
from IPython.core.pylabtools import figsize
figsize(12, 8)
sns.set()
# In[3]:
athletes = pd.read_csv("athletes.csv")
# In[4]:
athletes.info()
# In[5]:
athletes.head()
# In[6]:
athletes[['height','weight']].describe()
# In[7]:
athletes[['height','weight']].hist()
# In[8]:
def get_sample(df, col_name, n=100, seed=42):
"""Get a sample from a column of a dataframe.
It drops any numpy.nan entries before sampling. The sampling
is performed without replacement.
Example of numpydoc for those who haven't seen yet.
Parameters
----------
df : pandas.DataFrame
Source dataframe.
col_name : str
Name of the column to be sampled.
n : int
Sample size. Default is 100.
seed : int
Random seed. Default is 42.
Returns
-------
pandas.Series
Sample of size n from dataframe's column.
"""
np.random.seed(seed)
random_idx = np.random.choice(df[col_name].dropna().index, size=n, replace=False) # returns an array of sampled row indexes
return df.loc[random_idx, col_name] # returns a Series with those indexes and the column values
# ## Start your analysis from here
# In[9]:
# Your analysis starts here.
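# In[ ]:
# Illustrative only: a quick look at a reproducible sample drawn with get_sample(),
# mirroring the sample size and seed used in the questions below.
sample_height = get_sample(athletes, 'height', n=3000, seed=42)
sample_height.describe()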
# ## Question 1
#
# Considering a sample of size 3000 from the `height` column obtained with the `get_sample()` function, run the Shapiro-Wilk normality test with the `scipy.stats.shapiro()` function. Based on this test, can we state that the heights are normally distributed (at a 5% significance level)? Answer with a boolean (`True` or `False`).
# In[10]:
def q1():
amostra_q1 = get_sample(athletes,'height', n=3000, seed=42)
stat, p = sct.shapiro(amostra_q1)
print('stat= {}, p={}'.format(stat,p))
return bool(p> 0.05)
# In[11]:
q1()
# __Food for thought__:
#
# * Plot the histogram of this variable (with, for example, `bins=25`). Are the shape of the plot and the test result consistent? Why?
# * Plot the qq-plot for this variable and analyse it.
# * Is there any reasonable significance level that would give us a different test result? (Do not do this in practice. It is called _p-value hacking_, and it is not cool.)
# In[12]:
amostra_q1 = get_sample(athletes,'height', n=3000, seed=42)
# In[13]:
sns.distplot(amostra_q1, bins=25, hist_kws={"density": True})
plt.show ()
# In[14]:
sm.qqplot(amostra_q1, fit=True, line="45")
plt.show ()
# In[15]:
amostra_q1 = get_sample(athletes,'height', n=3000, seed=42)
stat, p = sct.shapiro(amostra_q1)
p > 0.0000001
# ## Question 2
#
# Repeat the same procedure as above, but now using the Jarque-Bera normality test through the `scipy.stats.jarque_bera()` function. Can we now state that the heights are normally distributed (at a 5% significance level)? Answer with a boolean (`True` or `False`).
# In[16]:
def q2():
amostra_q2 = get_sample(athletes,'height', n=3000, seed=42)
stat, p = sct.jarque_bera(amostra_q2)
print('stat= {}, p={}'.format(stat,p))
return bool(p> 0.05)
# In[17]:
q2()
# __Food for thought__:
#
# * Does this result make sense?
# In[18]:
amostra_q2 = get_sample(athletes,'height', n=3000, seed=42)
sm.qqplot(amostra_q2, fit=True, line="45")
plt.show ()
# ## Question 3
#
# Now consider a sample of size 3000 from the `weight` column obtained with the `get_sample()` function. Run the D'Agostino-Pearson normality test using the `scipy.stats.normaltest()` function. Can we state that the weights come from a normal distribution at a 5% significance level? Answer with a boolean (`True` or `False`).
# In[19]:
def q3():
amostra_q3 = get_sample(athletes,'weight', n=3000, seed=42)
stat, p = sct.normaltest(amostra_q3)
print('stat= {}, p={}'.format(stat,p))
return bool(p> 0.05)
# In[20]:
q3()
# __Food for thought__:
#
# * Plot the histogram of this variable (with, for example, `bins=25`). Are the shape of the plot and the test result consistent? Why?
# * A _box plot_ could also help to understand the answer.
# In[21]:
amostra_q3 = get_sample(athletes,'weight', n=3000, seed=42)
sns.distplot(amostra_q3, bins=25, hist_kws={"density": True})
plt.show ()
# In[22]:
sns.boxplot(data = amostra_q3)
# ## Question 4
#
# Apply a logarithmic transformation to the `weight` sample from question 3 and repeat the same procedure. Can we state that the transformed variable is normal at a 5% significance level? Answer with a boolean (`True` or `False`).
# In[23]:
def q4():
amostra_q4 = get_sample(athletes,'weight', n=3000, seed=42)
amostra_q4_transformada = np.log(amostra_q4)
stat, p = sct.normaltest(amostra_q4_transformada)
print('stat= {}, p={}'.format(stat,p))
return bool(p> 0.05)
# In[24]:
q4()
# __Food for thought__:
#
# * Plot the histogram of this variable (with, for example, `bins=25`). Are the shape of the plot and the test result consistent? Why?
# * Did you expect a different result this time?
# In[25]:
amostra_q4 = get_sample(athletes,'weight', n=3000, seed=42)
amostra_q4_transformada = np.log(amostra_q4)
sns.distplot(amostra_q4_transformada, bins=25, hist_kws={"density": True})
plt.show ()
# In[26]:
sns.boxplot(data = amostra_q4_transformada)
# > __For questions 5, 6 and 7 below, consider all tests performed at a 5% significance level__.
# ## Question 5
#
# Get all Brazilian, North American (US) and Canadian athletes into `DataFrame`s named `bra`, `usa` and `can`, respectively. Run a hypothesis test comparing the mean heights (`height`) for independent samples with different variances, using the `scipy.stats.ttest_ind()` function between `bra` and `usa`. Can we state that the means are statistically equal? Answer with a boolean (`True` or `False`).
# In[27]:
athletes.columns
# In[45]:
athletes[(athletes.nationality == 'BRA') | (athletes.nationality == 'USA') | (athletes.nationality == 'CAN')]
# In[28]:
bra = athletes[athletes.nationality == 'BRA']
usa = athletes[athletes.nationality == 'USA']
can = athletes[athletes.nationality == 'CAN']
# In[29]:
bra['height'].describe()
# In[30]:
bra.isna().sum()
# In[31]:
usa['height'].describe()
# In[32]:
usa.isna().sum()
# In[46]:
can['height'].describe()
# In[47]:
can.isna().sum()
# In[33]:
def q5():
stat, p = sct.ttest_ind(bra['height'], usa['height'], equal_var = False, nan_policy = 'omit') # equal_var=False runs Welch's t-test, which does not assume equal population variances
print('stat= {}, p={}'.format(stat,p))
return bool(p> 0.05)
# In[34]:
q5()
# In[35]:
sns.distplot(bra['height'], bins=25, hist=False, rug=True, label='BRA')
sns.distplot(usa['height'], bins=25, hist=False, rug=True, label='USA')
# ## Question 6
#
# Repeat the procedure from question 5, but now between the heights of `bra` and `can`. Can we now state that the means are statistically equal? Answer with a boolean (`True` or `False`).
# In[48]:
def q6():
stat, p = sct.ttest_ind(bra['height'], can['height'], equal_var = False, nan_policy = 'omit') # Welch's t-test: does not assume equal population variances
print('stat= {}, p={}'.format(stat,p))
return bool(p> 0.05)
# In[49]:
q6()
# In[50]:
sns.distplot(bra['height'], bins=25, hist=False, rug=True, label='BRA')
sns.distplot(can['height'], bins=25, hist=False, rug=True, label='CAN')
# ## Question 7
#
# Repeat the procedure from question 6, but now between the heights of `usa` and `can`. What is the returned p-value? Answer as a single scalar rounded to eight decimal places.
# In[87]:
def q7():
stat, p = sct.ttest_ind(usa['height'], can['height'], equal_var = False, nan_policy = 'omit') # Welch's t-test: does not assume equal population variances
print('stat= {}, p={}'.format(stat,p))
if p > 0.05:
print('Probably the same distribution')
else:
print('Probably different distributions')
return float(np.round(p, 8))
# In[88]:
q7()
# __Food for thought__:
#
# * Does the result make sense?
# * Can you interpret this p-value?
# * Can you get to this p-value starting from the test statistic?
# In[72]:
stat, p = sct.ttest_ind(usa['height'], can['height'], equal_var = True, nan_policy = 'omit')
print('stat= {}, p={}'.format(stat,p))
# In[69]:
# degrees of freedom for the independent t-test with similar variances: df = n1 + n2 - 2
gl = len(usa) + len(can) - 2
print(f"Degrees of freedom: {gl}")
q7_sf = sct.t.sf(stat, gl)*2 # two-tailed hypothesis
print(q7_sf)
# In[77]:
sns.distplot(usa['height'], bins=25, hist=False, rug=True, label='USA')
sns.distplot(can['height'], bins=25, hist=False, rug=True, label='CAN')
| 3.828125 | 4 |
blog/migrations/0003_auto_20190504_0806.py | avahmh/Drman | 0 | 12794857 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-04 08:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import mptt.fields
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_post_likes'),
]
operations = [
migrations.CreateModel(
name='Categories',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('slug', models.SlugField(allow_unicode=True)),
('lft', models.PositiveIntegerField(db_index=True, editable=False)),
('rght', models.PositiveIntegerField(db_index=True, editable=False)),
('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
('level', models.PositiveIntegerField(db_index=True, editable=False)),
('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='category', to='blog.Categories')),
],
options={
'verbose_name': 'دسته بندی',
'verbose_name_plural': 'دسته بندی ها',
},
),
migrations.AddField(
model_name='post',
name='categories',
field=mptt.fields.TreeManyToManyField(related_name='categories', to='blog.Categories'),
),
migrations.AlterUniqueTogether(
name='categories',
unique_together=set([('parent', 'slug')]),
),
]
| 1.570313 | 2 |
src/cautious_invention/utils.py | MerktSimon/cautious-invention | 0 | 12794858 | # -*- coding: utf-8 -*-
"""Contain the utilities of cautious-invention."""
my_awesome_constant = 4
def hello_world():
"""Print hello World."""
print("Hello, I am a cautious invention")
def add(x, y):
"""Add two numbers together."""
return x+y
| 2.59375 | 3 |
SparkProgramming/2.4.MovieRatingData/p24_spark.py | yevheniyku/Cloud-and-Big-Data | 0 | 12794859 | <gh_stars>0
from pyspark import SparkConf, SparkContext
def rating(x):
if(x <= 1) : return 1
if(x <= 2) : return 2
if(x <= 3) : return 3
if(x <= 4) : return 4
else : return 5
def movieRating(input):
ratingsRDD = input.map(lambda x: x.split(","))
ratingsRDD = ratingsRDD.map(lambda x: (x[1], float(x[2])))
ratingsRDD = ratingsRDD.groupByKey().sortByKey()
ratingsRDD = ratingsRDD.map(lambda x: (x[0], sum(list(x[1]))/len(x[1])))
ratingsRDD = ratingsRDD.map(lambda x: (x[0], rating(float(x[1]))))
ratingsRDD = ratingsRDD.sortByKey()  # sortByKey returns a new RDD; keep it so the saved output is sorted
ratingsRDD.saveAsTextFile("output")
def main():
conf = SparkConf().setMaster('local').setAppName('MovieRating')
sc = SparkContext(conf = conf)
input = sc.textFile("ratings.csv")
movieRating(input)
if __name__ == '__main__':
main()
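# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): how rating() buckets an average.
# It runs without a SparkContext; the averages below are made-up values.
if __name__ == '__main__':
    assert [rating(v) for v in (0.5, 2.0, 2.7, 3.5, 4.9)] == [1, 2, 3, 4, 5]
# ---------------------------------------------------------------------------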
| 3.234375 | 3 |
tests/test_content_search.py | TAPP-TV/django-bulbs | 0 | 12794860 | from __future__ import absolute_import
import itertools
import datetime
from django.utils import timezone
from django.test.client import Client
from django.template.defaultfilters import slugify
from bulbs.content.models import Content, Tag, FeatureType
from elastimorphic.tests.base import BaseIndexableTestCase
from tests.testcontent.models import TestContentObj, TestContentObjTwo
class PolyContentTestCase(BaseIndexableTestCase):
def setUp(self):
super(PolyContentTestCase, self).setUp()
"""
Normally, the "Content" class picks up available doctypes from installed apps, but
in this case, our test models don't exist in a real app, so we'll hack them on.
"""
# generate some data
one_hour_ago = timezone.now() - datetime.timedelta(hours=1)
two_days_ago = timezone.now() - datetime.timedelta(days=2)
words = ['spam', 'driver', 'dump truck', 'restaurant']
self.num_subclasses = 2
self.combos = list(itertools.combinations(words, 2))
self.all_tags = []
ft_one = FeatureType.objects.create(name="Obj one", slug="obj-one")
ft_two = FeatureType.objects.create(name="Obj two", slug="obj-two")
for i, combo in enumerate(self.combos):
tags = []
for atom in combo:
tag, created = Tag.objects.get_or_create(name=atom, slug=slugify(atom))
tags.append(tag)
self.all_tags.append(tag)
obj = TestContentObj.objects.create(
title=' '.join(combo),
description=' '.join(reversed(combo)),
foo=combo[0],
published=one_hour_ago,
feature_type=ft_one
)
obj.tags.add(*tags)
obj.index()
obj2 = TestContentObjTwo.objects.create(
title=' '.join(reversed(combo)),
description=' '.join(combo),
foo=combo[1],
bar=i,
published=two_days_ago,
feature_type=ft_two
)
obj2.tags.add(*tags)
obj2.index()
obj = TestContentObj.objects.create(
title="Unpublished draft",
description="Just to throw a wrench",
foo="bar",
feature_type=ft_one
)
# We need to let the index refresh
TestContentObj.search_objects.refresh()
TestContentObjTwo.search_objects.refresh()
def test_filter_search_content(self):
self.assertEqual(Content.objects.count(), 13) # The 12, plus the unpublished one
q = Content.search_objects.search()
self.assertEqual(q.count(), 12)
q = Content.search_objects.search(query="spam")
self.assertEqual(q.count(), 6)
q = Content.search_objects.search(tags=["spam"])
self.assertEqual(q.count(), 6)
for content in q.full():
self.assertTrue("spam" in content.tags.values_list("slug", flat=True))
q = Content.search_objects.search(feature_types=["obj-one"])
self.assertEqual(q.count(), 6)
for content in q.full():
self.assertEqual("Obj one", content.feature_type.name)
q = Content.search_objects.search(types=["testcontent_testcontentobj"])
self.assertEqual(q.count(), 6)
q = Content.search_objects.search(before=timezone.now())
self.assertEqual(q.count(), 12)
q = Content.search_objects.search(before=timezone.now() - datetime.timedelta(hours=4))
self.assertEqual(q.count(), 6)
q = Content.search_objects.search(after=timezone.now() - datetime.timedelta(hours=4))
self.assertEqual(q.count(), 6)
q = Content.search_objects.search(after=timezone.now() - datetime.timedelta(days=40))
self.assertEqual(q.count(), 12)
q = Content.search_objects.search(types=["testcontent_testcontentobjtwo"]).full()
self.assertEqual(q.count(), 6)
q = Content.search_objects.search(types=[
"testcontent_testcontentobjtwo", "testcontent_testcontentobj"])
self.assertEqual(q.count(), 12)
def test_status_filter(self):
q = Content.search_objects.search(status="final")
self.assertEqual(q.count(), 12)
q = Content.search_objects.search(status="draft")
self.assertEqual(q.count(), 1)
def test_negative_filters(self):
q = Content.search_objects.search(tags=["-spam"])
self.assertEqual(q.count(), 6)
q = Content.search_objects.search(feature_types=["-obj-one"])
self.assertEqual(q.count(), 6)
for content in q.full():
self.assertNotEqual("Obj one", content.feature_type.name)
def test_content_subclasses(self):
# We created one of each subclass per combination so the following should be true:
self.assertEqual(Content.objects.count(), (len(self.combos) * self.num_subclasses) + 1)
self.assertEqual(TestContentObj.objects.count(), len(self.combos) + 1)
self.assertEqual(TestContentObjTwo.objects.count(), len(self.combos))
def test_content_list_view(self):
client = Client()
response = client.get('/content_list_one.html')
self.assertEqual(response.status_code, 200)
self.assertEqual(
len(response.context['object_list']), len(self.combos) * self.num_subclasses)
def test_num_polymorphic_queries(self):
with self.assertNumQueries(1 + self.num_subclasses):
for content in Content.objects.all():
self.assertIsInstance(content, (TestContentObj, TestContentObjTwo))
def test_add_remove_tags(self):
content = Content.objects.all()[0]
original_tag_count = len(content.tags.all())
new_tag = Tag.objects.create(name='crankdat')
content.tags.add(new_tag)
self.assertEqual(len(content.tags.all()), original_tag_count + 1)
self.assertEqual(len(content.tags.all()), len(content.extract_document()['tags']))
def test_search_exact_name_tags(self):
Tag.objects.create(name='Beeftank')
Tag.search_objects.refresh()
results = Tag.search_objects.query(name__match='beeftank').full()
self.assertTrue(len(results) > 0)
tag_result = results[0]
self.assertIsInstance(tag_result, Tag)
def test_in_bulk_performs_polymorphic_query(self):
content_ids = [c.id for c in Content.objects.all()]
results = Content.objects.in_bulk(content_ids)
subclasses = tuple(Content.__subclasses__())
for result in results.values():
self.assertIsInstance(result, subclasses)
| 2.125 | 2 |
misc.py | Friendly0Fire/manim-utils | 0 | 12794861 | <gh_stars>0
def split_on_condition(seq, condition):
a, b = [], []
for item in seq:
(a if condition(item) else b).append(item)
return a, b
| 2.8125 | 3 |
gwtarget/DESI_mainInjector/Main-Injector-master/python/sourceProb.py | rknop/timedomain | 1 | 12794862 | import numpy as np
import os
import scipy.stats
import mags
import hp2np
import warnings
license="""
Copyright (C) 2014 <NAME>
This program is free software; you can redistribute it and/or modify it
under the terms of version 3 of the GNU General Public License as
published by the Free Software Foundation.
More to the points- this code is science code: buggy, barely working,
with little or no documentation. Science code in the the alpine fast
& light style. (Note the rate at which people who stand on the
summit of K2 successfully make it down.)
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
class map(object):
"""
"""
def __init__(self, observation, type="bright", apparent_mag_source=21.5) :
"""
"""
data_dir = os.environ["DESGW_DATA_DIR"]
self.limitingMag = observation.maglim
self.limits = observation.limits
tryApparent = True
if type == "bright" :
if tryApparent :
self.lumModel = "apparent"
self.modelAbsoluteMagnitude = apparent_mag_source
self.modelAbsoluteMagnitudeSigma = 0
else :
# this can be used to scale the magnitudes from the source model
# this can be done as the source abs magnitude for calculations
# is taken from self.absMagMean, self.absMagSigma
# whereas we're setting the absolute scale here with
# self.modelAbsoluteMagnitude, self.modelAbsoluteMagnitudeSigma
# Currently this is done in mapsAtTimeT.probabilityMaps
self.lumModel = "kn-gaussian"
# LIGO O1 and O2
#self.modelAbsoluteMagnitude = -11.1
# LIGO O3
self.modelAbsoluteMagnitude = -15.5
self.modelAbsoluteMagnitudeSigma = 1.0
elif type == "dark" :
if tryApparent :
self.lumModel = "apparent"
self.modelAbsoluteMagnitude = 21.5
self.modelAbsoluteMagnitudeSigma = 0
else :
# fixed luminosity
self.lumModel = "bh-gaussian"
self.modelAbsoluteMagnitude = -25.0
self.modelAbsoluteMagnitudeSigma = 1.0
else :
raise Exception (
"only trigger types known are bright and dark, not {}".format(type))
self.absMagMean = self.modelAbsoluteMagnitude
self.absMagSigma = self.modelAbsoluteMagnitudeSigma
# get the P_recognition
self.pra = observation.pra
self.pdec = observation.pdec
self.precog = observation.precog
# keep the answer
self.probMap = ""
# for plotting contours, keep the intermediate data around
self.xi, self.yi, self.zi = ["","",""]
self.lastCtype = ""
def calculateProb(self, ligo, ligo_distance, ligo_distance_sigma, verbose = True) :
import scipy.integrate
warnings.filterwarnings("ignore")
# bookkeeping for plotting
self.zi= ""
#
telescopeLimits = self.limits
limitingMag = self.limitingMag
# check if sun is up
if limitingMag.mean() < -9 :
self.probMap = np.zeros(ligo.size)
return 0
# realwork
ligo_spatial = ligo
ligo_d_mean = ligo_distance
ligo_d_var = ligo_distance_sigma**2
# we are going to change variables to distance modulus (magnitude),
# then, assume error is gaussian in DM and in absolute magnitude
# so that we can add guassians to get a gaussian.
absMag_mean = self.absMagMean
absMag_var = self.absMagSigma**2
test_sum = ligo_d_var.sum()
if self.lumModel == "apparent" :
# implementing this in Feb 2020
prob_map = np.zeros(ligo_spatial.size)
apparent_mag = absMag_mean
ix, = np.where(apparent_mag < limitingMag)
prob_map[ix] = 1.0
ix, = np.where((apparent_mag >= limitingMag) & (apparent_mag < limitingMag+0.5))
prob_map[ix] = 0.5
else :
# we can't afford to do every pixel.
# for res= 128, a cut at > 1e-8 brings the number of pixels
# down from 196,608 to 10^4
ix = (ligo_spatial > 1e-8) & (ligo_d_mean > 0) & (limitingMag > 0)
distance_mod = 5*np.log10(ligo_d_mean[ix]*1e6/10.)
# error propgation on converting from distance to distance modulus
dm_var = ligo_d_var[ix]*(5./np.log(10)/ligo_d_mean[ix])
# assume gaussians, so adding gaussians to get a gaussian
ap_mag_mean = distance_mod + absMag_mean
ap_mag_var = absMag_var + dm_var
ic = 0
prob_map = np.copy(ligo_spatial)*0.0
for pix in np.nonzero(ix)[0] :
mag = ap_mag_mean[ic]
var = ap_mag_var[ic]
# Now we weight by r^2 dr , thus assuming equal probability per unit volume
# We will also drop constants, equivalent to assuming we are going
# to renormalize by integrating from 0 to infinity
# normalize
norm_min, norm_max = 7.0, 25.0 # magnitudes
ans = scipy.integrate.quad(self.probability_density,
norm_min, norm_max, args=(mag, var ) )
norm = ans[0]; norm_error = ans[1]
# calculate prob
mag_min, mag_max = 7.0, limitingMag[pix]
ans = scipy.integrate.quad(self.probability_density,
mag_min, mag_max, args=(mag, var ) )
prob = ans[0]; prob_error = ans[1]
prob = prob/norm
prob_map[pix] = prob
ic += 1
print "\t probMap: made for source absMag {:.1f}".format(absMag_mean)
# probability of recognition propto star density
prob_map = prob_map * self.precog
# finally, eliminate every where the telescope cannot point
prob_map = prob_map * telescopeLimits
self.probMap = prob_map
return 1
#
    # This is a gaussian in apparent magnitude, weighted by r^2 (itself
    # transformed into a distance modulus, i.e. a magnitude).
#
# Now we weight by r^2 dr , thus assuming equal probability per unit volume
# probability_density = np.exp( (m - ap_mag_mean)/2/ap_mag_var) * \
# np.exp(0.6*np.log(10.)*(m - ap_mag_mean))
def probability_density(self,m, mag_mean, mag_var ) :
#print m, mag_mean, mag_var
pd= np.exp( -(m-mag_mean)/2/mag_var) * \
np.exp(0.6*np.log(10.)*(m -mag_mean))
return pd
# one can choose to plot the ligo contours
# or the ligo*obs_prob contours
# type="ligo" type="ls"
# houranlge chooses HA instead of RA projected into mcbryde coords
def plotLigoContours(self, obs, type="ligo", whiteLine="False", hourangle=False) :
import matplotlib
import matplotlib.pyplot as plt
con_levels=10
if self.zi == "" or self.lastCtype != type:
print "\t calculating contours for type = ",type
if hourangle == False :
xmin = obs.x.min(); xmax = obs.x.max()
ymin = obs.y.min(); ymax = obs.y.max()
else :
xmin = obs.hx.min(); xmax = obs.hx.max()
                ymin = obs.hy.min(); ymax = obs.hy.max()
xi=np.linspace(xmin, xmax, 500)
yi=np.linspace(ymin, ymax, 500)
if type == "ligo" :
probMap = obs.map
if type == "ls" :
probMap = obs.map*self.probMap
if hourangle == False :
x = obs.x; y = obs.y
else :
x = obs.hx; y = obs.hy
zi = matplotlib.mlab.griddata(x,y,probMap,xi,yi)
self.xi, self.yi, self.zi = xi,yi,zi
self.lastCtype = type
if not whiteLine :
plt.contour(self.xi,self.yi,self.zi,con_levels,linewidths=3,colors="k")
plt.contour(self.xi,self.yi,self.zi,con_levels,linewidths=0.66,colors="w")
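# Illustrative usage sketch (not part of the original module; the observation
# object is assumed to expose maglim, limits, pra, pdec and precog, as accessed above):
#   m = map(obs, type="bright")
#   if m.calculateProb(ligo_skymap, ligo_distance, ligo_distance_sigma):
#       m.plotLigoContours(obs, type="ls")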
| 1.953125 | 2 |
Python/Maths/matrices.py | GeneralNZR/maths-and-javascript | 3 | 12794863 | <filename>Python/Maths/matrices.py<gh_stars>1-10
"""
Description:
    Various functions for manipulating matrices.
Author:
    <NAME>
Version:
    1.0
"""
def matrice_identite(n: int) -> list:
"""
    Description:
        Creates an identity matrix of size n.
    Parameters:
        n: {int} -- Size of the matrix.
    Returns:
        {list} -- Identity matrix of size n.
    Example:
>>> matrice_identite(3)
[[1, 0, 0], [0, 1, 0], [0, 0, 1]]
"""
matrice = []
for i in range(n):
matrice.append([])
for j in range(n):
if i == j:
matrice[i].append(1)
else:
matrice[i].append(0)
return matrice
def dimension_matrice(matrice: list, n: int) -> bool:
"""
    Description:
        Checks whether the matrix is of size n.
    Parameters:
        matrice: {list} -- Matrix to check.
        n: {int} -- Expected size of the matrix.
    Returns:
        {bool} -- True if the matrix is of size n, False otherwise.
    Example:
>>> dimension_matrice([[0, 0, 0], [0, 0, 0], [0, 0, 0]], 3)
True
"""
if len(matrice) == n and len(matrice[0]) == n:
return True
else:
return False
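# A couple of extra doctest-style checks of the helpers above:
#   >>> dimension_matrice([[1, 2], [3, 4]], 2)
#   True
#   >>> dimension_matrice(matrice_identite(4), 3)
#   False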
# TESTS
print(
'matrice_identite(3)'+' ----------------------------> ', matrice_identite(3),
'\ndimension_matrice(matrice_identite(3), 3)'+' ------> ', dimension_matrice(matrice_identite(3), 3)
) | 3.59375 | 4 |
app/__init__.py | onyxcherry/OnyxcherryOTP | 1 | 12794864 | <reponame>onyxcherry/OnyxcherryOTP<gh_stars>1-10
import os
from dataclasses import dataclass
from config import Config, CSPSettings
from dotenv import load_dotenv
from flask import Flask, current_app, render_template, request
from flask_babel import Babel
from flask_babel import lazy_gettext as _l
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
from flask_mail import Mail
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_talisman import Talisman
from flask_wtf.csrf import CSRFProtect
db = SQLAlchemy()
migrate = Migrate()
login = LoginManager()
login.login_view = "auth.login"
login.login_message = _l("Please log in to access this page.")
login.refresh_view = "auth.refresh_login"
login.needs_refresh_message = _l(
u"To protect your account, please reauthenticate to access this page."
)
login.needs_refresh_message_category = "info"
# login.session_protection = "strong"
# The "strong" mode above causes the session cookie to be deleted after the
# session is restored from the remember-me cookie.
mail = Mail()
flask_bcrypt = Bcrypt()
babel = Babel()
csrf = CSRFProtect()
talisman = Talisman()
def page_not_found(e):
return render_template("errors/404.html"), 404
def internal_error(e):
db.session.rollback()
return render_template("errors/500.html"), 500
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(config_class)
db.init_app(app)
login.init_app(app)
mail.init_app(app)
csrf.init_app(app)
babel.init_app(app)
flask_bcrypt.init_app(app)
csp = CSPSettings()
talisman.init_app(
app,
content_security_policy=csp.content_security_policy,
content_security_policy_nonce_in=csp.content_security_policy_nonce_in,
force_https=csp.force_https,
frame_options=csp.frame_options,
session_cookie_secure=csp.session_cookie_secure,
session_cookie_http_only=csp.session_cookie_http_only,
strict_transport_security=csp.strict_transport_security,
referrer_policy=csp.referrer_policy,
)
with app.app_context():
if db.engine.url.drivername == "sqlite":
migrate.init_app(app, db, render_as_batch=True)
else:
migrate.init_app(app, db)
from app.errors import bp as errors_bp
app.register_blueprint(errors_bp)
app.register_error_handler(404, page_not_found)
app.register_error_handler(500, internal_error)
from app.auth import bp as auth_bp
app.register_blueprint(auth_bp, url_prefix="/auth")
from app.twofa import bp as twofa_bp
app.register_blueprint(twofa_bp, url_prefix="/twofa")
from app.webauthn import bp as webauthn_bp
app.register_blueprint(webauthn_bp, url_prefix="/webauthn")
csrf.exempt(webauthn_bp)
from app.main import bp as main_bp
app.register_blueprint(main_bp)
return app
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(current_app.config["LANGUAGES"])
from app import models
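# Minimal usage sketch (assumed entry point, not part of this module): the factory
# above is typically consumed as
#   from app import create_app
#   app = create_app()
#   app.run()
# or via the Flask CLI with FLASK_APP pointing at a module that calls create_app().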
| 2.265625 | 2 |
silverpieces/__init__.py | fareedmirza/silverpieces | 1 | 12794865 | <reponame>fareedmirza/silverpieces<filename>silverpieces/__init__.py
# required for python2?
| 0.984375 | 1 |
RetrieveMolWeights.py | dcoukos/CHO_network | 1 | 12794866 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 16:03:28 2018
@author: dimitricoukos
Test: in command line:
python RetrieveUniProt.py 'Unit Tests/sample_brenda_parameters.json'
"""
import sys
import cobra_services as CS
from multiprocessing import Pool
from urllib.error import HTTPError
from DataTreatment import openJson, write
mammals = ['HSA', 'PTR', 'PPS', 'GGO', 'PON', 'NLE', 'MCC', 'MCF', 'RRO',
'RBB', 'CJC', 'SBQ', 'MMU', 'RNO', 'CGE', 'NGI', 'HGL', 'OCU',
'TUP', 'CFA', 'AML', 'UMR', 'ORO', 'FCA', 'PTG', 'AJU', 'BTA',
'BOM', 'BIU', 'PHD', 'CHX', 'OAS', 'SSC', 'CFR', 'CDK', 'LVE',
'OOR', 'ECB', 'EPZ', 'EAI', 'MYB', 'MYD', 'HAI', 'RSS', 'LAV',
'TMU', 'MDO', 'SHR', 'OAA']
animals = ['HSA', 'PTR', 'PPS', 'GGO', 'PON', 'NLE', 'MCC', 'MCF', 'RRO',
'RBB', 'CJC', 'SBQ', 'MMU', 'RNO', 'CGE', 'NGI', 'HGL', 'OCU',
'TUP', 'CFA', 'AML', 'UMR', 'ORO', 'FCA', 'PTG', 'AJU', 'BTA',
'BOM', 'BIU', 'PHD', 'CHX', 'OAS', 'SSC', 'CFR', 'CDK', 'LVE',
'OOR', 'ECB', 'EPZ', 'EAI', 'MYB', 'MYD', 'HAI', 'RSS', 'LAV',
'TMU', 'MDO', 'SHR', 'OAA', 'GGA', 'MGP', 'CJO', 'TGU', 'GFR',
'FAB', 'PHI', 'CCW', 'FPG', 'FCH', 'CLV', 'EGZ', 'AAM', 'ASN',
'AMJ', 'PSS', 'CMY', 'SEA', 'ACS', 'PVT', 'PBI', 'GJA', 'XLA',
'XTR', 'NPR', 'DRE', 'SRX', 'SGH', 'IPU', 'TRU', 'TNG', 'LCO',
'NCC', 'MZE', 'OLA', 'XMA', 'NFU', 'LCF', 'HCQ', 'ELS', 'SFM',
'LCM', 'CMK']
def returnBestAddress(genes, loop):
"""Searches for available genes matching kegg enzyme entry.
This function searches 'sequentially'. It returns the best available model
organism genes. Organisms phylogenetically closer to Cricetulus griseus are
preferred, but they are chosen by approximation. A detailed study of the
phylogenetic tree has not been done for this project. Hopefully going
sequentially increases both readability and efficiency.
Parameters
----------
genes : dict
key: value pair is organism: address
loop : string
Indicates the highest potential group of matching organisms to search
in.
Returns
-------
dict
key: kegg organism code. value: gene addresses for enzyme and organism
"""
if loop == 'best':
if 'CGE' in genes:
return genes['CGE']
elif 'MMU' in genes:
return genes['MMU']
elif 'RNO' in genes:
return genes['RNO']
elif 'HSA' in genes:
return genes['HSA']
else:
loop = 'mammals'
if loop == 'mammals':
mammal_match = set(genes.keys()).intersection(mammals)
if bool(mammal_match):
return mammal_match
else:
loop = 'vertebrates'
if loop == 'vertebrates':
animal_match = set(genes.keys()).intersection(animals)
if bool(animal_match):
return animal_match
else:
loop = 'csm' # Stands for "common simple models"
if loop == 'csm':
if 'DME' in genes:
return genes['DME']
elif 'SCE' in genes:
return genes['SCE']
elif 'ECO' in genes:
return genes['ECO']
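# Illustrative call of returnBestAddress (the gene addresses below are hypothetical;
# only the control flow comes from the function above):
#   returnBestAddress({'CGE': ['100689073'], 'HSA': ['5213']}, 'best')
#   -> ['100689073']   # the CGE entry wins in the 'best' loop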
def loopHandler(mol_weights, ec_number, genes, loop):
    """Calls the correct loop of returnBestAddress based on the best available
    gene matches.
Parameters
----------
mol_weights : list
empty list. will contain estimated molecular weights of enzymes.
ec_number : string
genes : list
Addresses of genes corresponding to ec number.
loop : string
"""
searching = True
while searching:
best = returnBestAddress(genes, loop)
if not best:
if loop == 'best':
loop = 'mammals'
break
if loop == 'mammals':
loop = 'vertebrates'
break
if loop == 'vertebrates':
loop = 'csm'
break
if loop == 'csm':
searching = False
return None
searching = False
mol_weights[ec_number]['weights'] = []
mol_weights[ec_number]['uniprot_ids'] = []
if loop == 'best' or loop == 'csm':
for address in best:
organism = best # for readability
try:
fillData(mol_weights, ec_number, organism, address)
except HTTPError as err:
if err.code == 404:
pass
else:
for gene in best:
for address in best[gene]:
organism = best[gene] # for readability
try:
fillData(mol_weights, ec_number, organism, address)
except HTTPError as err:
if err.code == 404:
pass
def fillData(mol_weights, ec_number, organism, address):
"""Searches kegg for enzyme uniprot id and AA sequence.
Parameters
----------
mol_weights : dict
object containing all information collected by program.
ec_number : string
enzyme classification number used to organize data.
address : string
gene address for sequence lookup.
"""
    # 'genes' may not have been initialised by the caller, so create it lazily.
    mol_weights[ec_number].setdefault('genes', []).append(organism.lower() + ':' + address)
sequence = CS.kegggene_to_sequence(organism, address)
weight = CS.sequence_weight(sequence)
mol_weights[ec_number]['weights'].append(weight)
uniprot = CS.kegggene_to_uniprotid(organism, address)
if uniprot:
        mol_weights[ec_number]['uniprot_ids'].append(uniprot)
def mainSubprocess(bigg_ids, del_ec):
"""Main function called by each multiprocessing.process.
Parameters
----------
bigg_ids : dict
key: ec_number. value: corresponding bigg ids.
del_ec : list
        empty list which is appended to here, containing deprecated ec numbers
Returns
-------
dict
key: ec number. value: all collected data in program by this process.
"""
try:
mol_weights = {}
for ec_number in bigg_ids: # WARNING: joblib may require list
mol_weights[ec_number] = {}
print('Currently processing BiGG id: ' + ec_number)
mol_weights[ec_number]['bigg ids'] = bigg_ids[ec_number]
try:
genes = CS.ecnumber_to_genes(ec_number)
except HTTPError as err:
if err.code == 404:
print('Excepted: No entry for ec number: '+ec_number)
continue
else:
raise
if genes:
loop = 'best'
searching = True
while searching:
try:
loopHandler(mol_weights, ec_number, genes, loop)
searching = False
except HTTPError as err:
if err.code == 404 and loop == 'csm':
searching = False
except TypeError as err:
if loop == 'best':
loop = 'mammals'
if loop == 'mammals':
loop = 'vertebrates'
if loop == 'vertebrates':
loop = 'csm'
if loop == 'csm':
searching = False
finally:
return mol_weights
if __name__ == '__main__':
sub_dict_1 = {}
sub_dict_2 = {}
sub_dict_3 = {}
sub_dict_4 = {}
mol_weights = {}
if len(sys.argv) == 1:
brenda_parameters = openJson('JSONs/brenda_parameters.json')
else:
brenda_parameters = openJson(sys.argv[1])
simplified_brenda = {}
for bigg_id in brenda_parameters:
simplified_brenda[bigg_id] = brenda_parameters[bigg_id][0]
optimized_bigg = {}
for k, v in simplified_brenda.items():
optimized_bigg[v] = optimized_bigg.get(v, [])
optimized_bigg[v].append(k)
counter = 0
for ec_number in optimized_bigg:
if counter % 4 == 0:
sub_dict_1[ec_number] = optimized_bigg[ec_number]
if counter % 4 == 1:
sub_dict_2[ec_number] = optimized_bigg[ec_number]
if counter % 4 == 2:
sub_dict_3[ec_number] = optimized_bigg[ec_number]
if counter % 4 == 3:
sub_dict_4[ec_number] = optimized_bigg[ec_number]
counter = counter + 1
try:
with Pool(processes=4) as pool:
del_ec1 = []
del_ec2 = []
del_ec3 = []
del_ec4 = []
mw_1 = pool.apply_async(mainSubprocess, (sub_dict_1, del_ec1,))
mw_2 = pool.apply_async(mainSubprocess, (sub_dict_2, del_ec2,))
mw_3 = pool.apply_async(mainSubprocess, (sub_dict_3, del_ec3,))
mw_4 = pool.apply_async(mainSubprocess, (sub_dict_4, del_ec4,))
pool.close()
pool.join()
for ec in del_ec1:
mw_1.pop(ec, None)
for ec in del_ec2:
mw_2.pop(ec, None)
for ec in del_ec3:
mw_3.pop(ec, None)
for ec in del_ec4:
mw_4.pop(ec, None)
finally:
mol_weights.update(mw_1.get())
mol_weights.update(mw_2.get())
mol_weights.update(mw_3.get())
mol_weights.update(mw_4.get())
if len(sys.argv) > 1:
write('Unit Tests/multiprocessing_sub_output1.json', mw_1.get())
write('Unit Tests/multiprocessing_sub_output3.json', mw_3.get())
mol_weights_to_write = {}
for ec_number in mol_weights:
for bigg_id in mol_weights[ec_number]['bigg ids']:
mol_weights_to_write[bigg_id] = {}
mol_weights_to_write[bigg_id]['ec_number'] = ec_number
mol_weights_to_write[bigg_id].update(mol_weights[ec_number])
write('JSONs/molecular_weights.json', mol_weights_to_write)
| 1.75 | 2 |
local-pipelines/python-ggnn-expr-completion/datagen/get_data.py | kiteco/kiteco-public | 17 | 12794867 | import argparse
import datetime
import json
import logging
import pickle
import time
import shutil
from kite.graph_data.data_feeder import EndpointDataFeeder
from kite.graph_data.session import RequestInit
from kite.graph_data.graph_feed import GraphFeedConfig
from kite.infer_expr.config import MetaInfo, Config
from kite.infer_call.request import Request as CallRequest, KwargRequest, ArgTypeRequest, ArgPlaceholderRequest
from kite.infer_expr.request import Request as ExprRequest
from kite.infer_expr.attr_base import Request as AttrBaseRequest
from kite.infer_attr.request import Request as AttrRequest
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s')
def get_filename(cur_sample: int, total: int, timestamp: int) -> str:
n_digits = len(str(total))
format_str = "{{:0{}d}}".format(n_digits) + "-of-{}-{}.pickle"
return format_str.format(cur_sample, total, timestamp)
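# Example of the naming scheme produced above:
#   get_filename(7, 1000, 1618033988) -> "0007-of-1000-1618033988.pickle"
# (the sample index is zero-padded to len(str(total)) digits).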
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--endpoint', type=str, default='http://localhost:3039')
parser.add_argument('--random_seed', type=int)
parser.add_argument('--batch', type=int, default=10)
parser.add_argument('--samples', type=int, default=1000, help='number of samples to generate')
parser.add_argument('--meta_info', type=str)
parser.add_argument('--out_dir', type=str, default='data')
parser.add_argument('--samples_per_file', type=int, default=500)
parser.add_argument('--max_samples', type=int)
parser.add_argument('--attr_base_proportion', type=float)
parser.add_argument('--attr_proportion', type=float)
parser.add_argument('--call_proportion', type=float)
parser.add_argument('--arg_type_proportion', type=float)
parser.add_argument('--kwarg_name_proportion', type=float)
parser.add_argument('--arg_placeholder_proportion', type=float)
args = parser.parse_args()
meta_info = MetaInfo.from_json(json.load(open(args.meta_info, 'r')))
config = Config()
req = RequestInit(
config=GraphFeedConfig(edge_set=config.ggnn.edge_set),
random_seed=args.random_seed,
num_batches=args.batch,
max_hops=config.max_hops,
name_subtoken_index=meta_info.name_subtoken_index,
type_subtoken_index=meta_info.type_subtoken_index,
production_index=meta_info.production,
expr=ExprRequest(
max_samples=args.max_samples,
call=CallRequest(
symbols=meta_info.call.dist,
batch_proportion=args.call_proportion,
),
attr=AttrRequest(
symbols=meta_info.attr.dist,
batch_proportion=args.attr_proportion,
parents=meta_info.attr.parents,
),
attr_base=AttrBaseRequest(
symbols=meta_info.attr_base.dist,
batch_proportion=args.attr_base_proportion,
),
arg_type=ArgTypeRequest(
symbols=meta_info.call.dist,
batch_proportion=args.arg_type_proportion,
),
kwarg_name=KwargRequest(
symbols=meta_info.call.dist,
keywords=meta_info.call.keywords,
batch_proportion=args.kwarg_name_proportion,
),
arg_placeholder=ArgPlaceholderRequest(
symbols=meta_info.call.dist,
batch_proportion=args.arg_placeholder_proportion,
)
),
)
logging.info("will write {} samples to {}, random seed = {}".format(
args.samples, args.out_dir, args.random_seed))
feeder = EndpointDataFeeder(args.endpoint, req)
try:
tmp_filename = None
filename = None
file = None
file_samples = 0
start = None
n_names = 0
n_production = 0
def finish_file():
file.close()
shutil.move(tmp_filename, filename)
end = datetime.datetime.now()
logging.info(
"sample {}: saved {} with {} samples ({} name, {} production), took {}".format(
i, filename, args.samples_per_file, n_names, n_production, end - start
))
for i in range(args.samples):
if not file or file_samples >= args.samples_per_file:
if file:
finish_file()
file_samples = 0
ts = int(time.time() * 1000)
filename = "{}/{}".format(args.out_dir, get_filename(i, args.samples, ts))
tmp_filename = "{}.part".format(filename)
file = open(tmp_filename, 'wb')
start = datetime.datetime.now()
logging.info("writing to {}".format(tmp_filename))
sample = feeder.next()
pickle.dump(sample, file)
n_names += len(sample.data.expr.infer_name.prediction_nodes)
n_production += len(sample.data.expr.infer_production.prediction_nodes)
file_samples += 1
if file_samples > 0:
finish_file()
finally:
feeder.stop()
if __name__ == "__main__":
main()
| 2.140625 | 2 |
experiments/utils/nets/cnn_factory.py | ezetl/deep-learning-techniques-thesis | 0 | 12794868 | <filename>experiments/utils/nets/cnn_factory.py
#!/usr/bin/env python2.7
import caffe
from caffe import (layers as L, params as P)
from layers_wrappers import *
caffe.set_device(0)
caffe.set_mode_gpu()
class MNISTNetFactory:
@staticmethod
def standar(lmdb_path=None, batch_size=125, scale=1.0, is_train=True, learn_all=True):
"""
        Creates a prototxt similar to the first layers of the AlexNet architecture for the MNIST experiment
:param lmdb_path: str. Path to train LMDB
:param batch_size: int. Batch size
:param scale: float. How to scale the images
:param is_train: bool. Flag indicating if this is for testing or training
:returns: Caffe NetSpec, tuple with names of loss blobs, tuple with name of accuracy blobs
"""
n = caffe.NetSpec()
phase = caffe.TRAIN if is_train else caffe.TEST
n.data, n.label = L.Data(include=dict(phase=phase), batch_size=batch_size, backend=P.Data.LMDB, source=lmdb_path, transform_param=dict(scale=scale), ntop=2)
n.conv1 = L.Convolution(n.data, kernel_size=11, stride=4, num_output=96, param=[weight_param('conv1_w', learn_all=learn_all), bias_param('conv1_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0)
n.relu1 = L.ReLU(n.conv1, in_place=True)
n.pool1 = L.Pooling(n.relu1, pool=P.Pooling.MAX, kernel_size=3, stride=2)
n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
n.conv2 = L.Convolution(n.norm1, kernel_size=5, num_output=256, pad=2, group=2, param=[weight_param('conv2_w', learn_all=learn_all), bias_param('conv2_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0)
n.relu2 = L.ReLU(n.conv2, in_place=True)
n.pool2 = L.Pooling(n.relu2, pool=P.Pooling.MAX, kernel_size=3, stride=2)
n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
n.fc500 = L.InnerProduct(n.norm2, num_output=500, param=[weight_param('fc500_w', learn_all=True), bias_param('fc500_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.relu3 = L.ReLU(n.fc500, in_place=True)
if is_train:
n.dropout = fc10input = L.Dropout(n.relu3, in_place=True)
else:
fc10input = n.relu3
# Learn all true because we always want to train the top classifier no matter if we are training from scratch or finetuning
n.fc10 = L.InnerProduct(fc10input, num_output=10, param=[weight_param('fc10_w', learn_all=True), bias_param('fc10_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
if is_train:
n.loss = L.SoftmaxWithLoss(n.fc10, n.label)
n.acc = L.Accuracy(n.fc10, n.label, include=dict(phase=caffe.TEST))
# Returning the name of the loss/acc layers is useful because then we can
# know which outputs of the net we can track to test the 'health'
# of the training process
return n, ('loss',), ('acc',)
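    # Minimal sketch (not part of the original file): the NetSpec returned above can
    # be serialized to a prototxt with the standard pycaffe API, e.g.
    #   n, loss_blobs, acc_blobs = MNISTNetFactory.standar(lmdb_path='mnist_train_lmdb',
    #                                                      scale=1. / 255, is_train=True)
    #   with open('mnist_train.prototxt', 'w') as f:   # path is hypothetical
    #       f.write(str(n.to_proto()))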
@staticmethod
def siamese_egomotion(lmdb_path=None, labels_lmdb_path=None,
batch_size=125, scale=1.0, is_train=True, learn_all=False, sfa=False):
"""
        Creates a prototxt for the AlexNet architecture for the MNIST experiment
Uses Egomotion as stated in the paper
:param lmdb_path: str. Path to train LMDB
:param labels_lmdb_path: str. Path to train LMDB labels
:param batch_size: int. Batch size
:param scale: float. How to scale the images
:param is_train: bool. Flag indicating if this is for testing or training
:param learn_all: bool. Flag indicating if we should learn all the layers from scratch
:returns: Caffe NetSpec, tuple with names of loss blobs, tuple with name of accuracy blobs
"""
n = caffe.NetSpec()
n.data, n.label = input_layers(lmdb_path=lmdb_path, labels_lmdb_path=labels_lmdb_path, batch_size=batch_size, scale=scale, is_train=is_train)
# Slice data/labels for MNIST
n.data0, n.data1 = L.Slice(n.data, slice_param=dict(axis=1, slice_point=1), ntop=2)
n.labelx, n.labely, n.labelz = L.Slice(n.label, slice_param=dict(axis=1, slice_point=[1,2]), ntop=3)
# BCNN
n.norm2, n.norm2_p = bcnn(n.data0, n.data1, n, learn_all, True)
# TCNN
n.concat = L.Concat(n.norm2, n.norm2_p, concat_param=dict(axis=1))
n.fc1000 = L.InnerProduct(n.concat, num_output=1000, param=[weight_param('fc1000_w', learn_all=True), bias_param('fc1000_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.relu3 = L.ReLU(n.fc1000, in_place=True)
if is_train:
n.dropout = fcxinput = fcyinput = fczinput = L.Dropout(n.relu3, in_place=True)
else:
fcxinput = fcyinput = fczinput = n.relu3
# Classifiers
n.fcx = L.InnerProduct(fcxinput, num_output=7, param=[weight_param('fcx_w', learn_all=True), bias_param('fcx_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.fcy = L.InnerProduct(fcyinput, num_output=7, param=[weight_param('fcy_w', learn_all=True), bias_param('fcy_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.fcz = L.InnerProduct(fczinput, num_output=20, param=[weight_param('fcz_w', learn_all=True), bias_param('fcz_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.loss_x = L.SoftmaxWithLoss(n.fcx, n.labelx)
n.loss_y = L.SoftmaxWithLoss(n.fcy, n.labely)
n.loss_z = L.SoftmaxWithLoss(n.fcz, n.labelz)
n.acc_x = L.Accuracy(n.fcx, n.labelx, include=dict(phase=caffe.TEST))
n.acc_y = L.Accuracy(n.fcy, n.labely, include=dict(phase=caffe.TEST))
n.acc_z = L.Accuracy(n.fcz, n.labelz, include=dict(phase=caffe.TEST))
return n, ('loss_x', 'loss_y', 'loss_z'), ('acc_x', 'acc_y', 'acc_z')
@staticmethod
def siamese_contrastive(lmdb_path=None, labels_lmdb_path=None,
batch_size=125, scale=1.0, contrastive_margin=10, is_train=True, learn_all=False, sfa=False):
"""
Creates a protoxt for the AlexNet architecture for the MNIST experiment
Uses Contrastive loss
:param lmdb_path: str. Path to train LMDB
:param labels_lmdb_path: str. Path to train LMDB labels
:param batch_size: int. Batch size
:param scale: float. How to scale the images
:param contrastive_margin: int. Margin for the contrastive loss layer
:param is_train: bool. Flag indicating if this is for testing or training
:param learn_all: bool. Flag indicating if we should learn all the layers from scratch
:returns: Caffe NetSpec, tuple with names of loss blobs, tuple with name of accuracy blobs
"""
n = caffe.NetSpec()
n.data, n.label = input_layers(lmdb_path=lmdb_path, labels_lmdb_path=labels_lmdb_path, batch_size=batch_size, scale=scale, is_train=is_train)
# Slice data/labels for MNIST
n.data0, n.data1 = L.Slice(n.data, slice_param=dict(axis=1, slice_point=1), ntop=2)
# BCNN
n.norm2, n.norm2_p = bcnn(n.data0, n.data1, n, learn_all, True)
# TCNNs
n.fc1 = L.InnerProduct(n.norm2, num_output=500, param=[weight_param('fc1_p_w', learn_all=True), bias_param('fc1_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.relu3 = L.ReLU(n.fc1, in_place=True)
n.dropout1 = L.Dropout(n.relu3, in_place=True)
n.fc2 = L.InnerProduct(n.relu3, num_output=100, param=[weight_param('fc2_p_w', learn_all=True), bias_param('fc2_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.fc1_p = L.InnerProduct(n.norm2_p, num_output=500, param=[weight_param('fc1_p_w', learn_all=True), bias_param('fc1_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.relu3_p = L.ReLU(n.fc1_p, in_place=True)
n.dropout1_p = L.Dropout(n.relu3_p, in_place=True)
n.fc2_p = L.InnerProduct(n.relu3_p, num_output=100, param=[weight_param('fc2_p_w', learn_all=True), bias_param('fc2_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.contrastive = L.ContrastiveLoss(n.fc2, n.fc2_p, n.label, contrastive_loss_param=dict(margin=contrastive_margin))
return n, ('contrastive',), None
class KITTINetFactory:
@staticmethod
def siamese_egomotion(lmdb_path=None, labels_lmdb_path=None, mean_file=None,
batch_size=125, scale=1.0, is_train=True, learn_all=True):
"""
        Creates a prototxt for the AlexNet architecture
:param lmdb_path: str. Path to train LMDB
:param labels_lmdb_path: str. Path to train LMDB labels
:param test_lmdb: str. Path to train LMDB
:param test_labels_lmdb: str. Path to test LMDB labels
:param batch_size: int. Batch size
:param scale: float. How to scale the images
:param is_train: bool. Flag indicating if this is for testing or training
:param learn_all: bool. Flag indicating if we should learn all the layers from scratch
:returns: Caffe NetSpec, tuple with names of loss blobs, tuple with name of accuracy blobs
"""
n = caffe.NetSpec()
n.data, n.label = input_layers(lmdb_path=lmdb_path, labels_lmdb_path=labels_lmdb_path, mean_file=mean_file, batch_size=batch_size, scale=scale, is_train=is_train)
# Slice data/labels
n.data0, n.data1 = L.Slice(n.data, slice_param=dict(axis=1, slice_point=3), ntop=2)
n.labelx, n.labely, n.labelz = L.Slice(n.label, slice_param=dict(axis=1, slice_point=[1,2]), ntop=3)
# BCNN
relu5, relu5_p = bcnn(n.data0, n.data1, n, learn_all, False)
# TCNN
n.concat = L.Concat(relu5, relu5_p, concat_param=dict(axis=1))
n.conv6 = L.Convolution(n.concat, kernel_size=3, stride=2, num_output=256, param=[weight_param('conv6_w', learn_all=learn_all), bias_param('conv6_b', learn_all=learn_all)], weight_filler=weight_filler_fc, bias_filler=bias_filler_0)
n.relu6 = L.ReLU(n.conv6, in_place=True)
n.conv7 = L.Convolution(n.relu6, kernel_size=3, stride=2, num_output=128, param=[weight_param('conv7_w', learn_all=learn_all), bias_param('conv7_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0)
n.relu7 = L.ReLU(n.conv7, in_place=True)
n.fc7_ego = L.InnerProduct(n.relu7, num_output=500, param=[weight_param('fc7_ego_w', learn_all=True), bias_param('fc7_ego_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.relu8 = L.ReLU(n.fc7_ego, in_place=True)
if is_train:
n.drop = fcxinput = fcyinput = fczinput = L.Dropout(n.relu8, dropout_param=dict(dropout_ratio=0.5), in_place=True)
else:
fcxinput = fcyinput = fczinput = n.relu8
# Classifiers
n.fcx = L.InnerProduct(fcxinput, num_output=20, param=[weight_param('fcx_w', learn_all=True), bias_param('fcx_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.fcy = L.InnerProduct(fcyinput, num_output=20, param=[weight_param('fcy_w', learn_all=True), bias_param('fcy_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.fcz = L.InnerProduct(fczinput, num_output=20, param=[weight_param('fcz_w', learn_all=True), bias_param('fcz_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
if is_train:
n.loss_x = L.SoftmaxWithLoss(n.fcx, n.labelx)
n.loss_y = L.SoftmaxWithLoss(n.fcy, n.labely)
n.loss_z = L.SoftmaxWithLoss(n.fcz, n.labelz)
n.acc_x = L.Accuracy(n.fcx, n.labelx, include=dict(phase=caffe.TEST))
n.acc_y = L.Accuracy(n.fcy, n.labely, include=dict(phase=caffe.TEST))
n.acc_z = L.Accuracy(n.fcz, n.labelz, include=dict(phase=caffe.TEST))
return n, ('loss_x', 'loss_y', 'loss_z'), ('acc_x', 'acc_y', 'acc_z')
@staticmethod
def siamese_contrastive(lmdb_path=None, labels_lmdb_path=None, mean_file=None,
batch_size=125, scale=1.0, contrastive_margin=10, is_train=True, learn_all=True):
"""
        Creates a prototxt for a siamese AlexNet architecture with a contrastive loss layer on top
:param lmdb_path: str. Path to train LMDB
:param labels_lmdb_path: str. Path to train LMDB labels
:param test_lmdb: str. Path to train LMDB
:param test_labels_lmdb: str. Path to test LMDB labels
:param batch_size: int. Batch size
:param scale: float. How to scale the images
:param contrastive_margin: int. Margin for the contrastive loss layer
:param is_train: bool. Flag indicating if this is for testing or training
:param learn_all: bool. Flag indicating if we should learn all the layers from scratch
:returns: Caffe NetSpec, tuple with names of loss blobs, tuple with name of accuracy blobs
"""
n = caffe.NetSpec()
n.data, n.label = input_layers(lmdb_path=lmdb_path, labels_lmdb_path=labels_lmdb_path, mean_file=mean_file, batch_size=batch_size, scale=scale, is_train=is_train)
# Slice data/labels
n.data0, n.data1 = L.Slice(n.data, slice_param=dict(axis=1, slice_point=3), ntop=2)
# BCNN
relu5, relu5_p = bcnn(n.data0, n.data1, n, learn_all, False)
# TCNNs
n.fc1 = L.InnerProduct(relu5, num_output=500, param=[weight_param('fc1_p_w', learn_all=True), bias_param('fc1_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.relu6 = L.ReLU(n.fc1, in_place=True)
n.dropout1 = L.Dropout(n.relu6, in_place=True)
n.fc2 = L.InnerProduct(n.relu6, num_output=100, param=[weight_param('fc2_p_w', learn_all=True), bias_param('fc2_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.fc1_p = L.InnerProduct(relu5_p, num_output=500, param=[weight_param('fc1_p_w', learn_all=True), bias_param('fc1_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.relu6_p = L.ReLU(n.fc1_p, in_place=True)
n.dropout1_p = L.Dropout(n.relu6_p, in_place=True)
n.fc2_p = L.InnerProduct(n.relu6_p, num_output=100, param=[weight_param('fc2_p_w', learn_all=True), bias_param('fc2_p_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.contrastive = L.ContrastiveLoss(n.fc2, n.fc2_p, n.label, contrastive_loss_param=dict(margin=contrastive_margin))
return n, ('contrastive',), None
@staticmethod
def standar(lmdb_path=None, labels_lmdb_path=None, batch_size=126, mean_file=None,
scale=1.0, is_train=True, num_classes=397, learn_all=True, layers='5', is_imagenet=False):
"""
        Creates a prototxt for the AlexNet architecture
:param lmdb_path: str. Path to train LMDB
:param labels_lmdb_path: str. Path to train LMDB labels
:param test_lmdb: str. Path to train LMDB
:param test_labels_lmdb: str. Path to test LMDB labels
:param batch_size: int. Batch size
:param scale: float. How to scale the images
:param is_train: bool. Flag indicating if this is for testing or training
:param num_classes: int. number of classes for the top classifier
:param classifier_name: str. name of the top classifier
:param learn_all: bool. Flag indicating if we should learn all the layers from scratch
:param layers: str. from which layer we will extract features to train a classifier
:returns: Caffe NetSpec, tuple with names of loss blobs, tuple with name of accuracy blobs
"""
n = caffe.NetSpec()
n.data, n.label = input_layers(lmdb_path=lmdb_path, labels_lmdb_path=labels_lmdb_path, mean_file=mean_file, batch_size=batch_size, scale=scale, is_train=is_train)
n.conv1 = L.Convolution(n.data, kernel_size=11, stride=4, num_output=96, param=[weight_param('conv1_w', learn_all=learn_all), bias_param('conv1_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0)
n.relu1 = L.ReLU(n.conv1, in_place=True)
n.pool1 = L.Pooling(n.relu1, pool=P.Pooling.MAX, kernel_size=3, stride=2)
n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
if layers == '1':
n.fc_intermediate = L.InnerProduct(n.norm1, num_output=num_classes, param=[weight_param('fc_intermediate_w', learn_all=True), bias_param('fc_intermediate_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
if is_train:
n.loss = L.SoftmaxWithLoss(n.fc_intermediate, n.label)
n.acc = L.Accuracy(n.fc_intermediate, n.label, include=dict(phase=caffe.TEST))
return n, ('loss',), ('acc',)
n.conv2 = L.Convolution(n.norm1, kernel_size=5, num_output=256, pad=2, group=2, param=[weight_param('conv2_w', learn_all=learn_all), bias_param('conv2_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0)
n.relu2 = L.ReLU(n.conv2, in_place=True)
n.pool2 = L.Pooling(n.relu2, pool=P.Pooling.MAX, kernel_size=3, stride=2)
n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
if layers == '2':
n.fc_intermediate = L.InnerProduct(n.norm2, num_output=num_classes, param=[weight_param('fc_intermediate_w', learn_all=True), bias_param('fc_intermediate_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
if is_train:
n.loss = L.SoftmaxWithLoss(n.fc_intermediate, n.label)
n.acc = L.Accuracy(n.fc_intermediate, n.label, include=dict(phase=caffe.TEST))
return n, ('loss',), ('acc',)
n.conv3 = L.Convolution(n.norm2, kernel_size=3, num_output=384, pad=1, param=[weight_param('conv3_w', learn_all=learn_all), bias_param('conv3_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0)
n.relu3 = L.ReLU(n.conv3, in_place=True)
if layers == '3':
n.fc_prev = L.InnerProduct(n.relu3, num_output=1000, param=[weight_param('fc_prev_w', learn_all=True), bias_param('fc_prev_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.relu_prev = L.ReLU(n.fc_prev, in_place=True)
n.fc_intermediate = L.InnerProduct(n.relu_prev, num_output=num_classes, param=[weight_param('fc_intermediate_w', learn_all=True), bias_param('fc_intermediate_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
if is_train:
n.loss = L.SoftmaxWithLoss(n.fc_intermediate, n.label)
n.acc = L.Accuracy(n.fc_intermediate, n.label, include=dict(phase=caffe.TEST))
return n, ('loss',), ('acc',)
n.conv4 = L.Convolution(n.relu3, kernel_size=3, num_output=384, pad=1, group=2, param=[weight_param('conv4_w', learn_all=learn_all), bias_param('conv4_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0)
n.relu4 = L.ReLU(n.conv4, in_place=True)
if layers == '4':
n.fc_prev = L.InnerProduct(n.relu4, num_output=1000, param=[weight_param('fc_prev_w', learn_all=True), bias_param('fc_prev_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.relu_prev = L.ReLU(n.fc_prev, in_place=True)
n.fc_intermediate = L.InnerProduct(n.relu_prev, num_output=num_classes, param=[weight_param('fc_intermediate_w', learn_all=True), bias_param('fc_intermediate_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
if is_train:
n.loss = L.SoftmaxWithLoss(n.fc_intermediate, n.label)
n.acc = L.Accuracy(n.fc_intermediate, n.label, include=dict(phase=caffe.TEST))
return n, ('loss',), ('acc',)
n.conv5 = L.Convolution(n.relu4, kernel_size=3, num_output=256, pad=1, group=2, param=[weight_param('conv5_w', learn_all=learn_all), bias_param('conv5_b', learn_all=learn_all)], weight_filler=weight_filler, bias_filler=bias_filler_0)
n.relu5 = L.ReLU(n.conv5, in_place=True)
if not is_imagenet:
if layers == '5':
n.fc_prev = L.InnerProduct(n.relu5, num_output=1000, param=[weight_param('fc_prev_w', learn_all=True), bias_param('fc_prev_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.relu_prev = L.ReLU(n.fc_prev, in_place=True)
n.fc_intermediate = L.InnerProduct(n.relu_prev, num_output=num_classes, param=[weight_param('fc_intermediate_w', learn_all=True), bias_param('fc_intermediate_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
if is_train:
n.loss = L.SoftmaxWithLoss(n.fc_intermediate, n.label)
n.acc = L.Accuracy(n.fc_intermediate, n.label, include=dict(phase=caffe.TEST))
return n, ('loss',), ('acc',)
n.fc6 = L.InnerProduct(n.relu5, num_output=4096, param=[weight_param('fc6_w', learn_all=True), bias_param('fc6_b', learn_all=learn_all)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.relu6 = L.ReLU(n.fc6, in_place=True)
if is_train:
n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
else:
fc7input = n.relu6
n.fc7 = L.InnerProduct(fc7input, num_output=4096, param=[weight_param('fc7_w', learn_all=True), bias_param('fc7_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.relu7 = L.ReLU(n.fc7, in_place=True)
if is_train:
n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
else:
fc8input = n.relu7
n.fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=[weight_param('fc8_w', learn_all=True), bias_param('fc8_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
if is_train:
n.loss = L.SoftmaxWithLoss(n.fc8, n.label)
n.acc = L.Accuracy(n.fc8, n.label, include=dict(phase=caffe.TEST))
else:
if layers == '5':
n.fc_imgnet = L.InnerProduct(n.relu5, num_output=num_classes, param=[weight_param('fc_w', learn_all=True), bias_param('fc_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
if is_train:
n.loss = L.SoftmaxWithLoss(n.fc_imgnet, n.label)
n.acc = L.Accuracy(n.fc_imgnet, n.label, include=dict(phase=caffe.TEST))
return n, ('loss',), ('acc',)
n.fc6_imgnet = L.InnerProduct(n.relu5, num_output=4096, param=[weight_param('fc6_w', learn_all=True), bias_param('fc6_b', learn_all=learn_all)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.relu6 = L.ReLU(n.fc6_imgnet, in_place=True)
if is_train:
n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
else:
fc7input = n.relu6
n.fc7_imgnet = L.InnerProduct(fc7input, num_output=4096, param=[weight_param('fc7_w', learn_all=True), bias_param('fc7_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
n.relu7 = L.ReLU(n.fc7_imgnet, in_place=True)
if is_train:
n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
else:
fc8input = n.relu7
n.fc8_imgnet = L.InnerProduct(fc8input, num_output=num_classes, param=[weight_param('fc8_w', learn_all=True), bias_param('fc8_b', learn_all=True)], weight_filler=weight_filler_fc, bias_filler=bias_filler_1)
if is_train:
n.loss = L.SoftmaxWithLoss(n.fc8_imgnet, n.label)
n.acc = L.Accuracy(n.fc8_imgnet, n.label, include=dict(phase=caffe.TEST))
return n, ('loss',), ('acc',)
| 2.796875 | 3 |
solveLinearEquation/gauss-with-pivot.py | terasakisatoshi/pythonCodes | 0 | 12794869 | <gh_stars>0
import numpy as np
# Example1
a = np.matrix([[0, 4, 5, 2],
[1, 0, 2, -6],
[4, 1, 0, -2],
[1, 7, 1, 0]], dtype='float64')
b = np.matrix([9, -3, 1, -3], dtype='float64').T
x = np.matrix([None]*4, dtype='float64').T
# Example2
a = np.matrix([[2, 4, 6],
[1, -1, 5],
[4, 1, -2]], dtype='float64')
b = np.matrix([28, 7, 21], dtype='float64').T
x = np.matrix([None]*3, dtype='float64').T
# for check sum
ori_a = a
# define extended coefficient matrix of eq ax=b
a = np.concatenate([a, b], axis=1)
(row, col) = a.shape
for j in range(row):
# search pivot
max_idx = j+np.argmax(abs(a[j:, j]))
# swap!
a[[j, max_idx]] = a[[max_idx, j]]
# push forward
for i in range(j+1, row):
p = -a[i, j] / a[j, j]
a[i] += p*a[j]
print('augmented matrix after Gaussian elimination with partial pivoting:\n a={}'.format(a))
# back forward
for i in range(row)[::-1]:
x[i] = (a[i, -1]-np.dot(a[i, i+1:row], x[i+1:row]))/a[i, i]
print('x={}'.format(x))
# confirm
print("ax-b\n={}".format(ori_a@x-b))
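# Cross-check the hand-rolled solver against NumPy's solver (added illustration);
# for the well-conditioned example above the two results should agree closely.
print("matches np.linalg.solve: {}".format(np.allclose(x, np.linalg.solve(ori_a, b))))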
| 2.234375 | 2 |
setup.py | hansek/django-groups-sync | 0 | 12794870 | <gh_stars>0
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="django-groups-sync",
version="0.0.7",
author="<NAME>",
author_email="<EMAIL>",
description="A set of management commands to export and sync Django User Groups permissions between environments.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/hansek/django-groups-sync",
license="License :: OSI Approved :: MIT License",
packages=setuptools.find_packages(exclude=["contrib", "docs", "tests*"]),
install_requires=[],
scripts=[],
entry_points={},
classifiers=[
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Framework :: Django",
"Framework :: Django :: 1.11",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Intended Audience :: Developers",
],
)
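# Typical build-and-publish flow for this package (assuming setuptools, wheel and
# twine are installed; not part of the original file):
#   python setup.py sdist bdist_wheel
#   twine upload dist/*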
| 1.289063 | 1 |
VGGish/f_XGboost_spatial_class_hand.py | ducphucnguyen/TransferLearningWFN | 0 | 12794871 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 23 11:24:09 2021
@author: nguy0936
I used this code to classify noise at different location using Xhand data
"""
# load packages
import pandas as pd
import umap
import matplotlib.pyplot as plt
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import xgboost as xgb
from xgboost import XGBClassifier
from xgboost import cv
from sklearn.metrics import roc_auc_score
from hyperopt import STATUS_OK, Trials, fmin, hp, tpe
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_curve
from sklearn.preprocessing import label_binarize
##========== load low-high level features
mdir1 = 'R:\\CMPH-Windfarm Field Study\\Duc Phuc Nguyen\\4. Machine Listening\Data set\\set1\\'
mdir2 = 'R:\\CMPH-Windfarm Field Study\\Duc Phuc Nguyen\\4. Machine Listening\Data set\\set2\\'
mdir6 = 'R:\\CMPH-Windfarm Field Study\\Duc Phuc Nguyen\\4. Machine Listening\Data set\\set6\\'
mdir9 = 'R:\\CMPH-Windfarm Field Study\\Duc Phuc Nguyen\\4. Machine Listening\Data set\\set9_WL8\\'
def load_feature(mdir): # function to load data
conv1 = pd.read_csv(mdir + 'result_conv1.csv', header=None) # conv1
conv2 = pd.read_csv(mdir + 'result_conv2.csv', header=None) # conv2
embedding = pd.read_csv(mdir + 'result_embedding.csv', header=None) # embedding
X_hand = pd.read_csv(mdir + 'X_hand.csv') # bias features
X_hand = X_hand.fillna(0)
Y = pd.read_csv(mdir + 'Y.csv', header=None) # score
y = Y
y[:]=np.where(y<3,0,1)
# combine data
lowd_conv1 = PCA(n_components=10).fit_transform(conv1)
lowd_conv2 = PCA(n_components=10).fit_transform(conv2)
lowd_embedding = PCA(n_components=20).fit_transform(embedding)
lowd_frames = [pd.DataFrame(lowd_conv1), pd.DataFrame(lowd_conv2), pd.DataFrame(lowd_embedding)]
lowd_df = pd.concat(lowd_frames, axis=1)
scaled_lowd_df = StandardScaler().fit_transform(lowd_df)
return lowd_conv1, lowd_embedding, y, X_hand
lowd_conv1_1, lowd_embedding1, y1, X_hand1 = load_feature(mdir1) # set1
lowd_conv1_2, lowd_embedding2, y2, X_hand2 = load_feature(mdir2) # set2
lowd_conv1_6, lowd_embedding6, y6, X_hand6 = load_feature(mdir6) # set6
lowd_conv1_9, lowd_embedding9, y9, X_hand9 = load_feature(mdir9) # set9
df = np.concatenate((X_hand1, X_hand2, X_hand6), axis=0)
#df_hand = np.concatenate((X_hand1, X_hand2), axis=0)
#y_AM = np.concatenate((y1, y2), axis=0)
scaled_df = StandardScaler().fit_transform(df)
y1 = 1*np.ones((5000,), dtype=int)
y2 = 2*np.ones((3000,), dtype=int)
y6 = 3*np.ones((1000,), dtype=int)
#y9 = 4*np.ones((1000,), dtype=int)
y = np.concatenate((y1, y2, y6), axis=0)
# compute a 2-D UMAP embedding of the features
reducer = umap.UMAP(random_state=42,
n_neighbors=20,
min_dist=0.0,
metric='euclidean',
n_components=2)
data_umap = reducer.fit_transform(scaled_df)
def clf_location(df,y):
    #======== Split data for training and testing
# split data into train and test sets (80% for training and 20% for testing)
X_train, X_test, y_train, y_test = train_test_split(df, y, test_size = 0.2) # note X vs Xhand
params_deep = {"objective":"multi:softmax",
'max_depth': 19,
'learning_rate': 0.13,
'gamma': 1.11,
'min_child_weight': 31,
'colsample_bytree': 0.92,
'reg_alpha': 5.0,
'reg_lambda': 0.796,
'scale_pos_weight': 1,
'n_estimators': 200}
# train the classifier to the training data
xgb_clf = XGBClassifier(**params_deep)
xgb_clf.fit(X_train, y_train ) # train with deep features
y_test_pred = xgb_clf.predict_proba(X_test)
y_test = label_binarize(y_test, classes=[1, 2, 3])
# print( roc_auc_score(y_test, y_test_pred) )
return roc_auc_score(y_test, y_test_pred)
#AUC = np.empty([10, 1])
#for i in range(0,10):
# AUC[i] = clf_location(data_umap,y)
LALC = df[:, [13, 15]]
LALC[:,1] = np.multiply(LALC[:,0], LALC[:,1])
AUC2 = np.empty([10, 1])
for i in range(0,10):
AUC2[i] = clf_location(LALC,y)
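# Summarise the repeated runs above (added illustration).
print('AUC over 10 runs (LA/LC features): mean={:.3f}, std={:.3f}'.format(
    AUC2.mean(), AUC2.std()))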
| 2.1875 | 2 |
examples/control_point_av.py | aleixq/python3-brisa | 4 | 12794872 | # Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php or see LICENSE file.
# Copyright 2007-2008 <NAME> <<EMAIL>>
from brisa.core.reactors import install_default_reactor
reactor = install_default_reactor()
import sys
from brisa.upnp.control_point.control_point_av import ControlPointAV
from brisa.core.threaded_call import run_async_function
class CommandLineControlPointAV(ControlPointAV):
def __init__(self):
ControlPointAV.__init__(self)
self.running = True
self._initial_subscribes()
self.devices_found = []
self.commands = {'start': self._search,
'stop': self._stop,
'list': self._cmd_list_devices,
'exit': self._exit,
'help': self._help}
def _initial_subscribes(self):
self.subscribe('new_device_event', self.on_new_device)
self.subscribe('remove_device_event', self.on_remove_device)
def on_new_device(self, dev):
self.devices_found.append(dev)
def on_remove_device(self, udn):
for dev in self.devices:
if dev.udn == udn:
self.devices_found.remove(dev)
break
def _cmd_list_devices(self):
n = 0
for dev in self.devices_found:
print 'device %d:' % n
print '\tudn:', dev.udn
print '\tfriendly_name:', dev.friendly_name
print '\tservices:', dev.services
print '\ttype:', dev.device_type
if dev.devices:
print '\tchild devices:'
for child_dev in dev.devices.values():
print '\t\tudn:', child_dev.udn
print '\t\tfriendly_name:', child_dev.friendly_name
print '\t\tservices:', dev.services
print '\t\ttype:', child_dev.device_type
print
n += 1
def _cmd_set_server(self, id):
self._current_server = self.devices_found[id]
def _cmd_set_render(self, id):
self._current_renderer = self.devices_found[id]
def _cmd_browse(self, id):
result = self.browse(id, 'BrowseDirectChildren', '*', 0, 10)
result = result['Result']
for d in result:
print "%s %s %s" % (d.id, d.title, d.upnp_class)
def _search(self):
self.start_search(600, 'upnp:rootdevice')
print 'search started'
def _stop(self):
self.stop_search()
print 'search stopped'
def _help(self):
print 'commands: start, stop, list, ' \
'browse, set_server, set_render, play, exit, help'
def _exit(self):
self.running = False
def run(self):
self.start()
run_async_function(self._handle_cmds)
reactor.add_after_stop_func(self.stop)
reactor.main()
def _handle_cmds(self):
try:
while self.running:
command = str(raw_input('>>> '))
try:
self.commands[command]()
except KeyError:
if command.startswith('browse'):
self._cmd_browse(command.split(' ')[1])
elif command.startswith('set_server'):
self._cmd_set_server(int(command.split(' ')[1]))
elif command.startswith('set_render'):
self._cmd_set_render(int(command.split(' ')[1]))
elif command.startswith('play'):
self.av_play(command.split(' ')[1])
else:
print 'Invalid command, try help'
command = ''
except KeyboardInterrupt, k:
print 'quiting'
reactor.main_quit()
def main():
print "BRisa ControlPointAV example\n"
cmdline = CommandLineControlPointAV()
cmdline.run()
if __name__ == "__main__":
main()
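# Sketch of an interactive session (device indices and item id are hypothetical;
# the commands come from self.commands / _handle_cmds above):
#   >>> start            # begin searching for upnp:rootdevice
#   >>> list             # list discovered devices
#   >>> set_server 0
#   >>> set_render 1
#   >>> browse 0         # browse object id "0" on the current server
#   >>> play 64          # play item 64 on the current renderer
#   >>> exit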
| 1.90625 | 2 |
OnlineStudy/rbac/service/routers.py | NanRenTeam-9/MongoMicroCourse | 132 | 12794873 | from collections import OrderedDict
from django.utils.module_loading import import_string
from django.conf import settings
from django.urls.resolvers import URLResolver, URLPattern
import re
def check_url_exclude(url):
for regex in settings.AUTO_DISCOVER_EXCLUDE:
if re.match(regex, url):
return True
def recursive_url(pre_namespace, pre_url, urlpattern, url_order_dict):
"""
    Recursively discover URLs.
    :param pre_namespace: root namespace alias
    :param pre_url: URL prefix accumulated so far
    :param urlpattern: list of URL patterns (the routing table)
    :param url_order_dict: ordered dict used to collect every route found during the recursion
    :return:
"""
for item in urlpattern:
        if isinstance(item, URLPattern):  # a plain URL pattern, not an include()
if not item.name:
continue
if pre_namespace:
name = '%s:%s' % (pre_namespace, item.name)
else:
name = item.name
url = pre_url + item.pattern.regex.pattern
            url = url.replace('^', '').replace('$', '')  # strip the regex anchors (^ and $)
if check_url_exclude(url):
continue
url_order_dict[name] = {'name': name, 'url': url}
        elif isinstance(item, URLResolver):  # an include() that dispatches to nested patterns
if pre_namespace:
if item.namespace:
namespace = '%s:%s' % (pre_namespace, item.namespace)
else:
                    # namespace = item.namespace  # alternative way to write this
namespace = pre_namespace
else:
if item.namespace:
namespace = item.namespace
else:
namespace = None
# print(item.pattern.regex.pattern)
recursive_url(namespace, pre_url + item.pattern.regex.pattern, item.url_patterns, url_order_dict)
def get_all_url_dict():
url_order_dict = OrderedDict()
root = import_string(settings.ROOT_URLCONF)
recursive_url(None, '/', root.urlpatterns, url_order_dict)
return url_order_dict
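# Typical consumption of the helper above (a minimal sketch; variable names are
# hypothetical):
#   for alias, info in get_all_url_dict().items():
#       print(alias, info['url'])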
| 2.375 | 2 |
tests/views/test_camera_control_widget.py | lsst-sitcom/spot_motion_monitor | 0 | 12794874 | # This file is part of spot_motion_monitor.
#
# Developed for LSST System Integration, Test and Commissioning.
#
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
from PyQt5.QtCore import Qt
from spot_motion_monitor.views.camera_control_widget import CameraControlWidget
class TestCameraControl():
def setup_class(self):
self.fast_timeout = 250 # ms
def stateIsFalse(self, state):
return not state
def stateIsTrue(self, state):
print("A:", state)
return state
def test_startStopCameraButton(self, qtbot):
cc = CameraControlWidget()
cc.show()
qtbot.addWidget(cc)
assert not cc.startStopButton.isChecked()
assert cc.startStopButton.text() == "Start Camera"
assert cc.acquireRoiCheckBox.isEnabled() is False
assert cc.acquireFramesButton.isEnabled() is False
with qtbot.waitSignal(cc.cameraState, timeout=self.fast_timeout,
check_params_cb=self.stateIsTrue):
qtbot.mouseClick(cc.startStopButton, Qt.LeftButton)
assert cc.startStopButton.isChecked()
assert cc.startStopButton.text() == "Stop Camera"
assert cc.acquireRoiCheckBox.isEnabled() is True
assert cc.acquireFramesButton.isEnabled() is True
with qtbot.waitSignal(cc.cameraState, timeout=self.fast_timeout,
check_params_cb=self.stateIsFalse):
qtbot.mouseClick(cc.startStopButton, Qt.LeftButton)
assert not cc.startStopButton.isChecked()
assert cc.startStopButton.text() == "Start Camera"
def test_acquireFramesButton(self, qtbot):
cc = CameraControlWidget()
cc.show()
qtbot.addWidget(cc)
qtbot.mouseClick(cc.startStopButton, Qt.LeftButton)
assert not cc.acquireFramesButton.isChecked()
assert cc.acquireFramesButton.text() == "Start Acquire Frames"
with qtbot.waitSignal(cc.acquireFramesState, timeout=self.fast_timeout,
check_params_cb=self.stateIsTrue):
qtbot.mouseClick(cc.acquireFramesButton, Qt.LeftButton)
assert cc.acquireFramesButton.isChecked()
assert not cc.startStopButton.isEnabled()
assert cc.acquireFramesButton.text() == "Stop Acquire Frames"
with qtbot.waitSignal(cc.acquireFramesState, timeout=self.fast_timeout,
check_params_cb=self.stateIsFalse):
qtbot.mouseClick(cc.acquireFramesButton, Qt.LeftButton)
assert not cc.acquireFramesButton.isChecked()
assert cc.acquireFramesButton.text() == "Start Acquire Frames"
assert cc.startStopButton.isEnabled()
def test_acquireRoiCheckbox(self, qtbot):
cc = CameraControlWidget()
cc.show()
qtbot.addWidget(cc)
qtbot.mouseClick(cc.startStopButton, Qt.LeftButton)
assert not cc.acquireRoiCheckBox.isChecked()
with qtbot.waitSignal(cc.acquireRoiState, timeout=self.fast_timeout,
check_params_cb=self.stateIsTrue):
qtbot.mouseClick(cc.acquireRoiCheckBox, Qt.LeftButton)
assert cc.acquireRoiCheckBox.isChecked()
assert not cc.roiFpsSpinBox.isEnabled()
assert not cc.bufferSizeSpinBox.isEnabled()
with qtbot.waitSignal(cc.acquireRoiState, timeout=self.fast_timeout,
check_params_cb=self.stateIsFalse):
qtbot.mouseClick(cc.acquireRoiCheckBox, Qt.LeftButton)
assert not cc.acquireRoiCheckBox.isChecked()
assert cc.roiFpsSpinBox.isEnabled()
assert cc.bufferSizeSpinBox.isEnabled()
def test_roiFpsSpinBox(self, qtbot):
cc = CameraControlWidget()
cc.show()
qtbot.addWidget(cc)
assert cc.roiFpsSpinBox.value() == 40
cc.roiFpsSpinBox.setValue(0)
assert cc.roiFpsSpinBox.value() == 1
cc.roiFpsSpinBox.setValue(200)
assert cc.roiFpsSpinBox.value() == 150
cc.roiFpsSpinBox.stepUp()
assert cc.roiFpsSpinBox.value() == 150
cc.roiFpsSpinBox.stepDown()
assert cc.roiFpsSpinBox.value() == 149
def test_bufferSizeSpinBox(self, qtbot):
cc = CameraControlWidget()
cc.show()
qtbot.addWidget(cc)
assert cc.bufferSizeSpinBox.value() == 1024
cc.bufferSizeSpinBox.stepUp()
assert cc.bufferSizeSpinBox.value() == 2048
cc.bufferSizeSpinBox.setValue(1024)
cc.bufferSizeSpinBox.stepDown()
assert cc.bufferSizeSpinBox.value() == 512
def test_showFramesCheckBox(self, qtbot):
cc = CameraControlWidget()
cc.show()
qtbot.addWidget(cc)
assert cc.showFramesCheckBox.isChecked()
qtbot.mouseClick(cc.showFramesCheckBox, Qt.LeftButton)
assert not cc.showFramesCheckBox.isChecked()
def test_takeScreenshotButton(self, qtbot):
cc = CameraControlWidget()
cc.show()
qtbot.addWidget(cc)
assert cc.takeScreenshotButton.isEnabled() is False
qtbot.mouseClick(cc.startStopButton, Qt.LeftButton)
qtbot.mouseClick(cc.acquireFramesButton, Qt.LeftButton)
assert cc.takeScreenshotButton.isEnabled() is True
with qtbot.waitSignal(cc.takeScreenshotState, timeout=self.fast_timeout):
qtbot.mouseClick(cc.takeScreenshotButton, Qt.LeftButton)
| 2.15625 | 2 |
baselines/neural_best_buddies/get_missing.py | iviazovetskyi/rewriting | 526 | 12794875 | <reponame>iviazovetskyi/rewriting
import os
from netdissect import pidfile
from options.options import Options
from tqdm import tqdm
opt = Options().parse()
def get_imgs():
img_nums = sorted([int(f.strip().split(f'{base_name}_')[1].split('.')[0]) for f in os.listdir(opt.source)])
file_names = [f'{base_name}_{num}.png' for num in img_nums]
return img_nums, file_names
def get_imgnums(root):
base_name = os.path.basename(root)
img_nums = sorted([int(f.strip().split(f'{base_name}_')[1].split('.')[0]) for f in os.listdir(root)])
file_names = [f'{base_name}_{num}.png' for num in img_nums]
return list(zip(img_nums, file_names))[:10000]
def check_missing(src_root, corr_root):
dne = []
for imgnum, file_path in tqdm(get_imgnums(src_root)):
if not os.path.exists(os.path.join(corr_root, str(imgnum), 'BtoA.npy')):
dne.append(imgnum)
return dne
missing = check_missing(opt.source, opt.results_dir)
base_name = os.path.basename(opt.source)
def main():
import numpy as np
from models import vgg19_model
from algorithms import neural_best_buddies as NBBs
from util import util
from util import MLS
vgg19 = vgg19_model.define_Vgg19(opt)
img_nums, images = get_imgs()
for imgnum in tqdm(missing):
print(imgnum)
save_dir = os.path.join(opt.results_dir, str(imgnum))
if os.path.exists(os.path.join(save_dir, 'BtoA.npy')):
continue
try:
print('Working on', imgnum)
source_path = os.path.join(opt.source, f'{base_name}_{imgnum}.png')
A = util.read_image(source_path, opt.imageSize)
B = util.read_image(opt.target, opt.imageSize)
print(A.shape, B.shape)
nbbs = NBBs.sparse_semantic_correspondence(vgg19, opt.gpu_ids, opt.tau,
opt.border_size, save_dir,
opt.k_per_level, opt.k_final,
opt.fast)
points = nbbs.run(A, B)
mls = MLS.MLS(v_class=np.int32)
mls.run_MLS_in_folder(root_folder=save_dir)
except Exception as e:
print(e)
with open(os.path.join(save_dir, 'no_correspondence.txt'), 'w') as f:
f.write('')
if __name__ == "__main__":
main()
| 2.21875 | 2 |
asyncio_connection_pool/contrib/datadog.py | fellowinsights/asyncio-connection-pool | 6 | 12794876 | <gh_stars>1-10
from contextlib import asynccontextmanager, AsyncExitStack
from datadog import statsd
from ddtrace import tracer
from typing import AsyncIterator, TypeVar
from asyncio_connection_pool import ConnectionPool as _ConnectionPool
__all__ = ("ConnectionPool",)
Conn = TypeVar("Conn")
class ConnectionPool(_ConnectionPool[Conn]):
def __init__(self, service_name, *args, extra_tags=None, **kwargs):
super().__init__(*args, **kwargs)
self._connections_acquiring = 0
self._service_name = service_name
self._is_bursting = False
self._reported_hitting_burst_limit = False
self._extra_tags = extra_tags or []
self._loop.call_soon(self._periodically_send_metrics)
def _periodically_send_metrics(self):
try:
self._record_pressure()
finally:
self._loop.call_later(60, self._periodically_send_metrics)
def _record_pressure(self):
statsd.gauge(
f"{self._service_name}.pool.total_connections",
self._total,
tags=self._extra_tags,
)
statsd.gauge(
f"{self._service_name}.pool.available_connections",
self.available.qsize(),
tags=self._extra_tags,
)
statsd.gauge(
f"{self._service_name}.pool.waiting", self._waiters, tags=self._extra_tags
)
statsd.gauge(
f"{self._service_name}.pool.connections_used",
self.in_use,
tags=self._extra_tags,
)
self._record_connection_acquiring()
if self._total > self.max_size:
if not self._is_bursting:
self._is_bursting = True
statsd.event(
f"{self._service_name} pool using burst capacity",
f"Pool max size of {self.max_size} will be exceeded temporarily, up to {self.burst_limit}", # noqa E501
alert_type="warning",
tags=self._extra_tags,
)
elif self._is_bursting:
self._is_bursting = False
self._reported_hitting_burst_limit = False
statsd.event(
f"{self._service_name} pool no longer bursting",
f"Number of connections has dropped below {self.max_size}",
alert_type="success",
tags=self._extra_tags,
)
        if self._total == self.burst_limit and not self._reported_hitting_burst_limit:
self._reported_hitting_burst_limit = True
statsd.event(
f"{self._service_name} pool reached burst limit",
"There are not enough redis connections to satisfy all users",
alert_type="error",
tags=self._extra_tags,
)
def _record_connection_acquiring(self, value=0):
self._connections_acquiring += value
statsd.gauge(
f"{self._service_name}.pool.connections_acquiring",
self._connections_acquiring,
tags=self._extra_tags,
)
def _connection_maker(self):
statsd.increment(
f"{self._service_name}.pool.getting_connection",
tags=self._extra_tags + ["method:new"],
)
async def connection_maker(self):
with tracer.trace(
f"{self._service_name}.pool._create_new_connection",
service=self._service_name,
):
return await super()._connection_maker()
return connection_maker(self)
def _connection_waiter(self):
statsd.increment(
f"{self._service_name}.pool.getting_connection",
tags=self._extra_tags + ["method:wait"],
)
async def connection_waiter(self):
with tracer.trace(
f"{self._service_name}.pool._wait_for_connection",
service=self._service_name,
):
return await super()._connection_waiter()
return connection_waiter(self)
def _get_conn(self):
if not self.available.empty():
statsd.increment(
f"{self._service_name}.pool.getting_connection",
tags=self._extra_tags + ["method:available"],
)
return super()._get_conn()
@asynccontextmanager
async def get_connection(self) -> AsyncIterator[Conn]: # type: ignore
async with AsyncExitStack() as stack:
self._record_connection_acquiring(1)
try:
with tracer.trace(
f"{self._service_name}.pool.acquire_connection",
service=self._service_name,
):
conn = await stack.enter_async_context(super().get_connection())
finally:
self._record_connection_acquiring(-1)
self._record_pressure()
yield conn
self._record_pressure()
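# Usage sketch (added for illustration, not part of the upstream module). The wrapper
# only adds `service_name` and `extra_tags`; everything else is forwarded to the base
# ConnectionPool, so `base_pool_kwargs` below is a hypothetical placeholder for that
# constructor's arguments:
#
#     pool = ConnectionPool("my-redis", extra_tags=["env:dev"], **base_pool_kwargs)
#
#     async def do_work():
#         async with pool.get_connection() as conn:
#             ...  # use the connection; pool pressure metrics and traces are emitted around it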
| 2.234375 | 2 |
parse_map.py | cadoman/map-parser | 0 | 12794877 | <filename>parse_map.py
import argparse
import matplotlib.pyplot as plt
from shapely import geometry
from map_extractor import preprocessing, shape_extraction, country_naming
import skimage.color
from map_extractor.PolygonGroup import PolygonGroup
import json
import numpy as np
import cv2
def display_img(img, title="", big=False) :
if big :
plt.figure(figsize=(18, 16), dpi= 80, facecolor='w', edgecolor='k')
plt.title(title)
plt.imshow(img, cmap='gray')
plt.show()
def parse_ignored_boxes(box_string):
return [geometry.box(tup[0], tup[1], tup[2], tup[3]) for tup in eval(box_string)]
def get_args() :
parser = argparse.ArgumentParser(description='Parse a map to labelled svgs')
parser.add_argument("--input", type=str, dest="input_file",
help="Path of the map to parse", required=True)
parser.add_argument("--ignore", type=str, dest="ignored_boxes", help='A list of comma separated rectangle (minx, miny, maxx, maxy), ex : "(20, 30, 200, 400) , (0, 0, 100, 100)"')
parser.add_argument(
"--colors",
type=int,
dest="nb_colors",
help="The estimated number of color on the map (usually works better by over-estimating the number of colors)",
required=True
)
return parser.parse_args()
def main() :
args = get_args()
print('Loading image...')
image = skimage.color.rgba2rgb(plt.imread(args.input_file))
if args.ignored_boxes :
print('Removing ignored areas...')
image = preprocessing.remove_ignored_areas(image, parse_ignored_boxes(args.ignored_boxes))
print('Clustering image colors..')
image_clustered = preprocessing.regroup_image_colors(image, args.nb_colors)
# image_clustered = skimage.color.rgba2rgb(plt.imread('notebooks/tmp/clustered_europe.png'))
polygon_groups = shape_extraction.extract_shapes(image_clustered)
# for i, shape in enumerate(shapes) :
# with open('/tmp/shape_'+str(i)+'.json', 'w') as f :
# dict_shape = shape.to_dict()
# json_rep = json.dumps(dict_shape)
# f.write(json_rep)
# for i in range(16) :
# with open('/tmp/shape_'+str(i)+'.json', 'r') as f :
# jsonrep = f.read()
# a = PolygonGroup.from_dict(json.loads(jsonrep))
# polygon_groups.append(a)
# img = skimage.color.rgba2rgb(plt.imread('notebooks/tmp/europe_cleaned.png'))
pg_group_named = []
print('Performing OCR with Tesseract ...')
ocr_results = country_naming.apply_tesseract(image)
print('Extracting OCR results..')
for group in polygon_groups:
pg_group_named = np.concatenate((pg_group_named, country_naming.label_polygon_group(ocr_results, group)))
print('Done')
def disp_ocr_results(img, ocr_results) :
todisp = img.copy()
disp_color = (int(np.max(img)) , 0, 0)
for i, row in ocr_results.iterrows():
(minx, miny, maxx, maxy) = np.array(row['bbox'].bounds).astype(int)
cv2.rectangle(todisp, (minx, miny), (maxx, maxy),disp_color , 2)
cv2.putText(todisp,row['text'], (maxx+3,maxy), cv2.FONT_HERSHEY_SIMPLEX, 1, disp_color, 2)
display_img(todisp, '', True)
if __name__ == "__main__" :
main()
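    # Example invocation (illustrative; the file name and values are placeholders):
    #   python parse_map.py --input my_map.png --colors 12 \
    #       --ignore "(20, 30, 200, 400) , (0, 0, 100, 100)"
    # --input and --colors are required; --ignore optionally lists rectangles to mask out.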
| 2.828125 | 3 |
assemblyline/assemblyline/common/yara/yara_importer.py | dendisuhubdy/grokmachine | 46 | 12794878 | <reponame>dendisuhubdy/grokmachine
import os
import logging
from assemblyline.common import isotime
from assemblyline.al.common import forge
class YaraImporter(object):
REQUIRED_META = ['description', 'id', 'organisation', 'poc', 'rule_version', 'yara_version', 'rule_group']
def __init__(self, logger=None):
if not logger:
from assemblyline.al.common import log as al_log
al_log.init_logging('yara_importer')
logger = logging.getLogger('assemblyline.yara_importer')
logger.setLevel(logging.INFO)
yara_parser_class = forge.get_yara_parser()
self.ds = forge.get_datastore()
self.yp = yara_parser_class()
self.log = logger
self._id_cache = {}
self._name_cache = []
def _get_next_id(self, org):
if org in self._id_cache:
self._id_cache[org] += 1
else:
self._id_cache[org] = self.ds.get_last_signature_id(org) + 1
return self._id_cache[org]
# noinspection PyMethodMayBeStatic
def translate_rule(self, rule):
return rule
def display_rule(self, rule):
return self.yp.dump_rule_file([rule], fake_dependencies=True)
def display_rules(self, rules):
return self.yp.dump_rule_file(rules, fake_dependencies=True)
def import_now(self, rules):
failed_list = []
for rule in rules:
validation_error = rule.get('validation_error', None)
if validation_error:
failed_list.append((rule['name'], "Previously failed rule validation (%s)" % validation_error))
continue
if rule['meta']['id'] == "<AUTO_INCREMENT>":
rule['meta']['id'] = "%s_%06d" % (rule['meta']['organisation'],
self._get_next_id(rule['meta']['organisation']))
if rule['meta']['rule_version'] == "<AUTO_INCREMENT>":
rule['meta']['rule_version'] = self.ds.get_last_rev_for_id(rule['meta']['id'])
if rule.get('is_new_revision', False):
del rule['is_new_revision']
new_id, new_rev = self.ds.get_next_rev_for_name(rule['meta']['organisation'], rule['name'])
if new_id is not None and new_rev is not None:
rule['meta']['id'], rule['meta']['rule_version'] = new_id, new_rev
else:
failed_list.append((rule['name'], "Could not find matching rule to increment revision number."))
continue
key = "%sr.%s" % (rule['meta']['id'], rule['meta']['rule_version'])
yara_version = rule['meta'].get('yara_version', None)
rule['meta']['creation_date'] = isotime.now_as_iso()
rule['meta']['last_saved_by'] = rule['meta']['al_imported_by']
rule['depends'], rule['modules'] = self.yp.parse_dependencies(rule['condition'],
self.yp.YARA_MODULES.get(yara_version, None))
res = self.yp.validate_rule(rule)
if res['valid']:
rule['warning'] = res.get('warning', None)
self.ds.save_signature(key, rule)
self.log.info("Added signature %s" % rule['name'])
else:
failed_list.append((rule['name'], "Failed rule validation (%s)" % res['message']['error']))
return failed_list
def validate_rule(self, rule):
return self.yp.validate_rule(rule)
# noinspection PyBroadException
def validate(self, field, value, rule):
if not value:
return False, "%s cannot be empty." % field
elif field == "name":
if " " in value:
return False, "There should be no space in the name."
elif field == "yara_version":
if value not in self.yp.VALID_YARA_VERSION:
return False, "yara_version should be one of the following: %s" % ", ".join(self.yp.VALID_YARA_VERSION)
elif field == "rule_version":
try:
int(value)
except:
return False, "rule_version should be a simple integer value"
elif field == "rule_group":
if value not in self.yp.RULE_GROUPS:
return False, "rule_group should be one of the following: %s" % ", ".join(self.yp.RULE_GROUPS)
elif field == "organisation":
if value != value.upper():
return False, "organisation should be in all CAPS."
elif field == "id":
if not value == "<AUTO_INCREMENT>":
try:
org, num = value.split("_")
if len(num) != 6:
error = True
elif org != rule['meta']['organisation']:
error = True
else:
int(num)
error = False
except:
error = True
if error:
return False, "id should have the following schema: ORG_000000"
return True, ""
def check_for_id_conflicts(self, rid, rev):
if rid is None or rev is None:
return False
key = "%sr.%s" % (rid, rev)
id_lookup = self.ds.get_signature(key)
if id_lookup:
return True
return False
def check_for_name_conflicts(self, name):
try:
name_lookup = self.ds.search_signature(query="name:%s" % name, rows=0)
if name_lookup['total'] > 0:
return True
if name in self._name_cache:
return True
return False
finally:
self._name_cache.append(name)
def parse_data(self, yara_bin, force_safe_str=False):
output = []
parsed_rules = self.yp.parse_rule_file(yara_bin, force_safe_str=force_safe_str)
for rule in parsed_rules:
missing_meta = []
for item in self.REQUIRED_META:
if item not in rule['meta']:
missing_meta.append(item)
id_conflict = self.check_for_id_conflicts(rule['meta'].get('id', None),
rule['meta'].get('rule_version', None))
name_conflict = self.check_for_name_conflicts(rule['name'])
output.append({'rule': rule,
"missing_meta": sorted(missing_meta, reverse=True),
"id_conflict": id_conflict,
"name_conflict": name_conflict})
return output
def parse_file(self, cur_file, force_safe_str=False):
cur_file = os.path.expanduser(cur_file)
if os.path.exists(cur_file):
with open(cur_file, "rb") as yara_file:
yara_bin = yara_file.read()
return self.parse_data(yara_bin, force_safe_str=force_safe_str)
else:
raise Exception("File '%s' does not exists.")
def parse_files(self, files, force_safe_str=False):
output = {}
for cur_file in files:
try:
output[cur_file] = self.parse_file(cur_file, force_safe_str=force_safe_str)
            except Exception as e:
output[cur_file] = e
return output
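# Rough usage sketch (illustrative only; assumes a configured assemblyline
# datastore and an existing rules file -- '/tmp/rules.yar' is a placeholder):
#
#     importer = YaraImporter()
#     parsed = importer.parse_file('/tmp/rules.yar')
#     failed = importer.import_now([entry['rule'] for entry in parsed])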
| 1.757813 | 2 |
test.py | doncat99/zvt | 10 | 12794879 | <filename>test.py
# import asyncio
# import functools
# import time
# import random
# import multiprocessing
# import aiomultiprocess
# import uvloop
# import numpy as np
# from tqdm import tqdm
# # # download coroutine
# # async def download(url):
# #     cost = time.time()
# #     await asyncio.sleep(3) # simulate a 1-second download
# #     return 'cost: {}'.format(round(time.time()-cost, 3))
# # # callback function
# # def on_finish(task):
# #     print('download finished:', task.result()) # get the coroutine's return value or the raised exception
# # def mp_schedule():
# # pages = list(range(500))
# # prefix = 'https://yuerblog.cc/'
# # urls = ['{}{}'.format(prefix, page) for page in pages]
# # # url_chunks = np.array_split(urls ,5)
# # with multiprocessing.Pool(processes=5) as pool:
# # for url_chunk in urls:
# # pool.apply_async(download, args = (url_chunk, ), callback = on_finish)
# # async def aio_mp_schedule():
# # pages = list(range(500))
# # prefix = 'https://yuerblog.cc/'
# # urls = ['{}{}'.format(prefix, page) for page in pages]
# # pbar = tqdm(total=len(urls), ncols=80)
# # async with aiomultiprocess.Pool(processes=5, loop_initializer=uvloop.new_event_loop) as pool:
# # async for result in pool.map(download, urls):
# # pbar.update()
# # async def schedule():
# # pages = list(range(500))
# # prefix = 'https://yuerblog.cc/'
# # tasks = [asyncio.create_task(download('{}{}'.format(prefix, page))) for page in pages]
# # responses = [await t for t in tqdm(asyncio.as_completed(tasks), total=len(tasks), ncols=80)]
# # if __name__ == '__main__':
# # uvloop.install()
# # cost = time.time()
# # # asyncio.run(schedule())
# # mp_schedule()
# # print("total time cost: {}".format(round(time.time()-cost, 3)))
# import multiprocessing as mp
# import time
# async def foo_pool(x):
# await asyncio.sleep(2)
# return x*x
# result_list = []
# def log_result(result):
# # This is called whenever foo_pool(i) returns a result.
# # result_list is modified only by the main process, not the pool workers.
# result_list.append(result)
# print(result)
# def apply_async_with_callback():
# pool = mp.Pool(processes=3)
# for i in range(10):
# pool.apply_async(foo_pool, args = (i, ), callback = log_result)
# pool.close()
# pool.join()
# print(result_list)
# if __name__ == '__main__':
# cost = time.time()
# apply_async_with_callback()
# print("total cost: {}".format(round(time.time()-cost, 3)))
# # def on_finish(future, n):
# # print('{}: future done: {}'.format(n, future.result()))
# # async def register_callbacks(all_done):
# # print('registering callbacks on future')
# # page = 1
# # while True:
# # all_done.add_done_callback(functools.partial(on_finish, n=page))
# # page += 1
# # if page > 5:
# # return
# # async def main(all_done):
# # await register_callbacks(all_done)
# # print('setting result of future')
# # all_done.set_result('the result')
# # event_loop = asyncio.get_event_loop()
# # try:
# # all_done = asyncio.Future()
# # event_loop.run_until_complete(main(all_done))
# # finally:
# # event_loop.close()
# # async def download(fut, delay, value):
# # cost = time.time()
# # # Sleep for *delay* seconds.
# # await asyncio.sleep(delay)
# # # Set *value* as a result of *fut* Future.
# # fut.set_result('task: {} cost: {}'.format(value, round(time.time()-cost, 3)))
# # async def main():
# # # Get the current event loop.
# # # loop = asyncio.get_running_loop()
# # # Create a new Future object.
# # # fut = loop.create_future()
# # # Run "set_after()" coroutine in a parallel Task.
# # # We are using the low-level "loop.create_task()" API here because
# # # we already have a reference to the event loop at hand.
# # # Otherwise we could have just used "asyncio.create_task()".
# # while True:
# # page = 1
# # asyncio.create_task(download(fut, 1, page))
# # if page > 5:
# # break
# # # Wait until *fut* has a result (1 second) and print it.
# # print(await fut)
# # asyncio.run(main())
# import aiohttp
# import asyncio
# import time
# from bs4 import BeautifulSoup
# from urllib.request import urljoin
# import re
# import multiprocessing as mp
# base_url = "https://morvanzhou.github.io/"
# seen = set()
# unseen = set([base_url])
# def parse(html):
# soup = BeautifulSoup(html, 'lxml')
# urls = soup.find_all('a', {"href": re.compile('^/.+?/$')})
# title = soup.find('h1').get_text().strip()
# page_urls = set([urljoin(base_url, url['href']) for url in urls])
# url = soup.find('meta', {'property': "og:url"})['content']
# return title, page_urls, url
# async def crawl(url, session):
# r = await session.get(url)
# html = await r.text()
# await asyncio.sleep(0.1) # slightly delay for downloading
# return html
# async def main(loop):
# processes = 8
# pool = mp.Pool(processes) # slightly affected
# async with aiohttp.ClientSession() as session:
# count = 1
# while len(unseen) != 0:
# print('\nAsync Crawling...')
# tasks = [loop.create_task(crawl(url, session)) for url in unseen]
# finished, unfinished = await asyncio.wait(tasks)
# htmls = [f.result() for f in finished]
# print('\nDistributed Parsing...')
# parse_jobs = [pool.apply_async(parse, args=(html,)) for html in htmls]
# results = pool.map(parse, htmls)
# # results = pool.map_async(parse, htmls).get()
# # print(parse_jobs.get())
# results = [j.get() for j in parse_jobs]
# # print(results)
# print('\nAnalysing...')
# seen.update(unseen)
# unseen.clear()
# for title, page_urls, url in results:
# # print(count, title, url)
# unseen.update(page_urls - seen)
# count += 1
# if __name__ == "__main__":
# t1 = time.time()
# loop = asyncio.get_event_loop()
# loop.run_until_complete(main(loop))
# loop.close()
# print("Async total time: ", time.time() - t1)
import requests
import json
import pandas as pd
url = 'http://127.0.0.1:5010/get_all/'
def a():
try:
resp = requests.get(url)
except Exception as e:
print(f'url: {url}, error: {e}')
return pd.DataFrame()
proxy_list_string = json.loads(resp.content)
for proxy in proxy_list_string:
print(proxy)
# proxy_dict = json.loads(proxy)
# print(proxy_dict)
a()
| 2.609375 | 3 |
teamdjibouti-solutions/teamdjibouti_contains.py | benkhaireh/parcrobotic | 0 | 12794880 | # Script pour rechercher un entier x dans un tableau
# ------------------------------------------------------------
# Function that searches for an integer x in an array
def contient(arr, taille, x):
    # Search for the integer x in the array.
if x in arr:
print("true")
else:
print("false")
# ------------------------------------------------------------
# Run the search function
contient([1, 2, 3, 4, 5, 6], 6, 4)
| 3.6875 | 4 |
pyny/fields.py | 7pairs/pyny | 0 | 12794881 | <filename>pyny/fields.py
# -*- coding: utf-8 -*-
#
# Copyright 2015−2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta, abstractmethod
import datetime
import decimal
class BaseField(metaclass=ABCMeta):
"""
すべてのフィールドクラスのスーパークラス。
新しいフィールドクラスを実装する場合は当クラスを継承すること。
"""
def __init__(self, name=None):
"""
BaseFieldを構築する。
:param name: 当フィールドが参照する項目のキー
:type name: str
"""
# プロパティを設定する
self.name = name
@abstractmethod
def convert(self, target):
"""
指定された値をそのフィールドクラスに適した型に変換する。
サブクラスでそれぞれのクラスに応じた値を返すようオーバーライドすること。
:param target: 変換対象の値
:type target: object
:return: 変換後の値
:rtype: object
"""
raise NotImplementedError()
class StringField(BaseField):
"""
文字列を表現するフィールドクラス。
"""
def __init__(self, name=None):
"""
StringFieldを構築する。
:param name: 当フィールドが参照する項目のキー
:type name: str
"""
# プロパティを設定する
super().__init__(name)
def convert(self, target):
"""
指定された値を文字列に変換する。
:param target: 変換対象の値
:type target: object
:return: 変換後の値
:rtype: str
"""
# 対象が文字列の場合はそのまま返却する
if isinstance(target, str):
return target
# 対象を文字列に変換する
return str(target)
class IntegerField(BaseField):
"""
整数を表現するフィールドクラス。
"""
def __init__(self, name=None):
"""
IntegerFieldを構築する。
:param name: 当フィールドが参照する項目のキー
:type name: str
"""
# プロパティを設定する
super().__init__(name)
def convert(self, target):
"""
指定された値を整数に変換する。
:param target: 変換対象の値
:type target: object
:return: 変換後の値
:rtype: int
"""
# 対象が整数の場合はそのまま返却する
if isinstance(target, int):
return target
# 対象を整数に変換する
return int(target)
class DecimalField(BaseField):
"""
固定小数点数を表現するフィールドクラス。
"""
def __init__(self, name=None):
"""
DecimalFieldを構築する。
:param name: 当フィールドが参照する項目のキー
:type name: str
"""
# プロパティを設定する
super().__init__(name)
def convert(self, target):
"""
指定された値を固定小数点数に変換する。
:param target: 変換対象の値
:type target: object
:return: 変換後の値
:rtype: decimal.Decimal
"""
# 対象が固定小数点数の場合はそのまま返却する
if isinstance(target, decimal.Decimal):
return target
# 対象を固定小数点数に変換する
return decimal.Decimal(str(target))
class FloatField(BaseField):
"""
浮動小数点数を表現するフィールドクラス。
"""
def __init__(self, name=None):
"""
FloatFieldを構築する。
:param name: 当フィールドが参照する項目のキー
:type name: str
"""
# プロパティを設定する
super().__init__(name)
def convert(self, target):
"""
指定された値を浮動小数点数に変換する。
:param target: 変換対象の値
:type target: object
:return: 変換後の値
:rtype: float
"""
# 対象が浮動小数点数の場合はそのまま返却する
if isinstance(target, float):
return target
# 対象を浮動小数点数に変換する
return float(target)
class DateField(BaseField):
"""
日付を表現するフィールドクラス。
"""
def __init__(self, name=None, fmt='%Y/%m/%d'):
"""
DateFieldを構築する。
:param name: 当フィールドが参照する項目のキー
:type name: str
:param fmt: 日付のフォーマット
:type fmt: str
"""
# プロパティを設定する
super().__init__(name)
self._fmt = fmt
def convert(self, target):
"""
指定された値を日付に変換する。
:param target: 変換対象の値
:type target: object
:return: 変換後の値
:rtype: datetime.date
"""
# 対象が日付の場合はそのまま返却する
if isinstance(target, datetime.date):
return target
# 対象を日付に変換する
return datetime.datetime.strptime(str(target), self._fmt).date()
class DateTimeField(BaseField):
"""
日時を表現するフィールドクラス。
"""
def __init__(self, name=None, fmt='%Y/%m/%d %H:%M:%S'):
"""
DateTimeFieldを構築する。
:param name: 当フィールドが参照する項目のキー
:type name: str
:param fmt: 日時のフォーマット
:type fmt: str
"""
# プロパティを設定する
super().__init__(name)
self._fmt = fmt
def convert(self, target):
"""
指定された値を日時に変換する。
:param target: 変換対象の値
:type target: object
:return: 変換後の値
:rtype: datetime.datetime
"""
# 対象が日時の場合はそのまま返却する
if isinstance(target, datetime.datetime):
return target
# 対象を日時に変換する
return datetime.datetime.strptime(str(target), self._fmt)
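# Minimal usage sketch (added for illustration; not part of the original module):
if __name__ == '__main__':
    print(IntegerField().convert('42'))                      # prints 42
    print(DecimalField().convert(0.1))                       # prints 0.1
    print(DateField(fmt='%Y/%m/%d').convert('2015/01/02'))   # prints 2015-01-02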
| 2.234375 | 2 |
idingtalk/idingtalk/__init__.py | icmdb/iwebhooks | 0 | 12794882 | # -*- coding: utf-8 -*-
from .incoming import dingtalk_incoming
if __name__ == "__main__":
token = ""
payload = {
"msgtype" : "markdown",
"markdown": {
"title": "This is the title!",
"text" : "\n\n".join([
"# This is title",
"> This is first line.",
"> This is second line.",
])
},
"at" : {
"atMobiles": ["18600000000"],
"isAll": False
}
}
dingtalk_incoming(token, payload=payload, at_mobiles=["18600000000"])
| 2.0625 | 2 |
yolov5-coreml-tflite-converter/utils/constants.py | SchweizerischeBundesbahnen/sbb-ml-models | 0 | 12794883 | <filename>yolov5-coreml-tflite-converter/utils/constants.py<gh_stars>0
import os
# -------------------------------------------------------------------------------------------------------------------- #
# Constants
# -------------------------------------------------------------------------------------------------------------------- #
# General
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
DATA_DIR = os.path.join('data')
OUTPUT_DIR = os.path.join(DATA_DIR, 'output')
FLOAT32 = 'float32'
FLOAT16 = 'float16'
INT8 = 'int8'
FULLINT8 = 'fullint8'
FLOAT32_SUFFIX = '_float32'
FLOAT16_SUFFIX = '_float16'
INT8_SUFFIX = '_int8'
FULLINT8_SUFFIX = '_fullint8'
BATCH_SIZE = 1
NB_CHANNEL = 3
# x, y, w, h, score, class1, class2, ...
XY_SLICE = (0, 2)
WH_SLICE = (2, 4)
SCORE_SLICE = (4, 5)
CLASSES_SLICE = (5, 0)
NB_OUTPUTS = 5 # 1 objectness score + 4 bounding box coordinates
NORMALIZATION_FACTOR = 255.
# Input names
IMAGE_NAME = 'image'
NORMALIZED_SUFFIX = '_normalized'
QUANTIZED_SUFFIX = '_quantized'
IOU_NAME = 'iou threshold'
CONF_NAME = 'conf threshold'
# Colors
BLUE = '\033[36m'
GREEN = '\033[32m'
RED = '\033[31m'
YELLOW = '\033[33m'
PURPLE = '\033[34m'
END_COLOR = '\033[0m'
BOLD = '\033[1m'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# CoreML converter
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
COREML_SUFFIX = '.mlmodel'
TORCHSCRIPT_SUFFIX = '.torchscript.pt'
# Outputs names
CONFIDENCE_NAME = 'confidence' # list of class scores
COORDINATES_NAME = 'coordinates' # (x, y, w, h)
RAW_PREFIX = 'raw_'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# TFLite converter
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
TFLITE_SUFFIX = '.tflite'
LABELS_NAME = 'labels.txt'
# Format
TFLITE = 'tflite'
SAVED_MODEL = 'saved_model'
GRAPH_DEF_SUFFIX = '.pb'
# NMS
PADDED = 'padded'
SIMPLE = 'simple'
COMBINED = 'combined'
# Representative dataset
BAHNHOF = 'bahnhof'
WAGEN = 'wagen'
TRAKTION = 'traktion'
# Output names
BOUNDINGBOX_NAME = 'location' # (y1, x1, y2, x2)
CLASSES_NAME = 'category' # class index
SCORES_NAME = 'score' # confidence score
NUMBER_NAME = 'number of detections' # number of detected object in the image
DETECTIONS_NAME = 'detection results'
PREDICTIONS_NAME = 'predictions'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# ONNX converter
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
ONNX_SUFFIX = '.onnx'
OPSET = 12
# -------------------------------------------------------------------------------------------------------------------- #
# Default values
# -------------------------------------------------------------------------------------------------------------------- #
DEFAULT_COREML_NAME = 'yolov5-coreML'
DEFAULT_TFLITE_NAME = 'yolov5-TFLite'
DEFAULT_ONNX_NAME = 'yolov5-ONNX'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Common values
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
DEFAULT_MODEL_OUTPUT_DIR = os.path.join(OUTPUT_DIR, 'converted_models')
DEFAULT_PT_MODEL = os.path.join('data', 'models', 'best.pt')
DEFAULT_INPUT_RESOLUTION = 640
DEFAULT_QUANTIZATION_TYPE = FLOAT32
DEFAULT_IOU_THRESHOLD = 0.45
DEFAULT_CONF_THRESHOLD = 0.25
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# TFlite additional default values
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
DEFAULT_SOURCE_DATASET = WAGEN
DEFAULT_NB_CALIBRATION = 500
DEFAULT_MAX_NUMBER_DETECTION = 20
def get_zipfile_path(source):
return os.path.join(DATA_DIR, f'{source}_500.zip')
def get_dataset_url(source):
return f'https://sbb-ml-public-resources-prod.s3.eu-central-1.amazonaws.com/quantization/{source}_500.zip'
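# For example, get_dataset_url(WAGEN) resolves to
# 'https://sbb-ml-public-resources-prod.s3.eu-central-1.amazonaws.com/quantization/wagen_500.zip'
# and get_zipfile_path(WAGEN) to os.path.join(DATA_DIR, 'wagen_500.zip').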
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
# Inference
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - #
DEFAULT_DETECTED_IMAGE_DIR = os.path.join(OUTPUT_DIR, 'detections')
| 1.734375 | 2 |
subsworld/subsEnd.py | mugdhaadhav/subsworld | 1 | 12794884 | <filename>subsworld/subsEnd.py
def endSubStatus(val):
if val == 0:
print('\nSubtitle Downloaded Successfully... ')
        if not input('\nPlease press enter to keep searching the same or press any other key to search for another subtitle: '):
return
else:
from subsworld import subsworld
subsworld()
elif val == 1:
        print('\nUnknown Error Occurred... ')
return
elif val == 2:
print('\nSubtitles not found... ')
        if not input('\nPlease press enter to exit or press any other key to search for another subtitle: '):
print('Thanks for using subsworld ... ')
exit(0)
else:
return | 3.34375 | 3 |
src/setup.py | GML22/GeocoderPL | 0 | 12794885 | <reponame>GML22/GeocoderPL
from setuptools import setup
setup(
name='geocoderpl',
version='1.1',
    description='GeocoderPL is an application written in Python, which can be used for geocoding address points in ' +
                'Poland and for displaying basic information about a given address point and the ' +
                'building assigned to that address. GeocoderPL takes the form of a search engine with three map layers: ' +
                'OpenStreetMap, Google Maps and Stamen Maps.',
author='<NAME>',
author_email='<EMAIL>',
license="MIT License",
keywords="search-engine geocoding numpy pyqt5 geospatial sqlite3 gdal-python superpermutation folium-maps",
url="https://github.com/GML22/GeocoderPL",
packages=['geocoderpl'],
install_requires=['folium', 'numpy', 'pyqt5', 'unidecode', 'pyproj', 'lxml', 'geocoder', 'pandas', 'matplotlib',
'setuptools', 'sqlalchemy', 'python-dotenv'],
)
| 2.453125 | 2 |
tests/plugins/test_yum_inject.py | goldmann/dock | 0 | 12794886 | """
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function
import os
try:
from collections import OrderedDict
except ImportError:
# Python 2.6
from ordereddict import OrderedDict
from dock.core import DockerTasker
from dock.inner import DockerBuildWorkflow
from dock.plugin import PreBuildPluginsRunner, PostBuildPluginsRunner
from dock.plugins.pre_inject_yum_repo import InjectYumRepoPlugin, alter_yum_commands
from dock.util import ImageName
from tests.constants import DOCKERFILE_GIT
class X(object):
pass
def test_yuminject_plugin(tmpdir):
df = """\
FROM fedora
RUN yum install -y python-django
CMD blabla"""
tmp_df = os.path.join(str(tmpdir), 'Dockerfile')
with open(tmp_df, mode="w") as fd:
fd.write(df)
tasker = DockerTasker()
workflow = DockerBuildWorkflow(DOCKERFILE_GIT, "test-image")
setattr(workflow, 'builder', X)
metalink = 'https://mirrors.fedoraproject.org/metalink?repo=fedora-$releasever&arch=$basearch'
workflow.repos['yum'] = [OrderedDict(
(('name', 'my-repo'),
('metalink', metalink),
('enabled', 1),
('gpgcheck', 0)),
)]
setattr(workflow.builder, 'image_id', "asd123")
setattr(workflow.builder, 'df_path', tmp_df)
setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='21'))
setattr(workflow.builder, 'git_dockerfile_path', None)
setattr(workflow.builder, 'git_path', None)
runner = PreBuildPluginsRunner(tasker, workflow, [{
'name': InjectYumRepoPlugin.key,
'args': {}}])
runner.run()
assert InjectYumRepoPlugin.key is not None
with open(tmp_df, 'r') as fd:
altered_df = fd.read()
expected_output = r"""FROM fedora
RUN printf "[my-repo]\nname=my-repo\nmetalink=https://mirrors.fedoraproject.org/metalink?repo=fedora-\$releasever&arch=\$basearch\nenabled=1\ngpgcheck=0\n" >/etc/yum.repos.d/dock-injected.repo && yum install -y python-django && yum clean all && rm -f /etc/yum.repos.d/dock-injected.repo
CMD blabla"""
assert expected_output == altered_df
def test_yuminject_multiline(tmpdir):
df = """\
FROM fedora
RUN yum install -y httpd \
uwsgi
CMD blabla"""
tmp_df = os.path.join(str(tmpdir), 'Dockerfile')
with open(tmp_df, mode="w") as fd:
fd.write(df)
tasker = DockerTasker()
workflow = DockerBuildWorkflow(DOCKERFILE_GIT, "test-image")
setattr(workflow, 'builder', X)
metalink = r'https://mirrors.fedoraproject.org/metalink?repo=fedora-$releasever&arch=$basearch'
workflow.repos['yum'] = [OrderedDict(
(('name', 'my-repo'),
('metalink', metalink),
('enabled', 1),
('gpgcheck', 0)),
)]
setattr(workflow.builder, 'image_id', "asd123")
setattr(workflow.builder, 'df_path', tmp_df)
setattr(workflow.builder, 'base_image', ImageName(repo='Fedora', tag='21'))
setattr(workflow.builder, 'git_dockerfile_path', None)
setattr(workflow.builder, 'git_path', None)
runner = PreBuildPluginsRunner(tasker, workflow,
[{'name': InjectYumRepoPlugin.key, 'args': {}}])
runner.run()
assert InjectYumRepoPlugin.key is not None
with open(tmp_df, 'r') as fd:
altered_df = fd.read()
expected_output = r"""FROM fedora
RUN printf "[my-repo]\nname=my-repo\nmetalink=https://mirrors.fedoraproject.org/metalink?repo=fedora-\$releasever&arch=\$basearch\nenabled=1\ngpgcheck=0\n" >/etc/yum.repos.d/dock-injected.repo && yum install -y httpd uwsgi && yum clean all && rm -f /etc/yum.repos.d/dock-injected.repo
CMD blabla"""
assert altered_df == expected_output
def test_complex_df():
df = """\
FROM fedora
RUN asd
RUN yum install x
ENV x=y
RUN yum install \
x \
y \
&& something else
CMD asd"""
wrap_cmd = "RUN test && %(yum_command)s && asd"
out = alter_yum_commands(df, wrap_cmd)
expected_output = """\
FROM fedora
RUN asd
RUN test && yum install x && asd
ENV x=y
RUN test && yum install x y && something else && asd
CMD asd"""
assert out == expected_output
| 2.03125 | 2 |
run/base/hpe_base_util.py | KAIST-HCIL/DeepFisheyeNet | 23 | 12794887 | from dataset.data_model import HandDataModel
def unpack_data(results, is_eval = False):
joint_out = results['joint']
heatmap = None
heatmap_true = None
if not is_eval:
heatmap = results['heatmap']
heatmap_true = results['heatmap_true']
heatmap_reprojected = results['heatmap_reprojected']
return joint_out, heatmap, heatmap_true, heatmap_reprojected
| 2.234375 | 2 |
leetcode/1143.最长公共子序列/1143-最长公共子序列.py | ruisunyc/- | 2 | 12794888 | <reponame>ruisunyc/-<gh_stars>1-10
class Solution:
def longestCommonSubsequence(self, text1: str, text2: str) -> int:
m = len(text1)+1
n = len(text2)+1
dp = [[0]*n for _ in range(m)]
for i in range(1,m):
for j in range(1,n):
if text1[i-1]==text2[j-1]:
dp[i][j]=dp[i-1][j-1]+1
else:
dp[i][j] = max(dp[i-1][j],dp[i][j-1])
return dp[-1][-1] | 2.78125 | 3 |
neurokit2/hrv/hrv.py | vansjyo/NeuroKit | 0 | 12794889 | # -*- coding: utf-8 -*-
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from .hrv_time import hrv_time
from .hrv_frequency import hrv_frequency
from .hrv_frequency import _hrv_frequency_show
from .hrv_nonlinear import hrv_nonlinear
from .hrv_nonlinear import _hrv_nonlinear_show
from .hrv_utils import _hrv_get_rri
from .hrv_utils import _hrv_sanitize_input
from ..stats import summary_plot
def hrv(peaks, sampling_rate=1000, show=False):
""" Computes indices of Heart Rate Variability (HRV).
Computes HRV indices in the time-, frequency-, and nonlinear domain. Note
that a minimum duration of the signal containing the peaks is recommended
    for some HRV indices to be meaningful. For instance, 1, 2 and 5 minutes of
    high quality signal are the recommended minima for HF, LF and LF/HF,
respectively. See references for details.
Parameters
----------
peaks : dict
Samples at which cardiac extrema (i.e., R-peaks, systolic peaks) occur.
Dictionary returned by ecg_findpeaks, ecg_peaks, ppg_findpeaks, or
ppg_peaks.
sampling_rate : int, optional
Sampling rate (Hz) of the continuous cardiac signal in which the peaks
occur. Should be at least twice as high as the highest frequency in vhf.
By default 1000.
show : bool, optional
        If True, returns the plots that are generated for each of the domains.
Returns
-------
DataFrame
Contains HRV metrics from three domains:
- frequency (for details see hrv_frequency)
- time (for details see hrv_time)
- non-linear (for details see hrv_nonlinear)
See Also
--------
ecg_peaks, ppg_peaks, hrv_time, hrv_frequency, hrv_nonlinear
Examples
--------
>>> import neurokit2 as nk
>>>
>>> # Download data
>>> data = nk.data("bio_resting_5min_100hz")
>>>
>>> # Find peaks
>>> peaks, info = nk.ecg_peaks(data["ECG"], sampling_rate=100)
>>>
>>> # Compute HRV indices
>>> nk.hrv(peaks, sampling_rate=100, show=True)
References
----------
- <NAME>. (2002). Assessing heart rate variability from real-world
Holter reports. Cardiac electrophysiology review, 6(3), 239-244.
- <NAME>., & <NAME>. (2017). An overview of heart rate
variability metrics and norms. Frontiers in public health, 5, 258.
"""
# Get indices
out = [] # initialize empty container
# Gather indices
out.append(hrv_time(peaks, sampling_rate=sampling_rate))
out.append(hrv_frequency(peaks, sampling_rate=sampling_rate))
out.append(hrv_nonlinear(peaks, sampling_rate=sampling_rate))
out = pd.concat(out, axis=1)
# Plot
if show:
_hrv_plot(peaks, out, sampling_rate)
return out
def _hrv_plot(peaks, hrv, sampling_rate=1000):
fig = plt.figure(constrained_layout=False)
spec = matplotlib.gridspec.GridSpec(ncols=2, nrows=2,
height_ratios=[1, 1], width_ratios=[1, 1])
# Arrange grids
ax_distrib = fig.add_subplot(spec[0, :-1])
ax_distrib.set_xlabel('R-R intervals (ms)')
ax_distrib.set_title("Distribution of R-R intervals")
ax_psd = fig.add_subplot(spec[1, :-1])
ax_poincare = fig.add_subplot(spec[:, -1])
# Distribution of RR intervals
peaks = _hrv_sanitize_input(peaks)
rri = _hrv_get_rri(peaks, sampling_rate=sampling_rate, interpolate=False)
ax_distrib = summary_plot(rri, ax=ax_distrib)
# Poincare plot
out_poincare = hrv.copy()
out_poincare.columns = [col.replace('HRV_', '') for col in out_poincare.columns]
ax_poincare = _hrv_nonlinear_show(rri, out_poincare, ax=ax_poincare)
# PSD plot
rri, sampling_rate = _hrv_get_rri(peaks,
sampling_rate=sampling_rate, interpolate=True)
_hrv_frequency_show(rri, out_poincare,
sampling_rate=sampling_rate, ax=ax_psd)
| 3.0625 | 3 |
disno/objects/__init__.py | QwireDev/disno | 0 | 12794890 | """
disno.objects
~~~~~~~~~~~~~
Independently usable object models for the Discord API.
Docs reference: https://discord.dev
:copyright: (c) 2021-present Qwire Development Team
:license: MIT, see LICENSE for more details.
"""
from .user import *
| 0.839844 | 1 |
tensorstream/helpers/any_nan_spec.py | clems4ever/tensorstream | 5 | 12794891 | import math
import tensorflow as tf
import unittest
from tensorstream.helpers.any_nan import any_nan
def in_tf(x):
with tf.Session() as sess:
return sess.run(x)
class AnyNanSpec(unittest.TestCase):
def test_any_nan_scalar(self):
x = any_nan(tf.constant(4.0))
self.assertEqual(in_tf(x), False)
y = any_nan(tf.constant(math.nan))
self.assertEqual(in_tf(y), True)
def test_any_nan_tensor(self):
x = any_nan(tf.constant([4.0, 3.0, 2.0]))
self.assertEqual(in_tf(x), False)
y = any_nan(tf.constant([math.nan, 3.0, 2.0]))
self.assertEqual(in_tf(y), True)
z = any_nan(tf.constant([math.nan, math.nan, math.nan]))
self.assertEqual(in_tf(z), True)
def test_any_nan_complex_type(self):
x = any_nan({
'a': tf.constant([3.0, 2.0]),
'b': [tf.constant(3.2), tf.constant([2.1, 2.3, 4.3])],
'c': {
'z': tf.constant([5.2, 5.2]),
'y': tf.constant([3.4, 5.2])
}
})
self.assertEqual(in_tf(x), False)
y = any_nan({
'a': tf.constant([3.0, 2.0]),
'b': [tf.constant(3.2), tf.constant([2.1, 2.3, math.nan])],
'c': {
'z': tf.constant([5.2, 5.2]),
'y': tf.constant([3.4, 5.2])
}
})
self.assertEqual(in_tf(y), True)
z = any_nan({
'a': tf.constant([math.nan, math.nan]),
'b': [tf.constant(math.nan), tf.constant([math.nan, math.nan, math.nan])],
'c': {
'z': tf.constant([math.nan, math.nan]),
'y': tf.constant([math.nan, math.nan])
}
})
self.assertEqual(in_tf(z), True)
| 2.6875 | 3 |
texaslan/voting/views.py | hsmeans/texaslan.org | 2 | 12794892 | from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.views.generic import ListView, CreateView, DetailView, FormView
from django.forms import ValidationError
from .models import Candidate, VoteBallot, CANDIDATE_POSITIONS, VoteService, VoteStatus, POSITION_NUMS
from .forms import StartElectionForm, CreateCandidateApplicationForm, VoteForm
from texaslan.utils.utils import PledgeOrActiveRequiredMixin, HasNotAppliedRequiredMixin, HasNotVotedRequiredMixin
from texaslan.site_settings.models import SiteSettingService
class CandidateListView(PledgeOrActiveRequiredMixin, FormView):
template_name = 'voting/candidate_list.html'
form_class = StartElectionForm
def get_context_data(self, **kwargs):
context = super(CandidateListView, self).get_context_data(**kwargs)
context['voting_closed'] = SiteSettingService.is_voting_closed()
if context['voting_closed']:
return context
context['voting_open'] = SiteSettingService.is_voting_currently()
context['has_not_voted'] = True
try:
vote_status = VoteStatus.objects.get(voter__username=self.request.user.username)
context['has_not_voted'] = not vote_status.has_voted
except VoteStatus.DoesNotExist:
pass
positions_list = []
for (position_code, position_name) in CANDIDATE_POSITIONS:
has_winner = False
has_applied = False
is_applying_open = SiteSettingService.is_voting_applications_open()
try:
list = Candidate.objects.filter(position=position_code)
for cand in list:
if cand.user.pk == self.request.user.pk:
has_applied = True
if cand.has_won:
has_winner = True
except Candidate.DoesNotExist:
list = []
positions_list.append((position_name, position_code, has_winner, has_applied, is_applying_open, list,))
context['positions'] = positions_list
return context
def get_success_url(self):
messages.add_message(self.request, messages.SUCCESS, 'Election was successful!')
return reverse('voting:list')
class CandidateApplyView(HasNotAppliedRequiredMixin, CreateView):
template_name = 'voting/candidate_apply.html'
model = Candidate
form_class = CreateCandidateApplicationForm
def get_context_data(self, **kwargs):
context = super(CandidateApplyView, self).get_context_data(**kwargs)
context['position_id'] = self.kwargs.get("position")
context['position'] = VoteService.get_position_str(context['position_id'])
return context
def form_valid(self, form):
# This method is called when valid form data has been POSTed.
# It should return an HttpResponse.
candidate = form.instance
candidate.position = form.data['position_id']
candidate.user = self.request.user
return super(CandidateApplyView, self).form_valid(form)
def get_success_url(self):
messages.add_message(self.request, messages.SUCCESS, 'Application was submitted!')
return reverse('voting:list')
class CandidateDetailsView(PledgeOrActiveRequiredMixin, DetailView):
template_name = 'voting/candidate_detail.html'
model = Candidate
def get_context_data(self, **kwargs):
context = super(CandidateDetailsView, self).get_context_data(**kwargs)
context['position_id'] = self.kwargs.get("position")
context['position'] = VoteService.get_position_str(context['position_id'])
return context
def get_object(self, queryset=None):
return get_object_or_404(Candidate,
position=self.kwargs.get('position'), user__username=self.kwargs.get('username'))
class VoteView(HasNotVotedRequiredMixin, FormView):
template_name = 'voting/vote.html'
form_class = VoteForm
def form_invalid(self, form):
messages.add_message(self.request, messages.ERROR, form.errors.as_data()['__all__'][0].message)
return super(VoteView, self).form_invalid(form)
def get_success_url(self):
messages.add_message(self.request, messages.SUCCESS, 'Successfully voted!')
return reverse('voting:list')
def form_valid(self, form):
# This method is called when valid form data has been POSTed.
# It should return an HttpResponse.
form.submit_ballot(self.request.user)
return super(VoteView, self).form_valid(form)
def get_form_kwargs(self):
kwargs = super(VoteView, self).get_form_kwargs()
extra = []
for (position_id, position) in CANDIDATE_POSITIONS:
# If we have all our winners, no need to fill this out.
if len(set(Candidate.objects.filter(position=position_id, has_won=True))) == POSITION_NUMS[position_id]:
continue
extra.append((position_id, position, set(Candidate.objects.filter(position=position_id)),))
kwargs['extra'] = extra
kwargs['user'] = self.request.user
return kwargs
| 1.953125 | 2 |
remote/task.py | jackyin68/paramiko-windows | 1 | 12794893 | from multiprocessing import Process
import os
import time
def run_proc(process_name):
print('running subprocess %s(%s)......' % (process_name, os.getpid()))
count = 100
for i in range(count):
print("*** {} ***".format(i))
time.sleep(1)
os.mkdir(str(count))
print('sub process end')
if __name__ == '__main__':
print('Process %s' % os.getpid())
p = Process(target=run_proc, args=('test',))
print('sub process beginning')
p.start()
# p.join()
# print('sub process end')
print('Process end')
| 2.921875 | 3 |
src/affe/tests/__init__.py | eliavw/affe | 1 | 12794894 | <filename>src/affe/tests/__init__.py
from .resources import get_dummy_flow
| 1.109375 | 1 |
DeepHyperion-MNIST/train_model.py | IharBakhanovich/DeepHyperion | 5 | 12794895 | <reponame>IharBakhanovich/DeepHyperion<filename>DeepHyperion-MNIST/train_model.py
import argparse
import sys
import numpy as np
from os import makedirs
from os.path import exists
import tensorflow as tf
CLIP_MIN = -0.5
CLIP_MAX = 0.5
K = tf.keras.backend
mnist = tf.keras.datasets.mnist
np_utils = tf.keras.utils
Sequential = tf.keras.models.Sequential
Dense = tf.keras.layers.Dense
Dropout = tf.keras.layers.Dropout
Activation = tf.keras.layers.Activation
Flatten = tf.keras.layers.Flatten
Conv2D = tf.keras.layers.Conv2D
MaxPooling2D = tf.keras.layers.MaxPooling2D
l2 = tf.keras.regularizers.l2
def train(name):
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = x_test.reshape(-1, 28, 28, 1)
layers = [
Conv2D(64, (3, 3), padding="valid", input_shape=(28, 28, 1)),
Activation("relu"),
Conv2D(64, (3, 3)),
Activation("relu"),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.5),
Flatten(),
Dense(128),
Activation("relu"),
Dropout(0.5),
Dense(10),
]
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train = (x_train / 255.0) - (1.0 - CLIP_MAX)
x_test = (x_test / 255.0) - (1.0 - CLIP_MAX)
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
model = Sequential()
for layer in layers:
model.add(layer)
model.add(Activation("softmax"))
print(model.summary())
model.compile(
loss="categorical_crossentropy", optimizer="adadelta", metrics=["accuracy"]
)
model.fit(
x_train,
y_train,
epochs=50,
batch_size=128,
shuffle=True,
verbose=1,
validation_data=(x_test, y_test),
)
if not exists("model"):
makedirs("model")
model.save(f"./models/{name}.h5")
if __name__ == "__main__":
name = str(sys.argv[1])
train(name)
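    # Example invocation (the positional argument names the saved .h5 model file;
    # "my_model" is a placeholder):
    #   python train_model.py my_model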
| 2.65625 | 3 |
2new_task.py | booiljung/pymqdemo | 0 | 12794896 | <filename>2new_task.py
# Reference: https://blog.storyg.co/rabbitmqs/tutorials/python/02-work-queue
import pika
import sys
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='task_queue', durable=True)
message = ' '.join(sys.argv[1:]) or "Hello World!"
channel.basic_publish(exchange='', routing_key='task_queue', body=message,
properties=pika.BasicProperties(
delivery_mode = 2,
)
)
print(" [x] Sent %r" % message)
| 2.75 | 3 |
read_shapefile.py | clearedTakeoff/Bridge-Reconstruction | 0 | 12794897 | <gh_stars>0
import shapefile
class ShapefileReader:
def __init__(self, filename):
# Open and read the shapefile (file extension not required, only filename)
self.sf = shapefile.Reader(filename)
# Save all entries in the file, each entry containing both shape and record object
self.entries = self.sf.shapeRecords()
self.bridges = []
for entry in self.entries:
# Only extract shapeRecord entries of bridges (tipobj 3), limit also by coordinates??
if entry.record["TIPOBJ_CES"] == 3:
self.bridges.append(entry)
# print(len(self.bridges))
# Returns bridges inside coordinates bound between points (lowX, lowY) and (highX, highY)
def bridgesInsideCoords(self, lowX, lowY, highX, highY):
bridges = []
for bridge in self.bridges:
if lowX <= bridge.shape.bbox[0] <= highX and lowY <= bridge.shape.bbox[1] <= highY\
and lowX <= bridge.shape.bbox[2] <= highX and lowY <= bridge.shape.bbox[3] <= highY:
bridges.append(bridge)
# print("Found", len(bridges), "bridges")
return bridges
def writeBridges(self, lowX, lowY, highX, highY, output):
sf = shapefile.Writer(output)
sf.fields = self.sf.fields[1:]
for bridge in self.sf.iterShapeRecords():
if lowX <= bridge.shape.bbox[0] <= highX and lowY <= bridge.shape.bbox[1] <= highY\
and lowX <= bridge.shape.bbox[2] <= highX and lowY <= bridge.shape.bbox[3] <= highY:
sf.record(*bridge.record)
sf.shape(bridge.shape)
sf.close()
if __name__ == "__main__":
s = ShapefileReader("TN_CESTE_L")
| 3.25 | 3 |
timelap/models.py | TonyEight/django-timelap | 0 | 12794898 | from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
__all__ = ['ModelWithDateRange', 'ModelWithDateTimeRange',]
class ModelWithDateRange(models.Model):
# Attributes
start_date = models.DateField()
end_date = models.DateField()
# Methods
def clean(self):
if self.start_date and self.end_date\
and self.start_date > self.end_date:
raise ValidationError(_('End date must be greater or ' \
'equal to start date.'))
# Meta-data
class Meta:
abstract = True
class ModelWithDateTimeRange(models.Model):
# Attributes
start_datetime = models.DateTimeField()
end_datetime = models.DateTimeField()
# Methods
def clean(self):
if self.start_datetime and self.end_datetime\
and self.start_datetime > self.end_datetime:
raise ValidationError(_('End datetime must be greater or equal' \
' to start datetime.'))
# Meta-data
class Meta:
abstract = True
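# Usage sketch (illustrative): both models are abstract, so a concrete model
# inherits from one of them to get the range validation, e.g.:
#
#     class Booking(ModelWithDateRange):
#         name = models.CharField(max_length=100)
#
# Calling full_clean()/clean() on an instance whose end_date precedes its
# start_date raises ValidationError.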
| 2.421875 | 2 |
28_uvm_component/testbench.py | raysalemi/python4uvm_examples | 1 | 12794899 | <reponame>raysalemi/python4uvm_examples<filename>28_uvm_component/testbench.py
import pyuvm
from pyuvm import *
# # uvm_component
# ## Running the phases
# Figure 1: A uvm_test demonstrating the phase methods
@pyuvm.test()
class PhaseTest(uvm_test): # uvm_test extends uvm_component
def build_phase(self):
print("1 build_phase")
def connect_phase(self):
print("2 connect_phase")
def end_of_elaboration_phase(self):
print("3 end_of_elaboration_phase")
def start_of_simulation_phase(self):
print("4 start_of_simulation_phase")
async def run_phase(self):
self.raise_objection()
print("5 run_phase")
self.drop_objection()
def extract_phase(self):
print("6 extract_phase")
def check_phase(self):
print("7 check_phase")
def report_phase(self):
print("8 report_phase")
def final_phase(self):
print("9 final_phase")
# ## Building the testbench hierarchy
# ### TestTop (uvm_test_top)
# Figure 4: pyuvm instantiates TestTop as uvm_test_top
# The test is always named uvm_test_top
@pyuvm.test()
class TestTop(uvm_test):
def build_phase(self):
self.logger.info(f"{self.get_name()} build_phase")
self.mc = MiddleComp("mc", self)
def final_phase(self):
self.logger.info("final phase")
# ### MiddleComp (uvm_test_top.mc)
# Figure 5: The middle component is instantiated by
# uvm_test_top as "mc" and instantiates "bc".
class MiddleComp(uvm_component):
def build_phase(self):
self.bc = BottomComp(name="bc", parent=self)
def end_of_elaboration_phase(self):
self.logger.info(f"{self.get_name()} end of elaboration phase")
# ### BottomComp (uvm_test_top.mc.bc)
# Figure 6: The bottom component is instantiated by
# the middle component and is at "uvm_test_top.mc.bc"
class BottomComp(uvm_component):
async def run_phase(self):
self.raise_objection()
self.logger.info(f"{self.get_name()} run phase")
self.drop_objection()
| 2.25 | 2 |
degvabank/degvabank/apps/transaction/urls.py | Vixx-X/DEGVABanck-backend | 0 | 12794900 | from django.urls.conf import path, include
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
router.register(
r"transactions",
views.TransactionViewSet,
)
user_transaction_urls = [
path(
"transactions/",
views.UserTransactionListCreateView.as_view(),
name="user-transactions",
),
path(
"transactions/<int:id>/",
views.UserTransactionView.as_view(),
name="user-transactions-detail",
),
]
urlpatterns = [
path(
"user/",
include(user_transaction_urls),
),
path(
"",
include(router.urls),
),
path(
"bank/transaction/",
views.ForeignTransactionView.as_view(),
name="bank-transaction",
),
]
| 1.875 | 2 |
cardinal_pythonlib/openxml/pause_process_by_disk_space.py | RudolfCardinal/pythonlib | 10 | 12794901 | #!/usr/bin/env python3
# cardinal_pythonlib/openxml/pause_process_by_disk_space.py
"""
===============================================================================
Original code copyright (C) 2009-2021 <NAME> (<EMAIL>).
This file is part of cardinal_pythonlib.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===============================================================================
**Pauses and resumes a process by disk space; LINUX ONLY.**
"""
from argparse import ArgumentParser
import logging
import shutil
import subprocess
import sys
from time import sleep
from typing import NoReturn
from cardinal_pythonlib.logs import (
BraceStyleAdapter,
main_only_quicksetup_rootlogger,
)
from cardinal_pythonlib.sizeformatter import human2bytes, sizeof_fmt
log = BraceStyleAdapter(logging.getLogger(__name__))
def is_running(process_id: int) -> bool:
"""
Uses the Unix ``ps`` program to see if a process is running.
"""
pstr = str(process_id)
encoding = sys.getdefaultencoding()
s = subprocess.Popen(["ps", "-p", pstr], stdout=subprocess.PIPE)
for line in s.stdout:
strline = line.decode(encoding)
if pstr in strline:
return True
return False
def main() -> NoReturn:
"""
Command-line handler for the ``pause_process_by_disk_space`` tool.
Use the ``--help`` option for help.
"""
parser = ArgumentParser(
description="Pauses and resumes a process by disk space; LINUX ONLY."
)
parser.add_argument(
"process_id", type=int,
help="Process ID."
)
parser.add_argument(
"--path", required=True,
help="Path to check free space for (e.g. '/')"
)
parser.add_argument(
"--pause_when_free_below", type=str, required=True,
help="Pause process when free disk space below this value (in bytes "
"or as e.g. '50G')"
)
parser.add_argument(
"--resume_when_free_above", type=str, required=True,
help="Resume process when free disk space above this value (in bytes "
"or as e.g. '70G')"
)
parser.add_argument(
"--check_every", type=int, required=True,
help="Check every n seconds (where this is n)"
)
parser.add_argument(
"--verbose", action="store_true",
help="Verbose output"
)
args = parser.parse_args()
main_only_quicksetup_rootlogger(
level=logging.DEBUG if args.verbose else logging.INFO)
minimum = human2bytes(args.pause_when_free_below)
maximum = human2bytes(args.resume_when_free_above)
path = args.path
process_id = args.process_id
period = args.check_every
pause_args = ["kill", "-STOP", str(process_id)]
resume_args = ["kill", "-CONT", str(process_id)]
assert minimum < maximum, "Minimum must be less than maximum"
log.info(
f"Starting: controlling process {process_id}; "
f"checking disk space every {period} s; "
f"will pause when free space on {path} "
f"is less than {sizeof_fmt(minimum)} and "
f"resume when free space is at least {sizeof_fmt(maximum)}; "
f"pause command will be {pause_args}; "
f"resume command will be {resume_args}."
)
log.debug("Presuming that the process is RUNNING to begin with.")
paused = False
while True:
if not is_running(process_id):
log.info("Process {} is no longer running", process_id)
sys.exit(0)
space = shutil.disk_usage(path).free
log.debug("Disk space on {} is {}", path, sizeof_fmt(space))
if space < minimum and not paused:
log.info("Disk space down to {}: pausing process {}",
sizeof_fmt(space), process_id)
subprocess.check_call(pause_args)
paused = True
elif space >= maximum and paused:
log.info("Disk space up to {}: resuming process {}",
sizeof_fmt(space), process_id)
subprocess.check_call(resume_args)
paused = False
log.debug("Sleeping for {} seconds...", period)
sleep(period)
if __name__ == '__main__':
main()
| 1.960938 | 2 |
aula06/Strings_P1.py | viniciusFernandesInacio/Curso_Python | 0 | 12794902 | <filename>aula06/Strings_P1.py
curso="Curso de Python"
#print(curso[9:15])
#print(curso.strip()) #removes leading/trailing whitespace from the string
#print(curso.lower().strip()) #converts the string to lowercase
#print(curso.upper()) #converts the string to uppercase
#print(curso.replace("Python","SQL")) #replaces one substring with another
a=curso.split(" ") #splits the string on the given separator, returning a list of parts
print(a[0])
print("Tamanho: " + str(len(curso))) | 4.0625 | 4 |
src/labeling.py | misads/im_scripts | 8 | 12794903 | <reponame>misads/im_scripts
# encoding=utf-8
import pdb
import os
import argparse
import cv2
import numpy as np
from src.base import Base
from src.load_config import load_yml
from src.misc_utils import attach_file_suffix, binaryzation, args, random_crop
class Labeling(Base):
def __init__(self, cfg):
Base.__init__(self, cfg)
self.mode = '1_to_1' if args.mode == 'default' else args.mode
def _handle_image(self, input_path, output_path, compare_path=None, abs_out_dir=None, filename=None):
img = cv2.imread(input_path)
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
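        # Threshold at 122: bright pixels become 255 and dark pixels become 1,
        # then the 255s are remapped to 0, leaving a 0/1 mask with dark regions labelled 1.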
img[img >= 122] = 255
img[img < 122] = 1
img[img == 255] = 0
# BGR set all G and R to 0
img[:, :, 1] = 0
img[:, :, 2] = 0
cv2.imwrite(output_path, img)
# mask = np.unpackbits(np.array(img), axis=2)[:, :, -1:-2:-1]
# mask= mask *255
# cv2.imshow("vis", mask)
# cv2.waitKey(0)
#pdb.set_trace()
def labeling(cfg):
label = Labeling(cfg)
label.handle()
if __name__ == '__main__':
cfg = load_yml(args.ymlpath)
labeling(cfg)
| 2.390625 | 2 |
maincrawler/apps.py | sharawy/genie_crawler | 0 | 12794904 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class SpidermanagerConfig(AppConfig):
name = 'maincrawler'
| 1.023438 | 1 |
ALDS/ALDS1_2_D.py | yu8ikmnbgt6y/MyAOJ | 1 | 12794905 | <filename>ALDS/ALDS1_2_D.py
import sys
import io
input_txt = """
1
1
"""
sys.stdin = io.StringIO(input_txt)
print(input())
# copy the below part and paste to the submission form.
# ---------function------------
def insertion_sort(array, gap):
n = len(array)
cnt = 0
for i in range(gap, n):
moving = array[i]
j = i - gap
while j >= 0 and array[j] > moving:
array[j + gap] = array[j]
j -= gap
cnt += 1
array[j + gap] = moving
return array, cnt
def main():
n = int(input())
array = []
for i in range(n):
array.append(int(input()))
cnt = 0
gaps = []
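    # Build Knuth's gap sequence 1, 4, 13, 40, ... (gap = (3**i - 1) // 2), capped at n.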
i = 1
gap = 1
while gap <= n:
gaps.append(gap)
i += 1
gap = (3 ** i - 1) // 2
gaps = gaps[:100][::-1]
# shell_sort
for gap in gaps:
array, tmp_cnt = insertion_sort(array, gap)
cnt += tmp_cnt
print(len(gaps))
print(" ".join(map(str, gaps)))
print(cnt)
for i in array:
print(i)
return
main()
# -----------------------------
sys.stdin = sys.__stdin__
| 3.6875 | 4 |
setup.py | btrspg/Animal-Courier | 0 | 12794906 | <reponame>btrspg/Animal-Courier
import configparser
from setuptools import setup, find_packages
version = configparser.ConfigParser()
version.read('VERSION')
install_requires = [
'numpy>=1.15',
'pandas>=0.23.4',
'certifi>=2018.10.15',
'psutil>=5.4.8',
'python-dateutil>=2.7.5',
'pytz>=2018.7',
'six>=1.12.0',
'plotly',
'apprise'
]
tests_require = [
'unittest'
]
scripts = [
'bin/multi_run.py'
]
setup(
name='Animal-Courier',
version=version.get('latest', 'version'),
packages=find_packages(exclude=['tests']),
url='https://github.com/dota2-BioTools/Animal-Courier',
project_urls={
"issues": "https://github.com/dota2-BioTools/Animal-Courier/issues",
"releases": "https://github.com/dota2-BioTools/Animal-Courier/releases",
},
license='MIT License',
author='chenyuelong',
author_email='<EMAIL>',
description='''
This is just a test package for learn something about 'How to create a python package'
''',
exclude_package_date={
'Animal-Courier': ['.gitignore', '.circleci/*', '.anaconda/*'],
},
install_requires=install_requires,
scripts=scripts,
)
| 1.875 | 2 |
multi_sokoban/actions.py | FjodBorg/Multi_Agent_AI | 1 | 12794907 | <filename>multi_sokoban/actions.py
"""Define literals and actions schemas for the muli-PDDL framework."""
import copy
import operator
from typing import Dict
import numpy as np
from utils import println
class Literals:
def __init__(self, parent: "Literals" = None):
# initializes the literals
if parent is None:
# if no parent is present!
self.dir = {"N": (-1, 0), "E": (0, 1), "S": (1, 0), "W": (0, -1)}
self.deltaPos = {
(-1, 0): "N",
(0, 1): "E",
(1, 0): "S",
(0, -1): "W",
}
self.goals = {} # hashtable
self.agentColor = {} # hashtable
self.agents = {} # hashtable
self.boxes = {} # hashtable
self.prevState = None
self.actionPerformed = None
self.g = 0
self.t = 0
self.h = None
self.f = None
self.explored = set()
else:
# if a parent is present!
self.dir = parent.dir # rigid
self.deltaPos = parent.deltaPos # rigid
self.goals = parent.goals # rigid
self.agentColor = parent.agentColor # rigid
# TODO avoid deepcopies
self.agents = copy.deepcopy(parent.agents)
self.boxes = copy.deepcopy(parent.boxes)
self.map = parent.map.copy()
self.prevState = parent # reference to previous state
self.actionPerformed = None # gets defined when action is chosen
self.g = parent.g + 1
self.h = None
self.f = None
self.t = parent.t + 1
self.explored = parent.explored
super().__init__()
def addMap(self, map2):
# initialized a map with only walls
self.map = np.array(map2)
def addAgent(self, key, pos, color="c"):
# Adds an agent to the map and to a hashtable
# key is the agent number and color is the color of the agent
self.map[pos] = key
self.agents[key] = [[pos, color]]
# This is only used to get easy access to agents by color
if color not in self.agentColor:
self.agentColor[color] = [key]
else:
self.agentColor[color].append(key)
def addGoal(self, key, pos, color=None):
# Adds a goal to a hashtable
# key is a letter
key = key.lower()
if key not in self.goals:
self.goals[key] = [[pos, color]]
else:
self.goals[key].append([pos, color])
def addBox(self, key, pos, color="c"):
# Adds a box to the map and to a hashtable
# key is a letter
key = key.upper()
self.map[pos] = key
if key not in self.boxes:
self.boxes[key] = [[pos, color]]
else:
self.boxes[key].append([pos, color])
def forget_exploration(self):
"""Remove explored nodes."""
self.explored = set()
def deleteAgent(self, external_key):
"""Delete from `agents`, the `map` and `agent_color`."""
pos = self.getPos(self.agents, external_key)
del self.agents[external_key]
self.map[pos] = " "
for color in self.agentColor:
if external_key in self.agentColor[color]:
to_del = self.agentColor[color].index(external_key)
del self.agentColor[color][to_del]
def deleteBox(self, external_key):
pos = self.getPos(self.boxes, external_key)
del self.boxes[external_key]
self.map[pos] = " "
def deleteGoal(self, external_key):
del self.goals[external_key]
def keepJustAgent(self, external_key):
ext_agents = list(self.agents.keys())
for external_agent in ext_agents:
if external_agent != external_key:
self.deleteAgent(external_agent)
def keepJustGoal(self, external_key):
ext_goals = list(self.goals.keys())
for external_goal in ext_goals:
if external_goal != external_key:
self.deleteGoal(external_goal)
def keepJustBox(self, external_key):
boxes = list(self.boxes.keys())
for external_agent in boxes:
if external_agent != external_key:
self.deleteBox(external_agent)
def getPos(self, objtype, obj, i=0):
# gets the position of an object getPos(objecttype, the key, the index (if multiple))
# returns None if not in hashtable
if obj in objtype:
return objtype[obj][i][0]
else:
return None
def setPos(self, objtype, obj, pos, i=0):
# sets the position of an object
# setPos(objecttype, the key, position, the index (if multiple))
# returns None if not in hashtable
if type(objtype[obj][i][0]) == tuple:
objtype[obj][i][0] = pos
else:
return None
def Free(self, pos):
# checks if position in map is free
# returns true if it is free and false otherwise
if self.map[pos] == chr(32) or self.map[pos].islower():
return True
else:
return False
def Color(self, obj):
pass
def Neighbour(self, pos1, pos2):
# Returns true if the 2 positions are neighbours, otherwise false
if abs(pos1[0] - pos2[0]) + abs(pos1[1] - pos2[1]) == 1:
return True
else:
return False
def __str__(self):
# Debugging purposes
return "\n".join(["".join(line) for line in self.map]) + f" t={self.t}"
class StateInit(Literals):
def __init__(self, parent: "Literals" = None):
# initializes the state
# it is (row, column) and not (x, y)
super().__init__(parent)
def getAgentsByKey(self, key):
# same as getPos, just for all agents with the given key
# if key not in self.agents:
# return None
return self.agents[key]
def getAgentsByColor(self, color):
# same as getPos, just for all agents with the given key
return self.agentColor[color]
def getBoxesByKey(self, key):
key = key.upper()
# same as getPos, just for all Boxes with the given key
# if key not in self.boxes:
# return None
return self.boxes[key]
def getGoalsByKey(self, key):
key = key.lower()
# same as getPos, just for all Goal with the given key
# if key not in self.goals:
# return None
return self.goals[key]
def getGoalKeys(self):
# returns all the keys
return list(self.goals.keys())
def getAgentKeys(self):
# returns all the keys
return list(self.agents.keys())
"""def updateParentCost(self, total_cost):
state = self.prevState
i = 0
while state is not None:
i += 1
state.h = total_cost + i
state.f = state.g + state.h
state = state.prevState"""
def __addPos(self, agtfrom, agtdir):
# simply adds two positions together
return tuple(map(operator.add, agtfrom, self.dir[agtdir]))
def __getDir(self, agtfrom, agtto):
# returns the direction the agent moved
dir = (agtto[0] - agtfrom[0], agtto[1] - agtfrom[1])
return self.deltaPos[dir]
def __MovePrec(self, agt, agtdir):
# returns the movement parameters if the preconditions are met
# otherwise it returns 0
agtfrom = self.getPos(self.agents, agt)
if agtfrom is None:
# # # print("agent", agt, "does not exist")
return None
if agtdir not in self.dir:
# # # print("Direction", agtdir, "does not exist")
return None
agtto = self.__addPos(agtfrom, agtdir)
if self.Free(agtto):
return (agt, agtfrom, agtto)
else:
# # # print("Pos " + str(agtto) + " (row,col) is not free")
return None
def __MoveEffect(self, agt, agtfrom, agtto):
# Moves the object with the given parameters
# Does not check preconditions
self.setPos(self.agents, agt, agtto)
self.map[agtfrom] = chr(32)
self.map[agtto] = agt
# print("Agent " + agt + " is now at " + str(agtto) + " (row,col)")
return True
def Move(self, agt, agtdir):
        # moves the object in the given direction; it checks the precondition and performs the movement
actionParams = self.__MovePrec(agt, agtdir)
if actionParams is not None:
return self.__MoveEffect(*actionParams)
else:
return None
def __PushPrec(self, agt, boxkey, boxdir, i=0):
# returns the movement parameters if the preconditions are met
# otherwise it returns 0
agtfrom = self.getPos(self.agents, agt)
boxfrom = self.getPos(self.boxes, boxkey, i)
if agtfrom is None:
# # # print("agent", agt, "does not exist")
return None
if boxfrom is None:
# # # print("Box", boxkey, "does not exist")
return None
if boxdir not in self.dir:
# # # print("Direction", boxdir, "does not exist")
return None
if self.Neighbour(agtfrom, boxfrom) != 1:
# # # print("agent", agt, "and box", boxkey, "are not neighbors")
return None
boxto = self.__addPos(boxfrom, boxdir)
if self.Free(boxto):
return (agt, boxkey, agtfrom, boxfrom, boxto, i)
else:
# # # print("Pos " + str(boxto) + " (row,col) is not free")
return None
def __PushEffect(self, agt, boxkey, agtfrom, boxfrom, boxto, i):
# Moves the objects with the given parameters
# Does not check preconditions
self.setPos(self.agents, agt, boxfrom, 0) # agents are unique thus 0
self.setPos(self.boxes, boxkey, boxto, i)
self.map[agtfrom] = chr(32)
self.map[boxfrom] = agt
self.map[boxto] = boxkey
# print("Agent " + agt + " is now at " + str(boxto) + " (row,col)")
# print("Box " + str(box) + " is now at " + str(boxfrom) + " (row,col)")
return True
def Push(self, agt, boxkey, boxdir, i):
        # moves the objects in the given direction; it checks the precondition and performs the movement
actionParams = self.__PushPrec(agt, boxkey, boxdir, i)
if actionParams is not None:
return self.__PushEffect(*actionParams)
else:
return None
def __PullPrec(self, agt, boxkey, agtdir, i=0):
# Moves the object with the given parameters
# Does not check preconditions
agtfrom = self.getPos(self.agents, agt)
boxfrom = self.getPos(self.boxes, boxkey, i)
if agtfrom is None:
# # # print("agent", agt, "does not exist")
return None
if boxfrom is None:
# # # print("Box", boxkey, "does not exist")
return None
if agtdir not in self.dir:
# # # print("Direction", agtdir, "does not exist")
return None
if self.Neighbour(agtfrom, boxfrom) != 1:
# # # print("agent", agt, "and box", boxkey, "are not neighbors")
return None
agtto = self.__addPos(agtfrom, agtdir)
if self.Free(agtto):
return (agt, boxkey, agtfrom, agtto, boxfrom, i)
else:
# # # print("Pos " + str(agtto) + " (row,col) is not free")
return None
def __PullEffect(self, agt, boxkey, agtfrom, agtto, boxfrom, i):
# Moves the objects with the given parameters
# Does not check preconditions
self.setPos(self.agents, agt, agtto, 0) # agents are unique thus 0
self.setPos(self.boxes, boxkey, agtfrom, i)
self.map[boxfrom] = chr(32)
self.map[agtfrom] = boxkey
self.map[agtto] = agt
# print("Agent " + agt + " is now at " + str(agtto) + " (row,col)")
# print("Box " + str(box) + " is now at " + str(agtfrom) + " (row,col)")
return True
def Pull(self, agt, boxkey, boxdir, i):
        # moves the objects in the given direction; it checks the precondition and performs the movement
actionParams = self.__PullPrec(agt, boxkey, boxdir, i)
if actionParams is not None:
return self.__PullEffect(*actionParams)
else:
return None
def minimalRep(self):
# returns the minimal representation of the states
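        # walls and goals never move, so agent and box positions are enough to identify a state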
return str([self.agents, self.boxes])
def isExplored(self):
# returns true if the state is explored
return self.minimalRep() in self.explored
def __addToExplored(self, children):
# adds the state to the explored list
if not self.isExplored():
self.explored.add(self.minimalRep())
children.append(self)
def isGoalState(self):
# checks if the state is a goal state
keys = self.getGoalKeys()
for key in keys:
goals = self.getGoalsByKey(key)
for pos, color in goals:
if self.map[pos] != key.upper():
return False
return True
def bestPath(self, format=0, index=0):
# function returns the list of actions used to reach the state
path = []
state = copy.deepcopy(self)
if format == 1:
# format used by actions
while state.actionPerformed is not None:
path.append(state.actionPerformed)
state = state.prevState
elif isinstance(format, str):
# trace back an object
looking_for = format
obj_group = "agents" if format.isnumeric() else "boxes"
while state.actionPerformed is not None:
path.append(
[
state.t,
state.getPos(getattr(state, obj_group), looking_for, index),
]
)
state = state.prevState
else:
# format used by server
while state.actionPerformed is not None:
# print(state.actionPerformed, state.actionPerformed[0])
cmd = state.actionPerformed[0]
if cmd == "Push": # (agtfrom, boxfrom, boxto)
parm1 = self.__getDir(
state.actionPerformed[1][2], state.actionPerformed[1][3]
)
parm2 = self.__getDir(
state.actionPerformed[1][3], state.actionPerformed[1][4]
)
cmd = f"Push({parm1},{parm2})"
elif cmd == "Pull": # (agtfrom, agtto, boxfrom)
parm1 = self.__getDir(
state.actionPerformed[1][2], state.actionPerformed[1][3]
)
parm2 = self.__getDir(
state.actionPerformed[1][2], state.actionPerformed[1][4]
)
cmd = f"Pull({parm1},{parm2})"
elif cmd == "Move":
parm1 = self.__getDir(
state.actionPerformed[1][1], state.actionPerformed[1][2]
)
cmd = f"Move({parm1})"
elif cmd == "NoOp":
cmd = "NoOp"
path.append(cmd)
state = state.prevState
# Reverse the order
return path[::-1]
def explore(self):
        # Explores unexplored states and returns a list of children
        children = []
        # Loop iterates through every possible action
for direction in self.dir:
for agtkey in self.agents:
# TODO reformat these nested loops and if statements!
# This can be perhaps be optimized by only looking at boxes at the
# neighboring tiles of the agent
for boxkey in self.boxes:
for i in range(len(self.boxes[boxkey])):
boxcolor = self.boxes[boxkey][i][1]
# [agent letter][agent number (0 since it is unique)][color]
if self.agents[agtkey][0][1] == boxcolor:
# Checks a pull action if it is possible it is appended to the the children
actionParams = self.__PullPrec(agtkey, boxkey, direction, i)
if actionParams is not None:
child = StateInit(self)
child.actionPerformed = ["Pull", actionParams]
child.__PullEffect(*actionParams)
child.__addToExplored(children)
actionParams = self.__PushPrec(agtkey, boxkey, direction, i)
if actionParams is not None:
child = StateInit(self)
child.actionPerformed = ["Push", actionParams]
child.__PushEffect(*actionParams)
child.__addToExplored(children)
# Checks a Push action if it is possible it is appended to the the children
# Checks a Move action if it is possible it is appended to the the children
actionParams = self.__MovePrec(agtkey, direction)
if actionParams is not None:
child = StateInit(self)
child.actionPerformed = ["Move", actionParams]
child.__MoveEffect(*actionParams)
child.__addToExplored(children)
for agtkey in self.agents:
# TODO make a noop function
child = StateInit(self)
child.actionPerformed = ["NoOp", None]
child.__addToExplored(children)
return children
class StateConcurrent(StateInit):
"""Extend StateInit with concurrent literals."""
def __init__(self, parent: StateInit = None, concurrent: Dict = None):
"""Initialize by adding a time table to the usual `StateInit`.
Parameters
----------
parent: StateInit
concurrent: Dict
Shared table that contains times where an object (box) in the
environment has been changed by another agent:
{t: {box: [(row, col), index], ...}, ...}
"""
super().__init__(parent)
self.concurrent = concurrent if concurrent else parent.concurrent
self.hunt_ghost()
def __NoOpPrec(self):
"""Evaluate precondition for NoOp.
Agent can stay at his position without doing anything.
"""
return self.__WaitPrec_t(self.t) and self.__WaitPrec_t(self.t+1)
def __WaitPrec_t(self, t):
if t in self.concurrent:
joint_concurrent = self.concurrent[t]
            # a state that is being solved is guaranteed to have only one agent
agent_pos = self.getPos(self.agents, list(self.agents.keys())[0])
for pos, _ in joint_concurrent.values():
if pos is None:
continue
if agent_pos[0] == pos[0] and agent_pos[1] == pos[1]:
return False
return True
def __ConcurrentPrec(self):
"""Evaluate precondition for concurrent changes of the world.
Something has changed given a concurrent action by another agent.
"""
return self.t in self.concurrent
def __ConcurrentEffect(self, t):
"""Modify environment according to concurrent actions at time `t`."""
joint_concurrent = self.concurrent[t]
for obj_key in joint_concurrent:
pos, index = list(joint_concurrent[obj_key])
obj_group = "agents" if obj_key.isnumeric() else "boxes"
if obj_group == "boxes":
prev_pos = self.getPos(getattr(self, obj_group), obj_key, index)
self.setPos(getattr(self, obj_group), obj_key, pos, index)
# introduce a ghost box which will be removed on child nodes
self.map[prev_pos[0], prev_pos[1]] = "Ñ"
if pos is not None:
self.map[pos[0], pos[1]] = obj_key
else:
# agents don't leave ghosts behind and are not in the StateInit
self.map[self.map == obj_key] = "Ñ"
self.map[pos[0], pos[1]] = obj_key
return True
def hunt_ghost(self):
"""Remove ghosted positions put by a Councurent Effect."""
self.map[self.map == "Ñ"] = " "
def explore(self):
"""Explore with 'NoOp's.
The Preconditions to a NoOp is that the environment was changed
by another agent; i.e., there is an entry in `self.concurrent`
for the next time `self.t`. This ensures that agents just wait if the
next state is new and applies the concurrent changes to all children.
"""
children = []
        # Loop iterates through every possible action
child_def = StateConcurrent(self)
if child_def.__ConcurrentPrec():
# apply concurrent effects to all children but also append
# a NoOp children which just waits for the env to change
# println("Applying NoOp")
child_def.__ConcurrentEffect(child_def.t)
if child_def.__NoOpPrec():
child = copy.deepcopy(child_def)
child.actionPerformed = ["NoOp", None]
child._StateInit__addToExplored(children)
for direction in self.dir:
for agtkey in self.agents:
# TODO reformat these nested loops and if statements!
# This can be perhaps be optimized by only looking at boxes at
# the neighboring tiles of the agent
for boxkey in child_def.boxes:
for i in range(len(child_def.boxes[boxkey])):
boxcolor = child_def.boxes[boxkey][i][1]
# [agent letter][agent number (0 since it is unique)][color]
if child_def.agents[agtkey][0][1] == boxcolor:
# Checks a pull action if it is possible it is appended to the the children
actionParams = child_def._StateInit__PullPrec(
agtkey, boxkey, direction, i
)
if actionParams is not None:
child = copy.deepcopy(child_def)
child.actionPerformed = ["Pull", actionParams]
child._StateInit__PullEffect(*actionParams)
child._StateInit__addToExplored(children)
# Checks a Push action if it is possible it is appended to the the children
actionParams = child_def._StateInit__PushPrec(
agtkey, boxkey, direction, i
)
if actionParams is not None:
child = copy.deepcopy(child_def)
child.actionPerformed = ["Push", actionParams]
child._StateInit__PushEffect(*actionParams)
child._StateInit__addToExplored(children)
# Checks a Move action if it is possible it is appended to the the children
actionParams = child_def._StateInit__MovePrec(agtkey, direction)
if actionParams is not None:
child = copy.deepcopy(child_def)
child.actionPerformed = ["Move", actionParams]
child._StateInit__MoveEffect(*actionParams)
child._StateInit__addToExplored(children)
return children
def AdvancePrec(self):
"""Is there some concurrent change in the future.
        It will be called by the strategy.
"""
future = [t for t in self.concurrent if t > self.t]
if future:
return min(future)
return False
def advance(self) -> StateInit:
"""Advance in time until the environment is changed by other agent."""
next_time = self.AdvancePrec()
if not next_time:
return self
future_self = self
while next_time > future_self.t:
println(future_self)
future_self = StateConcurrent(future_self)
future_self.actionPerformed = ["NoOp", None]
return future_self
| 2.40625 | 2 |
adv_patch_bench/models/common.py | chawins/adv-patch-bench | 1 | 12794908 | import torch
import torch.nn as nn
class Normalize(nn.Module):
def __init__(self, mean, std, *args, **kwargs):
super().__init__()
self.register_buffer('mean', torch.tensor(mean)[None, :, None, None])
self.register_buffer('std', torch.tensor(std)[None, :, None, None])
def forward(self, x):
return (x - self.mean) / self.std
# class ColorDetectorWrapper(nn.Module):
# def __init__(self, model):
# super().__init__()
# self.model = model
# self.color_dict = {
# 'circle-750.0': ['white', 'blue', 'red'], # (1) white+red, (2) blue+white
# 'triangle-900.0': ['white', 'yellow'], # (1) white, (2) yellow
# 'triangle_inverted-1220.0': [], # (1) white+red
# 'diamond-600.0': [], # (1) white+yellow
# 'diamond-915.0': [], # (1) yellow
# 'square-600.0': [], # (1) blue
# 'rect-458.0-610.0': ['white', 'other'], # (1) chevron (also multi-color), (2) white
# 'rect-762.0-915.0': [], # (1) white
# 'rect-915.0-1220.0': [], # (1) white
# 'pentagon-915.0': [], # (1) yellow
# 'octagon-915.0': [], # (1) red
# 'other': [],
# }
# self.class_list = list(self.color_dict.keys())
# self.class_idx = {
# 'circle-750.0': 0, # (1) white+red, (2) blue+white
# 'triangle-900.0': 3, # (1) white, (2) yellow
# 'triangle_inverted-1220.0': 5, # (1) white+red
# 'diamond-600.0': 6, # (1) white+yellow
# 'diamond-915.0': 7, # (1) yellow
# 'square-600.0': 8, # (1) blue
# 'rect-458.0-610.0': 9, # (1) chevron (also multi-color), (2) white
# 'rect-762.0-915.0': 11, # (1) white
# 'rect-915.0-1220.0': 12, # (1) white
# 'pentagon-915.0': 13, # (1) yellow
# 'octagon-915.0': 14, # (1) red
# 'other': 15,
# }
# # Define HSV range of the desired colors (H, S, L)
# WHITE = [[0, 0, 95], [360, 360, 100]]
# def forward(self, x):
# logits = self.model(x)
# y = logits.argmax(-1)
# # Change image to HSL color space
# # Count pixels that satisfy the color range
| 2.703125 | 3 |
main_headless.py | riteshgn/aryabhatta-2048 | 0 | 12794909 | <filename>main_headless.py
import json
import statistics
import time
# from ai_engine.engine_best_score_v2 import BestScore as AIEngine
# from ai_engine.engine_best_score_with_depth import BestScoreWithDepth as AIEngine
from ai_engine.engine_expectimax_with_heur_v1 import Expectimax as AIEngine
from ai_oracle.oracle_python_impl import OraclePythonImpl as Oracle
from ai_oracle.game import Game
from ai_oracle.game_cache import Cache
from ai_utils import print_grid, random_initial_state
game_engine = AIEngine(Oracle())
GAME_SETTINGS = {
'algorithm': 'Expectimax',
# 'algorithm': 'BestScoreWithDepth',
# 'algorithm': 'BestScore v2',
'max_depth': 3,
'heuristics_enabled': False,
'number_of_simulations': 3
}
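# Each simulation plays one full head-less game; simulate_games() aggregates the results below.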
def create_initial_game_board():
state = random_initial_state()
# print_grid(state)
return Game(state)
async def play_game(game_num):
print('\t>>> Setting up the game board for game ', game_num)
game = create_initial_game_board()
print('\t>>> Starting game')
try:
while not game.is_game_over():
# game.print_board()
direction = await game_engine.next_move(game.state())
# print('selected move ', direction)
game.move(direction, add_random_tile=True)
    except Exception:
print('\t\t>>> Simulation Error')
print('\n\t>>> Game is over!')
print_grid(game.state())
print('\n')
return game.metrics()
async def simulate_games(number_of_games=1):
print('>>> Starting simulations\n')
scores = []
max_tiles = []
execution_times = []
for index in range(0, number_of_games):
t0 = time.time()
metrics = await play_game(game_num=index+1)
t1 = time.time()
scores.append(metrics['score'])
max_tiles.append(metrics['max_tile'])
execution_times.append(t1 - t0)
print('\n---------------------------')
print('--- Summary Report ---')
print('---------------------------\n')
print('games: ', number_of_games)
print('algorithm: ', GAME_SETTINGS['algorithm'])
print('max_depth: ', GAME_SETTINGS['max_depth'])
print('heuristics_enabled: ', GAME_SETTINGS['heuristics_enabled'])
print('\n--- score stats ---')
print('min: ', min(scores))
print('max: ', max(scores))
print('mean: ', statistics.mean(scores))
print('median:', statistics.median(scores))
print('sd: ', statistics.stdev(scores))
print('\n--- max tile stats ---')
print('min: ', min(max_tiles))
print('max: ', max(max_tiles))
print('mean: ', statistics.mean(max_tiles))
print('median:', statistics.median(max_tiles))
print('sd: ', statistics.stdev(max_tiles))
print('\n--- execution time stats ---')
print('min: ', min(execution_times))
print('max: ', max(execution_times))
print('mean: ', statistics.mean(execution_times))
print('median:', statistics.median(execution_times))
print('sd: ', statistics.stdev(execution_times))
print('cache metrics: ', json.dumps(Cache().metrics()))
if __name__ == '__main__':
import asyncio
loop = asyncio.new_event_loop()
loop.run_until_complete(simulate_games(GAME_SETTINGS['number_of_simulations']))
loop.close()
| 2.375 | 2 |
safe/safe_dict.py | andreclaudino/python-safe-dict | 0 | 12794910 | <gh_stars>0
class SafeDict(dict):
    def __init__(self, dictionary: dict = None, default=None, **kwargs):
        # Avoid the mutable default argument; None means "no initial data".
        super().__init__(**(dictionary or {}), **kwargs)
        self.default = default
def __getitem__(self, key):
return self.get(key, self.default)
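
# Illustrative usage (not part of the original module):
#   d = SafeDict({"a": 1}, default=0)
#   d["a"]        -> 1
#   d["missing"]  -> 0   (no KeyError: __getitem__ falls back to the default)
# Only d[key] lookups use the default; dict.get() and iteration behave as usual.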
| 2.78125 | 3 |
extension/level2hbase.py | wanji/dicaf | 0 | 12794911 | #!/usr/bin/env python
# coding: utf-8
#########################################################################
#########################################################################
"""
File Name: level2hbase.py
Author: <NAME>
E-mail: <EMAIL>
Created on: Sat Jun 7 13:36:03 2014 CST
"""
DESCRIPTION = """
This program can transfer the data from LevelDB to HBase.
"""
import os
import sys
import argparse
import leveldb
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
def perr(msg):
""" Print error message.
"""
sys.stderr.write("%s" % msg)
sys.stderr.flush()
def pinfo(msg):
""" Print information message.
"""
sys.stdout.write("%s" % msg)
sys.stdout.flush()
def runcmd(cmd):
""" Run command.
"""
perr("%s\n" % cmd)
os.system(cmd)
def getargs():
""" Parse program arguments.
"""
parser = argparse.ArgumentParser(description=DESCRIPTION,
formatter_class=
argparse.RawTextHelpFormatter)
parser.add_argument('leveldb', type=str,
help='path to the LevelDB database')
parser.add_argument('table', type=str,
help='target table name in hbase')
parser.add_argument('host', type=str, nargs='?', default="127.0.0.1",
help='IP address / Host name of hbase server')
parser.add_argument('port', type=int, nargs='?', default=9090,
help='port number of hbase server')
parser.add_argument('pyhbase', type=str, nargs='?', default="gen-py",
help='python interface of hbase')
return parser.parse_args()
def main(args):
""" Main entry.
"""
transport = TSocket.TSocket(args.host, args.port)
transport = TTransport.TBufferedTransport(transport)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = Hbase.Client(protocol)
transport.open()
contents = ColumnDescriptor(name='cf:', maxVersions=1)
ldb = leveldb.LevelDB(args.leveldb)
    db_iter = ldb.RangeIter()
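    # RangeIter() yields (key, value) pairs over the whole LevelDB keyspace in key order.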
try:
client.createTable(args.table, [contents])
except AlreadyExists as err:
perr("ERROR: %s\n" % err.message)
sys.exit(1)
cnt = 0
pinfo("Processed image:\n")
pinfo("\r\t%d" % cnt)
while True:
try:
            item = next(db_iter)
except StopIteration:
break
cnt += 1
if cnt % 100 == 0:
pinfo("\r\t%d" % cnt)
client.mutateRow(args.table, item[0],
[Mutation(column="cf:data", value=item[1])], None)
pinfo("\r\t%d\tDone!\n" % cnt)
if __name__ == '__main__':
args = getargs()
sys.path.append(args.pyhbase)
from hbase import Hbase
from hbase.ttypes import *
main(args)
| 2.734375 | 3 |
bannerpunk/extension.py | jarret/bannerpunk | 3 | 12794912 | <reponame>jarret/bannerpunk
from bolt.util import h2b
from bolt.bigsize import BigSize
from bolt.tlv import Tlv
from bolt.namespace import Namespace
from bolt.hop_payload import HopPayload, TlvHopPayload
from bannerpunk.pixel import Pixel
PIXEL_TLV_TYPE = 44445
ART_TLV_TYPE = 44443
class Extension:
def _encode_pixels(pixels):
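        # Each Pixel serializes to a fixed 5-byte record (see parse_pixels below);
        # the records are concatenated and wrapped in a single TLV.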
encoded = b''.join([p.to_bin() for p in pixels])
return Tlv(PIXEL_TLV_TYPE, encoded).encode()
def _encode_art_no(art_no):
encoded = Namespace.encode_tu16(art_no)
return Tlv(ART_TLV_TYPE, encoded).encode()
def encode_non_final(amt_to_forward, outgoing_cltv_value, short_channel_id,
art_no, pixels):
unextended = TlvHopPayload.encode_non_final(amt_to_forward,
outgoing_cltv_value,
short_channel_id)
old_len, content, err = BigSize.pop(unextended)
assert err is None
art_no_content = Extension._encode_art_no(art_no)
pixel_content = Extension._encode_pixels(pixels)
new_content = content + art_no_content + pixel_content
return BigSize.encode(len(new_content)) + new_content
def encode_final(amt_to_forward, outgoing_cltv_value, payment_secret,
total_msat, art_no, pixels):
unextended = TlvHopPayload.encode_final(amt_to_forward,
outgoing_cltv_value,
                                                payment_secret=payment_secret,
                                                total_msat=total_msat)
old_len, content, err = BigSize.pop(unextended)
assert err is None
art_no_content = Extension._encode_art_no(art_no)
pixel_content = Extension._encode_pixels(pixels)
new_content = content + art_no_content + pixel_content
return BigSize.encode(len(new_content)) + new_content
###########################################################################
def parse_pixels(tlv):
pixels = []
if tlv.l % 5 != 0:
return None, "unexpected length"
remainder = tlv.v
while len(remainder) > 0:
pixel_hex, remainder, err = Namespace.pop_bytes(5, remainder)
if err:
return None, err
pixels.append(Pixel.from_bin(h2b(pixel_hex)))
return {'tlv_type_name': "bannerpunk_pixels",
'pixels': pixels}, None
def parse_art_no(tlv):
art_no, remainder, err = Namespace.pop_tu16(tlv.l, tlv.v)
if err:
return None, err
if len(remainder) > 0:
return None, "unexpected extra bytes"
return {'tlv_type_name': "bannerpunk_art_no",
'art_no': art_no}, None
def parse(byte_string):
extension_parsers = {PIXEL_TLV_TYPE: Extension.parse_pixels,
ART_TLV_TYPE: Extension.parse_art_no}
parsed, err = HopPayload.parse(byte_string,
extension_parsers=extension_parsers)
if err:
return None, err
if parsed['format'] != "tlv":
return None, "non-tlv payload byte string"
if ART_TLV_TYPE not in parsed['tlvs']:
return None, "no art tlv in payload"
if PIXEL_TLV_TYPE not in parsed['tlvs']:
return None, "no pixel data tlv in payload"
return parsed, None
| 1.914063 | 2 |
Rota_System/UI/Vacancies/__init__.py | ergoregion/Rota-Program | 0 | 12794913 | <gh_stars>0
__author__ = '<NAME>'
from widget_vacancies import VacanciesWidget | 1.125 | 1 |
Chapter04/reduction.py | DzulJalali/Python-Pararel_SISTER | 1 | 12794914 | <reponame>DzulJalali/Python-Pararel_SISTER
import numpy
from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.size
rank = comm.rank
array_size = 10
recvdata = numpy.zeros(array_size, dtype=int)
senddata = (rank+1)*numpy.arange(array_size, dtype=int)
print(" process %s sending %s " %(rank , senddata))
comm.Reduce(senddata,recvdata,root=0,op=MPI.SUM)
print ('on task',rank,'after Reduce: data = ',recvdata)
| 2.765625 | 3 |
dog_cmd.py | vilsmeier/SummerSession_2021 | 1 | 12794915 | <reponame>vilsmeier/SummerSession_2021
from DogService import *
add_heart_rate(2,89)
| 1.03125 | 1 |
src/modeling/datagens.py | ilrd/Pulmonary-Fibrosis | 0 | 12794916 | import numpy as np
from tensorflow import keras
import pandas as pd
import os
class DcmDataGenerator(keras.utils.Sequence):
"""Generates data for Keras
Sequence based data generator. Suitable for building data generator for training and prediction.
"""
def __init__(self, images_path, dim=(15, 512, 512), window=None):
"""Initialization
:param images_path: path to images location
:param dim: tuple indicating image dimension in format CHW
"""
self.list_IDs = os.listdir(images_path)
self.images_path = images_path
self.dim = dim
self.indexes = np.arange(len(self.list_IDs))
self.on_epoch_end()
self.window = window
def __len__(self):
"""Denotes the number of batches per epoch
:return: number of batches per epoch
"""
return len(self.list_IDs)
def on_epoch_end(self):
"""Updates indexes after each epoch
"""
self.indexes = np.arange(len(self.list_IDs))
def flow(self, seed):
np.random.seed(seed)
i = int(np.random.randint(0, self.__len__(), size=(1,)))
while True:
yield self.__getitem__(i % self.__len__())
i += 1
def __getitem__(self, index):
"""Generate one patient's data
:param index: index of the patient
:return: X_dcm
"""
# Find list of IDs
patient_ID = self.list_IDs[index]
# Generate data
X_dcm = self._generate_X(patient_ID)
return X_dcm, np.array([1, ])
def _generate_X(self, patient_ID):
"""Generates data containing patient's images
:param patient_ID: ID of the patient
:return: patient's images
"""
# Initialization
X_dcm = np.empty((1, *self.dim), dtype=np.float32)
patient_path = os.path.join(self.images_path, patient_ID)
dcm_names = np.array([dcm_name[:-4] for dcm_name in os.listdir(patient_path)], dtype=int)
dcm_names = sorted(list(dcm_names))
patient_dcm_paths = [f'{self.images_path}/{patient_ID}/{dcm_num}.npy' for dcm_num in dcm_names]
# Generate data
for j, dcm_path in enumerate(patient_dcm_paths):
X_dcm[0, j] = self._load_dcm(dcm_path)
X_dcm = np.moveaxis(X_dcm, 1, -1)
return X_dcm
def _load_dcm(self, image_path):
"""Load grayscale image
:param image_path: path to image to load
:return: loaded image
"""
img = np.load(image_path, allow_pickle=True)
if self.window:
lb = self.window[0]
ub = self.window[1]
img[img < lb] = lb
img[img > ub] = ub
img = (img - lb) / (ub - lb)
return img
class CsvDataGenerator(keras.utils.Sequence):
"""Generates data for Keras
Sequence based data generator. Suitable for building data generator for training and prediction.
"""
def __init__(self, csv_path, to_fit=True, to_normalize=True):
"""Initialization
:param to_normalize: True to normalize, False otherwise
:param csv_path: path to csv file location
:param to_fit: True to return X and y, False to return X only
"""
self.to_normalize = to_normalize
self.list_IDs = os.listdir(csv_path[:-4])
self.csv_path = csv_path
self.to_fit = to_fit
self.indexes = np.arange(len(self.list_IDs))
self.on_epoch_end()
def __len__(self):
"""Denotes the number of batches per epoch
:return: number of batches per epoch
"""
return len(self.list_IDs)
def on_epoch_end(self):
"""Updates indexes after each epoch
"""
self.indexes = np.arange(len(self.list_IDs))
def flow(self, seed):
np.random.seed(seed)
i = int(np.random.randint(0, self.__len__(), size=(1,)))
while True:
yield self.__getitem__(i % self.__len__())
i += 1
def __getitem__(self, index):
"""Generate one patient's data
:param index: index of the patient
:return: X
"""
# Find list of IDs
patient_ID = self.list_IDs[index]
# Generate data
X = self._generate_X(patient_ID)
if self.to_fit:
y = self._generate_y(patient_ID)
return X, y
else:
return X
def _generate_X(self, patient_ID):
"""Generates data containing patient's first csv record
:param patient_ID: ID of the patient
:return: patient's first csv record
"""
X = np.empty(shape=(1, 7), dtype=np.float32)
# Generate data
X[0] = self._load_X(self.csv_path, patient_ID)
return X
def _load_X(self, csv_path, patient_ID):
"""Load csv with patient's weeks and corresponding FVC
:param csv_path: path to csv file with weeks and FVC file to load
:return: loaded csv file with weeks and FVC file to load
"""
patients_df = pd.read_csv(csv_path)
patient = patients_df[patients_df['Patient'] == patient_ID]
patient.reset_index(inplace=True)
X_columns = ['Weeks', 'FVC', 'Age', 'Ex-smoker', 'Never smoked', 'Currently smokes', 'Sex_n']
X_patient = patient.loc[0, X_columns]
if self.to_normalize:
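            # The hard-coded constants below appear to be training-set mean/std for Age, FVC and Weeks.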
X_patient['Age'] = (X_patient['Age'] - 67.18850871530019) / 7.055116199848975
X_patient['FVC'] = (X_patient['FVC'] - 2690.479018721756) / 832.5021066817238
X_patient['Weeks'] = (X_patient['Weeks'] - 31.861846352485475) / 23.265510111399017
X_patient = X_patient.to_numpy()
return X_patient
def _generate_y(self, patient_ID):
"""Generates data containing patient's [1:] csv records
:param patient_ID: ID of the patient
:return: patient's [1:] csv records
"""
y = np.empty(shape=(1, 146, 2), dtype=np.float32)
# Generate data
y[0] = self._load_y(self.csv_path, patient_ID)
return y
def _load_y(self, csv_path, patient_ID):
"""Load csv with patient's weeks and corresponding FVC
:param csv_path: path to csv file with weeks and FVC file to load
:return: loaded csv file with weeks and FVC file to load
"""
patients_df = pd.read_csv(csv_path)
patient = patients_df[patients_df['Patient'] == patient_ID]
patient.reset_index(inplace=True)
weeks_FVC = patient.loc[1:, ['Weeks', 'FVC']]
weeks_FVC = weeks_FVC[~weeks_FVC.duplicated(['Weeks'])]
weeks_FVC = self.pad_y(weeks_FVC)
weeks_FVC = weeks_FVC.to_numpy()
return weeks_FVC
def pad_y(self, csv_df):
csv_df['isRecord'] = 1
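        # Pad to one row per week in [-12, 133] (146 rows in total); isRecord marks real measurements.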
for i in range(-12, 134):
if not np.any(csv_df['Weeks'] == i):
csv_df = csv_df.append({'Weeks': i, 'FVC': 0, 'isRecord': 0}, ignore_index=True)
csv_df.sort_values('Weeks', inplace=True)
csv_df.drop(columns='Weeks', inplace=True)
if self.to_normalize:
csv_df.loc[:, 'FVC'] = (csv_df.loc[:, 'FVC'] - 2690.479018721756) / 832.5021066817238
csv_df.reset_index(drop=True, inplace=True)
return csv_df
# ==================================#
# Creating datagen
def _merge_datagens(csv_gen, dcm_gen, shuffle=True, is_patient_record=True):
seed = 0
while True:
csv_flow = csv_gen.flow(seed)
dcm_flow = dcm_gen.flow(seed)
patient_num = 1
while True:
csv_data = next(csv_flow)
dcm_data = next(dcm_flow)
csv_X = csv_data[0]
dcm_X_img = dcm_data[0]
csv_y = csv_data[1][:, :, 0]
csv_is_patient_record = csv_data[1][:, :, 1]
if is_patient_record:
yield [csv_X, dcm_X_img], csv_y, csv_is_patient_record
else:
yield [csv_X, dcm_X_img], csv_y
patient_num += 1
if patient_num > 175:
break
if shuffle:
seed += 1
def create_datagen(shuffle=True, window=None, is_patient_record=True):
"""Returns generator that yields [csv_X, dcm_X_img], csv_y, csv_is_patient_record"""
csv_datagen = CsvDataGenerator('../../data/processed/train.csv', to_normalize=True)
dcm_datagen = DcmDataGenerator('../../data/processed/train', window=window)
merged_gen = _merge_datagens(csv_datagen, dcm_datagen, shuffle=shuffle, is_patient_record=is_patient_record)
return merged_gen
# def gen_train_test_split(datagen):
# datagen.
# gen = create_datagen(shuffle=True)
# x1, y1, is_p_r1 = next(gen)
| 3.171875 | 3 |
lib/fuzzy_word_search_results.py | dev-mike-del/fuzzy-word-search | 0 | 12794917 | <filename>lib/fuzzy_word_search_results.py
"""This module performs a 'fuzzy search on on JSON file"""
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from lib.fuzzy_word_search_work import FuzzyWordSearchWork
# Performs a 'fuzzy search' on given JSON.
# See test_input.json for JSON format
class FuzzyWordSearch(FuzzyWordSearchWork):
"""
Performs a 'fuzzy word search' on given JSON. See test_input.json for JSON format.
Instantiate FuzzyWordSearch with the path to the JSON file as the argument. Once
initiated, the 'run()' method will execute the 'fuzzy search' and print the
results.
"""
def __init__(self, json_filepath):
FuzzyWordSearchWork.__init__(self, json_filepath)
def print_results(self):
"""
Prints fuzzy_search_dict data in formated string
"""
for query in self.fuzzy_search_dict:
try:
if self.fuzzy_search_dict[query]["phrases"]:
print(f"\nQuery: {query}")
for phrase in self.fuzzy_search_dict[query]["phrases"]:
print(
f"""{self.fuzzy_search_dict[query]['phrases'].index(phrase) +1}
Phrase: {phrase['original phrase']}
Fuzzy Search: {phrase['fuzzy match']}"""
)
except KeyError:
pass
def print_results_dict(self):
"""
        Prints the raw fuzzy_search_dict dictionary, which is useful for
        inspecting the intermediate data structure built by the fuzzy search.
"""
print(self.fuzzy_search_dict)
| 3.703125 | 4 |
src/abc194_b.py | 06keito/study-atcoder | 1 | 12794918 | <gh_stars>1-10
N = int(input())
li = [list(map(int,input().split())) for i in range(N)]
ans = 10**9
for idx_a in range(N):
for idx_b in range(N):
A,B = li[idx_a][0],li[idx_b][1]
if idx_a==idx_b:
ans = min(ans,A+B)
else:
ans = min(ans,max(A,B))
print(ans) | 2.40625 | 2 |
src/api/datahub/databus/migration_views.py | Chromico/bk-base | 84 | 12794919 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import time
from common.decorators import detail_route, list_route
from common.log import logger
from common.transaction import auto_meta_sync
from common.views import APIViewSet
from datahub.common.const import DEFAULT
from datahub.databus.exceptions import (
MigrationCannotOperatError,
MigrationNotFoundError,
)
from datahub.databus.task.task_utils import check_task_auth
from django.forms import model_to_dict
from rest_framework.response import Response
from datahub.databus import exceptions, migration, models, rt, serializers, settings
class MigrationViewset(APIViewSet):
"""
    REST operations for this resource are exposed uniformly through this APIViewSet.
"""
serializer_class = serializers.MigrateCreateSerializer
    # Rename the instance-ID variable used in the URL; the default is pk
lookup_field = "id"
def create(self, request):
"""
@apiGroup migration
@api {post} /databus/migrations/ 创建迁移任务
@apiDescription 创建迁移任务
@apiParam {string} result_table_id result_table_id
@apiParam {string} source 源存储
@apiParam {string} dest 目标存储
@apiParam {string} start 起始时间
@apiParam {string} end 结束时间
@apiParam {int} parallelism 【选填】处理并发数,默认3
@apiParam {boolean} overwrite 是否覆盖已有数据
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": [{}],
"message": "ok",
"code": "1500200",
}
"""
args = self.params_valid(serializer=serializers.MigrateCreateSerializer)
rt_id = args["result_table_id"]
check_task_auth(rt_id)
rt_info = rt.get_databus_rt_info(rt_id)
if not rt_info:
raise exceptions.NotFoundRtError()
        # check the source storage configuration
if args["source"] not in rt_info["storages.list"]:
raise exceptions.TaskStorageNotFound(
message_kv={
"result_table_id": args["result_table_id"],
"storage": args["source"],
}
)
        # check the destination storage configuration
if args["dest"] not in rt_info["storages.list"]:
raise exceptions.TaskStorageNotFound(
message_kv={
"result_table_id": args["result_table_id"],
"storage": args["dest"],
}
)
task_label = migration.create_task(rt_info, args)
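        # every task row created for this request shares the returned task_label, used to fetch them below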
objs = models.DatabusMigrateTask.objects.filter(task_label=task_label).values()
return Response(objs)
def partial_update(self, request, id):
"""
@apiGroup migration
        @api {patch} /databus/migrations/:task_id/ Update a migration task
        @apiDescription Update a migration task
        @apiParam {string} status status
        @apiParam {int} parallelism processing parallelism
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": {},
"message": "ok",
"code": "1500200",
}
"""
args = self.params_valid(serializer=serializers.MigrateUpdateSerializer)
obj = models.DatabusMigrateTask.objects.get(id=id)
with auto_meta_sync(using=DEFAULT):
if args["status"] != "":
obj.status = args["status"]
if args["parallelism"] > 0:
obj.parallelism = args["parallelism"]
obj.save()
return Response(model_to_dict(models.DatabusMigrateTask.objects.get(id=id)))
def list(self, request):
"""
@apiGroup migration
        @api {get} /databus/migrations/ Query unfinished tasks
        @apiDescription Query unfinished tasks
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": [{}],
"message": "ok",
"code": "1500200",
}
"""
tasks = models.DatabusMigrateTask.objects.exclude(status__in=["finish"]).values(
"id",
"task_label",
"task_type",
"result_table_id",
"parallelism",
"dest",
"dest_config",
"overwrite",
"start",
"end",
"status",
)
for task_obj in tasks:
if task_obj["task_type"] != "overall":
task_obj["source_config"] = ""
task_obj["dest_config"] = ""
return Response(tasks)
def retrieve(self, request, id):
"""
@apiGroup migration
        @api {get} /databus/migrations/:task_id/ Query a task
        @apiDescription Query a task
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": [{}],
"message": "ok",
"code": "1500200",
}
"""
task_id = 0
try:
task_id = int(id)
except Exception:
pass
if task_id == 0:
return Response(
models.DatabusMigrateTask.objects.filter(result_table_id=id, task_type="overall")
.order_by("task_label")
.values(
"id",
"task_type",
"result_table_id",
"source",
"dest",
"start",
"end",
"created_at",
"created_by",
"updated_at",
"status",
)
)
else:
obj = models.DatabusMigrateTask.objects.get(id=task_id)
return Response(
models.DatabusMigrateTask.objects.filter(task_label=obj.task_label).values(
"id",
"task_type",
"result_table_id",
"source",
"dest",
"input",
"output",
"start",
"end",
"created_at",
"created_by",
"updated_at",
"status",
)
)
@detail_route(methods=["get"], url_path="start")
def start_task(self, request, id):
"""
@apiGroup migration
        @api {get} /databus/migrations/:task_id/start/ Start a task
        @apiDescription Start a task
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": True,
"message": "ok",
"code": "1500200",
}
"""
args = self.params_valid(serializer=serializers.MigrateTaskTypeSerializer)
task_obj = migration.get_task(id)
if task_obj.status in ["finish"]:
raise MigrationCannotOperatError(message_kv={"type": task_obj.status})
return Response(migration.start_task(task_obj, args["type"]))
@detail_route(methods=["get"], url_path="stop")
def stop_task(self, request, id):
"""
@apiGroup migration
        @api {get} /databus/migrations/:task_id/stop/ Stop a task
        @apiDescription Stop a task
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": True,
"message": "ok",
"code": "1500200",
}
"""
args = self.params_valid(serializer=serializers.MigrateTaskTypeSerializer)
task_obj = migration.get_task(id)
migration.stop_task(task_obj, args["type"])
return Response(True)
@detail_route(methods=["get"], url_path="status")
def get_status(self, request, id):
"""
@apiGroup migration
        @api {get} /databus/migrations/:task_id/status/ Query the task's Pulsar runtime status
        @apiDescription Query the task's Pulsar runtime status
        @apiParam {string} type task type to query; one of: all - all task types (default), source, sink
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": {"source":{},
"sink":{}},
"message": "ok",
"code": "1500200",
}
"""
args = self.params_valid(serializer=serializers.MigrateTaskTypeSerializer)
task_obj = migration.get_task(id)
result = migration.get_task_status(task_obj, args["type"])
return Response(result)
@list_route(methods=["get"], url_path="get_clusters")
def get_clusters(self, request):
"""
@apiGroup migration
        @api {get} /databus/migrations/get_clusters/ Get the clusters supporting migration for a result_table_id
        @apiDescription Get the clusters supporting migration for a result_table_id
        @apiParam {string} result_table_id result_table_id
        @apiParam {string} type task type to query; one of: all - all task types (default), source, sink
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": {"source":[],
"sink":[]},
"message": "ok",
"code": "1500200",
}
"""
args = self.params_valid(serializer=serializers.TasksRtIdSerializer)
result = {"source": [], "dest": []}
        # query the list of storage clusters already configured for this result table
rt_info = rt.get_rt_fields_storages(args["result_table_id"])
if not rt_info or not rt_info.get("storages"):
return Response(result)
storages = rt_info.get("storages")
for storage_type in storages.keys():
if storage_type in settings.migration_source_supported:
result["source"].append(storage_type)
elif storage_type in settings.migration_dest_supported:
result["dest"].append(storage_type)
return Response(result)
@list_route(methods=["get"], url_path="get_tasks")
def get_tasks(self, request):
"""
@apiGroup migration
        @api {get} /databus/migrations/get_tasks/ Get the migration tasks under a raw data id
        @apiDescription Get the migration tasks under a raw data id
        @apiParam {string} raw_data_id raw data id
        @apiParam {string} result_table_id result_table_id, ignored when raw_data_id is present
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": {{}},
"message": "ok",
"code": "1500200",
}
"""
args = self.params_valid(serializer=serializers.MigrationGetTasksVerifySerializer)
if "raw_data_id" in args:
# query by raw_data_id
raw_data_id = args["raw_data_id"]
objects = models.DatabusClean.objects.filter(raw_data_id=raw_data_id)
rts = [obj.processing_id for obj in objects]
elif "result_table_id" in args:
rts = [args["result_table_id"]]
else:
return Response([])
result = models.DatabusMigrateTask.objects.filter(result_table_id__in=rts, task_type="overall").values(
"id",
"task_type",
"result_table_id",
"source",
"dest",
"start",
"end",
"created_at",
"created_by",
"updated_at",
"status",
)
return Response(result)
@list_route(methods=["post"], url_path="update_task_status")
def update_task_status(self, request):
"""
@apiGroup migration
        @api {post} /databus/migrations/update_task_status/ Update task status
        @apiDescription Update task status
        @apiParam {int} task_id task id
        @apiParam {string} status status
        @apiParam {int} input [optional] number of records processed by the source task, default 0
        @apiParam {int} output [optional] number of records processed by the sink task, default 0
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": {{}},
"message": "ok",
"code": "1500200",
}
"""
args = self.params_valid(serializer=serializers.MigrateUpdateStateSerializer)
try:
obj = models.DatabusMigrateTask.objects.get(id=args["task_id"])
except models.DatabusMigrateTask.DoesNotExist:
raise MigrationNotFoundError()
with auto_meta_sync(using=DEFAULT):
obj.status = args["status"]
obj.input = args["input"]
obj.output = args["output"]
obj.updated_at = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
logger.info("update task:{} status:{} input:{} output:{}".format(obj.id, obj.status, obj.input, obj.output))
obj.save()
return Response("ok")
@list_route(methods=["get"], url_path="get_support_clusters")
def get_support_clusters(self, request):
"""
@apiGroup migration
        @api {get} /databus/migrations/get_support_clusters/ Get the currently supported migration storage types
        @apiDescription Get the source and destination storage types currently supported for migration
@apiSuccessExample {json} Success-Response:
HTTP/1.1 200 OK
{
"result": true,
"data": {"source" : ["tspider"],
"dest": ["hdfs"]},
"message": "ok",
"code": "1500200",
}
"""
return Response(
{
"source": settings.migration_source_supported,
"dest": settings.migration_dest_supported,
}
)
| 1.320313 | 1 |
tests/test_delete_contact.py | s34rching/python_classes | 0 | 12794920 | <filename>tests/test_delete_contact.py
# -*- coding: utf-8 -*-
from models.contact import Contact
import random
import pytest
def test_delete_contact(app, orm, check_ui):
with pytest.allure.step('Given a non-empty contact list'):
if app.contact.count() == 0:
app.contact.create(Contact(firstname="Test", lastname="Test", address="some address, 1", home_number="+375293003030",
mobile_number="+375294004040"))
old_contact_list = orm.get_contact_list()
with pytest.allure.step('Given a random contact from the list'):
contact = random.choice(old_contact_list)
with pytest.allure.step('When I delete a new contact from the list'):
app.contact.delete_some_contact_by_id(contact.id)
    with pytest.allure.step('Then the new contact list is equal to the old list without the deleted contact'):
new_contact_list = orm.get_contact_list()
old_contact_list.remove(contact)
assert old_contact_list == new_contact_list
if check_ui:
assert sorted(new_contact_list, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max) | 2.6875 | 3 |
BrushUp/import.py | JaviFdezT/brush_up | 0 | 12794921 | <filename>BrushUp/import.py<gh_stars>0
import time
import sqlite3
from tkinter.filedialog import askopenfilename
from tkinter import Tk
Tk().withdraw()
filename = askopenfilename(initialdir="./BBDD")
connection = sqlite3.connect(filename)
cursor=connection.cursor()
file = open("DOCS/words.txt",'r')
words,discardedwords=[],[]
now=time.localtime(time.time())
mm=str(now.tm_mon) if len(str(now.tm_mon))==2 else "0"+str(now.tm_mon)
dd=str(now.tm_mday) if len(str(now.tm_mday))==2 else "0"+str(now.tm_mday)
day="{!s}/{!s}/{!s}".format(str(now.tm_year),mm,dd)
for line in file:
line=line.split("*")
for i in range(len(line)):
line[i]=line[i].strip().replace("\t","").replace("\n","")
if len(line)==3:
words.append([line[0],line[1],line[2]])
else:
discardedwords.append(line)
for word in words:
syn=input("sysntaxis for \'"+str(word[0])+"\' as \'"+str(word[2])+"\':")
t=(str(word[0]), str(word[1]), str(word[2]), str(syn),1,day)
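    # Parameterized query: sqlite3 handles quoting/escaping of the bound values.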
cursor.execute('INSERT INTO WORDS (word, example, meaning, syntaxis,category,day) VALUES (?,?,?,?,?,?)', t)
connection.commit()
print("New word "+str(word[0]))
if len(word[1])>100:
print("although its example is too large")
print("Added words:"+str(len(words))+". Discarded lines:"+str(len(discardedwords))) | 3.0625 | 3 |
graph/gp.py | HerlanAssis/Networkx-From-KML | 0 | 12794922 | from fastkml import kml
from .utils import haversine
class GraphFromKmlDoc:
def __init__(self, filename='pracas'):
self._filename = filename
def _get_document(self):
doc = open("pracas.kml", "r").read().encode('utf-8')
self._document = kml.KML()
self._document.from_string(doc)
return self._document
def get_pracas(self):
self._pracas = dict()
for locais in self._get_document().features():
for idx, marcadores in enumerate(locais.features()):
lng, lat, *args = marcadores.geometry._coordinates
self._pracas[marcadores.name] = {
'id': idx,
'lat': lat,
'lng': lng,
}
return self._pracas
def get_matriz_adjacencias(self):
self._distancias=dict()
pracas = self.get_pracas()
for praca, coordenadas in pracas.items():
self._distancias[praca] = {}
for _praca, _coordenadas in pracas.items():
self._distancias[praca][_praca] = haversine(
lat1=coordenadas['lat'],
lon1=coordenadas['lng'],
lat2=_coordenadas['lat'],
lon2=_coordenadas['lng'],
)
return self._distancias
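
# Example usage (a minimal sketch, not part of the original module; it assumes a
# "pracas.kml" file with Placemark coordinates is available next to this code):
#
#   grafo = GraphFromKmlDoc(filename='pracas')
#   pracas = grafo.get_pracas()              # {name: {'id': ..., 'lat': ..., 'lng': ...}}
#   matriz = grafo.get_matriz_adjacencias()  # pairwise haversine distances keyed by name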
| 2.765625 | 3 |
diccionario/diccionario/dictionary/models.py | ssvargass/en-senas | 1 | 12794923 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from pyuploadcare.dj.models import ImageField
from taggit_autosuggest.managers import TaggableManager
@python_2_unicode_compatible
class Word(models.Model):
title = models.CharField(max_length=255)
image = ImageField(blank=True, manual_crop="")
tags = TaggableManager()
created_date = models.DateTimeField(default=timezone.now)
published_date = models.DateTimeField(blank=True, null=True)
def publish(self):
self.published_date = timezone.now()
self.save()
def __str__(self):
return self.title
| 2.078125 | 2 |
benchmark/conftest.py | yhtang/GraphDot | 9 | 12794924 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
def pytest_benchmark_scale_unit(config, unit, benchmarks, best, worst, sort):
if unit == 'seconds':
prefix = 'm'
scale = 1e3
elif unit == 'operations':
prefix = 'K'
scale = 0.001
else:
raise RuntimeError("Unexpected measurement unit %r" % unit)
return prefix, scale
| 2.40625 | 2 |
ruclip/model.py | AlexWortega/ru-clip | 73 | 12794925 | <filename>ruclip/model.py
# -*- coding: utf-8 -*-
import os
import json
from collections import OrderedDict
import torch
import numpy as np
from torch import nn
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
('c_fc', nn.Linear(d_model, d_model * 4)),
('gelu', QuickGELU()),
('c_proj', nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor):
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[ResidualAttentionBlock(width, heads, attn_mask) for _ in range(layers)])
def forward(self, x: torch.Tensor):
return self.resblocks(x)
class VisionTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int, layers: int, heads: int, output_dim: int):
super().__init__()
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat([
self.class_embedding.to(x.dtype) +
torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x
], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if self.proj is not None:
x = x @ self.proj
return x
class CLIP(nn.Module):
def __init__(
self,
embed_dim,
image_resolution,
vision_layers,
vision_width,
vision_patch_size,
context_length,
vocab_size,
transformer_width,
transformer_heads,
transformer_layers,
eos_id=3,
):
super().__init__()
self.eos_id = eos_id
self.context_length = context_length
vision_heads = vision_width // 64
self.visual = VisionTransformer(
input_resolution=image_resolution,
patch_size=vision_patch_size,
width=vision_width,
layers=vision_layers,
heads=vision_heads,
output_dim=embed_dim,
)
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask(),
)
self.vocab_size = vocab_size
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.initialize_parameters()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float('-inf'))
mask.triu_(1)
return mask
@property
def dtype(self):
return self.visual.conv1.weight.dtype
def encode_image(self, pixel_values):
"""Encode images
Parameters
----------
pixel_values: torch.Tensor
Processed images from RuCLIPProcessor class
Returns
-------
image_latents : torch.Tensor
Image embeddings
"""
return self.visual(pixel_values.type(self.dtype))
def encode_text(self, input_ids):
"""Encode texts
Parameters
----------
input_ids: torch.Tensor
Tokenized texts from RuCLIPProcessor class
Returns
-------
text_latents : torch.Tensor
Text embeddings
"""
x = self.token_embedding(input_ids).type(self.dtype) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding.type(self.dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x).type(self.dtype)
# x.shape = [batch_size, n_ctx, transformer.width]
x = x[torch.arange(x.shape[0]), torch.where(input_ids == self.eos_id)[1]] @ self.text_projection
return x
def forward(self, input_ids, pixel_values):
image_features = self.encode_image(pixel_values)
text_features = self.encode_text(input_ids)
# normalize features
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
# cosine similarity as logits
logit_scale = self.logit_scale.exp()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logits_per_image.t()
return logits_per_image, logits_per_text
@classmethod
def from_pretrained(cls, folder):
"""Load model from folder"""
config = json.load(open(os.path.join(folder, 'config.json')))
model = cls(
embed_dim=config['embed_dim'],
image_resolution=config['image_resolution'],
vision_layers=config['vision_layers'],
vision_width=config['vision_width'],
vision_patch_size=config['vision_patch_size'],
context_length=config['context_length'],
vocab_size=config['vocab_size'],
transformer_width=config['transformer_width'],
transformer_heads=config['transformer_heads'],
transformer_layers=config['transformer_layers'],
)
checkpoint = torch.load(os.path.join(folder, 'pytorch_model.bin'), map_location='cpu')
model.load_state_dict(checkpoint)
return model
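
# Example usage (a minimal sketch, not part of the original file). The checkpoint
# folder name is an assumption, and `input_ids`/`pixel_values` are expected to come
# from the accompanying RuCLIPProcessor:
#
#   model = CLIP.from_pretrained('ruclip-vit-base-patch32-384')
#   model.eval()
#   with torch.no_grad():
#       logits_per_image, logits_per_text = model(input_ids, pixel_values)
#       probs = logits_per_image.softmax(dim=-1)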
| 2.46875 | 2 |
textattack/models/summarization/t5_summarization.py | yuchenlin/TextAttack | 1 | 12794926 | <filename>textattack/models/summarization/t5_summarization.py
from textattack.models.helpers import T5ForTextToText
class T5Summarization(T5ForTextToText):
""" A T5 model trained to summarize English text. Trained on the CNN/Daily
Mail summarization dataset.
For more information, please see the T5 paper, "Exploring the Limits of
Transfer Learning with a Unified Text-to-Text Transformer".
Appendix D contains information about the various tasks supported
by T5.
"""
def __init__(self, **kwargs):
super().__init__('summarization', **kwargs) | 2.34375 | 2 |
pcshell/shell.py | xSlither/pretty-click-shell | 2 | 12794927 | from typing import Callable
import sys
import os
from copy import deepcopy
from io import StringIO
import click
from click.core import MultiCommand, _check_multicommand
from colorama import Style
from . import globals as globs
from . import _colors as colors
from .chars import IGNORE_LINE
from .pretty import PrettyGroup, PrettyCommand
from .multicommand import CUSTOM_COMMAND_PROPS, CustomCommandPropsParser
from .utils import HasKey
from ._cmd_factories import ClickCmdShell
class Shell(PrettyGroup):
"""A :class:`Click Group` implementation with an (optionally) attatched shell.
Otherwise functions as a :class:`PrettyGroup`
Constructor Kwargs:
- :param:`isShell`: Attach a new shell instance?
- :param:`prompt`: Prompt Text
- :param:`intro`: Shell Intro Text
- :param:`hist_file`: Full Path & Filename to History File
- :param:`on_finished`: Callback function when shell closes
- :param:`add_command_callback`: Callback for extending command kwargs. See :func:`multicommand.CustomCommandPropsParser()`
- :param:`before_start`: os.system() command to execute prior to starting the shell
- :param:`readline`: If True, use pyreadline instead of any prompt_toolkit features
- :param:`complete_while_typing`: If True, prompt_toolkit suggestions will be live (on a separate thread)
- :param:`fuzzy_completion`: If True, use fuzzy completion for prompt_toolkit suggestions
- :param:`mouse_support`: If True, enables mouse support for prompt_toolkit
- :param:`lexer`: If True, enables the prompt_toolkit lexer
"""
def __init__(self,
isShell=False,
prompt=None,
intro=None,
hist_file=None,
on_finished=None,
add_command_callback: Callable[[ClickCmdShell, object, str], None] = None,
before_start=None,
readline=None,
complete_while_typing=True,
fuzzy_completion=True,
mouse_support=False,
lexer=True,
**attrs):
# Allows this class to be used as a subclass without a new shell instance attached
self.isShell = isShell
if isShell:
attrs['invoke_without_command'] = True
super(Shell, self).__init__(**attrs)
if not globs.__MASTER_SHELL__:
globs.__MASTER_SHELL__ = self.name
def on_shell_closed(ctx):
if len(globs.__SHELL_PATH__):
try: globs.__SHELL_PATH__.remove(self.name)
except: pass
if on_finished and callable(on_finished): on_finished(ctx)
def on_shell_start():
if before_start and callable(before_start): before_start()
if not self.name == globs.__MASTER_SHELL__:
globs.__SHELL_PATH__.append(self.name)
# Create the shell
self.shell = ClickCmdShell(hist_file=hist_file, on_finished=on_shell_closed,
add_command_callback=add_command_callback, before_start=on_shell_start, readline=readline,
complete_while_typing=complete_while_typing, fuzzy_completion=fuzzy_completion, mouse_support=mouse_support,
lexer=lexer
)
if prompt:
self.shell.prompt = prompt
self.shell.intro = intro
else:
super(Shell, self).__init__(**attrs)
def add_command(self, cmd: click.Command, name=None):
name = name or cmd.name
if name is None: raise TypeError("Command has no name.")
_check_multicommand(self, name, cmd, register=True)
if type(name) is str:
self.commands[name] = cmd
else:
for _name_ in name:
self.commands[_name_] = cmd
if self.isShell: self.shell.add_command(cmd, name)
def invoke(self, ctx: click.Context):
if self.isShell:
ret = super(Shell, self).invoke(ctx)
if not ctx.protected_args and not ctx.invoked_subcommand:
ctx.info_name = None
self.shell.ctx = ctx
return self.shell.cmdloop()
return ret
else:
return MultiCommand.invoke(self, ctx)
def new_shell(self, cls=None, **kwargs):
"""A shortcut decorator that instantiates a new Shell instance and attaches it to the existing Command
"""
from .pretty import prettyGroup
def decorator(f):
cmd = prettyGroup(cls=Shell if not cls else cls, isShell=True, **kwargs)(f)
self.add_command(cmd)
return cmd
return decorator
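
# Minimal usage sketch (the group/command names and prompt text are hypothetical,
# not taken from this project):
#
#   @click.group(cls=Shell, isShell=True, prompt='app> ', intro='Welcome to the app shell')
#   def cli():
#       """Root group that drops into an interactive shell when invoked bare."""
#
#   @cli.command()
#   def status():
#       click.echo('ok')
#
#   if __name__ == '__main__':
#       cli()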
class MultiCommandShell(Shell):
""" A :class:`Click Group` implementation with an (optionally) attached shell, that also:
- Allows defining commands with multiple aliases
        - Allows for additional command options (hidden, exit, etc.)
- Implements pre-defined base shell commands
- Implements all pretty formatting features
If not attached to a shell, functions as a :class:`PrettyGroup` with the non-shell-related features listed above
Constructor Kwargs:
- :param:`isShell`: Attach a new shell instance?
- :param:`prompt`: Prompt Text
- :param:`intro`: Shell Intro Text
- :param:`hist_file`: Full Path & Filename to History File
- :param:`on_finished`: Callback function when shell closes
- :param:`add_command_callback`: Callback for extending command kwargs. See :func:`multicommand.CustomCommandPropsParser()`
- :param:`before_start`: os.system() command to execute prior to starting the shell
- :param:`readline`: If True, use pyreadline instead of any prompt_toolkit features
- :param:`complete_while_typing`: If True, prompt_toolkit suggestions will be live (on a separate thread)
- :param:`fuzzy_completion`: If True, use fuzzy completion for prompt_toolkit suggestions
- :param:`mouse_support`: If True, enables mouse support for prompt_toolkit
- :param:`lexer`: If True, enables the prompt_toolkit lexer
"""
def __init__(self, isShell=None, **attrs):
self.isShell = isShell
attrs['isShell'] = isShell
if self.isShell:
if not HasKey('add_command_callback', attrs) or not attrs['add_command_callback']:
attrs['add_command_callback'] = CustomCommandPropsParser
super(MultiCommandShell, self).__init__(**attrs)
if self.isShell: BaseShellCommands.addBasics(self)
if globs.__IsShell__ and self.isShell:
if globs.__MASTER_SHELL__ == self.name: BaseShellCommands.addMasters(self)
BaseShellCommands.addAll(self)
@staticmethod
def __strip_invalidKeys(kwargs):
for _kwarg_ in CUSTOM_COMMAND_PROPS:
if HasKey(_kwarg_, kwargs):
kwargs.pop(_kwarg_, None)
@staticmethod
def __assign_invalidKeys(kwargs, cmd):
for _kwarg_ in CUSTOM_COMMAND_PROPS:
if HasKey(_kwarg_, kwargs):
setattr(cmd, _kwarg_, kwargs[_kwarg_])
def group(self, *args, **kwargs):
"""A shortcut decorator for declaring and attaching a group to
the group. This takes the same arguments as :func:`group` but
immediately registers the created command with this instance by
calling into :meth:`add_command`.
"""
from .pretty import prettyGroup
def decorator(f):
cmd = prettyGroup(*args, **kwargs)(f)
cmd.alias = False
self.add_command(cmd)
return cmd
return decorator
def new_shell(self, cls=None, **kwargs):
"""A shortcut decorator that instantiates a new Shell instance and attaches it to the existing Command
"""
from .pretty import prettyGroup
def decorator(f):
cmd = prettyGroup(cls=MultiCommandShell if not cls else cls, isShell=True, **kwargs)(f)
cmd.alias = False
self.add_command(cmd)
return cmd
return decorator
def command(self, *args, **kwargs):
"""Behaves the same as `click.Group.command()` except if passed
a list of names, all after the first will be aliases for the first.
Also allows for use of custom kwargs defined in multicommand.py.
"""
def decorator(f):
old_kwargs = kwargs.copy()
self.__strip_invalidKeys(kwargs)
from .pretty import prettyCommand
tmpCommand = None
origHelpTxt = None
aliases = []
try:
if isinstance(args[0], list):
_args = [args[0][0]] + list(args[1:])
for alias in args[0][1:]:
if tmpCommand is None:
cmd: PrettyCommand = prettyCommand(alias, None, **kwargs)(f)
origHelpTxt = cmd.help
cmd.alias = True
cmd.aliases = []
cmd.help = "(Alias for '{c}') {h}".format(c = _args[0], h = cmd.help)
cmd.short_help = "Alias for '{}'".format(_args[0])
cmd.true_hidden = cmd.hidden
cmd.hidden = True
self.__assign_invalidKeys(old_kwargs, cmd)
super(MultiCommandShell, self).add_command(cmd)
tmpCommand = cmd
else:
cmd = deepcopy(tmpCommand)
cmd.alias = True
cmd.aliases = []
cmd.name = alias
cmd.help = "(Alias for '{c}') {h}".format(c = _args[0], h = origHelpTxt)
cmd.short_help = "Alias for '{}'".format(_args[0])
cmd.hidden = True
self.__assign_invalidKeys(old_kwargs, cmd)
super(MultiCommandShell, self).add_command(cmd)
aliases.append(alias)
else:
_args = args
if tmpCommand is None:
cmd: PrettyCommand = prettyCommand(*_args, **kwargs)(f)
cmd.alias = False
cmd.aliases = aliases
self.__assign_invalidKeys(old_kwargs, cmd)
super(MultiCommandShell, self).add_command(cmd)
return cmd
else:
cmd = deepcopy(tmpCommand)
cmd.alias = False
cmd.aliases = aliases
cmd.name = _args[0]
cmd.help = origHelpTxt
cmd.short_help = ''
cmd.hidden = cmd.true_hidden
self.__assign_invalidKeys(old_kwargs, cmd)
super(MultiCommandShell, self).add_command(cmd)
return cmd
except:
cmd: PrettyCommand = prettyCommand(*args, **kwargs)(f)
cmd.alias = False
cmd.aliases = aliases
self.__assign_invalidKeys(old_kwargs, cmd)
super(MultiCommandShell, self).add_command(cmd)
return cmd
return decorator
class BaseShellCommands:
@staticmethod
def addMasters(shell: MultiCommandShell):
@shell.command(globs.MASTERSHELL_COMMAND_ALIAS_RESTART, hidden=True)
def __restart_shell__():
"""Restarts the application"""
# Spawns a new shell within the current session by launching the python app again
os.system('python "%s"' % sys.argv[0].replace('\\', '/'))
# Exits the current shell once it's child has closed
globs.__IS_REPEAT__ = True
globs.__IS_EXITING__ = True
if shell.shell.readline:
globs.__PREV_STDIN__ = sys.stdin
sys.stdin = StringIO(globs.__LAST_COMMAND__)
else:
shell.shell._pipe_input.send_text('exit\r')
click.echo(IGNORE_LINE)
@staticmethod
def addBasics(shell: MultiCommandShell):
@shell.command(globs.BASIC_COMMAND_ALIAS_HELP, hidden=True)
def __get_help__():
with click.Context(shell) as ctx:
click.echo(shell.get_help(ctx))
@shell.command(globs.BASIC_COMMAND_ALIAS_CLEARHISTORY, hidden=True)
def __clear_history__():
"""Clears the CLI history for this terminal for the current user"""
result = shell.shell.clear_history()
print()
click.echo('\t{}{} {}{}{}'.format(
colors.SHELL_HISTORY_CLEARED_STYLE, 'History cleared' if result else 'Clear History',
colors.SHELL_HISTORY_CLEARED_TRUE if result else colors.SHELL_HISTORY_CLEARED_FALSE,
'successfully' if result else 'failed',
Style.RESET_ALL
))
@staticmethod
def addAll(shell: MultiCommandShell):
@shell.command(globs.SHELL_COMMAND_ALIAS_CLEAR, hidden=True)
def cls():
"""Clears the Terminal"""
click.clear()
@shell.command(globs.SHELL_COMMAND_ALIAS_QUIT, hidden=True, exit=True)
def _exit_():
"""Exits the Shell"""
pass
@shell.command(globs.SHELL_COMMAND_ALIAS_EXIT, exit=True)
def __exit__():
"""Exits the Shell"""
pass
@shell.command(globs.SHELL_COMMAND_ALIAS_REPEAT, hidden=True)
def __repeat_command__():
"""Repeats the last valid command with all previous parameters"""
if globs.__LAST_COMMAND__:
globs.__IS_REPEAT__ = True
if shell.shell.readline:
globs.__PREV_STDIN__ = sys.stdin
sys.stdin = StringIO(globs.__LAST_COMMAND__) | 2.125 | 2 |
tests/unit/test_cwl_interaction.py | Phil-Ocone/datacoco-cloud | 1 | 12794928 | <gh_stars>1-10
"""
Tests for the CloudWatch Logs interaction wrapper
"""
import os
import unittest
from unittest.mock import MagicMock
from datacoco_cloud import UNIT_TEST_KEY
from datacoco_cloud.cwl_interaction import CWLInteraction
class TestCWLInteraction(unittest.TestCase):
def setUp(self):
os.environ[UNIT_TEST_KEY] = "True"
self.testCls = CWLInteraction(
region="", aws_secret_key="", aws_access_key=""
)
def test_parse_and_print_events(self):
self.testCls.client = MagicMock()
self.testCls.parse_and_print_events(events="")
self.assertTrue(True) # Assert that this line is reached without error
def test_get_log_events(self):
self.testCls.client = MagicMock()
self.testCls.client.get_log_events.return_value = {
"ResponseMetadata": {"HTTPStatusCode": 200},
"events": [],
}
self.testCls.get_log_events(log_group="", log_stream="")
self.assertTrue(True) # Assert that this line is reached without error
def test_get_log_events_http_error(self):
self.testCls.client = MagicMock()
self.testCls.client.get_log_events.return_value = {
"ResponseMetadata": {"HTTPStatusCode": 500},
"events": [],
}
try:
self.testCls.get_log_events(log_group="", log_stream="")
self.fail("There should be an error")
except ValueError as v:
self.assertTrue(
str(v) is not None
) # Assert value error did happen
| 2.3125 | 2 |
sendEmail.pyw | lucas-pixel/SendEmail | 0 | 12794929 | <reponame>lucas-pixel/SendEmail
import smtplib
import PySimpleGUI as sg
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class Interface:
def __init__(self):
sg.theme('DarkPurple4')
layout = [
[sg.Text('Servidor ', size=(8,1)),
sg.Input(key='_using_server_', size=(21, 1))],
[sg.Text('De ', size=(8,1)),
sg.Input(key='_from_', size=(21,1))],
[sg.Text('Para ', size=(8,1)),
sg.Input(key='_to_', size=(21,1))],
[sg.Text('Assunto ', size=(8,1)),
sg.Input(key='_matter_', size=(21,1))],
[sg.Text('Mensagem ', size=(8,1)),
sg.Input(key='_message_', size=(21,2))],
[sg.Text('Logs')],
[sg.Output(size=(31, 5))],
[sg.Button('Enviar')]
]
self.windowMain = sg.Window('SendEmail 1.0', layout, icon='Blackvariant-Shadow135-System-Mail.ico')
def Home(self):
while True:
event, values = self.windowMain.read()
if event == sg.WINDOW_CLOSED:
break
if event == 'Enviar':
self.configs(values['_using_server_'], values['_from_'], values['_to_'], values['_matter_'], values['_message_'])
def configs(self, _using_server_, _from_, _to_, _matter_, _message_):
try:
Select = _using_server_
s = 'smtp.{}.com'.format(Select)
server = smtplib.SMTP(s, 587)
server.starttls()
print('Configurando o server...')
#email_from = input(str('De: '))
print('Fazendo o login, aguarde...')
server.login(_from_, open('senha.txt').read().strip())
#email_to = input(str('Para: '))
#subject = input(str('Assunto: '))
msg = MIMEMultipart()
msg['From'] = _from_
msg['To'] = _to_
msg['Subject'] = _matter_
#message = input(str('Mensagem: '))
msg.attach(MIMEText(_message_, 'plain'))
text = msg.as_string()
print('Enviando a mensagem...')
#SET
server.sendmail(_from_, _to_, text)
server.quit()
print('Sucesso ao enviar o email')
except:
print('Ops, ocorreu um erro :(')
debg = Interface()
debg.Home() | 2.546875 | 3 |
src/tasks/lesson03/task301.py | vadimkondratovich/asd | 0 | 12794930 | from django.http import HttpRequest, HttpResponse
from main.util import render_template
TEMPLATE = "tasks/lesson03/task301.html"
def handler(request: HttpRequest) -> HttpResponse:
name = request.GET.get("name")
context = {
"input_name": name,
"greeting_name": name or "anonymous",
}
document = render_template(TEMPLATE, context)
response = HttpResponse(content=document)
return response
if __name__ == '__main__':
x = render_template(TEMPLATE, {'input_name': 1, 'greeting_name': 2})
print(x) | 2.671875 | 3 |
configs/fisheye_pig/yolov3_d53_mstrain-608_273e_coco_2passage_pig.py | leemengwei/my_mmdetection | 1 | 12794931 | <reponame>leemengwei/my_mmdetection<filename>configs/fisheye_pig/yolov3_d53_mstrain-608_273e_coco_2passage_pig.py<gh_stars>1-10
_base_ = '../yolo/yolov3_d53_mstrain-608_273e_coco.py'
# model settings
model = dict(
bbox_head=dict(
num_classes=2),
train_cfg=dict(
nms_pre=1000,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=200),
test_cfg=dict(
nms_pre=1000,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=200),
)
#load_from='./checkpoints/yolov3_d53_mstrain-608_273e_coco-139f5633.pth'
# dataset settings
dataset_type = 'CocoDataset'
train_dir_1 = "../dataset/1猪舍-汇研+剪裁ok_cat_and_dogs/train"
val_dir_1 = "../dataset/1猪舍-汇研+剪裁ok_cat_and_dogs/val"
train_dir_2 = "../dataset/2出猪通道-泉州safe/train"
val_dir_2 = "../dataset/2出猪通道-泉州safe/val"
train_dir_3 = "../dataset/3出猪台-泉州+剪裁safe/train"
val_dir_3 = "../dataset/3出猪台-泉州+剪裁safe/val"
train_dir_3 = "../dataset/4称重台-泉州safe/train"
val_dir_3 = "../dataset/4称重台-泉州safe/val"
img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
#dict(type='PhotoMetricDistortion'),
#dict(type='MinIoURandomCrop', min_ious=(0.8, 0.9, 1.0), min_crop_size=0.7),
#dict(type='Resize', img_scale=[(1366, 768), (990, 540), (1115, 608)], keep_ratio=True, multiscale_mode='value'),
dict(type='Resize', img_scale=[(608,608)], keep_ratio=True, multiscale_mode='value'),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(608,608),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
data = dict(
samples_per_gpu=1,
workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file=[train_dir_1 + '/annotation_coco.json', train_dir_2 + '/annotation_coco.json'],
img_prefix=[train_dir_1, train_dir_2],
pipeline=train_pipeline,
classes=('pig', 'person')
),
val=dict(
type=dataset_type,
ann_file=[val_dir_1 + '/annotation_coco.json', val_dir_2 + '/annotation_coco.json'],
img_prefix=[val_dir_1, val_dir_2],
pipeline=test_pipeline,
classes=('pig', 'person')
),
test=dict(
type=dataset_type,
ann_file=[val_dir_1 + '/annotation_coco.json', val_dir_2 + '/annotation_coco.json'],
img_prefix=[val_dir_1, val_dir_2],
pipeline=test_pipeline,
classes=('pig', 'person')
)
)
# optimizer
optimizer = dict(type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
#_delete_=True,
#policy='CosineAnnealing',
policy='step',
warmup='linear',
warmup_iters=2000, # same as burn-in in darknet
#warmup_ratio=1e-6,
warmup_ratio=0.1,
step=[150, 180],
#min_lr_ratio=1e-10
)
runner = dict(type='EpochBasedRunner', max_epochs=200)
checkpoint_config = dict(interval=10)
log_config = dict(
interval=10)
workflow = [('train', 1), ('val', 1)] | 1.453125 | 1 |
application/tests/test_camera.py | Sapfir0/web-premier-eye | 0 | 12794932 | import unittest
from config import Config as cfg
import requests
class Camera(unittest.TestCase):
routeUrl = cfg.serverUrl + "gallery/camera"
camerasList = [1,2,3]
def test_IsAllCamerasAvailable(self):
for camera in self.camerasList:
r = requests.get(f"{self.routeUrl}/{camera}")
self.assertEqual(200, r.status_code)
if __name__ == '__main__':
unittest.main()
| 3.109375 | 3 |
app/core/migrations/0004_auto_20201128_2021.py | vivek92-tech/SocialSphere-Insta-clone | 1 | 12794933 | <reponame>vivek92-tech/SocialSphere-Insta-clone
# Generated by Django 3.1.3 on 2020-11-28 20:21
import core.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
dependencies = [
('core', '0003_user_profile_pic'),
]
operations = [
migrations.AddField(
model_name='user',
name='date_joined',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='user',
name='followers',
field=models.ManyToManyField(blank=True, related_name='user_followers', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='user',
name='following',
field=models.ManyToManyField(blank=True, related_name='user_following', to=settings.AUTH_USER_MODEL),
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('photo', models.ImageField(editable=False, upload_to=core.models.image_file_path)),
('text', models.TextField(blank=True, max_length=500)),
('location', models.CharField(blank=True, max_length=30)),
('posted_on', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_posts', to=settings.AUTH_USER_MODEL)),
('likes', models.ManyToManyField(blank=True, related_name='likers', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-posted_on'],
},
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.CharField(max_length=100)),
('posted_on', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_comments', to=settings.AUTH_USER_MODEL)),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_comments', to='core.post')),
],
options={
'ordering': ['-posted_on'],
},
),
]
| 1.789063 | 2 |
models/train_classifier.py | Yasir-Ghunaim/disaster_response_pipeline | 0 | 12794934 | <gh_stars>0
import sys
import nltk
nltk.download(['punkt', 'stopwords', 'wordnet', 'averaged_perceptron_tagger'])
import re
import pandas as pd
import numpy as np
import pickle
from sqlalchemy import create_engine
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.multioutput import MultiOutputClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
def load_data(database_filepath):
"""Load database and split into features and outputs"""
engine = create_engine('sqlite:///' + database_filepath)
df = pd.read_sql_table("DisasterResponse", engine)
X = df["message"]
Y = df.drop(columns=["id", "message", "original", "genre"])
category_names = Y.columns
return X, Y, category_names
def tokenize(text):
"""Clean text, apply word tokenization, remove stop words and reduce tokens into their roots"""
# Tokenize text
text = re.sub(r"[^a-z0-9]", " ", text.lower())
tokens = word_tokenize(text)
# Remove stop words
tokens = [w for w in tokens if w not in stopwords.words("english")]
# Reduce tokens into their roots
stemer = PorterStemmer()
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = stemer.stem(tok).strip()
clean_tok = lemmatizer.lemmatize(clean_tok)
clean_tokens.append(clean_tok)
return clean_tokens
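
# Illustrative example (exact tokens depend on the installed NLTK data):
#   tokenize("We are flooded, please send help!")
#   -> text is lower-cased, non-alphanumeric characters become spaces, stop words are
#      removed, and the rest is stemmed/lemmatized, e.g. roughly ['flood', 'pleas', 'send', 'help']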
def build_model():
""" Return a mechine learning pipeline optimized by GridSearch to classify multi-output dataset """
pipeline = Pipeline([
('vect', CountVectorizer(tokenizer=tokenize)),
('tfidf', TfidfTransformer()),
('clf', MultiOutputClassifier(DecisionTreeClassifier(random_state=42)))
])
parameters = {
'vect__ngram_range': ((1, 1), (1, 2)),
'clf__estimator__max_depth': [3, 6]
}
cv = GridSearchCV(pipeline, param_grid=parameters, verbose=50)
return cv
def evaluate_model(model, X_test, Y_test, category_names):
""" Print a classification report containing recall, precision and f1-score for each predicted category """
Y_pred = model.predict(X_test)
for i in list(range(Y_pred.shape[1])):
print("Report for column: \"" + category_names[i] + "\"")
print(classification_report(Y_test.values[:,i], Y_pred[:,i]))
print("\n\n")
def save_model(model, model_filepath):
""" Save the model as a pickle file """
pkl_filename = "disaster_response_model.pkl"
with open(pkl_filename, 'wb') as file:
pickle.dump(model, file)
def main():
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
main() | 2.671875 | 3 |
controller/server.py | uniaim-event-team/watch-link | 2 | 12794935 | <reponame>uniaim-event-team/watch-link
from flask import Blueprint, render_template
app = Blueprint(__name__, "server")
@app.route('/')
def route():
return render_template('top.html')
| 1.78125 | 2 |
tests/test_ges.py | stevenengler/witness-encryption | 0 | 12794936 | <reponame>stevenengler/witness-encryption
from trivial_ges import TrivialGES
import sys
# Testing
l = int(sys.argv[1])
n = int(sys.argv[2])
print("Instantiating with lambda = ", l, " n = ", n)
x = TrivialGES(l, n)
print("n: ", x.get_n())
print("lambda: ", x.get_lambda())
e = x.sample()
print("Sample 1:", e)
f = print("Sample 2: ", x.sample())
f = print("Sample 3: ", x.sample())
f = print("Sample 4: ", x.sample())
e = x.encode(1, e)
print("Encoding first sample at level 1", e)
f = x.copy_encoding(e)
print("Copying encoding: ", f)
x.rerandomize(2, e)
print("Rerandomizing first encoding at level 2: ", e)
print("Checking copy has not changed: ", f)
print("Multiplying both and returning new: ", x.multiply(e, f))
print("Checking encodings haven't changed: ")
print("\t", e)
print("\t", f)
print("Multiplying both and storing in new: ", x.multiply(e, f, e))
print("Checking only first encoding changes: ")
print("\t", e)
print("\t", f)
print("Length of key from first:", len(x.extract(e)))
print("Length of key from second:", len(x.extract(f)))
print("First two keys are equal:", str(x.extract(e)) == str(x.extract(f)))
print("First encoding key and key of copy are equal:",
str(x.extract(e)) == str(x.extract(x.copy_encoding(e))))
| 3.21875 | 3 |
zipline/utils/calendars/calendar_helpers.py | NunoEdgarGFlowHub/zipline | 0 | 12794937 | <gh_stars>0
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import bisect
from zipline.errors import NoFurtherDataError
def normalize_date(date):
date = pd.Timestamp(date, tz='UTC')
return pd.tseries.tools.normalize_date(date)
def delta_from_time(t):
"""
Convert a datetime.time into a timedelta.
"""
return pd.Timedelta(
hours=t.hour,
minutes=t.minute,
seconds=t.second,
)
def _get_index(dt, all_trading_days):
"""
Return the index of the given @dt, or the index of the preceding
trading day if the given dt is not in the trading calendar.
"""
ndt = normalize_date(dt)
if ndt in all_trading_days:
return all_trading_days.searchsorted(ndt)
else:
return all_trading_days.searchsorted(ndt) - 1
# The following methods are intended to be inserted in both the
# ExchangeCalendar and TradingSchedule classes.
# These methods live in the helpers module to avoid code duplication.
def next_scheduled_day(date, last_trading_day, is_scheduled_day_hook):
"""
Returns the next session date in the calendar after the provided date.
Parameters
----------
date : Timestamp
The date whose following date is needed.
Returns
-------
Timestamp
The next scheduled date after the provided date.
"""
dt = normalize_date(date)
delta = pd.Timedelta(days=1)
while dt <= last_trading_day:
dt += delta
if is_scheduled_day_hook(dt):
return dt
raise NoFurtherDataError(msg='Cannot find next day after %s' % date)
def previous_scheduled_day(date, first_trading_day, is_scheduled_day_hook):
"""
Returns the previous session date in the calendar before the provided date.
Parameters
----------
date : Timestamp
The date whose previous date is needed.
Returns
-------
Timestamp
The previous scheduled date before the provided date.
"""
dt = normalize_date(date)
delta = pd.Timedelta(days=-1)
while first_trading_day < dt:
dt += delta
if is_scheduled_day_hook(dt):
return dt
raise NoFurtherDataError(msg='Cannot find previous day before %s' % date)
def next_open_and_close(date, open_and_close_hook,
next_scheduled_day_hook):
return open_and_close_hook(next_scheduled_day_hook(date))
def previous_open_and_close(date, open_and_close_hook,
previous_scheduled_day_hook):
return open_and_close_hook(previous_scheduled_day_hook(date))
def scheduled_day_distance(first_date, second_date, all_days):
first_date = normalize_date(first_date)
second_date = normalize_date(second_date)
i = bisect.bisect_left(all_days, first_date)
if i == len(all_days): # nothing found
return None
j = bisect.bisect_left(all_days, second_date)
if j == len(all_days):
return None
    distance = j - i
assert distance >= 0
return distance
def minutes_for_day(day, open_and_close_hook):
start, end = open_and_close_hook(day)
return pd.date_range(start, end, freq='T')
def days_in_range(start, end, all_days):
"""
Get all execution days between start and end,
inclusive.
"""
start_date = normalize_date(start)
end_date = normalize_date(end)
return all_days[all_days.slice_indexer(start_date, end_date)]
def minutes_for_days_in_range(start, end, days_in_range_hook,
minutes_for_day_hook):
"""
Get all execution minutes for the days between start and end,
inclusive.
"""
start_date = normalize_date(start)
end_date = normalize_date(end)
all_minutes = []
for day in days_in_range_hook(start_date, end_date):
day_minutes = minutes_for_day_hook(day)
all_minutes.append(day_minutes)
# Concatenate all minutes and truncate minutes before start/after end.
return pd.DatetimeIndex(np.concatenate(all_minutes), copy=False, tz='UTC')
def add_scheduled_days(n, date, next_scheduled_day_hook,
previous_scheduled_day_hook, all_trading_days):
"""
Adds n trading days to date. If this would fall outside of the
trading calendar, a NoFurtherDataError is raised.
Parameters
----------
n : int
The number of days to add to date, this can be positive or
negative.
date : datetime
The date to add to.
Returns
-------
datetime
n trading days added to date.
"""
if n == 1:
return next_scheduled_day_hook(date)
if n == -1:
return previous_scheduled_day_hook(date)
idx = _get_index(date, all_trading_days) + n
if idx < 0 or idx >= len(all_trading_days):
raise NoFurtherDataError(
msg='Cannot add %d days to %s' % (n, date)
)
return all_trading_days[idx]
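
# Illustrative sketch (the `calendar` object is hypothetical; it simply bundles the
# hook functions and trading-day index these helpers expect):
#
#   # Moving 5 scheduled days forward from a Friday skips the weekend.
#   add_scheduled_days(5, pd.Timestamp('2016-01-08', tz='UTC'),
#                      calendar.next_scheduled_day,
#                      calendar.previous_scheduled_day,
#                      calendar.all_trading_days)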
def all_scheduled_minutes(all_days, minutes_for_days_in_range_hook):
first_day = all_days[0]
last_day = all_days[-1]
return minutes_for_days_in_range_hook(first_day, last_day)
def next_scheduled_minute(start, is_scheduled_day_hook, open_and_close_hook,
next_open_and_close_hook):
"""
Get the next market minute after @start. This is either the immediate
next minute, the open of the same day if @start is before the market
open on a trading day, or the open of the next market day after @start.
"""
if is_scheduled_day_hook(start):
market_open, market_close = open_and_close_hook(start)
# If start before market open on a trading day, return market open.
if start < market_open:
return market_open
# If start is during trading hours, then get the next minute.
elif start < market_close:
return start + pd.Timedelta(minutes=1)
# If start is not in a trading day, or is after the market close
# then return the open of the *next* trading day.
return next_open_and_close_hook(start)[0]
def previous_scheduled_minute(start, is_scheduled_day_hook,
open_and_close_hook,
previous_open_and_close_hook):
"""
Get the next market minute before @start. This is either the immediate
previous minute, the close of the same day if @start is after the close
on a trading day, or the close of the market day before @start.
"""
if is_scheduled_day_hook(start):
market_open, market_close = open_and_close_hook(start)
# If start after the market close, return market close.
if start > market_close:
return market_close
# If start is during trading hours, then get previous minute.
if start > market_open:
return start - pd.Timedelta(minutes=1)
# If start is not a trading day, or is before the market open
# then return the close of the *previous* trading day.
return previous_open_and_close_hook(start)[1]
| 2.5 | 2 |
Lab1/tt.py | Machinesaac/6.009 | 0 | 12794938 | a = [(255,0,0)]
print(a[0][0]) | 2.46875 | 2 |
objetto/reactions.py | brunonicko/objetto | 8 | 12794939 | # -*- coding: utf-8 -*-
"""Reactions."""
from ._reactions import CustomReaction, Limit, LimitChildren, UniqueAttributes, reaction
__all__ = [
"reaction",
"CustomReaction",
"UniqueAttributes",
"LimitChildren",
"Limit",
]
| 1.171875 | 1 |
backend/app/crud/crud_debit.py | ralphribeiro/debito_automatico | 0 | 12794940 | from typing import Any, Dict, Optional, Union, List
from fastapi.encoders import jsonable_encoder
from sqlalchemy.orm import Session
from app.crud.base import CRUDBase
from app.models.debit import Debit
from app.schemas.debit import DebitCreate, DebitUpdate
class CRUDDebit(CRUDBase[Debit, DebitCreate, DebitUpdate]):
def create_with_owner(self, db: Session, *, obj_in: DebitCreate,
owner_id: int) -> Debit:
obj_in_data = jsonable_encoder(obj_in)
db_obj = self.model(**obj_in_data, owner_id=owner_id)
db.add(db_obj)
db.commit()
db.refresh(db_obj)
return db_obj
def get_by_owner(self, db: Session, *, owner_id: int) -> Optional[Debit]:
return db.query(Debit).filter(Debit.owner_id == owner_id).first()
def update_status(self, db: Session, *,
db_obj: Debit,
obj_in: Union[DebitUpdate, Dict[str, Any]]) -> Debit:
if isinstance(obj_in, dict):
update_data = obj_in
else:
update_data = obj_in.dict(exclude_unset=True)
        return super().update(db, db_obj=db_obj, obj_in=update_data)
# def get_multi(self, db: Session, *,
# skip: int = 0, limit: int = 100) -> List[Dict]:
# return (db.query(self.model).offset(skip).limit(limit).all())
debit = CRUDDebit(Debit)
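
# Minimal usage sketch (assumes an open SQLAlchemy `Session`; the field name passed
# to update_status is an assumption, not taken from the schema definitions):
#
#   new_debit = debit.create_with_owner(db, obj_in=DebitCreate(...), owner_id=1)
#   existing = debit.get_by_owner(db, owner_id=1)
#   debit.update_status(db, db_obj=existing, obj_in={"status": "processed"})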
| 2.21875 | 2 |
Products/mcdutils/ftests/functest.py | zms-publishing/Products.mcdutils | 0 | 12794941 | <reponame>zms-publishing/Products.mcdutils
# Run this test from 'zopectl run'
# Requires that we are running a memcached on localhost, port 11211
import transaction
from Products.mcdutils.proxy import MemCacheProxy
proxy = MemCacheProxy(['localhost:11211'])
session = proxy.new_or_existing('foobar')
print(session)
session['abc'] = 123
print(session)
transaction.commit()
proxy2 = MemCacheProxy(['localhost:11211'])
print(proxy2.get('foobar'))
| 1.726563 | 2 |
scripts/deepimpact/brute-force.py | d1shs0ap/pyserini | 451 | 12794942 | import argparse
import json
import os
from scipy.sparse import csr_matrix
from tqdm import tqdm
import numpy as np
from multiprocessing import Pool, Manager
def token_dict_to_sparse_vector(token_dict, token2id):
matrix_row, matrix_col, matrix_data = [], [], []
tokens = token_dict.keys()
col = []
data = []
for tok in tokens:
if tok in token2id:
col.append(token2id[tok])
data.append(token_dict[tok])
matrix_row.extend([0] * len(col))
matrix_col.extend(col)
matrix_data.extend(data)
vector = csr_matrix((matrix_data, (matrix_row, matrix_col)), shape=(1, len(token2id)))
return vector
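
# Illustrative example: with token2id = {'rain': 0, 'storm': 1, 'sun': 2},
#   token_dict_to_sparse_vector({'storm': 2.5, 'hail': 1.0}, token2id)
# returns a 1x3 csr_matrix whose only non-zero entry is 2.5 in column 1;
# 'hail' is dropped because it is not in the vocabulary.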
parser = argparse.ArgumentParser()
parser.add_argument('--corpus', type=str, help='path to corpus with vectors', required=True)
parser.add_argument('--topics', type=str, help='path to topics with vectors', required=True)
parser.add_argument('--tokens', type=str, help='path to token list', required=True)
parser.add_argument('--run', type=str, help='path to run file', required=True)
parser.add_argument('--threads', type=int, help='threads for hnsw', required=False, default=12)
args = parser.parse_args()
token2id = {}
with open(args.tokens) as tok_f:
for idx, line in enumerate(tok_f):
tok = line.rstrip()
token2id[tok] = idx
corpus = []
for file in sorted(os.listdir(args.corpus)):
file = os.path.join(args.corpus, file)
if file.endswith('json') or file.endswith('jsonl'):
print(f'Loading {file}')
with open(file, 'r') as f:
for idx, line in enumerate(tqdm(f.readlines())):
info = json.loads(line)
corpus.append(info)
ids = []
vectors = []
matrix_row, matrix_col, matrix_data = [], [], []
for i, d in enumerate(tqdm(corpus)):
weight_dict = d['vector']
tokens = weight_dict.keys()
col = [token2id[tok] for tok in tokens]
data = weight_dict.values()
matrix_row.extend([i] * len(weight_dict))
matrix_col.extend(col)
matrix_data.extend(data)
ids.append(d['id'])
vectors = csr_matrix((matrix_data, (matrix_row, matrix_col)), shape=(len(corpus), len(token2id)))
topic_ids = []
topic_vectors = []
with open(args.topics) as topic_f:
for line in topic_f:
info = json.loads(line)
topic_ids.append(info['id'])
topic_vectors.append(token_dict_to_sparse_vector(info['vector'], token2id))
vectors_T = vectors.T
manager = Manager()
results = manager.dict()
def run_search(idx):
global results
qid = topic_ids[idx]
t_vec = topic_vectors[idx]
scores = np.array(t_vec.dot(vectors_T).todense())[0]
top_idx = sorted(range(len(scores)), key=lambda x: scores[x], reverse=True)[:1000]
result = [(ids[x], scores[x]) for x in top_idx]
results[qid] = result
with Pool(args.threads) as p:
for _ in tqdm(p.imap_unordered(run_search, list(range(len(topic_ids)))), total=len(topic_ids)):
pass
with open(args.run, 'w') as f:
for qid in results:
for idx, item in enumerate(results[qid]):
did = item[0]
score = item[1]
f.write(f'{qid} Q0 {did} {idx+1} {score} bf\n')
| 2.46875 | 2 |
pycc/cli/__init__.py | kevinconway/pycc | 17 | 12794943 | <filename>pycc/cli/__init__.py
"""Modules related to the command line interface of the project."""
| 1.164063 | 1 |
notebooks/snippets/nbody/create_n.py | IsabelAverill/Scipy-2017---Numba | 149 | 12794944 | @njit
def create_n_random_particles(n, m, domain=1):
'''
Creates `n` particles with mass `m` with random coordinates
between 0 and `domain`
'''
parts = numpy.zeros((n), dtype=particle_dtype)
#attribute access only in @jitted function
for p in parts:
p.x = numpy.random.random() * domain
p.y = numpy.random.random() * domain
p.z = numpy.random.random() * domain
p.m = m
p.phi = 0
return parts
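
# The snippet above assumes `numpy`, numba's `njit` and a structured `particle_dtype`
# are defined earlier in the notebook. A minimal sketch of those assumptions:
#
#   import numpy
#   from numba import njit
#   particle_dtype = numpy.dtype({'names': ['x', 'y', 'z', 'm', 'phi'],
#                                 'formats': [numpy.double] * 5})
#   parts = create_n_random_particles(1000, m=1.0 / 1000)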
| 2.796875 | 3 |
desidiff/src/scores.py | EquinoxOmega0/timedomain | 0 | 12794945 | <filename>desidiff/src/scores.py
import numpy
import copy
def clipmean_one(y,ivar,mask,nsig=3):
w=numpy.where(mask==0)[0]
ansivar = ivar[w].sum()
ansmean = numpy.sum(y[w]*ivar[w])/ansivar
newy = y-ansmean
w=numpy.where(numpy.logical_and.reduce([mask==0, numpy.abs(newy*numpy.sqrt(ivar)) < nsig]))[0]
ansivar = ivar[w].sum()
ansmean = numpy.sum(y[w]*ivar[w])/ansivar
return y-ansmean
def clipmean(y,ivar,mask,nsig=3):
ans = copy.deepcopy(y)
for k in ans.keys():
ans[k]=clipmean_one(y[k],ivar[k],mask[k],nsig=nsig)
return ans
def perband_SN(y,ivar,mask,nsig=10):
ans=dict()
for k in y.keys():
w=numpy.where(mask[k]==0)[0]
ansivar = ivar[k][w].sum()
ansmean = numpy.sum(y[k][w]*ivar[k][w])/ansivar
ston=numpy.abs(ansmean)*numpy.sqrt(ansivar)
ans[k]=ston
return ans
def perband_increase(y,ivar,mask,refy, refivar, refmask):
ans=dict()
for k in y.keys():
w=numpy.where(mask[k]==0)[0]
ansivar = ivar[k][w].sum()
ansmean = numpy.sum(y[k][w]*ivar[k][w])/ansivar
ans[k]=ansmean
w=numpy.where(refmask[k]==0)[0]
ansivar = refivar[k][w].sum()
ansmean = numpy.sum(refy[k][w]*refivar[k][w])/ansivar
ans[k]=ans[k]/ansmean
return ans
def perres_SN(y,ivar,mask,nsig=10):
ans=dict()
# use observed dispersion rather than statistical
for k in y.keys():
w=numpy.where(mask[k]==0)[0]
std=y[k][w].std()
ston=numpy.abs(y[k][w])/std
# ston=numpy.abs(y[k][w])*numpy.sqrt(ivar[k][w])
ans[k]=(ston>10).sum()
return ans
def perconv_SN(y,ivar,mask,ncon=3,nsig=10):
newy=dict(y)
newivar=dict(ivar)
newmask=dict(mask)
ncon = numpy.zeros(ncon)+1.
for b in newy.keys():
newivar=numpy.convolve(ivar[b],ncon,mode='valid')
newy = numpy.convolve(y[b]*ivar[b],ncon,mode='valid')
newy = newy/newivar
newmask=numpy.convolve(mask[b],ncon,mode='valid')
return perres_SN(y,ivar,mask,nsig=nsig) | 2.25 | 2 |
Experiment2/URL_rating.py | aditya1271/Learning-Perspectives | 0 | 12794946 | from googlesearch import search
import requests
import re
import csv
data=[]
a=[]
student="Mam"
def swap(text,j):
temp1=text[j]
temp2=text[j+1]
temp3=text[j+2]
temp4=text[j+3]
text[j]=text[j+4]
text[j+1]=text[j+5]
text[j+2]=text[j+6]
text[j+3]=text[j+7]
text[j+4]=temp1
text[j+5]=temp2
text[j+6]=temp3
text[j+7]=temp4
def sort_urls(data):
newdata=[]
for word,text in zip(a,data):
text=text.split()
i=0
while(i<len(text)-4):
j=0
while(j<len(text)-5):
if( int(text[j+1])<int(text[j+5]) ):
swap(text,j)
elif(int(text[j+1])==int(text[j+5])):
if( min(int(text[j+3]), min(int(text[j+1]),int(text[j+2]))) < min(int(text[j+7]), min(int(text[j+6]),int(text[j+5])) )):
swap(text,j)
                    elif( (int(text[j+3])==0 or int(text[j+1])==0 or int(text[j+2])==0) and (int(text[j+7])!=0 and int(text[j+6])!=0 and int(text[j+5])!=0) ):
swap(text,j)
                    elif( int(text[j+3]) + int(text[j+1]) + int(text[j+2]) < int(text[j+7]) + int(text[j+6]) + int(text[j+5]) ):
swap(text,j)
j=j+4
i=i+4
strtemp=""
k=0
while(k<len(text)-3):
strtemp+=text[k]+"\n"+text[k+1]+" "+text[k+2]+" "+text[k+3]+"\n"
k=k+4
strtemp=strtemp+"-1\n"
newdata.append(strtemp)
#for x in newdata:
# print (x)
return newdata
def read_from_file():
try:
fin = open("Experiment2/urlw8"+student+"new.txt")
except :
return 0
query=fin.readline()
strtemp=""
query=query.replace("\n","")
var=query
while(query):
while(query and query!="-1"):
query=fin.readline()
strtemp+=query
query=query.replace("\n","")
query=fin.readline()
query=query.replace("\n","")
if(query):
a.append(var)
data.append(strtemp)
strtemp=""
var=query
fin.close()
return 1
read_from_file()
data=sort_urls(data)
open("Experiment2/urlw8"+student+"new.txt","w").close()
fout= open("Experiment2/urlw8"+student+"new.txt","w")
list_1=[]
for x,y in zip(a,data):
fout.write(x+"\n")
fout.write(y)
#----------------------------------
temp12=y
temp12=temp12.splitlines()
i=0
while( i < len(temp12)-1):
templist=[]
if ( temp12[i]=="-1" or temp12[i+1]=="-1" ) :
i=i+2
else:
w8=temp12[i+1].split()
templist.append(x)
templist.append(temp12[i])
templist.append(w8[0])
templist.append(w8[1])
templist.append(w8[2])
#print(templist)
if (int(w8[0])!=0):
# print(w8[0])
list_1.append(templist)
i=i+2
#print("\n")
#-------------------------------------------------------------------
header = ['Label', 'URL', 'Label Weight', 'Ensemble Weight', 'ML Weight']
with open('Experiment2/'+student+'new.csv', 'wt') as f:
csv_writer = csv.writer(f, quoting=csv.QUOTE_ALL)
csv_writer.writerow(header) # write header
csv_writer.writerows(list_1)
#fin1 = open("Experiment2/Results(Mam).txt","r")
fin2 = open("Experiment2/Results("+student+").txt","r")
#set1=fin1.readline()
set2=fin2.readline()
#set1=set1.split(" ,")
set2=set2.split(" ,")
words=list(set2)
#word1=word1.replace("\n","")
i=0
#print(a)
while(i<len(words) ) :
word1=words[i]
i=i+1
if word1 not in a:
regex1='\W'+word1+'\W'
regex2='\Wensemble\W'
regex3='\Wmachine learning\W'
query='"'+word1+'" + "ensemble" + "machine learning" '
fout.write(word1+"\n")
print(word1)
for url in search(query, tld='com', stop=10):
if(url.find(".pdf",len(url)-5)==-1):
test=1
try:
page=requests.get(url).text
except :
test=0
if test!=0 :
print(url)
fout.write(url)
fout.write("\n")
fout.write(str(len(re.findall(regex1, page , re.IGNORECASE) ) ) )
fout.write(" ")
fout.write(str(len(re.findall(regex2, page , re.IGNORECASE) ) ) )
fout.write(" ")
fout.write(str(len(re.findall(regex3, page , re.IGNORECASE) ) ) )
fout.write("\n")
fout.write("-1\n")
fout.write("-2\n")
| 3.453125 | 3 |
A1014280203/6/6.py | saurabh896/python-1 | 3,976 | 12794947 | <filename>A1014280203/6/6.py
import nltk
import string
import os
# simply extend word like: it's => it is
def extend_word(text):
if text.find('\'') > 0:
old2new = dict()
words = text.split()
for word in words:
if word.find('\'') > 0:
parts = word.split('\'')
if parts[1] == 'm':
parts[1] = 'am'
elif parts[1] == 's':
parts[1] = 'is'
elif parts[1] == 're':
parts[1] = 'are'
elif parts[1] == 't':
parts[1] = 'not'
elif parts[1] == 've':
parts[1] = 'have'
elif parts[1] == 'll':
parts[1] = 'will'
elif parts[1] == 'd':
if words[words.index(word) + 1] == 'better':
parts[1] = 'had'
else:
parts[1] = 'would'
if parts[0].endswith('n'):
parts[0] = parts[0][:-1]
old2new[word] = ' '.join(parts)
_text = text
for old_word in old2new.keys():
_text = _text.replace(old_word, old2new[old_word])
return _text
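
# Illustrative example:
#   extend_word("it's fine, don't worry")  ->  "it is fine, do not worry"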
def return_order_key(record):
return record[1]
def show_important_word(records):
# only this function was changed
items = sorted(records.items(), key=return_order_key, reverse=True)
# frequency of word
freq = 0
for item in items:
word, tag = nltk.pos_tag([item[0]])[0]
if tag.startswith('NN'):
print(word)
if item[1] < freq:
return
freq = item[1]
# no appropriate word found
if not freq:
print(items[0][0])
def process_file(filename):
with open(filename, 'r') as file:
article = file.read()
no_pun_text = article
_punctuation = string.punctuation.replace('\'', '')
    # delete all punctuation except the apostrophe (')
for pun in _punctuation:
no_pun_text = no_pun_text.replace(pun, '')
complete_text = extend_word(no_pun_text)
records = dict()
for word in complete_text.lower().split():
records[word] = records.get(word, 0) + 1
print('='*30)
print('current file:', filename)
print('-'*20)
show_important_word(records)
def process_files(path='.'):
files = os.listdir(path)
for file in files:
if file.endswith('.txt'):
process_file(os.path.join(path, file))
process_files() | 3.65625 | 4 |
sub.py | waki285/rt-bot | 26 | 12794948 | <filename>sub.py
"""りつたん! (C) 2020 RT-Team
LICENSE : ./LICENSE
README : ./readme.md
"""
desc = """りつたん - (C) 2021 RT-Team
少女起動中..."""
print(desc)
from discord.ext import commands
import discord
# Disable route (replace it with a no-op decorator).
commands.Cog.route = lambda *args, **kwargs: lambda *args, **kwargs: (args, kwargs)
from aiohttp import ClientSession
from sys import argv
import ujson
import rtlib
from data import data, is_admin, RTCHAN_COLORS
with open("token.secret", "r", encoding="utf-8_sig") as f:
secret = ujson.load(f)
TOKEN = secret["token"]["sub"]
prefixes = data["prefixes"]["sub"]
def setup(bot):
bot.admins = data["admins"]
@bot.listen()
async def on_close(loop):
await bot.session.close()
del bot.mysql
bot.mysql = bot.data["mysql"] = rtlib.mysql.MySQLManager(
loop=bot.loop, user=secret["mysql"]["user"],
host="192.168.3.11" if argv[1] == "production" else "localhost",
password=secret["mysql"]["password"], db="mysql",
pool = True, minsize=1, maxsize=30, autocommit=True)
rtlib.setup(bot)
bot.load_extension("jishaku")
bot._loaded = False
@bot.event
async def on_ready():
if not bot._loaded:
bot.session = ClientSession(loop=bot.loop)
for name in ("cogs.tts", "cogs.music", "cogs._sub", "cogs.language"):
bot.load_extension(name)
bot.dispatch("full_ready")
bot._loaded = True
print("少女絶賛稼働中!")
intents = discord.Intents.default()
intents.members = True
intents.typing = False
intents.guild_typing = False
intents.dm_typing = False
args = (prefixes,)
kwargs = {
"help_command": None,
"intents": intents
}
bot = commands.Bot(
command_prefix=data["prefixes"]["sub"], **kwargs
)
bot.test = argv[1] != "production"
bot.data = data
bot.colors = RTCHAN_COLORS
bot.is_admin = is_admin
async def _is_owner(user):
return bot.is_admin(user.id)
bot.is_owner = _is_owner
del is_admin, _is_owner
setup(bot)
bot.run(TOKEN)
| 2.453125 | 2 |
states/games/game_H2R.py | ianiancilla/kanacode | 0 | 12794949 | <gh_stars>0
import random
import pygame
from helper.textinput import TextInput
from helper.pygame_helpers import create_centered_text, create_containers, place_buttons
from helper.button import Button
import text
class H2R(object):
""" A mode in which a hiragana word is displayed,
and the user needs to enter the correct romaji transliteration"""
def __init__(self, app):
""" initialises the Hiragana to Romaji exercise
- creates container rects for the 3 main screen areas
- selects a word
- updates and draws screen accordingly """
self.app = app
# create and place container rects for different UI elements
self.container_english, \
self.container_kana, \
self.container_romaji, \
self.container_buttons = create_containers(
self.app.screen,
(5 / 20, 6 / 20, 5 / 20, 4 / 20),
layout="V")
# creates text input field
self.text_input = TextInput(
initial_string="",
font_family=self.app.settings.h2r_font_romaji.name,
font_size=int(self.app.settings.h2r_font_romaji.size),
antialias=True,
text_color=self.app.settings.h2r_font_romaji_color,
cursor_color=self.app.settings.h2r_font_romaji_color,
repeat_keys_initial_ms=400,
repeat_keys_interval_ms=35,
max_string_length=self.app.settings.h2r_text_input_max_string_length
)
# creates background frame for input field
self.input_frame = pygame.Rect(0, 0,
self.app.settings.h2r_text_input_width + 20,
self.app.settings.h2r_text_input_height)
self.input_frame.center = self.container_romaji.center
# creates rect for input box
self.text_input_rect = pygame.Rect(0, 0,
self.app.settings.h2r_text_input_width,
self.app.settings.h2r_text_input_height)
self.text_input_rect.center = self.input_frame.center
# init attributes for text rects and images
self.str_english_img, self.str_english_rect = None, None
self.str_kana_img, self.str_kana_rect = None, None
# create and place input tip
self.input_tip_img, self.input_tip_rect = create_centered_text(
text.h2r_input_tip,
self.app.settings.h2r_font_tip,
self.app.settings.h2r_font_tip_color,
self.container_romaji)
# creates buttons
self.butt_help = self._h2r_button(text.h2r_button_help, self._help)
self.butt_check = self._h2r_button(text.h2r_button_check, self._confirm_word)
self.butt_quit = self._h2r_button(text.h2r_button_quit, self.app.quit_game)
self.buttons = [self.butt_help, self.butt_check, self.butt_quit]
place_buttons(self.buttons, self.container_buttons)
# set initial status
self.word = self._pick_word()
self.previous_try = None # determines background of text input field as feedback
def update_screen(self, events):
""" implements changes that are needed by H2R at every tick, based on user events """
# creates english word img
self.str_english_img, self.str_english_rect = create_centered_text(
self.word.english,
self.app.settings.h2r_font_english,
self.app.settings.h2r_font_english_color,
self.container_english)
# creates kana word img
self.str_kana_img, self.str_kana_rect = create_centered_text(
self.word.hiragana,
self.app.settings.h2r_font_kana,
self.app.settings.h2r_font_kana_color,
self.container_kana)
# updates text input box
if self.text_input.update(events):
self._confirm_word()
# updates previous try, to return input field to default color if text
# was erased
if not self.text_input.get_text():
self.set_previous_try(None)
def draw_screen(self):
""" draws each element of H2R """
# draw backgrounds of containers
self.app.screen.fill(self.app.settings.col_dark)
pygame.draw.rect(self.app.screen,
self.app.settings.h2r_col_bg_en,
self.container_english)
pygame.draw.rect(self.app.screen,
self.app.settings.h2r_col_bg_kan,
self.container_kana)
pygame.draw.rect(self.app.screen,
self.app.settings.h2r_col_bg_rom,
self.container_romaji)
pygame.draw.rect(self.app.screen,
self.app.settings.h2r_col_bg_but,
self.container_buttons)
# draw bg of text input box
if self.get_previous_try():
if self.get_previous_try() == "right":
romaji_frame_color = self.app.settings.col_success
else:
romaji_frame_color = self.app.settings.col_danger
else:
romaji_frame_color = self.app.settings.h2r_col_bg_input
pygame.draw.rect(self.app.screen,
romaji_frame_color,
self.input_frame)
# draw all texts
self.app.screen.blit(self.str_english_img, self.str_english_rect)
self.app.screen.blit(self.str_kana_img, self.str_kana_rect)
if not self.text_input.get_text(): # only if there is no text in input field
self.app.screen.blit(self.input_tip_img, self.input_tip_rect)
# draw text input box
self.app.screen.blit(self.text_input.get_surface(), self.text_input_rect)
# draw all buttons
pos_mouse = pygame.mouse.get_pos()
for button in self.buttons:
if not button.is_inside(pos_mouse):
button.draw(self.app.screen)
else:
button.draw(self.app.screen, alt=True)
# EVENT HANDLING
def check_events(self):
"""Checks for and responds to mouse and kb events
NOTE: kb events related to input box and RETURN kez are handled in update function"""
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN
and event.key == self.app.settings.key_quit):
self.app.quit_game()
return [] # this is so the text input does not activate while app is quitting
if event.type == pygame.KEYUP:
# if event.key == self.app.settings.key_confirm: # commented out because this is
# handled in update method:
# self.text_input.update(events)
if event.key == self.app.settings.key_help:
self._help()
if event.type == pygame.MOUSEBUTTONUP:
for button in self.buttons:
button.on_mouse()
return events
# getters/setters
def get_previous_try(self):
return self.previous_try
def set_previous_try(self, previous_try):
        if previous_try not in (None, "right", "wrong"):
            raise TypeError("Wrong argument for previous try.")
self.previous_try = previous_try
# HELPERS
def _pick_word(self):
""" picks a random Word instance from the app's vocab"""
return random.choice(list(self.app.vocab.hiragana))
def _help(self):
word = self.word.romaji
self.text_input.set_text(word)
self.text_input.set_cursor_position(len(word))
self.set_previous_try("right")
def _confirm_word(self):
""" checks whether input text is correct for current word """
current_input = self.text_input.get_text().lower().strip()
if current_input == self.word.romaji:
if self.get_previous_try() == "right":
self.set_previous_try(None)
self._next_word()
else:
self.set_previous_try("right")
else:
self.set_previous_try("wrong")
def _next_word(self):
self.word = self._pick_word()
self.text_input.clear_text()
def _h2r_button(self, txt, function):
return Button(((0, 0), self.app.settings.h2r_button_size),
text=txt,
function=function,
color_base=self.app.settings.h2r_button_color,
color_alt=self.app.settings.h2r_button_color_alt,
font=self.app.settings.h2r_button_font,
font_color=self.app.settings.h2r_button_font_color,
font_color_alt=self.app.settings.h2r_button_font_color_alt
)
| 3.453125 | 3 |
getPeriodicBoundaryCondition.py | mo-hanxuan/MagicPBC | 1 | 12794950 | """
generate periodic boundary conditions (PBC).
Two methods to detect and partition the surface nodes:
    1. graph method (recommended; works for arbitrarily deformed shapes):
        use a dictionary to map each facet (keyed by its nodes) to the elements sharing it;
        a surface facet is shared by exactly one element.
        Construct the node-linking graph of the surface and the node-linking graph of the outlines.
        Using the outlines as boundaries,
        partition the surface graph into the different faces
        (left-, right-, down-, up-, back-, front- surfaces) by a union-find algorithm.
    2. coordinate-extrema method (xMin, xMax, yMin, yMax, zMin, zMax):
        detect the surface simply from the coordinates of all nodes.
        This method only applies to objects of cuboid shape.
Two methods to match the nodes on opposite faces:
    1. BFS matching (time complexity O(V + E), where V and E are the numbers of surface nodes and edges):
        match nodes while traversing the surface-node graphs of opposite faces.
        Given a matched node pair, use similar vectors (pointing from the current node to its neighbors)
        to match the neighbors of the pair.
    2. nearest-coordinates matching: can be very slow when a surface carries many nodes
        (time complexity O(V^2)).
"""
import torch as tch
import numpy as np
from elementsBody import *
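# Illustration only: a minimal sketch of the facet-counting idea described in the module
# docstring above. The real detection lives in the ElementsBody class (elementsBody.py);
# the hexahedron facet ordering below is an assumption, not necessarily the one used there.
def _sketch_surface_facets(elements):
    """elements: iterable of 8-node hexahedra (tuples of node numbers)."""
    facet_count = {}  # sorted facet-node tuple -> number of elements sharing that facet
    hex_facets = [(0, 1, 2, 3), (4, 5, 6, 7), (0, 1, 5, 4),
                  (3, 2, 6, 7), (0, 3, 7, 4), (1, 2, 6, 5)]
    for ele in elements:
        for f in hex_facets:
            key = tuple(sorted(ele[i] for i in f))
            facet_count[key] = facet_count.get(key, 0) + 1
    # a facet shared by exactly one element lies on the outer surface
    return [facet for facet, n in facet_count.items() if n == 1]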
def write_PBC_equation(file, obj, instance):
"""
    write the PBC equations for the 8 outer vertices, the 12 edges, and the 6 faces, in three steps:
        1. make the 8 outer vertices form a parallelepiped (平行六面体)
        2. make the 12 edges satisfy PBC
        3. make the interior nodes of each face-pair coincide
"""
if not isinstance(obj, ElementsBody):
raise ValueError("error, not isinstance(obj, ElementsBody)")
if not hasattr(obj, 'v_x0y0z0'):
obj.getEdgeVertexForPBC()
    ## 1.1 make the y0face a parallelogram
    file.write('************************** make the y0face a parallelogram \n')
for dm in [1, 2, 3]:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.v_x1y0z0, dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.v_x0y0z0, dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.v_x1y0z1, dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.v_x0y0z1, dm))
    ## 1.2 make the vertices of the ylines form a parallelepiped
file.write('************************** make vertexes of 4 ylines to coincide \n')
for yline in obj.ylines[1:]:
for dm in [1, 2, 3]:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, yline['end'], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, yline['beg'], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.ylines[0]['end'], dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.ylines[0]['beg'], dm))
# 2. make all outer edges to coincide
file.write('************************** make all outer edges to coincide \n')
xyzEdges = [obj.xlines, obj.ylines, obj.zlines]
for edges in xyzEdges:
for edge in edges[1:]:
for node in range(len(edge['inside'])):
for dm in [1, 2, 3]:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, edge['inside'][node], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, edge['beg'], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, edges[0]['inside'][node], dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, edges[0]['beg'], dm))
# 3. make all corresponding face-pairs to coincide
file.write('************************** make all corresponding face-pairs to coincide \n')
edgeNodes = set()
for edges in [obj.xlines, obj.ylines, obj.zlines]:
for edge in edges:
edgeNodes |= ({edge['beg']} | {edge['end']} | set(edge['inside']))
for iface, face in enumerate(obj.faceMatch):
for node in face:
for dm in [1, 2, 3]:
if node not in edgeNodes:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, node, dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.baseNodes[iface][0], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, face[node], dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.baseNodes[iface][1], dm))
def write_PBC_equation_byGraph(file, obj, instance):
"""
    use the graph method to get the PBC info, and
    write the PBC equations for the 8 outer vertices, the 12 edges, and the 6 faces, in three steps:
        1. make the 8 outer vertices form a parallelepiped (平行六面体)
        2. make the 12 edges satisfy PBC
        3. make the interior nodes of each face-pair coincide
    the node numbering of megaElement
    (the hexahedron formed by the outer-surface vertices) is shown below:
              v3--------------v7
             /|               /|
            / |              / |
           v0--------------v4  |
           |  |             |  |
           |  v2------------|--v6
           | /              | /
           |/               |/
           v1--------------v5
        y
        ^
        |
        +-----> x
       /
      z
"""
if not isinstance(obj, ElementsBody):
raise ValueError("error, not isinstance(obj, ElementsBody)")
obj.getFaceForPBC_byGraph()
obj.getEdgeForPBC_byGraph()
    ## 1.1 make the y0face a parallelogram
    file.write('************************** make the y0face a parallelogram \n')
for dm in [1, 2, 3]:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.megaElement[6], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.megaElement[2], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.megaElement[5], dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.megaElement[1], dm))
    ## 1.2 make the vertices of the ylines form a parallelepiped
file.write('************************** make vertexes of 4 ylines to coincide \n')
for i, j in [[7, 6], [3, 2], [0, 1]]:
for dm in [1, 2, 3]:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.megaElement[i], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.megaElement[j], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.megaElement[4], dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.megaElement[5], dm))
# 2. make all outer edges to coincide
file.write('************************** make all outer edges to coincide \n')
edgeId = [
[[0, 4], [3, 7], [2, 6], [1, 5]], # xEdges
[[1, 0], [5, 4], [6, 7], [2, 3]], # yEdges
[[2, 1], [6, 5], [7, 4], [3, 0]] # zEdges
]
for edges in edgeId: # edges = xEdges or yEdges or zEdges
edge0 = (obj.megaElement[edges[0][0]], obj.megaElement[edges[0][1]])
if edge0 in obj.outlines:
for edge in edges[1:]:
edge1 = (obj.megaElement[edge[0]], obj.megaElement[edge[1]])
for node in range(len(obj.outlines[edge0])):
for dm in [1, 2, 3]:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, obj.outlines[edge1][node], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, edge1[0], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, obj.outlines[edge0][node], dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, edge0[0], dm))
# 3. make all corresponding face-pairs to coincide
file.write('************************** make all corresponding face-pairs to coincide \n')
for twoFacets in obj.faceMatch:
faceMatch = obj.faceMatch[twoFacets]
for node in faceMatch:
for dm in [1, 2, 3]:
file.write('*Equation\n4 \n')
file.write('{}.N{}, {}, 1 \n'.format(instance, node, dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, twoFacets[0], dm))
file.write('{}.N{}, {}, -1 \n'.format(instance, faceMatch[node], dm))
file.write('{}.N{}, {}, 1 \n'.format(instance, twoFacets[4], dm))
def write_PBC_Nset(file, obj):
if not isinstance(obj, ElementsBody):
raise ValueError("error, not isinstance(obj, ElementsBody)")
if not hasattr(obj, 'faceNode'):
obj.getFaceNode()
for node in obj.getFaceNode():
file.write('*Nset, nset=N{} \n'.format(node))
file.write('{}, \n'.format(node))
def write_nodes(file, obj):
nodes = obj.nodes
for node in nodes:
file.write(' {}, {}, {}, {} \n'.format(
node, nodes[node][0], nodes[node][1], nodes[node][2]
))
def adjustCoordinatesForPBC_byGraph(obj):
"""
    use the graph method to get the node relations, and
    adjust the nodal coordinates for the periodic boundary condition (PBC)
    so that the nodes of each face-pair coincide exactly in the initial state
"""
if not isinstance(obj, ElementsBody):
raise ValueError("error, not isinstance(obj, ElementsBody)")
obj.getFaceForPBC_byGraph()
obj.getEdgeForPBC_byGraph()
makenp = False
for node in obj.nodes:
if type(obj.nodes[node]) == type([]):
makenp = True
break
if makenp:
for node in obj.nodes:
obj.nodes[node] = np.array(obj.nodes[node])
    ## 1.1 make the y0face a parallelogram
obj.nodes[obj.megaElement[6]] = \
obj.nodes[obj.megaElement[2]] + \
(obj.nodes[obj.megaElement[5]] - obj.nodes[obj.megaElement[1]])
    ## 1.2 make the vertices of the ylines form a parallelepiped
for i, j in [[7, 6], [3, 2], [0, 1]]:
obj.nodes[obj.megaElement[i]] = \
obj.nodes[obj.megaElement[j]] + \
obj.nodes[obj.megaElement[4]] - obj.nodes[obj.megaElement[5]]
# 2. make all outer edges to coincide
edgeId = [
[[0, 4], [3, 7], [2, 6], [1, 5]], # xEdges
[[1, 0], [5, 4], [6, 7], [2, 3]], # yEdges
[[2, 1], [6, 5], [7, 4], [3, 0]] # zEdges
]
for edges in edgeId: # edges = xEdges or yEdges or zEdges
edge0 = (obj.megaElement[edges[0][0]], obj.megaElement[edges[0][1]])
if edge0 in obj.outlines:
for edge in edges[1:]:
edge1 = (obj.megaElement[edge[0]], obj.megaElement[edge[1]])
for node in range(len(obj.outlines[edge0])):
obj.nodes[obj.outlines[edge1][node]] = \
obj.nodes[edge1[0]] + \
obj.nodes[obj.outlines[edge0][node]] - obj.nodes[edge0[0]]
# 3. make all corresponding face-pairs to coincide
for twoFacets in obj.faceMatch:
faceMatch = obj.faceMatch[twoFacets]
for node in faceMatch:
obj.nodes[faceMatch[node]] = \
obj.nodes[twoFacets[4]] + \
obj.nodes[node] - obj.nodes[twoFacets[0]]
obj.nodesAdjusted = True
def adjustCoordinatesForPBC(obj):
"""
    adjust the nodal coordinates for the periodic boundary condition (PBC)
    so that the nodes of each face-pair coincide exactly in the initial state
"""
if not isinstance(obj, ElementsBody):
raise ValueError("error, not isinstance(obj, ElementsBody)")
if not hasattr(obj, 'v_x0y0z0'):
obj.getEdgeVertexForPBC()
makenp = False
for node in obj.nodes:
if type(obj.nodes[node]) == type([]):
makenp = True
break
if makenp:
for node in obj.nodes:
obj.nodes[node] = np.array(obj.nodes[node])
    ## 1.1 make the y0face a parallelogram
obj.nodes[obj.v_x1y0z0] = \
obj.nodes[obj.v_x0y0z0] + \
(obj.nodes[obj.v_x1y0z1] - obj.nodes[obj.v_x0y0z1])
    ## 1.2 make the vertices of the ylines form a parallelepiped
for yline in obj.ylines[1:]:
obj.nodes[yline['end']] = \
obj.nodes[yline['beg']] + \
obj.nodes[obj.ylines[0]['end']] - obj.nodes[obj.ylines[0]['beg']]
# 2. make all outer edges to coincide
xyzEdges = [obj.xlines, obj.ylines, obj.zlines]
for edges in xyzEdges:
for edge in edges[1:]:
for node in range(len(edge['inside'])):
obj.nodes[edge['inside'][node]] = \
obj.nodes[edge['beg']] + \
obj.nodes[edges[0]['inside'][node]] - obj.nodes[edges[0]['beg']]
# 3. make all corresponding face-pairs to coincide
edgeNodes = set()
for edges in [obj.xlines, obj.ylines, obj.zlines]:
for edge in edges:
edgeNodes |= ({edge['beg']} | {edge['end']} | set(edge['inside']))
for iface, face in enumerate(obj.faceMatch):
for node in face:
if node not in edgeNodes:
obj.nodes[node] = \
obj.nodes[obj.baseNodes[iface][0]] + \
obj.nodes[face[node]] - obj.nodes[obj.baseNodes[iface][1]]
obj.nodesAdjusted = True
if __name__ == "__main__":
testState = False
# get the inp file and the object
inpFile = input("\033[0;33;40m{}\033[0m".format("please insert the .inp file name (include the path): "))
job = inpFile.split("/")[-1].split(".inp")[0] if "/" in inpFile else inpFile.split("\\")[-1].split(".inp")[0]
path = inpFile.split(job + ".inp")[0]
obj = ElementsBody(*readInp(inpFile))
key = input("\033[35;1m{}\033[0m".format(
"which method do you want to use? \n"
"1: graph-method (recomended); \n"
"2: xMin, xMax, yMin, yMax, zMin, zMax; \n(insert 1 or 2): "
))
if key == "1":
getFaceForPBC = obj.getFaceForPBC_byGraph
writeEquations = write_PBC_equation_byGraph
adjustCoordinate = adjustCoordinatesForPBC_byGraph
    elif key == "2":
        getFaceForPBC = obj.getFaceForPBC
        writeEquations = write_PBC_equation
        adjustCoordinate = adjustCoordinatesForPBC
    else:
        raise ValueError('invalid choice, please insert "1" or "2"')
getFaceForPBC()
adjustCoor = input("do you want to adjust the coordinates for PBC? "
"(not recommended)\n\033[33m{}\033[0m".format('(y/n): '))
while adjustCoor not in ['y', 'n']:
adjustCoor = input('\033[33m{}\033[0m'.format('please insert "y" or "n": '))
if adjustCoor == 'y':
adjustCoordinate(obj)
if testState:
del obj.faceMatch
getFaceForPBC()
# find the instance name
instance = 'Part-1'
with open(inpFile, 'r') as file:
for line in file:
if '*Instance' in line and 'name=' in line:
                instance = line.split(',')
                instance = instance[1].split('=')
                instance = instance[-1].strip()
print('instance =', instance)
break
writeInp = input(
'ok to write the .inp file with PBC inside the file ? \033[36m{}\033[0m'.format('(y/n): ')
)
while writeInp not in ['y', 'n']:
writeInp = input('\033[31m{}\033[0m'.format(
'please insert "y" or "n": '
))
if writeInp == 'y':
newFileName = path + job + "_PBC.inp"
with open(newFileName, 'w') as newFile, open(inpFile, 'r') as oldFile:
clone = True
for line in oldFile:
if "Section:" in line and "**" in line:
write_PBC_Nset(newFile, obj)
elif '*End Assembly' in line:
writeEquations(newFile, obj, instance)
if clone == False and '*' in line:
clone = True
if clone:
newFile.write(line) # write the line from old file to new file
if "*Node\n" in line:
if hasattr(obj, 'nodesAdjusted'):
clone = False
print("\033[35;1m{}\033[0m".format("write new nodes for obj"))
write_nodes(newFile, obj)
print("\033[40;36;1m {} {} \033[35;1m {} \033[0m".format(
"file", newFileName, "has been written. "
))
elif input(
"\033[32;1m write nset- and equations- files for PBC? (y/n): \033[0m"
) in ["y", ""]:
# write the Nset
with open(path + '{}_nset.txt'.format(job), 'w') as file:
for node in obj.getFaceNode():
file.write('*Nset, nset=N{} \n'.format(node))
file.write('{}, \n'.format(node))
print("\033[40;36;1m {} {} \033[35;1m {} \033[0m".format(
"file", path + '{}_nset.txt'.format(job), "has been written. "
))
# write the equation for PBC
with open(path + '{}_equation.txt'.format(job), 'w') as file:
writeEquations(file, obj, instance)
print("\033[40;36;1m {} {} \033[35;1m {} \033[0m".format(
"file", path + '{}_equation.txt'.format(job), "has been written. "
))
| 3.078125 | 3 |