index (int64, 0–10k) | blob_id (string, length 40) | step-1 (string, 13–984k chars) | step-2 (string, 6–1.23M chars, nullable ⌀) | step-3 (string, 15–1.34M chars, nullable ⌀) | step-4 (string, 30–1.34M chars, nullable ⌀) | step-5 (string, 64–1.2M chars, nullable ⌀) | step-ids (sequence, length 1–5) |
---|---|---|---|---|---|---|---|
1,100 | c4096cfae7182875a79ba7837187cd94b4379922 | <mask token>
| <mask token>
@main.route('/link')
@cache.cached(key_prefix=make_cache_key, timeout=60)
def get_link():
url = request.args.get('url')
params = {'video': True, 'audio': True, 'screenshot': False}
if request.args.get('iframe'):
params['iframe'] = True
if url[8:11] != 'www':
url = url[:8] + 'www.' + url[8:]
content = Content.query.filter_by(url=url).first()
if content:
return {'status': 'success', 'data': content.to_json(iframe=params[
'iframe'], video=params['video'], audio=params['audio'])}, 200
else:
headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}
m_url = 'https://pro.microlink.io?url={}'.format(url)
r = requests.get(m_url, headers=headers, params=params)
if r.json().get('status') == 'success':
content = Content.from_json(r.json().get('data'))
db.session.add(content)
db.session.commit()
return r.json(), 200
| <mask token>
@main.route('/')
def index():
return render_template('templates/index.html')
@main.route('/link')
@cache.cached(key_prefix=make_cache_key, timeout=60)
def get_link():
url = request.args.get('url')
params = {'video': True, 'audio': True, 'screenshot': False}
if request.args.get('iframe'):
params['iframe'] = True
if url[8:11] != 'www':
url = url[:8] + 'www.' + url[8:]
content = Content.query.filter_by(url=url).first()
if content:
return {'status': 'success', 'data': content.to_json(iframe=params[
'iframe'], video=params['video'], audio=params['audio'])}, 200
else:
headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}
m_url = 'https://pro.microlink.io?url={}'.format(url)
r = requests.get(m_url, headers=headers, params=params)
if r.json().get('status') == 'success':
content = Content.from_json(r.json().get('data'))
db.session.add(content)
db.session.commit()
return r.json(), 200
| from flask import render_template, request, current_app
from . import main
from .. import db, cache
from ..models import Content
from ..utils import make_cache_key
import requests
@main.route('/')
def index():
return render_template('templates/index.html')
@main.route('/link')
@cache.cached(key_prefix=make_cache_key, timeout=60)
def get_link():
url = request.args.get('url')
params = {'video': True, 'audio': True, 'screenshot': False}
if request.args.get('iframe'):
params['iframe'] = True
if url[8:11] != 'www':
url = url[:8] + 'www.' + url[8:]
content = Content.query.filter_by(url=url).first()
if content:
return {'status': 'success', 'data': content.to_json(iframe=params[
'iframe'], video=params['video'], audio=params['audio'])}, 200
else:
headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}
m_url = 'https://pro.microlink.io?url={}'.format(url)
r = requests.get(m_url, headers=headers, params=params)
if r.json().get('status') == 'success':
content = Content.from_json(r.json().get('data'))
db.session.add(content)
db.session.commit()
return r.json(), 200
| from flask import render_template, request, current_app
from . import main
from .. import db, cache
from ..models import Content
from ..utils import make_cache_key
import requests
@main.route('/')
def index():
return render_template("templates/index.html")
@main.route('/link')
@cache.cached(key_prefix=make_cache_key, timeout=60)
def get_link():
url = request.args.get('url')
params = {'video': True,
'audio': True,
'screenshot': False}
if request.args.get('iframe'):
params['iframe'] = True
if url[8:11] != 'www':
url = url[:8] + 'www.' + url[8:]
content = Content.query.filter_by(url=url).first()
if content:
return {'status': 'success',
'data': content.to_json(iframe=params['iframe'], video=params['video'], audio=params['audio'])}, 200
else:
headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}
m_url = 'https://pro.microlink.io?url={}'.format(url)
r = requests.get(m_url, headers=headers, params=params)
if r.json().get('status') == 'success':
content = Content.from_json(r.json().get('data'))
db.session.add(content)
db.session.commit()
return r.json(), 200
| [
0,
1,
2,
3,
4
] |
1,101 | 5a59108084d943f6faa07ffea1467dc19c3dd790 | <mask token>
| <mask token>
class DatapackageModelForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.fields['status'].queryset = self.fields['status'
].queryset.order_by('name')
self.fields['collaborators'].queryset = self.fields['collaborators'
].queryset.order_by('full_name')
self.fields['collaborators'].help_text = (
'Refresh page to show new collaborators. Hold down “Control”, or “Command” on a Mac, to select more than one'
)
self.fields['collaborators'].widget.attrs = {'size': 10}
collaborator_add_url = reverse('management:collaborator-add')
self.fields['collaborators'].label = (
f'Collaborators <div class="float-right"><a target="_blank" href="{collaborator_add_url}"><i class="fas fa-user-plus"></i> Add collaborator <i class="fas fa-external-link-alt"></i></a></div>'
)
self.helper.layout = Layout(Div(Div('status', css_class='col-6'),
css_class='row'), Div(Div('collaborators', css_class='col-6'),
css_class='row'), FormActions(Submit('save', 'Save'),
cancel_button(reverse('management:datapackage-detail', kwargs={
'uuid': self.instance.uuid}))))
class Meta:
model = Datapackage
fields = ['status', 'collaborators']
widgets = {'status': RadioSelect}
| <mask token>
class PersonModelForm(forms.ModelForm):
<mask token>
class Meta:
model = Person
fields = ['full_name']
class DatapackageModelForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.fields['status'].queryset = self.fields['status'
].queryset.order_by('name')
self.fields['collaborators'].queryset = self.fields['collaborators'
].queryset.order_by('full_name')
self.fields['collaborators'].help_text = (
'Refresh page to show new collaborators. Hold down “Control”, or “Command” on a Mac, to select more than one'
)
self.fields['collaborators'].widget.attrs = {'size': 10}
collaborator_add_url = reverse('management:collaborator-add')
self.fields['collaborators'].label = (
f'Collaborators <div class="float-right"><a target="_blank" href="{collaborator_add_url}"><i class="fas fa-user-plus"></i> Add collaborator <i class="fas fa-external-link-alt"></i></a></div>'
)
self.helper.layout = Layout(Div(Div('status', css_class='col-6'),
css_class='row'), Div(Div('collaborators', css_class='col-6'),
css_class='row'), FormActions(Submit('save', 'Save'),
cancel_button(reverse('management:datapackage-detail', kwargs={
'uuid': self.instance.uuid}))))
class Meta:
model = Datapackage
fields = ['status', 'collaborators']
widgets = {'status': RadioSelect}
| from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit
from django import forms
from django.forms import RadioSelect
from django.urls import reverse
from core.models import Person, Datapackage
from core.utils import cancel_button
class PersonModelForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper(self)
if self.instance.pk:
cancel_url = reverse('management:collaborator-detail', kwargs={
'pk': self.instance.pk})
else:
cancel_url = reverse('management:collaborator-list')
self.helper.layout = Layout(Div(Div('full_name', css_class='col-6'),
css_class='row'), FormActions(Submit('save', 'Save'),
cancel_button(cancel_url)))
class Meta:
model = Person
fields = ['full_name']
class DatapackageModelForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.fields['status'].queryset = self.fields['status'
].queryset.order_by('name')
self.fields['collaborators'].queryset = self.fields['collaborators'
].queryset.order_by('full_name')
self.fields['collaborators'].help_text = (
'Refresh page to show new collaborators. Hold down “Control”, or “Command” on a Mac, to select more than one'
)
self.fields['collaborators'].widget.attrs = {'size': 10}
collaborator_add_url = reverse('management:collaborator-add')
self.fields['collaborators'].label = (
f'Collaborators <div class="float-right"><a target="_blank" href="{collaborator_add_url}"><i class="fas fa-user-plus"></i> Add collaborator <i class="fas fa-external-link-alt"></i></a></div>'
)
self.helper.layout = Layout(Div(Div('status', css_class='col-6'),
css_class='row'), Div(Div('collaborators', css_class='col-6'),
css_class='row'), FormActions(Submit('save', 'Save'),
cancel_button(reverse('management:datapackage-detail', kwargs={
'uuid': self.instance.uuid}))))
class Meta:
model = Datapackage
fields = ['status', 'collaborators']
widgets = {'status': RadioSelect}
| from crispy_forms.bootstrap import FormActions
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit
from django import forms
from django.forms import RadioSelect
from django.urls import reverse
from core.models import Person, Datapackage
from core.utils import cancel_button
class PersonModelForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper(self)
if self.instance.pk:
cancel_url = reverse('management:collaborator-detail', kwargs={'pk': self.instance.pk})
else:
cancel_url = reverse('management:collaborator-list')
self.helper.layout = Layout(
Div(
Div('full_name', css_class='col-6'),
css_class='row'
),
FormActions(
Submit('save', 'Save'),
cancel_button(cancel_url)
)
)
class Meta:
model = Person
fields = ['full_name']
class DatapackageModelForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.fields['status'].queryset = self.fields['status'].queryset.order_by('name')
self.fields['collaborators'].queryset = self.fields['collaborators'].queryset.order_by('full_name')
self.fields['collaborators'].help_text = 'Refresh page to show new collaborators. Hold down “Control”, or “Command” on a Mac, to select more than one'
self.fields['collaborators'].widget.attrs = {'size': 10}
collaborator_add_url = reverse('management:collaborator-add')
self.fields['collaborators'].label = f'Collaborators <div class="float-right"><a target="_blank" href="{collaborator_add_url}"><i class="fas fa-user-plus"></i> Add collaborator <i class="fas fa-external-link-alt"></i></a></div>'
self.helper.layout = Layout(
Div(
Div('status', css_class='col-6'),
css_class='row'
),
Div(
Div('collaborators', css_class='col-6'),
css_class='row'
),
FormActions(
Submit('save', 'Save'),
cancel_button(reverse('management:datapackage-detail', kwargs={'uuid': self.instance.uuid})),
)
)
class Meta:
model = Datapackage
fields = ['status', 'collaborators']
widgets = {'status': RadioSelect}
| [
0,
2,
3,
5,
6
] |
1,102 | 8bf330dc7bee65ac9478722233477ebe5d0286c2 | <mask token>
def test():
webbrowser.open_new_tab('Test.html')
<mask token>
| <mask token>
ventana.geometry('1920x1080')
def test():
webbrowser.open_new_tab('Test.html')
<mask token>
boton1.grid(row=3, column=0)
boton2.grid(row=4, column=0)
boton3.grid(row=5, column=0)
ventana.mainloop()
| <mask token>
ventana = tkinter.Tk()
ventana.geometry('1920x1080')
def test():
webbrowser.open_new_tab('Test.html')
boton1 = tkinter.Button(ventana, text='WEB', width=10, height=5, command=test)
boton2 = tkinter.Button(ventana, text='boton2', width=10, height=5)
boton3 = tkinter.Button(ventana, text='boton3', width=10, height=5)
boton1.grid(row=3, column=0)
boton2.grid(row=4, column=0)
boton3.grid(row=5, column=0)
ventana.mainloop()
| import tkinter
import webbrowser
ventana = tkinter.Tk()
ventana.geometry('1920x1080')
def test():
webbrowser.open_new_tab('Test.html')
boton1 = tkinter.Button(ventana, text='WEB', width=10, height=5, command=test)
boton2 = tkinter.Button(ventana, text='boton2', width=10, height=5)
boton3 = tkinter.Button(ventana, text='boton3', width=10, height=5)
boton1.grid(row=3, column=0)
boton2.grid(row=4, column=0)
boton3.grid(row=5, column=0)
ventana.mainloop()
| import tkinter
import webbrowser
ventana = tkinter.Tk()
ventana.geometry("1920x1080")
def test():
webbrowser.open_new_tab('Test.html')
boton1 = tkinter.Button(ventana,text ="WEB", width = 10, height=5, command = test );
boton2 = tkinter.Button(ventana,text ="boton2", width = 10, height=5);
boton3 = tkinter.Button(ventana,text ="boton3", width = 10, height=5);
boton1.grid(row = 3, column = 0)
boton2.grid(row = 4, column = 0)
boton3.grid(row = 5, column = 0)
ventana.mainloop()
| [
1,
2,
3,
4,
5
] |
1,103 | c6055c6b67ac28d304ed34ddc2f81e59da8e7f1b | <mask token>
class GoalCategory(NestedSet):
nsm_parent_field = 'parent_goal_category'
def on_update(self):
self.validate_name_with_goal()
super(GoalCategory, self).on_update()
self.validate_one_root()
def validate_name_with_goal(self):
if frappe.db.exists('Goal', self.name):
frappe.msgprint(_('A goal with the same name already exists'),
raise_exception=1)
<mask token>
| <mask token>
class GoalCategory(NestedSet):
nsm_parent_field = 'parent_goal_category'
def on_update(self):
self.validate_name_with_goal()
super(GoalCategory, self).on_update()
self.validate_one_root()
def validate_name_with_goal(self):
if frappe.db.exists('Goal', self.name):
frappe.msgprint(_('A goal with the same name already exists'),
raise_exception=1)
def get_parent_goal_categories(goal_category):
lft, rgt = frappe.db.get_value('Goal Category', goal_category, ['lft',
'rgt'])
return frappe.db.sql(
"""select name from `tabGoal Category`
where lft <= %s and rgt >= %s
order by lft asc"""
, (lft, rgt), as_dict=True)
<mask token>
| <mask token>
class GoalCategory(NestedSet):
nsm_parent_field = 'parent_goal_category'
def on_update(self):
self.validate_name_with_goal()
super(GoalCategory, self).on_update()
self.validate_one_root()
def validate_name_with_goal(self):
if frappe.db.exists('Goal', self.name):
frappe.msgprint(_('A goal with the same name already exists'),
raise_exception=1)
def get_parent_goal_categories(goal_category):
lft, rgt = frappe.db.get_value('Goal Category', goal_category, ['lft',
'rgt'])
return frappe.db.sql(
"""select name from `tabGoal Category`
where lft <= %s and rgt >= %s
order by lft asc"""
, (lft, rgt), as_dict=True)
def on_doctype_update():
frappe.db.add_index('Goal Category', ['lft', 'rgt'])
| from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils.nestedset import NestedSet
class GoalCategory(NestedSet):
nsm_parent_field = 'parent_goal_category'
def on_update(self):
self.validate_name_with_goal()
super(GoalCategory, self).on_update()
self.validate_one_root()
def validate_name_with_goal(self):
if frappe.db.exists('Goal', self.name):
frappe.msgprint(_('A goal with the same name already exists'),
raise_exception=1)
def get_parent_goal_categories(goal_category):
lft, rgt = frappe.db.get_value('Goal Category', goal_category, ['lft',
'rgt'])
return frappe.db.sql(
"""select name from `tabGoal Category`
where lft <= %s and rgt >= %s
order by lft asc"""
, (lft, rgt), as_dict=True)
def on_doctype_update():
frappe.db.add_index('Goal Category', ['lft', 'rgt'])
| # -*- coding: utf-8 -*-
# Copyright (c) 2018, HSCH and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils.nestedset import NestedSet
class GoalCategory(NestedSet):
nsm_parent_field = 'parent_goal_category';
def on_update(self):
self.validate_name_with_goal()
super(GoalCategory, self).on_update()
self.validate_one_root()
def validate_name_with_goal(self):
if frappe.db.exists("Goal", self.name):
frappe.msgprint(_("A goal with the same name already exists"), raise_exception=1)
def get_parent_goal_categories(goal_category):
lft, rgt = frappe.db.get_value("Goal Category", goal_category, ['lft', 'rgt'])
return frappe.db.sql("""select name from `tabGoal Category`
where lft <= %s and rgt >= %s
order by lft asc""", (lft, rgt), as_dict=True)
def on_doctype_update():
frappe.db.add_index("Goal Category", ["lft", "rgt"])
| [
4,
5,
6,
7,
8
] |
1,104 | 84476e1793242bf3bae51263c2db28ff555c25d7 | <mask token>
| <mask token>
def start():
image_file = 'sample.png'
top_left_corner = [100, 100]
bottom_right_corner = [200, 200]
img = Image.open(image_file)
top_left_x = top_left_corner[0]
top_left_y = top_left_corner[1]
bottom_right_x = bottom_right_corner[0]
bottom_right_y = bottom_right_corner[1]
draw = ImageDraw.Draw(img)
draw.rectangle((top_left_x, top_left_y, bottom_right_x, bottom_right_y),
outline=255)
img.save('preview.png')
grayscale_image = Image.open(image_file).convert('L')
pixel_array = numpy.array(grayscale_image) / 255.0
print(f'Image file: {image_file} , height x width : {pixel_array.shape}')
sub_section = pixel_array[top_left_x:bottom_right_x, top_left_y:
bottom_right_y]
deviation = numpy.std(sub_section)
print(f'Deviation: {deviation}')
print('Done')
<mask token>
| <mask token>
def start():
image_file = 'sample.png'
top_left_corner = [100, 100]
bottom_right_corner = [200, 200]
img = Image.open(image_file)
top_left_x = top_left_corner[0]
top_left_y = top_left_corner[1]
bottom_right_x = bottom_right_corner[0]
bottom_right_y = bottom_right_corner[1]
draw = ImageDraw.Draw(img)
draw.rectangle((top_left_x, top_left_y, bottom_right_x, bottom_right_y),
outline=255)
img.save('preview.png')
grayscale_image = Image.open(image_file).convert('L')
pixel_array = numpy.array(grayscale_image) / 255.0
print(f'Image file: {image_file} , height x width : {pixel_array.shape}')
sub_section = pixel_array[top_left_x:bottom_right_x, top_left_y:
bottom_right_y]
deviation = numpy.std(sub_section)
print(f'Deviation: {deviation}')
print('Done')
if __name__ == '__main__':
start()
| import numpy
from PIL import Image, ImageDraw
def start():
image_file = 'sample.png'
top_left_corner = [100, 100]
bottom_right_corner = [200, 200]
img = Image.open(image_file)
top_left_x = top_left_corner[0]
top_left_y = top_left_corner[1]
bottom_right_x = bottom_right_corner[0]
bottom_right_y = bottom_right_corner[1]
draw = ImageDraw.Draw(img)
draw.rectangle((top_left_x, top_left_y, bottom_right_x, bottom_right_y),
outline=255)
img.save('preview.png')
grayscale_image = Image.open(image_file).convert('L')
pixel_array = numpy.array(grayscale_image) / 255.0
print(f'Image file: {image_file} , height x width : {pixel_array.shape}')
sub_section = pixel_array[top_left_x:bottom_right_x, top_left_y:
bottom_right_y]
deviation = numpy.std(sub_section)
print(f'Deviation: {deviation}')
print('Done')
if __name__ == '__main__':
start()
| import numpy
from PIL import Image, ImageDraw
def start():
# ----------------------------
# Set values
# ----------------------------
image_file = 'sample.png'
# Coordinates, where [0,0] is top left corner
top_left_corner = [100, 100] # [x, y]
bottom_right_corner = [200, 200] # [x, y]
# ----------------------------
# ----------------------------
# Preview area
# ----------------------------
img = Image.open(image_file)
top_left_x = top_left_corner[0]
top_left_y = top_left_corner[1]
bottom_right_x = bottom_right_corner[0]
bottom_right_y = bottom_right_corner[1]
draw = ImageDraw.Draw(img)
draw.rectangle((top_left_x, top_left_y, bottom_right_x, bottom_right_y), outline=255)
img.save('preview.png')
# ----------------------------
# ----------------------------
# Calculate deviation
# ----------------------------
grayscale_image = Image.open(image_file).convert('L')
pixel_array = numpy.array(grayscale_image) / 255.0 # normalize
print(f"Image file: {image_file} , height x width : {pixel_array.shape}")
sub_section = pixel_array[top_left_x:bottom_right_x, top_left_y:bottom_right_y]
deviation = numpy.std(sub_section)
print(f"Deviation: {deviation}")
print('Done')
if __name__ == '__main__':
start()
| [
0,
1,
2,
3,
4
] |
1,105 | a35e86e474883d892a6ce8eb191a3a5f8a9558c8 | <mask token>
| <mask token>
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.
STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
| <mask token>
urlpatterns = [url('^$', home, name='home'), url('prof/profile/$', profile,
name='profile'), url('members/$', member, name='member'), url(
'researches/$', research, name='research'), url(
'pub/(?P<type>[\\w-]+)/$', publication, name='publication'), url(
'pub/detail/(?P<pub_id>\\d+)/$', pub_detail, name='pub_detail'), url(
'notice/list/(?P<type>[\\w-]+)/$', list, name='notice_list'), url(
'notice/detail/(?P<notice_id>\\d+)/$', notice_detail, name=
'notice_detail'), url('protocol/list/$', protocol_list, name=
'protocol_list'), url('^summernote/', include('django_summernote.urls')
), url('^admin/', admin.site.urls)]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.
STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
| from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from .views import home, profile
from member.views import member
from publication.views import publication, pub_detail
from notice.views import list, notice_detail
from research.views import research
from protocol.views import protocol_list
urlpatterns = [url('^$', home, name='home'), url('prof/profile/$', profile,
name='profile'), url('members/$', member, name='member'), url(
'researches/$', research, name='research'), url(
'pub/(?P<type>[\\w-]+)/$', publication, name='publication'), url(
'pub/detail/(?P<pub_id>\\d+)/$', pub_detail, name='pub_detail'), url(
'notice/list/(?P<type>[\\w-]+)/$', list, name='notice_list'), url(
'notice/detail/(?P<notice_id>\\d+)/$', notice_detail, name=
'notice_detail'), url('protocol/list/$', protocol_list, name=
'protocol_list'), url('^summernote/', include('django_summernote.urls')
), url('^admin/', admin.site.urls)]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.
STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT
)
| from django.conf import settings
from django.conf.urls import url, include
from django.conf.urls.static import static
from django.contrib import admin
from .views import home, profile
from member.views import member
from publication.views import publication, pub_detail
from notice.views import list, notice_detail
from research.views import research
from protocol.views import protocol_list
urlpatterns = [
url(r'^$', home, name='home'),
url(r'prof/profile/$', profile, name='profile'),
url(r'members/$', member, name='member'),
url(r'researches/$', research, name='research'),
url(r'pub/(?P<type>[\w-]+)/$', publication, name='publication'),
url(r'pub/detail/(?P<pub_id>\d+)/$', pub_detail, name='pub_detail'),
url(r'notice/list/(?P<type>[\w-]+)/$', list, name='notice_list'),
url(r'notice/detail/(?P<notice_id>\d+)/$', notice_detail, name='notice_detail'),
url(r'protocol/list/$', protocol_list, name="protocol_list"),
url(r'^summernote/', include('django_summernote.urls')),
url(r'^admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
0,
1,
2,
3,
4
] |
1,106 | ac178d4e009a40bde5d76e854edc6f6ae8422610 | <mask token>
| <mask token>
def get_random_player(file_name):
def need_s(num):
return 's' if num != 1 else ''
csv.field_size_limit(sys.maxsize)
res = pd.read_csv(file_name, header=None)
r = np.random.randint(0, len(res.values))
arr = ast.literal_eval(res.values[r][1])
player = players.find_player_by_id(res.values[r][0])['full_name']
print(f'{player} selected.')
r_idx = np.random.randint(0, len(arr))
game = arr[r_idx]
x = (
f'On {game[0]}, {player} scored {game[-1]} point{need_s(game[-1])}, dished out {game[16]} assist{need_s(game[16])}, grabbed {game[15]} rebound{need_s(game[15])}, had {game[17]} steal{need_s(game[17])}, and had {game[18]} block{need_s(game[18])}.'
)
print(x)
return player, arr
| import ast
import csv
import numpy as np
import pandas as pd
import sys
from nba_api.stats.static import players
def get_random_player(file_name):
def need_s(num):
return 's' if num != 1 else ''
csv.field_size_limit(sys.maxsize)
res = pd.read_csv(file_name, header=None)
r = np.random.randint(0, len(res.values))
arr = ast.literal_eval(res.values[r][1])
player = players.find_player_by_id(res.values[r][0])['full_name']
print(f'{player} selected.')
r_idx = np.random.randint(0, len(arr))
game = arr[r_idx]
x = (
f'On {game[0]}, {player} scored {game[-1]} point{need_s(game[-1])}, dished out {game[16]} assist{need_s(game[16])}, grabbed {game[15]} rebound{need_s(game[15])}, had {game[17]} steal{need_s(game[17])}, and had {game[18]} block{need_s(game[18])}.'
)
print(x)
return player, arr
| # file with function to randomly select user from all of the data, all of the games
import ast
import csv
import numpy as np
import pandas as pd
import sys
from nba_api.stats.static import players
# some fun little work to get a random player
def get_random_player(file_name):
def need_s(num):
return 's' if num!=1 else ''
csv.field_size_limit(sys.maxsize)
# the rows are really long!
res = pd.read_csv(file_name, header=None)
r = np.random.randint(0, len(res.values))
arr = ast.literal_eval(res.values[r][1])
player = players.find_player_by_id(res.values[r][0])['full_name']
print(f'{player} selected.')
r_idx = np.random.randint(0, len(arr))
game = arr[r_idx]
x = f'On {game[0]}, {player} scored {game[-1]} point{need_s(game[-1])}, dished out '\
f'{game[16]} assist{need_s(game[16])}, grabbed {game[15]} rebound{need_s(game[15])}, '\
f'had {game[17]} steal{need_s(game[17])}, and had {game[18]} block{need_s(game[18])}.'
print(x)
return player, arr | null | [
0,
1,
2,
3
] |
1,107 | 91f83adbe01e2d8070f9286031b77eae71beb83e | <mask token>
| def maths(num):
int(num)
if num % 5 == 0 and num % 3 == 0:
print('bizzfizz')
elif num % 3 == 0:
print('fizz')
elif num % 5 == 0:
print('bizz')
else:
print(num)
<mask token>
| def maths(num):
int(num)
if num % 5 == 0 and num % 3 == 0:
print('bizzfizz')
elif num % 3 == 0:
print('fizz')
elif num % 5 == 0:
print('bizz')
else:
print(num)
<mask token>
maths(int(value))
| def maths(num):
int(num)
if num % 5 == 0 and num % 3 == 0:
print('bizzfizz')
elif num % 3 == 0:
print('fizz')
elif num % 5 == 0:
print('bizz')
else:
print(num)
value = input('enter the value ')
maths(int(value))
| def maths(num):
int(num)
if num % 5 == 0 and num % 3 == 0:
print("bizzfizz")
elif num % 3 == 0:
print("fizz")
elif num % 5 == 0:
print("bizz")
else:
print(num)
value=input("enter the value ")
maths(int(value)) | [
0,
1,
2,
3,
4
] |
1,108 | b874bfe9590a3eaff4298d6f9cc72be92000dc30 | <mask token>
def int_installs(x):
try:
return int(x.replace(',', '').replace('+', ''))
except:
raise ValueError('Cannot transform to int.')
def test_int_install_1():
"""Unit test to showcase functionality of int of int
"""
expected_output_price = 65000
output_price = int_installs('65000')
assert math.fabs(output_price - expected_output_price
) < ROUND_OFF_ERROR, 'Should show that the installs is 65000.'
<mask token>
| <mask token>
def int_installs(x):
try:
return int(x.replace(',', '').replace('+', ''))
except:
raise ValueError('Cannot transform to int.')
def test_int_install_1():
"""Unit test to showcase functionality of int of int
"""
expected_output_price = 65000
output_price = int_installs('65000')
assert math.fabs(output_price - expected_output_price
) < ROUND_OFF_ERROR, 'Should show that the installs is 65000.'
def test_int_install_2():
"""Unit test to showcase functionality of int of string with right format
"""
expected_output_price = 65000
output_price = int_installs('+65,000')
assert math.fabs(output_price - expected_output_price
) < ROUND_OFF_ERROR, 'Should show that the installs is 65000.'
def test_int_install_3():
"""Unit test to showcase functionality of int of strong with wrong format
"""
with pytest.raises(ValueError):
int_installs('$65000')
| <mask token>
ROUND_OFF_ERROR = 0.001
def int_installs(x):
try:
return int(x.replace(',', '').replace('+', ''))
except:
raise ValueError('Cannot transform to int.')
def test_int_install_1():
"""Unit test to showcase functionality of int of int
"""
expected_output_price = 65000
output_price = int_installs('65000')
assert math.fabs(output_price - expected_output_price
) < ROUND_OFF_ERROR, 'Should show that the installs is 65000.'
def test_int_install_2():
"""Unit test to showcase functionality of int of string with right format
"""
expected_output_price = 65000
output_price = int_installs('+65,000')
assert math.fabs(output_price - expected_output_price
) < ROUND_OFF_ERROR, 'Should show that the installs is 65000.'
def test_int_install_3():
"""Unit test to showcase functionality of int of strong with wrong format
"""
with pytest.raises(ValueError):
int_installs('$65000')
| <mask token>
import math
import pytest
ROUND_OFF_ERROR = 0.001
def int_installs(x):
try:
return int(x.replace(',', '').replace('+', ''))
except:
raise ValueError('Cannot transform to int.')
def test_int_install_1():
"""Unit test to showcase functionality of int of int
"""
expected_output_price = 65000
output_price = int_installs('65000')
assert math.fabs(output_price - expected_output_price
) < ROUND_OFF_ERROR, 'Should show that the installs is 65000.'
def test_int_install_2():
"""Unit test to showcase functionality of int of string with right format
"""
expected_output_price = 65000
output_price = int_installs('+65,000')
assert math.fabs(output_price - expected_output_price
) < ROUND_OFF_ERROR, 'Should show that the installs is 65000.'
def test_int_install_3():
"""Unit test to showcase functionality of int of strong with wrong format
"""
with pytest.raises(ValueError):
int_installs('$65000')
| """Unit test for int install
"""
import math
import pytest
ROUND_OFF_ERROR = 0.001
def int_installs(x):
try:
return int(x.replace(',', '').replace('+', ''))
except:
raise ValueError("Cannot transform to int.")
def test_int_install_1():
"""Unit test to showcase functionality of int of int
"""
expected_output_price = 65000
output_price = int_installs('65000')
assert math.fabs(output_price - expected_output_price) < ROUND_OFF_ERROR, \
"""Should show that the installs is 65000."""
def test_int_install_2():
"""Unit test to showcase functionality of int of string with right format
"""
expected_output_price = 65000
output_price = int_installs('+65,000')
assert math.fabs(output_price - expected_output_price) < ROUND_OFF_ERROR, \
"""Should show that the installs is 65000."""
def test_int_install_3():
"""Unit test to showcase functionality of int of strong with wrong format
"""
with pytest.raises(ValueError):
int_installs('$65000') | [
2,
4,
5,
6,
7
] |
1,109 | 54a705de2597140a72e47f5afe86614b619461b7 | <mask token>
| <mask token>
urlpatterns = [url('^coffeeshops/(\\d+)$', ShopView.as_view()), url(
'^coffeeshops$', ShopListView.as_view())]
| from django.conf.urls import url
from . import views
from .views import ShopView, ShopListView
urlpatterns = [url('^coffeeshops/(\\d+)$', ShopView.as_view()), url(
'^coffeeshops$', ShopListView.as_view())]
| from django.conf.urls import url
from . import views
from .views import ShopView, ShopListView
urlpatterns = [
url(r'^coffeeshops/(\d+)$', ShopView.as_view()),
url(r'^coffeeshops$', ShopListView.as_view()),
]
| null | [
0,
1,
2,
3
] |
1,110 | 21d07c2b80aa00d0c75da342d37195b6829593b6 | <mask token>
| <mask token>
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.getLogger('crawl').setLevel(logging.INFO)
logging.getLogger('elasticsearch').setLevel(logging.ERROR)
es = Elasticsearch()
crawl.crawl_domain(es, 'aaronparecki.com')
| import crawl
import logging
from elasticsearch import Elasticsearch
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.getLogger('crawl').setLevel(logging.INFO)
logging.getLogger('elasticsearch').setLevel(logging.ERROR)
es = Elasticsearch()
crawl.crawl_domain(es, 'aaronparecki.com')
| #!/usr/bin/env python
import crawl
import logging
from elasticsearch import Elasticsearch
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
logging.getLogger("crawl").setLevel(logging.INFO)
logging.getLogger("elasticsearch").setLevel(logging.ERROR)
es = Elasticsearch()
crawl.crawl_domain(es, "aaronparecki.com") | null | [
0,
1,
2,
3
] |
1,111 | 01b9706966007c44aa19d8249fbcaee5b511786a | <mask token>
| <mask token>
for item in data['comments']:
sum = sum + int(item['count'])
print(sum)
| <mask token>
url = 'http://py4e-data.dr-chuck.net/comments_147422.json'
handle = urllib.request.urlopen(url)
data = handle.read().decode()
data = json.loads(data)
sum = 0
for item in data['comments']:
sum = sum + int(item['count'])
print(sum)
| import urllib.request, urllib.parse, urllib.error
import json
import ssl
url = 'http://py4e-data.dr-chuck.net/comments_147422.json'
handle = urllib.request.urlopen(url)
data = handle.read().decode()
data = json.loads(data)
sum = 0
for item in data['comments']:
sum = sum + int(item['count'])
print(sum)
| import urllib.request, urllib.parse, urllib.error
import json
import ssl
# Retrieve json data into Python dictionary
url = "http://py4e-data.dr-chuck.net/comments_147422.json"
handle = urllib.request.urlopen(url)
data = handle.read().decode()
data = json.loads(data)
# Calculate total sum of counts
sum = 0
for item in data['comments']:
sum = sum + int(item['count'])
print(sum)
| [
0,
1,
2,
3,
4
] |
1,112 | 3e84265b7c88fc45bc89868c4339fe37dcc7d738 | <mask token>
| x *= 2
<mask token>
| #!/usr/bin/env python
x *= 2
"""run = 0
while(run < 10):
[TAB]x = (first number in sequence)
[TAB](your code here)
[TAB]run += 1"""
| null | null | [
0,
1,
2
] |
1,113 | f15a0956c4aa27da861f9bccbeff7a6b6a909b73 | <mask token>
| <mask token>
with open('credentials_as.json', encoding='utf-8') as F:
credentials = json.loads(F.read())
<mask token>
print(df)
| <mask token>
with open('credentials_as.json', encoding='utf-8') as F:
credentials = json.loads(F.read())
db_schema = None
db = Database(credentials=credentials)
<mask token>
fn = MultiplyByFactor(input_items=['orgoccupancycount', 'occupancycount'],
factor=2, output_items=['adjusted_orgoccupancycount',
'adjusted_occupancycount'])
df = fn.execute_local_test(db=db, db_schema=db_schema, generate_days=1,
to_csv=True)
print(df)
| import datetime as dt
import json
import pandas as pd
import numpy as np
from sqlalchemy import Column, Integer, String, Float, DateTime, Boolean, func
from iotfunctions.base import BaseTransformer
from iotfunctions.metadata import EntityType
from iotfunctions.db import Database
from iotfunctions import ui
with open('credentials_as.json', encoding='utf-8') as F:
credentials = json.loads(F.read())
db_schema = None
db = Database(credentials=credentials)
from custom.multiplybyfactor import MultiplyByFactor
fn = MultiplyByFactor(input_items=['orgoccupancycount', 'occupancycount'],
factor=2, output_items=['adjusted_orgoccupancycount',
'adjusted_occupancycount'])
df = fn.execute_local_test(db=db, db_schema=db_schema, generate_days=1,
to_csv=True)
print(df)
| null | [
0,
1,
2,
3
] |
1,114 | 3ac02308959749b8cd264e660c3d6334fd385fd4 | #!/usr/bin/env python
#-------------------------------------------------------------------------------
#
# Circle finder.
#
# Rowan Leeder
#
#-------------------------------------------------------------------------------
#
# Listens on the 'scan' and 'base_scan' topics. These are the pioneers SICK
# topic and Stage's scan topic respectively.
#
# The program strips out noise samples and attempts to match circles to the
# remaining samples.
#
# Any circle that is found is then published on the 'circles' topic in a
# circleArray message.
#
# The circleArray and circleEntry messages are defined in the msg\ folder.
#
#-------------------------------------------------------------------------------
#
# Compile Commands:
#
# First run 'rosmake' in the base directory. If you change the messages in any
# way then you will have to close all ros components using the topic (basically
# everything) and then recompile with rosmake. If you add a message, add an
# entry to the manifest file.
#
# To run this program do 'rosrun circleFinder finder.py'.
#
# Exit with Ctrl + C.
#
# Listen in with 'rostopic echo circles'
#
# If you want to see a plot of the data, set the 'plot' variable to True.
#
#-------------------------------------------------------------------------------
# Known Bugs:
# If the laser scan covers 360 degrees then you might get two circles at the
# same spot. This is becuase i haven't joined the two ends of the scan together.
# This will not be an issue with the robots as they only take 180 degree scans.
# Ros imports.
import roslib;
roslib.load_manifest('circleFinder')
import rospy
from sensor_msgs.msg import LaserScan
from roslib.rostime import Duration
# Python lib imports.
import math
import time
# Message imports
from circleFinder.msg import *
# Local file imports.
from placment_funcs import *
from data_parser import *
# plot functions are in here. Remove if you dont want and you might free up
# some memory.
from plot_funcs import *
#-------------------------------------------------------------------------------
# Function: callback
#
# Thread created when a laser scan is received on a listening topic and extract
# and publish a specified number of circle from the data.
#
#-------------------------------------------------------------------------------
#
# args - An array of arguments. The form is:
# max_dist - the maximum distance to look for circles. If a sample or
# circle edge goes beyond this then it will be ignored.
# max_rad - The maximum radius that a valid circle can have.
# min_rad - The minimum radius that a valid circle can have.
# grad_tol - The tolerance used in the prune function.
# split_multi - The multiplier used in the split function
#
# publish - A circleArray object containing the circle data in an array of
# circleEntry objects. These classes are defined in the
# circleFinder/msg path.
#-------------------------------------------------------------------------------
def callback(data, args):
tStart = time.time()
pub = args[0]
max_dist = args[1]
max_rad = args[2]
min_rad = args[3]
grad_tol = args[4]
split_multi = args[5]
prune_lines = args[6]
plot = args[7]
# Get possible circle data.
possibles = dataParser(data,max_dist, grad_tol, split_multi, prune_lines)
# Calculate the circle info from that data.
circles = []
for i in possibles:
current = matchCirc(list(i), False)
if current is not None:
#prune out any circles that are too large or small
if current[1] > max_rad or \
current[1] < min_rad or \
math.sqrt(math.pow(current[0][0],2) + math.pow(current[0][1],2)) + current[1] > max_dist:
pass
else:
circles.append(current)
# Setup circleArray and publish found circles.
ret = []
for i in circles:
c = circleEntry()
c.x = i[0][0]
c.y = i[0][1]
c.distance = math.sqrt(i[0][0]*i[0][0] + i[0][1] * i[0][1])
c.theta = math.atan2(i[0][1], i[0][0])
c.radius = i[1]
ret.append(c)
m = circleArray()
m.broadcastTime = rospy.get_rostime()
m.duration = time.time() - tStart
m.array = ret
if not rospy.is_shutdown():
pub.publish(m)
if plot:
import matplotlib.pyplot as plt
plotWorld(data, 30, True, 'ro')
for i in circles:
plotCircle((i[0])[0],(i[0])[1],i[1])
for i in possibles:
for u in i:
plt.plot(u[0], u[1], 'bo')
plt.plot(0,0,'ro')
plotAxis(8,-8,8,-8,4)
plt.axis([-8,8,-8,8])
plt.show()
#-------------------------------------------------------------------------------
# Function: main
#
# Sets up the callback function and then idles.
#
# Program arguments are inside.
#
#-------------------------------------------------------------------------------
if __name__ == '__main__':
#print dir()
# the publiser
pub = rospy.Publisher("circles", circleArray)
# The maximum distance from the origin that a sample point or circle edge
# can be before they are considered invalid.
max_dist = 7
# The maximum radius a circle can be before it is considered invalid.
max_rad = 0.25
# The maximum radius a circle can be before it is considered invalid.
min_rad = 0
# See the prune function in data_parser.py
grad_tol = 0.3
# See the split function in data_parser.py
split_multi = 2.5
# If true then an attempt to remove straight edges from the data will be
# made.
prune_lines = True
# Plot flag.
plot = False
import sys
if (len(sys.argv) > 1):
for i in sys.argv:
if i == '--plot':
plot = True
elif i == '--no-line-pruning':
prune_lines = False
args = [pub, max_dist, max_rad, min_rad, grad_tol, split_multi, prune_lines , plot]
print "--------------------------------------------------------------------------------"
print "Circle Finder"
print
print "--------------------------------------------------------------------------------"
print "Command line arguments are:"
print " --plot Will cause the outcome of the first scan to be plotted."
print " --no-line-pruning Will prevent straight lines from being removed from the"
print " scan."
print
print "--------------------------------------------------------------------------------"
print "Starting circle finder with arguments:"
print
print " Publisher: " , pub
print " Maximum Distance: " , max_dist
print " Maximum Radius: " , max_rad
print " Minimum Radius: " , min_rad
print " Gradient Tolerance: " , grad_tol
print " Split Multiplier: " , split_multi
print " Remove Lines: " , prune_lines
print " Plot: " , plot
print
print "--------------------------------------------------------------------------------"
print "To increase speed, the listening thread is not verbose."
print "Ctrl+C to exit."
rospy.init_node('circles', anonymous=True)
rospy.Subscriber("base_scan",LaserScan, callback, callback_args=args)
rospy.Subscriber("scan",LaserScan, callback, callback_args=args)
rospy.spin()
| null | null | null | null | [
0
] |
1,115 | 2d7e3a70f1c25bbc7ad5eafa006ab12c978eaec4 | import random
import sys
import numpy
from gensim import corpora
from coherence.wn import WordNetEvaluator
from topic.topic import Topic
from nltk.corpus import wordnet as wn
from nltk.corpus import reuters
from nltk.corpus import brown
# python random_tc.py <dname> <word_count> <sample_times> <output>
# <word_count>: the number of words that need to be randomly generated
# <sample_times>: the repetition times of the topic coherence calculation
if len(sys.argv) <= 1:
dname = "reuters_LDA"
else:
dname = sys.argv[1]
if len(sys.argv) <= 2:
word_count = 10
else:
word_count = int(sys.argv[2])
if len(sys.argv) <= 3:
sample_times = 5
else:
sample_times = int(sys.argv[3])
if len(sys.argv) <= 4:
tcmethod = "path"
else:
tcmethod = sys.argv[4]
print tcmethod
if len(sys.argv) <= 5:
ic = False
else:
if sys.argv[5] == "ic":
ic = True
else:
ic = False
dictionary = corpora.Dictionary.load(dname + "/dict.dict")
print "Load dictionary",
print dictionary
corpus_fname = dname + '/bow_corpus.mm'
print "Load Corpus File " + corpus_fname
corpus = corpora.MmCorpus(corpus_fname)
# transfer each doc in the corpus into a dictionary
corpus_dict = []
for doc in corpus:
corpus_dict.append(dict(doc))
dictlen = len(dictionary)
tc = WordNetEvaluator()
tc_means = []
tc_medians = []
words_list = []
ofilemean = open(dname + "/"+tcmethod+"_mean_rand_"+str(word_count)+".txt", "w")
ofilemedian = open(dname + "/"+tcmethod+"_median_rand_"+str(word_count)+".txt", "w")
if ic:
if dname == "reuters_LDA":
src_ic = wn.ic(reuters, False, 0.0)
else:
src_ic = wn.ic(brown, False, 0.0)
for i in range(sample_times):
random_words = []
# generate random numbers
for n in range(word_count):
word = random.randint(1, dictlen-1)
while word in random_words:
word = random.randint(0, dictlen-1)
random_words.append(word)
keylist = []
for key in random_words:
keylist.append(dictionary[key])
words_list.append(keylist)
randt = Topic()
for key in keylist:
randt.add((key, 0.1))
# calculate topic coherence based on randomly generated words
if ic:
result = tc.evaluate_ic(randt, word_count, src_ic, tcmethod, not_write=True)
else:
result = tc.evaluate(randt, word_count, tcmethod, not_write=True)
if (not numpy.isnan(result[1])) and result[1] < 10000:
rmean = result[1]
else:
rmean = 0.0
if (not numpy.isnan(result[2])) and result[1] < 10000:
rmedian = result[2]
else:
rmedian = 0.0
tc_means.append(rmean)
tc_medians.append(rmedian)
ofilemean.write("Mean: " + str(numpy.mean(tc_means)) + "\n")
ofilemean.write("SD: " + str(numpy.std(tc_means)) + "\n\n")
for item in tc_means:
ofilemean.write(str(item) + "\n")
for item in words_list:
ofilemean.write(str(item) + "\n")
ofilemedian.write("Mean: " + str(numpy.mean(tc_medians)) + "\n")
ofilemedian.write("SD: " + str(numpy.std(tc_medians)) + "\n\n")
for item in tc_medians:
ofilemedian.write(str(item) + "\n")
for item in words_list:
ofilemedian.write(str(item) + "\n")
| null | null | null | null | [
0
] |
1,116 | ec224924206c41cf8203c6aa8002ddf6b0e70e9b | <mask token>
class EngageScraper(ABC):
def __init__(self, tz_string):
super().__init__()
self._agenda_locations = []
self._tz = timezone(tz_string)
@property
def agenda_locations(self):
return self._agenda_locations
@agenda_locations.setter
def agenda_locations(self, locations):
self._agenda_locations = locations
@abstractmethod
def get_available_agendas(self):
"""
Method to determine what agendas are available.
Sets the self._agenda_locations property
In a typical HTML scraper, these resources would be HTTP URLs
"""
pass
<mask token>
@abstractmethod
def _process_agenda(self, agenda_data, meeting_id):
"""
process_agenda takes one agenda document (for instance HTML document) data.
A processed agenda will have to process each of its items. Each agenda item might
be at a different location or contained within an agenda. If they are contained within
the agenda, progress to process_agenda_item with its data. If not, scrape_agenda_item should be
called with the location of the agenda_item.
The result of process agenda will be a dict that can be saved by store_agenda and store_agenda_items
"""
pass
@abstractmethod
def _scrape_agenda_item(self, agenda_item_location):
"""
Takes a location and produces the data from the item and calls process_agenda_item
"""
pass
<mask token>
<mask token>
@abstractmethod
def _store_agenda_items(self, agenda_dict, agenda_saved):
"""
Calls to the DB should be here for agenda item content
"""
pass
| <mask token>
class EngageScraper(ABC):
def __init__(self, tz_string):
super().__init__()
self._agenda_locations = []
self._tz = timezone(tz_string)
@property
def agenda_locations(self):
return self._agenda_locations
@agenda_locations.setter
def agenda_locations(self, locations):
self._agenda_locations = locations
@abstractmethod
def get_available_agendas(self):
"""
Method to determine what agendas are available.
Sets the self._agenda_locations property
In a typical HTML scraper, these resources would be HTTP URLs
"""
pass
@abstractmethod
def scrape(self):
"""
Scrape processes all agendas in self._agenda_locations
It calls process agenda on all items in _agenda_locations with
data downloaded from those locations.
The result of scrape is the stored agendas and agenda items.
"""
pass
@abstractmethod
def _process_agenda(self, agenda_data, meeting_id):
"""
process_agenda takes one agenda document (for instance HTML document) data.
A processed agenda will have to process each of its items. Each agenda item might
be at a different location or contained within an agenda. If they are contained within
the agenda, progress to process_agenda_item with its data. If not, scrape_agenda_item should be
called with the location of the agenda_item.
The result of process agenda will be a dict that can be saved by store_agenda and store_agenda_items
"""
pass
@abstractmethod
def _scrape_agenda_item(self, agenda_item_location):
"""
Takes a location and produces the data from the item and calls process_agenda_item
"""
pass
@abstractmethod
def _process_agenda_item(self, agenda_item_data, agenda_item_id,
meeting_id, meeting_time):
"""
The result of process agenda item will be a dict that can be stored by store_agenda_item
"""
pass
<mask token>
@abstractmethod
def _store_agenda_items(self, agenda_dict, agenda_saved):
"""
Calls to the DB should be here for agenda item content
"""
pass
| <mask token>
class EngageScraper(ABC):
def __init__(self, tz_string):
super().__init__()
self._agenda_locations = []
self._tz = timezone(tz_string)
@property
def agenda_locations(self):
return self._agenda_locations
@agenda_locations.setter
def agenda_locations(self, locations):
self._agenda_locations = locations
@abstractmethod
def get_available_agendas(self):
"""
Method to determine what agendas are available.
Sets the self._agenda_locations property
In a typical HTML scraper, these resources would be HTTP URLs
"""
pass
@abstractmethod
def scrape(self):
"""
Scrape processes all agendas in self._agenda_locations
It calls process agenda on all items in _agenda_locations with
data downloaded from those locations.
The result of scrape is the stored agendas and agenda items.
"""
pass
@abstractmethod
def _process_agenda(self, agenda_data, meeting_id):
"""
process_agenda takes one agenda document (for instance HTML document) data.
A processed agenda will have to process each of its items. Each agenda item might
be at a different location or contained within an agenda. If they are contained within
the agenda, progress to process_agenda_item with its data. If not, scrape_agenda_item should be
called with the location of the agenda_item.
The result of process agenda will be a dict that can be saved by store_agenda and store_agenda_items
"""
pass
@abstractmethod
def _scrape_agenda_item(self, agenda_item_location):
"""
Takes a location and produces the data from the item and calls process_agenda_item
"""
pass
@abstractmethod
def _process_agenda_item(self, agenda_item_data, agenda_item_id,
meeting_id, meeting_time):
"""
The result of process agenda item will be a dict that can be stored by store_agenda_item
"""
pass
@abstractmethod
def _store_agenda(self, processed_agenda, committee):
"""
Calls to DB should be here for the main agenda content
"""
pass
@abstractmethod
def _store_agenda_items(self, agenda_dict, agenda_saved):
"""
Calls to the DB should be here for agenda item content
"""
pass
| from abc import ABC, abstractmethod, abstractproperty
from pytz import timezone
class EngageScraper(ABC):
def __init__(self, tz_string):
super().__init__()
self._agenda_locations = []
self._tz = timezone(tz_string)
@property
def agenda_locations(self):
return self._agenda_locations
@agenda_locations.setter
def agenda_locations(self, locations):
self._agenda_locations = locations
@abstractmethod
def get_available_agendas(self):
"""
Method to determine what agendas are available.
Sets the self._agenda_locations property
In a typical HTML scraper, these resources would be HTTP URLs
"""
pass
@abstractmethod
def scrape(self):
"""
Scrape processes all agendas in self._agenda_locations
It calls process agenda on all items in _agenda_locations with
data downloaded from those locations.
The result of scrape is the stored agendas and agenda items.
"""
pass
@abstractmethod
def _process_agenda(self, agenda_data, meeting_id):
"""
process_agenda takes one agenda document (for instance HTML document) data.
A processed agenda will have to process each of its items. Each agenda item might
be at a different location or contained within an agenda. If they are contained within
the agenda, progress to process_agenda_item with its data. If not, scrape_agenda_item should be
called with the location of the agenda_item.
The result of process agenda will be a dict that can be saved by store_agenda and store_agenda_items
"""
pass
@abstractmethod
def _scrape_agenda_item(self, agenda_item_location):
"""
Takes a location and produces the data from the item and calls process_agenda_item
"""
pass
@abstractmethod
def _process_agenda_item(self, agenda_item_data, agenda_item_id,
meeting_id, meeting_time):
"""
The result of process agenda item will be a dict that can be stored by store_agenda_item
"""
pass
@abstractmethod
def _store_agenda(self, processed_agenda, committee):
"""
Calls to DB should be here for the main agenda content
"""
pass
@abstractmethod
def _store_agenda_items(self, agenda_dict, agenda_saved):
"""
Calls to the DB should be here for agenda item content
"""
pass
| null | [
8,
10,
11,
12
] |
1,117 | 92a50bcdbb4c03d1a4813a93c2e0986250516f14 | class Persona:
<mask token>
<mask token>
def hola(self):
print('Hola Mundo')
class Empleado(Persona):
def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,
residencia_empleado):
super().__init__(nombre_empleado, edad_empleado, residencia_empleado)
self.salario = salario
self.antiguedad_persona = antiguedad
super().hola()
def descripcion(self):
super().descripcion()
print('Salario: ', self.salario, 'Antiguedad: ', self.
antiguedad_persona)
<mask token>
| class Persona:
def __init__(self, nombre, edad, lugar_residencia):
self.nombre = nombre
self.edad = edad
self.residencia = lugar_residencia
<mask token>
def hola(self):
print('Hola Mundo')
class Empleado(Persona):
def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,
residencia_empleado):
super().__init__(nombre_empleado, edad_empleado, residencia_empleado)
self.salario = salario
self.antiguedad_persona = antiguedad
super().hola()
def descripcion(self):
super().descripcion()
print('Salario: ', self.salario, 'Antiguedad: ', self.
antiguedad_persona)
<mask token>
| class Persona:
def __init__(self, nombre, edad, lugar_residencia):
self.nombre = nombre
self.edad = edad
self.residencia = lugar_residencia
def descripcion(self):
print('Nombre: ', self.nombre, ' Edad: ', self.edad,
' Lugar de residencia: ', self.residencia)
def hola(self):
print('Hola Mundo')
class Empleado(Persona):
def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,
residencia_empleado):
super().__init__(nombre_empleado, edad_empleado, residencia_empleado)
self.salario = salario
self.antiguedad_persona = antiguedad
super().hola()
def descripcion(self):
super().descripcion()
print('Salario: ', self.salario, 'Antiguedad: ', self.
antiguedad_persona)
<mask token>
Antonio.descripcion()
print(isinstance(Antonio, Empleado))
| class Persona:
def __init__(self, nombre, edad, lugar_residencia):
self.nombre = nombre
self.edad = edad
self.residencia = lugar_residencia
def descripcion(self):
print('Nombre: ', self.nombre, ' Edad: ', self.edad,
' Lugar de residencia: ', self.residencia)
def hola(self):
print('Hola Mundo')
class Empleado(Persona):
def __init__(self, salario, antiguedad, nombre_empleado, edad_empleado,
residencia_empleado):
super().__init__(nombre_empleado, edad_empleado, residencia_empleado)
self.salario = salario
self.antiguedad_persona = antiguedad
super().hola()
def descripcion(self):
super().descripcion()
print('Salario: ', self.salario, 'Antiguedad: ', self.
antiguedad_persona)
Antonio = Persona('Alex', 23, 'Merida')
Antonio.descripcion()
print(isinstance(Antonio, Empleado))
|
#Aplicacion de la funcion super()
class Persona():
def __init__(self,nombre,edad,lugar_residencia):
self.nombre = nombre
self.edad = edad
self.residencia = lugar_residencia
def descripcion(self):
print("Nombre: ",self.nombre," Edad: ", self.edad," Lugar de residencia: ",self.residencia)
def hola(self):
print("Hola Mundo")
class Empleado(Persona):
def __init__(self,salario,antiguedad,nombre_empleado,edad_empleado,residencia_empleado):
super().__init__(nombre_empleado,edad_empleado,residencia_empleado)#Hace la llamada al constructor de la clase padre que esta heredando
self.salario = salario
self.antiguedad_persona=antiguedad
super().hola()
def descripcion(self):
super().descripcion()
print("Salario: " ,self.salario, "Antiguedad: ",self.antiguedad_persona)
Antonio = Persona("Alex",23,"Merida")
Antonio.descripcion()
print(isinstance(Antonio,Empleado))
#Principio de sustitucion
#consiste en plantearse las siguientes preguntas:
#es siempre un o una
#funcion isinstance()--> nos informa si un objeto es instancia de una clase determinada devuelve verdadero o falso
| [
5,
6,
8,
9,
10
] |
1,118 | 6553312c9655c821444ff5f60e4d68c7fc08bd08 | <mask token>
def get_basename(name, split_num):
return f'{name}.split{split_num:d}'
<mask token>
def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,
batch_norm, l1_factor, l2_factor, optimizer):
"""
Attempt to load the specified model (including architecture, weights, and
even optimizer states). If this is not possible, build a new model from
scratch.
"""
basename = get_basename(name, split_num)
model_filename_fmt = get_model_filename_fmt(basename)
model_filename = model_filename_fmt.format(epoch=resume_from_epoch)
checkpoint_path = os.path.join(checkpoint_dir, model_filename)
if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):
click.secho(
f"Found model checkpoint '{checkpoint_path}'. Resuming from epoch {resume_from_epoch}."
, fg='green')
model = load_model(checkpoint_path)
initial_epoch = resume_from_epoch
else:
click.secho(
f"Could not load model checkpoint '{checkpoint_path}' or `resume_from_epoch == 0`. Building new model."
, fg='yellow')
model = build_model(output_dim=1, batch_norm=batch_norm,
kernel_regularizer=l1_l2(l1_factor, l2_factor))
model.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
initial_epoch = 0
return model, initial_epoch
def build_callbacks(name, split_num, summary_dir, checkpoint_dir,
checkpoint_period):
basename = get_basename(name, split_num)
model_filename_fmt = get_model_filename_fmt(basename)
tensorboard_path = os.path.join(summary_dir, basename)
csv_path = os.path.join(summary_dir, f'{basename}.csv')
checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt)
callbacks = []
callbacks.append(TensorBoard(tensorboard_path, profile_batch=0))
callbacks.append(CSVLogger(csv_path, append=True))
callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period)
)
return callbacks
<mask token>
| <mask token>
def get_basename(name, split_num):
return f'{name}.split{split_num:d}'
def get_model_filename_fmt(basename):
return f'{basename}.{{epoch:02d}}.h5'
def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,
batch_norm, l1_factor, l2_factor, optimizer):
"""
Attempt to load the specified model (including architecture, weights, and
even optimizer states). If this is not possible, build a new model from
scratch.
"""
basename = get_basename(name, split_num)
model_filename_fmt = get_model_filename_fmt(basename)
model_filename = model_filename_fmt.format(epoch=resume_from_epoch)
checkpoint_path = os.path.join(checkpoint_dir, model_filename)
if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):
click.secho(
f"Found model checkpoint '{checkpoint_path}'. Resuming from epoch {resume_from_epoch}."
, fg='green')
model = load_model(checkpoint_path)
initial_epoch = resume_from_epoch
else:
click.secho(
f"Could not load model checkpoint '{checkpoint_path}' or `resume_from_epoch == 0`. Building new model."
, fg='yellow')
model = build_model(output_dim=1, batch_norm=batch_norm,
kernel_regularizer=l1_l2(l1_factor, l2_factor))
model.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
initial_epoch = 0
return model, initial_epoch
def build_callbacks(name, split_num, summary_dir, checkpoint_dir,
checkpoint_period):
basename = get_basename(name, split_num)
model_filename_fmt = get_model_filename_fmt(basename)
tensorboard_path = os.path.join(summary_dir, basename)
csv_path = os.path.join(summary_dir, f'{basename}.csv')
checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt)
callbacks = []
callbacks.append(TensorBoard(tensorboard_path, profile_batch=0))
callbacks.append(CSVLogger(csv_path, append=True))
callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period)
)
return callbacks
<mask token>
| <mask token>
def get_basename(name, split_num):
return f'{name}.split{split_num:d}'
def get_model_filename_fmt(basename):
return f'{basename}.{{epoch:02d}}.h5'
def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,
batch_norm, l1_factor, l2_factor, optimizer):
"""
Attempt to load the specified model (including architecture, weights, and
even optimizer states). If this is not possible, build a new model from
scratch.
"""
basename = get_basename(name, split_num)
model_filename_fmt = get_model_filename_fmt(basename)
model_filename = model_filename_fmt.format(epoch=resume_from_epoch)
checkpoint_path = os.path.join(checkpoint_dir, model_filename)
if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):
click.secho(
f"Found model checkpoint '{checkpoint_path}'. Resuming from epoch {resume_from_epoch}."
, fg='green')
model = load_model(checkpoint_path)
initial_epoch = resume_from_epoch
else:
click.secho(
f"Could not load model checkpoint '{checkpoint_path}' or `resume_from_epoch == 0`. Building new model."
, fg='yellow')
model = build_model(output_dim=1, batch_norm=batch_norm,
kernel_regularizer=l1_l2(l1_factor, l2_factor))
model.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
initial_epoch = 0
return model, initial_epoch
def build_callbacks(name, split_num, summary_dir, checkpoint_dir,
checkpoint_period):
basename = get_basename(name, split_num)
model_filename_fmt = get_model_filename_fmt(basename)
tensorboard_path = os.path.join(summary_dir, basename)
csv_path = os.path.join(summary_dir, f'{basename}.csv')
checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt)
callbacks = []
callbacks.append(TensorBoard(tensorboard_path, profile_batch=0))
callbacks.append(CSVLogger(csv_path, append=True))
callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period)
)
return callbacks
def make_plot_data(names, splits, summary_dir, pretty_name_mapping=None):
df_list = []
for name in names:
for split_num in splits:
basename = get_basename(name, split_num)
csv_path = os.path.join(summary_dir, f'{basename}.csv')
df = pd.read_csv(csv_path).assign(name=name, split=split_num)
df_list.append(df)
data = pd.concat(df_list, axis='index', sort=True).rename(columns=dict(
acc='train', val_acc='validation'))
if pretty_name_mapping is not None:
data = data.assign(name=data.name.replace(pretty_name_mapping))
wide_data = pd.melt(data, id_vars=['name', 'split', 'epoch'],
value_vars=['train', 'validation'], value_name='accuracy', var_name
='partition')
return wide_data
| <mask token>
import click
import os.path
import pandas as pd
from tensorflow.keras.models import load_model
from tensorflow.keras.regularizers import l1_l2
from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, TensorBoard
from zalando_classification.models import build_model
def get_basename(name, split_num):
return f'{name}.split{split_num:d}'
def get_model_filename_fmt(basename):
return f'{basename}.{{epoch:02d}}.h5'
def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,
batch_norm, l1_factor, l2_factor, optimizer):
"""
Attempt to load the specified model (including architecture, weights, and
even optimizer states). If this is not possible, build a new model from
scratch.
"""
basename = get_basename(name, split_num)
model_filename_fmt = get_model_filename_fmt(basename)
model_filename = model_filename_fmt.format(epoch=resume_from_epoch)
checkpoint_path = os.path.join(checkpoint_dir, model_filename)
if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):
click.secho(
f"Found model checkpoint '{checkpoint_path}'. Resuming from epoch {resume_from_epoch}."
, fg='green')
model = load_model(checkpoint_path)
initial_epoch = resume_from_epoch
else:
click.secho(
f"Could not load model checkpoint '{checkpoint_path}' or `resume_from_epoch == 0`. Building new model."
, fg='yellow')
model = build_model(output_dim=1, batch_norm=batch_norm,
kernel_regularizer=l1_l2(l1_factor, l2_factor))
model.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
initial_epoch = 0
return model, initial_epoch
def build_callbacks(name, split_num, summary_dir, checkpoint_dir,
checkpoint_period):
basename = get_basename(name, split_num)
model_filename_fmt = get_model_filename_fmt(basename)
tensorboard_path = os.path.join(summary_dir, basename)
csv_path = os.path.join(summary_dir, f'{basename}.csv')
checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt)
callbacks = []
callbacks.append(TensorBoard(tensorboard_path, profile_batch=0))
callbacks.append(CSVLogger(csv_path, append=True))
callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period)
)
return callbacks
def make_plot_data(names, splits, summary_dir, pretty_name_mapping=None):
df_list = []
for name in names:
for split_num in splits:
basename = get_basename(name, split_num)
csv_path = os.path.join(summary_dir, f'{basename}.csv')
df = pd.read_csv(csv_path).assign(name=name, split=split_num)
df_list.append(df)
data = pd.concat(df_list, axis='index', sort=True).rename(columns=dict(
acc='train', val_acc='validation'))
if pretty_name_mapping is not None:
data = data.assign(name=data.name.replace(pretty_name_mapping))
wide_data = pd.melt(data, id_vars=['name', 'split', 'epoch'],
value_vars=['train', 'validation'], value_name='accuracy', var_name
='partition')
return wide_data
| """Utils module."""
import click
import os.path
import pandas as pd
from tensorflow.keras.models import load_model
from tensorflow.keras.regularizers import l1_l2
from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint, TensorBoard
from zalando_classification.models import build_model
def get_basename(name, split_num):
return f"{name}.split{split_num:d}"
def get_model_filename_fmt(basename):
return f"{basename}.{{epoch:02d}}.h5"
def maybe_load_model(name, split_num, checkpoint_dir, resume_from_epoch,
batch_norm, l1_factor, l2_factor, optimizer):
"""
Attempt to load the specified model (including architecture, weights, and
even optimizer states). If this is not possible, build a new model from
scratch.
"""
basename = get_basename(name, split_num)
model_filename_fmt = get_model_filename_fmt(basename)
model_filename = model_filename_fmt.format(epoch=resume_from_epoch)
checkpoint_path = os.path.join(checkpoint_dir, model_filename)
if resume_from_epoch > 0 and os.path.isfile(checkpoint_path):
click.secho(f"Found model checkpoint '{checkpoint_path}'. "
f"Resuming from epoch {resume_from_epoch}.", fg='green')
model = load_model(checkpoint_path)
initial_epoch = resume_from_epoch
else:
click.secho(f"Could not load model checkpoint '{checkpoint_path}' "
"or `resume_from_epoch == 0`. Building new model.",
fg='yellow')
model = build_model(output_dim=1, batch_norm=batch_norm,
kernel_regularizer=l1_l2(l1_factor, l2_factor))
# optimizer = Adam(beta_1=0.5)
model.compile(loss='binary_crossentropy', optimizer=optimizer,
metrics=['accuracy'])
initial_epoch = 0
return model, initial_epoch
def build_callbacks(name, split_num, summary_dir, checkpoint_dir,
checkpoint_period):
basename = get_basename(name, split_num)
model_filename_fmt = get_model_filename_fmt(basename)
tensorboard_path = os.path.join(summary_dir, basename)
csv_path = os.path.join(summary_dir, f"{basename}.csv")
checkpoint_path = os.path.join(checkpoint_dir, model_filename_fmt)
callbacks = []
callbacks.append(TensorBoard(tensorboard_path, profile_batch=0))
callbacks.append(CSVLogger(csv_path, append=True))
callbacks.append(ModelCheckpoint(checkpoint_path, period=checkpoint_period))
return callbacks
def make_plot_data(names, splits, summary_dir, pretty_name_mapping=None):
df_list = []
for name in names:
for split_num in splits:
basename = get_basename(name, split_num)
csv_path = os.path.join(summary_dir, f"{basename}.csv")
df = pd.read_csv(csv_path).assign(name=name, split=split_num)
df_list.append(df)
data = pd.concat(df_list, axis="index", sort=True) \
.rename(columns=dict(acc="train", val_acc="validation"))
if pretty_name_mapping is not None:
data = data.assign(name=data.name.replace(pretty_name_mapping))
wide_data = pd.melt(data, id_vars=["name", "split", "epoch"],
value_vars=["train", "validation"],
value_name="accuracy", var_name="partition")
return wide_data
| [
3,
4,
5,
6,
7
] |
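A hedged sketch of how the helpers in the row above are typically wired together for one training run (x_train, y_train, the directory names and the 'adam' optimizer string are placeholder assumptions, not taken from the original file):
model, initial_epoch = maybe_load_model(name='baseline', split_num=0,
                                        checkpoint_dir='checkpoints', resume_from_epoch=0,
                                        batch_norm=True, l1_factor=0.0, l2_factor=1e-4,
                                        optimizer='adam')
callbacks = build_callbacks(name='baseline', split_num=0, summary_dir='summaries',
                            checkpoint_dir='checkpoints', checkpoint_period=5)
model.fit(x_train, y_train, epochs=20, initial_epoch=initial_epoch, callbacks=callbacks)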
1,119 | 5c291dbc241a80e7f2625ba338a4b9b3a3f3b2d0 | <mask token>
class TestRedshiftCreateClusterTrigger:
<mask token>
@pytest.mark.asyncio
@async_mock.patch(
'airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.async_conn'
)
async def test_redshift_create_cluster_trigger_run(self, mock_async_conn):
mock = async_mock.MagicMock()
mock_async_conn.__aenter__.return_value = mock
mock.get_waiter().wait = AsyncMock()
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=
TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=
TEST_AWS_CONN_ID)
generator = redshift_create_cluster_trigger.run()
response = await generator.asend(None)
assert response == TriggerEvent({'status': 'success', 'message':
'Cluster Created'})
| <mask token>
if sys.version_info < (3, 8):
from asynctest import CoroutineMock as AsyncMock, mock as async_mock
else:
from unittest import mock as async_mock
from unittest.mock import AsyncMock
<mask token>
class TestRedshiftCreateClusterTrigger:
def test_redshift_create_cluster_trigger_serialize(self):
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=
TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=
TEST_AWS_CONN_ID)
class_path, args = redshift_create_cluster_trigger.serialize()
assert class_path == 'airflow.providers.amazon.aws.triggers.redshift_cluster.RedshiftCreateClusterTrigger'
assert args['cluster_identifier'] == TEST_CLUSTER_IDENTIFIER
assert args['poll_interval'] == str(TEST_POLL_INTERVAL)
assert args['max_attempt'] == str(TEST_MAX_ATTEMPT)
assert args['aws_conn_id'] == TEST_AWS_CONN_ID
@pytest.mark.asyncio
@async_mock.patch(
'airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.async_conn'
)
async def test_redshift_create_cluster_trigger_run(self, mock_async_conn):
mock = async_mock.MagicMock()
mock_async_conn.__aenter__.return_value = mock
mock.get_waiter().wait = AsyncMock()
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=
TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=
TEST_AWS_CONN_ID)
generator = redshift_create_cluster_trigger.run()
response = await generator.asend(None)
assert response == TriggerEvent({'status': 'success', 'message':
'Cluster Created'})
| <mask token>
if sys.version_info < (3, 8):
from asynctest import CoroutineMock as AsyncMock, mock as async_mock
else:
from unittest import mock as async_mock
from unittest.mock import AsyncMock
TEST_CLUSTER_IDENTIFIER = 'test-cluster'
TEST_POLL_INTERVAL = 10
TEST_MAX_ATTEMPT = 10
TEST_AWS_CONN_ID = 'test-aws-id'
class TestRedshiftCreateClusterTrigger:
def test_redshift_create_cluster_trigger_serialize(self):
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=
TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=
TEST_AWS_CONN_ID)
class_path, args = redshift_create_cluster_trigger.serialize()
assert class_path == 'airflow.providers.amazon.aws.triggers.redshift_cluster.RedshiftCreateClusterTrigger'
assert args['cluster_identifier'] == TEST_CLUSTER_IDENTIFIER
assert args['poll_interval'] == str(TEST_POLL_INTERVAL)
assert args['max_attempt'] == str(TEST_MAX_ATTEMPT)
assert args['aws_conn_id'] == TEST_AWS_CONN_ID
@pytest.mark.asyncio
@async_mock.patch(
'airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.async_conn'
)
async def test_redshift_create_cluster_trigger_run(self, mock_async_conn):
mock = async_mock.MagicMock()
mock_async_conn.__aenter__.return_value = mock
mock.get_waiter().wait = AsyncMock()
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=
TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=
TEST_AWS_CONN_ID)
generator = redshift_create_cluster_trigger.run()
response = await generator.asend(None)
assert response == TriggerEvent({'status': 'success', 'message':
'Cluster Created'})
| from __future__ import annotations
import sys
import pytest
from airflow.providers.amazon.aws.triggers.redshift_cluster import RedshiftCreateClusterTrigger
from airflow.triggers.base import TriggerEvent
if sys.version_info < (3, 8):
from asynctest import CoroutineMock as AsyncMock, mock as async_mock
else:
from unittest import mock as async_mock
from unittest.mock import AsyncMock
TEST_CLUSTER_IDENTIFIER = 'test-cluster'
TEST_POLL_INTERVAL = 10
TEST_MAX_ATTEMPT = 10
TEST_AWS_CONN_ID = 'test-aws-id'
class TestRedshiftCreateClusterTrigger:
def test_redshift_create_cluster_trigger_serialize(self):
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=
TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=
TEST_AWS_CONN_ID)
class_path, args = redshift_create_cluster_trigger.serialize()
assert class_path == 'airflow.providers.amazon.aws.triggers.redshift_cluster.RedshiftCreateClusterTrigger'
assert args['cluster_identifier'] == TEST_CLUSTER_IDENTIFIER
assert args['poll_interval'] == str(TEST_POLL_INTERVAL)
assert args['max_attempt'] == str(TEST_MAX_ATTEMPT)
assert args['aws_conn_id'] == TEST_AWS_CONN_ID
@pytest.mark.asyncio
@async_mock.patch(
'airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.async_conn'
)
async def test_redshift_create_cluster_trigger_run(self, mock_async_conn):
mock = async_mock.MagicMock()
mock_async_conn.__aenter__.return_value = mock
mock.get_waiter().wait = AsyncMock()
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER, poll_interval=
TEST_POLL_INTERVAL, max_attempt=TEST_MAX_ATTEMPT, aws_conn_id=
TEST_AWS_CONN_ID)
generator = redshift_create_cluster_trigger.run()
response = await generator.asend(None)
assert response == TriggerEvent({'status': 'success', 'message':
'Cluster Created'})
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import sys
import pytest
from airflow.providers.amazon.aws.triggers.redshift_cluster import RedshiftCreateClusterTrigger
from airflow.triggers.base import TriggerEvent
if sys.version_info < (3, 8):
from asynctest import CoroutineMock as AsyncMock, mock as async_mock
else:
from unittest import mock as async_mock
from unittest.mock import AsyncMock
TEST_CLUSTER_IDENTIFIER = "test-cluster"
TEST_POLL_INTERVAL = 10
TEST_MAX_ATTEMPT = 10
TEST_AWS_CONN_ID = "test-aws-id"
class TestRedshiftCreateClusterTrigger:
def test_redshift_create_cluster_trigger_serialize(self):
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER,
poll_interval=TEST_POLL_INTERVAL,
max_attempt=TEST_MAX_ATTEMPT,
aws_conn_id=TEST_AWS_CONN_ID,
)
class_path, args = redshift_create_cluster_trigger.serialize()
assert (
class_path
== "airflow.providers.amazon.aws.triggers.redshift_cluster.RedshiftCreateClusterTrigger"
)
assert args["cluster_identifier"] == TEST_CLUSTER_IDENTIFIER
assert args["poll_interval"] == str(TEST_POLL_INTERVAL)
assert args["max_attempt"] == str(TEST_MAX_ATTEMPT)
assert args["aws_conn_id"] == TEST_AWS_CONN_ID
@pytest.mark.asyncio
@async_mock.patch("airflow.providers.amazon.aws.hooks.redshift_cluster.RedshiftHook.async_conn")
async def test_redshift_create_cluster_trigger_run(self, mock_async_conn):
mock = async_mock.MagicMock()
mock_async_conn.__aenter__.return_value = mock
mock.get_waiter().wait = AsyncMock()
redshift_create_cluster_trigger = RedshiftCreateClusterTrigger(
cluster_identifier=TEST_CLUSTER_IDENTIFIER,
poll_interval=TEST_POLL_INTERVAL,
max_attempt=TEST_MAX_ATTEMPT,
aws_conn_id=TEST_AWS_CONN_ID,
)
generator = redshift_create_cluster_trigger.run()
response = await generator.asend(None)
assert response == TriggerEvent({"status": "success", "message": "Cluster Created"})
| [
1,
3,
4,
5,
6
] |
1,120 | 6c5f60e7a122e3da5e6705bfacf73a361f6c1362 | def correctLineup1(athletes: list) ->list:
return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in
range(len(athletes))]
<mask token>
| def correctLineup1(athletes: list) ->list:
return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in
range(len(athletes))]
def correctLineup1(athletes: list) ->list:
return [athletes[i ^ 1] for i in range(len(athletes))]
<mask token>
| def correctLineup1(athletes: list) ->list:
return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in
range(len(athletes))]
def correctLineup1(athletes: list) ->list:
return [athletes[i ^ 1] for i in range(len(athletes))]
<mask token>
print(r1)
| def correctLineup1(athletes: list) ->list:
return [(athletes[i + 1] if i % 2 == 0 else athletes[i - 1]) for i in
range(len(athletes))]
def correctLineup1(athletes: list) ->list:
return [athletes[i ^ 1] for i in range(len(athletes))]
a1 = [1, 2, 3, 4, 5, 6]
r1 = correctLineup1(a1)
print(r1)
| #
# * Python 57, Correct Lineup
# * Easy
# * For the opening ceremony of the upcoming sports event an even number of
# * athletes were picked. They formed a correct lineup, i.e. such a lineup in
# * which no two boys or two girls stand together. The first person in the lineup
# * was a girl. As a part of the performance, adjacent pairs of athletes (i.e.
# * the first one together with the second one, the third one together with the
# * fourth one, etc.) had to swap positions with each other.
# * Given a list of athletes, return the list of athletes after the changes, i.e.
# * after each adjacent pair of athletes is swapped.
# * Example
# For athletes = [1, 2, 3, 4, 5, 6], the output should be
# correctLineup(athletes) = [2, 1, 4, 3, 6, 5].
# * Input/Output
# [execution time limit] 4 seconds (py3)
# [input] array.integer athletes
# A list of even length representing the athletes, where each athlete is given
# by the number written on their back.
# Guaranteed constraints:
# 2 ≤ athletes.length ≤ 20,
# 1 ≤ athletes[i] ≤ 100.
# [output] array.integer
# Array of athletes with each pair of adjacent elements swapped.
#%%
# * Solution 1
def correctLineup1(athletes:list)-> list:
return [athletes[i+1] if i%2==0 else athletes[i-1] for i in range(len(athletes))]
# * Solution 2
# ! bitwise operator ^.
def correctLineup1(athletes:list)-> list:
return [athletes[i^1] for i in range(len(athletes))]
a1 = [1, 2, 3, 4, 5, 6]
r1 = correctLineup1(a1)
print(r1)
# %%
| [
1,
2,
3,
4,
5
] |
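The i ^ 1 variant in the row above works because XOR-ing an index with 1 flips its lowest bit, pairing indices 0 and 1, 2 and 3, and so on; a quick illustrative check using the example from the problem statement:
athletes = [1, 2, 3, 4, 5, 6]
print([athletes[i ^ 1] for i in range(len(athletes))])  # [2, 1, 4, 3, 6, 5]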
1,121 | f91e1fdc31b2fe1aef15757576d847c617a86201 | <mask token>
class TestBromineObjective(ForceBalanceTestCase, ObjectiveTests):
def setUp(self):
self.options = forcebalance.parser.gen_opts_defaults.copy()
self.options.update({'root': os.getcwd() + '/test/files',
'penalty_additive': 0.01, 'jobtype': 'NEWTON', 'forcefield': [
'bro.itp']})
os.chdir(self.options['root'])
self.logger.debug('\nUsing the following options:\n%s\n' % str(self
.options))
self.tgt_opts = [forcebalance.parser.tgt_opts_defaults.copy()]
self.tgt_opts[0].update({'type': 'LIQUID_GMX', 'name': 'LiquidBromine'}
)
self.ff = forcebalance.forcefield.FF(self.options)
self.objective = forcebalance.objective.Objective(self.options,
self.tgt_opts, self.ff)
<mask token>
<mask token>
| <mask token>
class ObjectiveTests(object):
def test_target_zero_order_terms(self):
"""Check zero order target terms"""
obj = self.objective.Target_Terms(numpy.array([0.5] * self.ff.np),
Order=0)
self.assertEqual(type(obj), dict)
self.assertTrue('X' in obj)
self.assertNotEqual(int(obj['X']), 0)
self.assertTrue('G' in obj)
self.assertFalse(obj['G'].any())
self.assertTrue('H' in obj)
self.assertEqual(obj['H'], numpy.diag([1] * self.ff.np))
def test_target_first_order_terms(self):
"""Check first order target terms"""
obj = self.objective.Target_Terms(numpy.array([0.5] * self.ff.np),
Order=1)
self.assertEqual(type(obj), dict)
self.assertTrue('X' in obj)
self.assertTrue('G' in obj)
self.assertTrue('H' in obj)
def test_target_second_order_terms(self):
"""Check second order target terms"""
obj = self.objective.Target_Terms(numpy.array([0.5] * self.ff.np),
Order=2)
self.assertEqual(type(obj), dict)
self.assertTrue('X' in obj)
self.assertTrue('G' in obj)
self.assertTrue('H' in obj)
def test_indicate(self):
"""Check objective.indicate() runs without errors"""
self.objective.Indicate()
class TestWaterObjective(ForceBalanceTestCase, ObjectiveTests):
def setUp(self):
self.options = forcebalance.parser.gen_opts_defaults.copy()
self.options.update({'root': os.getcwd() + '/test/files',
'penalty_additive': 0.01, 'jobtype': 'NEWTON', 'forcefield': [
'water.itp']})
os.chdir(self.options['root'])
self.logger.debug('\nUsing the following options:\n%s\n' % str(self
.options))
self.tgt_opts = [forcebalance.parser.tgt_opts_defaults.copy()]
self.tgt_opts[0].update({'type': 'ABINITIO_GMX', 'name': 'cluster-06'})
self.ff = forcebalance.forcefield.FF(self.options)
self.objective = forcebalance.objective.Objective(self.options,
self.tgt_opts, self.ff)
def shortDescription(self):
return super(TestWaterObjective, self).shortDescription(
) + ' (AbInitio_GMX target)'
class TestBromineObjective(ForceBalanceTestCase, ObjectiveTests):
def setUp(self):
self.options = forcebalance.parser.gen_opts_defaults.copy()
self.options.update({'root': os.getcwd() + '/test/files',
'penalty_additive': 0.01, 'jobtype': 'NEWTON', 'forcefield': [
'bro.itp']})
os.chdir(self.options['root'])
self.logger.debug('\nUsing the following options:\n%s\n' % str(self
.options))
self.tgt_opts = [forcebalance.parser.tgt_opts_defaults.copy()]
self.tgt_opts[0].update({'type': 'LIQUID_GMX', 'name': 'LiquidBromine'}
)
self.ff = forcebalance.forcefield.FF(self.options)
self.objective = forcebalance.objective.Objective(self.options,
self.tgt_opts, self.ff)
def shortDescription(self):
return super(TestBromineObjective, self).shortDescription(
) + ' (Liquid_GMX target)'
<mask token>
| <mask token>
class TestPenalty(ForceBalanceTestCase):
<mask token>
<mask token>
class ObjectiveTests(object):
def test_target_zero_order_terms(self):
"""Check zero order target terms"""
obj = self.objective.Target_Terms(numpy.array([0.5] * self.ff.np),
Order=0)
self.assertEqual(type(obj), dict)
self.assertTrue('X' in obj)
self.assertNotEqual(int(obj['X']), 0)
self.assertTrue('G' in obj)
self.assertFalse(obj['G'].any())
self.assertTrue('H' in obj)
self.assertEqual(obj['H'], numpy.diag([1] * self.ff.np))
def test_target_first_order_terms(self):
"""Check first order target terms"""
obj = self.objective.Target_Terms(numpy.array([0.5] * self.ff.np),
Order=1)
self.assertEqual(type(obj), dict)
self.assertTrue('X' in obj)
self.assertTrue('G' in obj)
self.assertTrue('H' in obj)
def test_target_second_order_terms(self):
"""Check second order target terms"""
obj = self.objective.Target_Terms(numpy.array([0.5] * self.ff.np),
Order=2)
self.assertEqual(type(obj), dict)
self.assertTrue('X' in obj)
self.assertTrue('G' in obj)
self.assertTrue('H' in obj)
def test_indicate(self):
"""Check objective.indicate() runs without errors"""
self.objective.Indicate()
class TestWaterObjective(ForceBalanceTestCase, ObjectiveTests):
def setUp(self):
self.options = forcebalance.parser.gen_opts_defaults.copy()
self.options.update({'root': os.getcwd() + '/test/files',
'penalty_additive': 0.01, 'jobtype': 'NEWTON', 'forcefield': [
'water.itp']})
os.chdir(self.options['root'])
self.logger.debug('\nUsing the following options:\n%s\n' % str(self
.options))
self.tgt_opts = [forcebalance.parser.tgt_opts_defaults.copy()]
self.tgt_opts[0].update({'type': 'ABINITIO_GMX', 'name': 'cluster-06'})
self.ff = forcebalance.forcefield.FF(self.options)
self.objective = forcebalance.objective.Objective(self.options,
self.tgt_opts, self.ff)
def shortDescription(self):
return super(TestWaterObjective, self).shortDescription(
) + ' (AbInitio_GMX target)'
class TestBromineObjective(ForceBalanceTestCase, ObjectiveTests):
def setUp(self):
self.options = forcebalance.parser.gen_opts_defaults.copy()
self.options.update({'root': os.getcwd() + '/test/files',
'penalty_additive': 0.01, 'jobtype': 'NEWTON', 'forcefield': [
'bro.itp']})
os.chdir(self.options['root'])
self.logger.debug('\nUsing the following options:\n%s\n' % str(self
.options))
self.tgt_opts = [forcebalance.parser.tgt_opts_defaults.copy()]
self.tgt_opts[0].update({'type': 'LIQUID_GMX', 'name': 'LiquidBromine'}
)
self.ff = forcebalance.forcefield.FF(self.options)
self.objective = forcebalance.objective.Objective(self.options,
self.tgt_opts, self.ff)
def shortDescription(self):
return super(TestBromineObjective, self).shortDescription(
) + ' (Liquid_GMX target)'
<mask token>
| <mask token>
class TestImplemented(ForceBalanceTestCase):
<mask token>
<mask token>
class TestPenalty(ForceBalanceTestCase):
def setUp(self):
self.options = forcebalance.parser.gen_opts_defaults.copy()
self.options.update({'root': os.getcwd() + '/test/files',
'penalty_additive': 0.01, 'jobtype': 'NEWTON', 'forcefield': [
'cc-pvdz-overlap-original.gbs']})
os.chdir(self.options['root'])
self.ff = forcebalance.forcefield.FF(self.options)
self.np = self.ff.np
self.penalties = []
for ptype in forcebalance.objective.Penalty.Pen_Names.keys():
penalty = forcebalance.objective.Penalty(ptype, self.ff, self.
options['penalty_additive'], self.options[
'penalty_multiplicative'], self.options[
'penalty_hyperbolic_b'], self.options['penalty_alpha'])
self.penalties.append(penalty)
def test_penalty_compute(self):
"""Check penalty computation functions"""
objective = {'G': numpy.zeros(9), 'H': numpy.diag((1,) * 9), 'X': 1}
for penalty in self.penalties:
result = penalty.compute([1] * self.np, objective)
self.assertEqual(tuple, type(result))
class ObjectiveTests(object):
def test_target_zero_order_terms(self):
"""Check zero order target terms"""
obj = self.objective.Target_Terms(numpy.array([0.5] * self.ff.np),
Order=0)
self.assertEqual(type(obj), dict)
self.assertTrue('X' in obj)
self.assertNotEqual(int(obj['X']), 0)
self.assertTrue('G' in obj)
self.assertFalse(obj['G'].any())
self.assertTrue('H' in obj)
self.assertEqual(obj['H'], numpy.diag([1] * self.ff.np))
def test_target_first_order_terms(self):
"""Check first order target terms"""
obj = self.objective.Target_Terms(numpy.array([0.5] * self.ff.np),
Order=1)
self.assertEqual(type(obj), dict)
self.assertTrue('X' in obj)
self.assertTrue('G' in obj)
self.assertTrue('H' in obj)
def test_target_second_order_terms(self):
"""Check second order target terms"""
obj = self.objective.Target_Terms(numpy.array([0.5] * self.ff.np),
Order=2)
self.assertEqual(type(obj), dict)
self.assertTrue('X' in obj)
self.assertTrue('G' in obj)
self.assertTrue('H' in obj)
def test_indicate(self):
"""Check objective.indicate() runs without errors"""
self.objective.Indicate()
class TestWaterObjective(ForceBalanceTestCase, ObjectiveTests):
def setUp(self):
self.options = forcebalance.parser.gen_opts_defaults.copy()
self.options.update({'root': os.getcwd() + '/test/files',
'penalty_additive': 0.01, 'jobtype': 'NEWTON', 'forcefield': [
'water.itp']})
os.chdir(self.options['root'])
self.logger.debug('\nUsing the following options:\n%s\n' % str(self
.options))
self.tgt_opts = [forcebalance.parser.tgt_opts_defaults.copy()]
self.tgt_opts[0].update({'type': 'ABINITIO_GMX', 'name': 'cluster-06'})
self.ff = forcebalance.forcefield.FF(self.options)
self.objective = forcebalance.objective.Objective(self.options,
self.tgt_opts, self.ff)
def shortDescription(self):
return super(TestWaterObjective, self).shortDescription(
) + ' (AbInitio_GMX target)'
class TestBromineObjective(ForceBalanceTestCase, ObjectiveTests):
def setUp(self):
self.options = forcebalance.parser.gen_opts_defaults.copy()
self.options.update({'root': os.getcwd() + '/test/files',
'penalty_additive': 0.01, 'jobtype': 'NEWTON', 'forcefield': [
'bro.itp']})
os.chdir(self.options['root'])
self.logger.debug('\nUsing the following options:\n%s\n' % str(self
.options))
self.tgt_opts = [forcebalance.parser.tgt_opts_defaults.copy()]
self.tgt_opts[0].update({'type': 'LIQUID_GMX', 'name': 'LiquidBromine'}
)
self.ff = forcebalance.forcefield.FF(self.options)
self.objective = forcebalance.objective.Objective(self.options,
self.tgt_opts, self.ff)
def shortDescription(self):
return super(TestBromineObjective, self).shortDescription(
) + ' (Liquid_GMX target)'
<mask token>
| from __future__ import absolute_import
from builtins import str
from builtins import object
import unittest
import sys, os, re
import forcebalance
import abc
import numpy
from __init__ import ForceBalanceTestCase
class TestImplemented(ForceBalanceTestCase):
def test_implemented_targets_derived_from_target(self):
"""Check classes listed in Implemented_Targets are derived from Target"""
for key in forcebalance.objective.Implemented_Targets.keys():
self.logger.debug("Assert %s is subclass of target\n" % str(forcebalance.objective.Implemented_Targets[key]))
self.assertTrue(issubclass(forcebalance.objective.Implemented_Targets[key],forcebalance.target.Target))
def test_no_unlisted_classes_derived_from_Target(self):
"""Check for unknown omissions from Implemented_Targets
Check to make sure any classes derived from Target are either
listed in Implemented_Targets or in the exclusion list in this
test case
"""
self.skipTest("Not sure if test is working properly.")
forcebalance_modules=[module[:-3] for module in os.listdir(forcebalance.__path__[0])
if re.compile(".*\.py$").match(module)
and module not in ["__init__.py"]]
for module in forcebalance_modules:
# LPW: I don't think dcdlib should be imported this way.
print(module)
if module == "_dcdlib": continue
m = __import__('forcebalance.' + module)
objs = dir(eval('m.' + module))
print(objs)
for obj in objs:
obj = eval('m.'+module+'.'+obj)
if type(obj) == abc.ABCMeta:
implemented = [i for i in forcebalance.objective.Implemented_Targets.values()]
# list of documented exceptions
# Basically, platform-independent targets are excluded.
exclude = ['Target',
'AbInitio',
'Interaction',
'Interaction_GMX',
'Liquid',
'Lipid',
'BindingEnergy',
'LeastSquares',
'Vibration',
'Thermo',
'Hydration',
'Moments']
print(obj)
if obj not in implemented and obj.__name__ not in exclude:
self.fail("Unknown class '%s' not listed in Implemented_Targets" % obj.__name__)
class TestPenalty(ForceBalanceTestCase):
def setUp(self):
self.options=forcebalance.parser.gen_opts_defaults.copy()
self.options.update({
'root': os.getcwd() + '/test/files',
'penalty_additive': 0.01,
'jobtype': 'NEWTON',
'forcefield': ['cc-pvdz-overlap-original.gbs']})
os.chdir(self.options['root'])
self.ff = forcebalance.forcefield.FF(self.options)
self.np=self.ff.np
self.penalties = []
for ptype in forcebalance.objective.Penalty.Pen_Names.keys():
penalty = forcebalance.objective.Penalty(ptype,
self.ff,
self.options['penalty_additive'],
self.options['penalty_multiplicative'],
self.options['penalty_hyperbolic_b'],
self.options['penalty_alpha'])
self.penalties.append(penalty)
def test_penalty_compute(self):
"""Check penalty computation functions"""
objective = {'G': numpy.zeros((9)),
'H': numpy.diag((1,)*9),
'X': 1}
for penalty in self.penalties:
result=penalty.compute([1]*self.np, objective)
self.assertEqual(tuple, type(result))
# more tests go here
class ObjectiveTests(object):
def test_target_zero_order_terms(self):
"""Check zero order target terms"""
obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=0)
self.assertEqual(type(obj),dict)
self.assertTrue("X" in obj)
self.assertNotEqual(int(obj["X"]), 0)
self.assertTrue("G" in obj)
self.assertFalse(obj["G"].any())
self.assertTrue("H" in obj)
self.assertEqual(obj["H"], numpy.diag([1]*self.ff.np))
def test_target_first_order_terms(self):
"""Check first order target terms"""
obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=1)
self.assertEqual(type(obj),dict)
self.assertTrue("X" in obj)
self.assertTrue("G" in obj)
self.assertTrue("H" in obj)
def test_target_second_order_terms(self):
"""Check second order target terms"""
obj = self.objective.Target_Terms(numpy.array([.5]*self.ff.np), Order=2)
self.assertEqual(type(obj),dict)
self.assertTrue("X" in obj)
self.assertTrue("G" in obj)
self.assertTrue("H" in obj)
def test_indicate(self):
"""Check objective.indicate() runs without errors"""
self.objective.Indicate()
class TestWaterObjective(ForceBalanceTestCase, ObjectiveTests):
def setUp(self):
self.options=forcebalance.parser.gen_opts_defaults.copy()
self.options.update({
'root': os.getcwd() + '/test/files',
'penalty_additive': 0.01,
'jobtype': 'NEWTON',
'forcefield': ['water.itp']})
os.chdir(self.options['root'])
self.logger.debug("\nUsing the following options:\n%s\n" % str(self.options))
self.tgt_opts = [ forcebalance.parser.tgt_opts_defaults.copy() ]
self.tgt_opts[0].update({"type" : "ABINITIO_GMX", "name" : "cluster-06"})
self.ff = forcebalance.forcefield.FF(self.options)
self.objective = forcebalance.objective.Objective(self.options, self.tgt_opts,self.ff)
def shortDescription(self):
return super(TestWaterObjective, self).shortDescription() + " (AbInitio_GMX target)"
class TestBromineObjective(ForceBalanceTestCase, ObjectiveTests):
def setUp(self):
self.options=forcebalance.parser.gen_opts_defaults.copy()
self.options.update({
'root': os.getcwd() + '/test/files',
'penalty_additive': 0.01,
'jobtype': 'NEWTON',
'forcefield': ['bro.itp']})
os.chdir(self.options['root'])
self.logger.debug("\nUsing the following options:\n%s\n" % str(self.options))
self.tgt_opts = [ forcebalance.parser.tgt_opts_defaults.copy() ]
self.tgt_opts[0].update({"type" : "LIQUID_GMX", "name" : "LiquidBromine"})
self.ff = forcebalance.forcefield.FF(self.options)
self.objective = forcebalance.objective.Objective(self.options, self.tgt_opts,self.ff)
def shortDescription(self):
return super(TestBromineObjective, self).shortDescription() + " (Liquid_GMX target)"
if __name__ == '__main__':
unittest.main()
| [
2,
11,
12,
15,
20
] |
1,122 | 374fbb986524f28cc86f6e579f504eeb8ddc9701 | <mask token>
def parse_cr(cr):
binary = cr.value
string = binary.decode('utf-8')
return string.split(',')
def get_title(cr):
get = parse_cr(cr)[2]
head = get[5:9]
if head == 'data':
trunc = get[12:]
return trunc.split('/')[0]
else:
trunc = get[10:]
return trunc.split('=')[0]
<mask token>
def gather_popularity():
first = None
popularity = dict()
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-stream', enable_auto_commit=True, auto_commit_interval_ms=1000
)
duration = 0
max_duration = 500000000
for message in consumer:
if duration > max_duration:
break
else:
duration += 1
if duration % (max_duration / 100) == 0:
print(duration / (max_duration / 100), '% complete')
if first is None:
first = message
elif message == first:
print('repeat')
break
parsed = parse_cr(message)
r_block = parsed[2]
head = r_block[5:9]
if head == 'data':
trunc = r_block[12:]
title = trunc.split('/')[0]
minutes = r_block.split('/')[4][:-4]
else:
continue
if int(minutes) == 0:
date = parsed[0][5:10]
if title in popularity:
count = popularity[title]
popularity[title] = count + 1
else:
popularity[title] = 1
dates.add(date)
return popularity
def gather_titles():
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-new', enable_auto_commit=True, auto_commit_interval_ms=1000)
f = open('movie_titles.txt', 'r')
fl = f.readlines()
f.close()
s = set(fl)
i = len(s)
f = open('movie_titles.txt', 'a')
for message in consumer:
if i > 27000:
break
title = get_title(message) + '\n'
if title in s:
continue
else:
s.add(title)
f.write(title)
i = i + 1
f.close()
<mask token>
| <mask token>
def parse_cr(cr):
binary = cr.value
string = binary.decode('utf-8')
return string.split(',')
def get_title(cr):
get = parse_cr(cr)[2]
head = get[5:9]
if head == 'data':
trunc = get[12:]
return trunc.split('/')[0]
else:
trunc = get[10:]
return trunc.split('=')[0]
<mask token>
def gather_popularity():
first = None
popularity = dict()
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-stream', enable_auto_commit=True, auto_commit_interval_ms=1000
)
duration = 0
max_duration = 500000000
for message in consumer:
if duration > max_duration:
break
else:
duration += 1
if duration % (max_duration / 100) == 0:
print(duration / (max_duration / 100), '% complete')
if first is None:
first = message
elif message == first:
print('repeat')
break
parsed = parse_cr(message)
r_block = parsed[2]
head = r_block[5:9]
if head == 'data':
trunc = r_block[12:]
title = trunc.split('/')[0]
minutes = r_block.split('/')[4][:-4]
else:
continue
if int(minutes) == 0:
date = parsed[0][5:10]
if title in popularity:
count = popularity[title]
popularity[title] = count + 1
else:
popularity[title] = 1
dates.add(date)
return popularity
def gather_titles():
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-new', enable_auto_commit=True, auto_commit_interval_ms=1000)
f = open('movie_titles.txt', 'r')
fl = f.readlines()
f.close()
s = set(fl)
i = len(s)
f = open('movie_titles.txt', 'a')
for message in consumer:
if i > 27000:
break
title = get_title(message) + '\n'
if title in s:
continue
else:
s.add(title)
f.write(title)
i = i + 1
f.close()
<mask token>
with open('views3.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
for key, value in results.items():
writer.writerow([key, value / num_days])
| <mask token>
users = set()
def parse_cr(cr):
binary = cr.value
string = binary.decode('utf-8')
return string.split(',')
def get_title(cr):
get = parse_cr(cr)[2]
head = get[5:9]
if head == 'data':
trunc = get[12:]
return trunc.split('/')[0]
else:
trunc = get[10:]
return trunc.split('=')[0]
dates = set()
def gather_popularity():
first = None
popularity = dict()
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-stream', enable_auto_commit=True, auto_commit_interval_ms=1000
)
duration = 0
max_duration = 500000000
for message in consumer:
if duration > max_duration:
break
else:
duration += 1
if duration % (max_duration / 100) == 0:
print(duration / (max_duration / 100), '% complete')
if first is None:
first = message
elif message == first:
print('repeat')
break
parsed = parse_cr(message)
r_block = parsed[2]
head = r_block[5:9]
if head == 'data':
trunc = r_block[12:]
title = trunc.split('/')[0]
minutes = r_block.split('/')[4][:-4]
else:
continue
if int(minutes) == 0:
date = parsed[0][5:10]
if title in popularity:
count = popularity[title]
popularity[title] = count + 1
else:
popularity[title] = 1
dates.add(date)
return popularity
def gather_titles():
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-new', enable_auto_commit=True, auto_commit_interval_ms=1000)
f = open('movie_titles.txt', 'r')
fl = f.readlines()
f.close()
s = set(fl)
i = len(s)
f = open('movie_titles.txt', 'a')
for message in consumer:
if i > 27000:
break
title = get_title(message) + '\n'
if title in s:
continue
else:
s.add(title)
f.write(title)
i = i + 1
f.close()
results = gather_popularity()
num_days = len(dates)
with open('views3.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
for key, value in results.items():
writer.writerow([key, value / num_days])
| from kafka import KafkaConsumer
import csv
users = set()
def parse_cr(cr):
binary = cr.value
string = binary.decode('utf-8')
return string.split(',')
def get_title(cr):
get = parse_cr(cr)[2]
head = get[5:9]
if head == 'data':
trunc = get[12:]
return trunc.split('/')[0]
else:
trunc = get[10:]
return trunc.split('=')[0]
dates = set()
def gather_popularity():
first = None
popularity = dict()
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-stream', enable_auto_commit=True, auto_commit_interval_ms=1000
)
duration = 0
max_duration = 500000000
for message in consumer:
if duration > max_duration:
break
else:
duration += 1
if duration % (max_duration / 100) == 0:
print(duration / (max_duration / 100), '% complete')
if first is None:
first = message
elif message == first:
print('repeat')
break
parsed = parse_cr(message)
r_block = parsed[2]
head = r_block[5:9]
if head == 'data':
trunc = r_block[12:]
title = trunc.split('/')[0]
minutes = r_block.split('/')[4][:-4]
else:
continue
if int(minutes) == 0:
date = parsed[0][5:10]
if title in popularity:
count = popularity[title]
popularity[title] = count + 1
else:
popularity[title] = 1
dates.add(date)
return popularity
def gather_titles():
consumer = KafkaConsumer('movielog', bootstrap_servers=[
'localhost:9092'], auto_offset_reset='earliest', group_id=
'jcerwin-new', enable_auto_commit=True, auto_commit_interval_ms=1000)
f = open('movie_titles.txt', 'r')
fl = f.readlines()
f.close()
s = set(fl)
i = len(s)
f = open('movie_titles.txt', 'a')
for message in consumer:
if i > 27000:
break
title = get_title(message) + '\n'
if title in s:
continue
else:
s.add(title)
f.write(title)
i = i + 1
f.close()
results = gather_popularity()
num_days = len(dates)
with open('views3.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
for key, value in results.items():
writer.writerow([key, value / num_days])
| from kafka import KafkaConsumer
import csv
users = set()
# returns string of title given a ConsumerRecord
def parse_cr(cr):
binary = cr.value
string = binary.decode('utf-8')
# [time, user id, GET request]
return string.split(',')
# returns string of title given a ConsumerRecord in name+name+year format regardless of rate or data
def get_title(cr):
get = parse_cr(cr)[2]
head = get[5:9]
if head == 'data':
trunc = get[12:]
return trunc.split('/')[0]
else:
trunc = get[10:]
return trunc.split('=')[0]
dates = set()
def gather_popularity():
first = None
popularity = dict()
consumer = KafkaConsumer(
'movielog',
bootstrap_servers=['localhost:9092'],
auto_offset_reset='earliest',
group_id='jcerwin-stream',
enable_auto_commit=True,
auto_commit_interval_ms=1000
)
duration = 0
max_duration = 500000000
for message in consumer:
if duration > max_duration: break
else: duration += 1
if duration % (max_duration / 100) == 0:
print(duration / (max_duration / 100), "% complete")
if first is None:
first = message
else:
if message == first:
print("repeat")
break
parsed = parse_cr(message)
r_block = parsed[2]
head = r_block[5:9]
# look for watches only not reviews
if head == 'data':
trunc = r_block[12:]
title = trunc.split('/')[0]
minutes = r_block.split('/')[4][:-4]
else:
continue
if int(minutes) == 0:
date = (parsed[0])[5:10]
if title in popularity:
count = popularity[title]
popularity[title] = count + 1
else:
popularity[title] = 1
dates.add(date)
return popularity
def gather_titles():
consumer = KafkaConsumer(
'movielog',
bootstrap_servers=['localhost:9092'],
auto_offset_reset='earliest',
group_id='jcerwin-new',
enable_auto_commit=True,
auto_commit_interval_ms=1000
)
f = open("movie_titles.txt", "r")
fl = f.readlines()
f.close()
s = set(fl)
i = len(s)
f = open("movie_titles.txt", "a")
for message in consumer:
if i > 27000:
break
title = get_title(message) + '\n'
if title in s:
continue
else:
s.add(title)
f.write(title)
i = i + 1
f.close()
#with open('views.csv', 'w') as csv_file:
# writer = csv.writer(csv_file)
# for key, value in gather_popularity().items():
# writer.writerow([key, value])
results = gather_popularity()
num_days = len(dates)
with open('views3.csv', 'w') as csv_file:
writer = csv.writer(csv_file)
for key, value in results.items():
writer.writerow([key, value / num_days])
| [
4,
5,
6,
7,
8
] |
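The helpers in the row above assume comma-separated log lines of the form time,user_id,GET-request; a small offline check of that assumption (the sample record below is invented to match the slicing logic, not taken from the real movielog stream):
class FakeRecord:
    def __init__(self, value):
        self.value = value  # a Kafka ConsumerRecord exposes the raw bytes as .value
sample = FakeRecord(b'2020-01-15T10:00:01,12345,GET /data/m/the+matrix+1999/10.mpg')
print(parse_cr(sample))   # ['2020-01-15T10:00:01', '12345', 'GET /data/m/the+matrix+1999/10.mpg']
print(get_title(sample))  # 'the+matrix+1999'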
1,123 | bea90bbcd4d34b64c21f022b6f3af2bee2d978e4 | <mask token>
| <mask token>
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
| <mask token>
app = create_app(Config)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
| from app import create_app
from app.config import Config
app = create_app(Config)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000, debug=True)
| from app import create_app
from app.config import Config
app = create_app(Config)
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000, debug=True)
| [
0,
1,
2,
3,
4
] |
1,124 | 6b7bc40ba842ff565e7141fb1d51def99d9ab96a | <mask token>
class SwitchingBatchSampler(Sampler):
<mask token>
def __iter__(self):
second_size = self.data_len - self.first_size
self.first_iter = iter(torch.randperm(self.first_size))
self.second_iter = iter(torch.randperm(second_size) + self.first_size)
i = 0
count_first = 0
count_second = 0
batch = []
while count_first + count_second < self.data_len:
if self.turn == 0:
if count_first == self.first_size:
self.turn = 1
if len(batch) > 0 and not self.drop_last:
yield batch
batch = []
else:
batch.append(next(self.first_iter))
count_first += 1
i += 1
elif count_second == self.data_len - self.first_size:
self.turn = 0
if len(batch) > 0 and not self.drop_last:
yield batch
batch = []
else:
batch.append(next(self.second_iter))
count_second += 1
i += 1
if i != 0 and i % self.batch_size == 0:
yield batch
batch = []
if (count_first != self.first_size and count_second !=
second_size and random.uniform(0, 1) > 0.5):
self.turn = (self.turn + 1) % 2
if len(batch) > 0 and not self.drop_last:
yield batch
<mask token>
| <mask token>
class SwitchingBatchSampler(Sampler):
<mask token>
def __iter__(self):
second_size = self.data_len - self.first_size
self.first_iter = iter(torch.randperm(self.first_size))
self.second_iter = iter(torch.randperm(second_size) + self.first_size)
i = 0
count_first = 0
count_second = 0
batch = []
while count_first + count_second < self.data_len:
if self.turn == 0:
if count_first == self.first_size:
self.turn = 1
if len(batch) > 0 and not self.drop_last:
yield batch
batch = []
else:
batch.append(next(self.first_iter))
count_first += 1
i += 1
elif count_second == self.data_len - self.first_size:
self.turn = 0
if len(batch) > 0 and not self.drop_last:
yield batch
batch = []
else:
batch.append(next(self.second_iter))
count_second += 1
i += 1
if i != 0 and i % self.batch_size == 0:
yield batch
batch = []
if (count_first != self.first_size and count_second !=
second_size and random.uniform(0, 1) > 0.5):
self.turn = (self.turn + 1) % 2
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
if self.drop_last:
            return self.first_size // self.batch_size + ((self.data_len - self.first_size) // self.batch_size)
else:
            return (self.first_size + self.batch_size - 1) // self.batch_size + ((self.data_len - self.first_size + self.batch_size - 1) // self.batch_size)
| <mask token>
class SwitchingBatchSampler(Sampler):
def __init__(self, data_source, batch_size, drop_last=False):
self.data_source = data_source
self.batch_size = batch_size
self.drop_last = drop_last
self.data_len = len(self.data_source)
count = 0
for i in range(self.data_len):
if self.data_source.imgs[i][1] == 1:
break
else:
count += 1
print('Total Images: %d [Class 0: %d, Class 1: %d]\n' % (self.
data_len, count, self.data_len - count))
self.first_size = count
if random.uniform(0, 1) > 0.5:
self.turn = 0
else:
self.turn = 1
def __iter__(self):
second_size = self.data_len - self.first_size
self.first_iter = iter(torch.randperm(self.first_size))
self.second_iter = iter(torch.randperm(second_size) + self.first_size)
i = 0
count_first = 0
count_second = 0
batch = []
while count_first + count_second < self.data_len:
if self.turn == 0:
if count_first == self.first_size:
self.turn = 1
if len(batch) > 0 and not self.drop_last:
yield batch
batch = []
else:
batch.append(next(self.first_iter))
count_first += 1
i += 1
elif count_second == self.data_len - self.first_size:
self.turn = 0
if len(batch) > 0 and not self.drop_last:
yield batch
batch = []
else:
batch.append(next(self.second_iter))
count_second += 1
i += 1
if i != 0 and i % self.batch_size == 0:
yield batch
batch = []
if (count_first != self.first_size and count_second !=
second_size and random.uniform(0, 1) > 0.5):
self.turn = (self.turn + 1) % 2
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
if self.drop_last:
            return self.first_size // self.batch_size + ((self.data_len - self.first_size) // self.batch_size)
else:
            return (self.first_size + self.batch_size - 1) // self.batch_size + ((self.data_len - self.first_size + self.batch_size - 1) // self.batch_size)
| from torch.utils.data.sampler import Sampler
import torch
import random
class SwitchingBatchSampler(Sampler):
def __init__(self, data_source, batch_size, drop_last=False):
self.data_source = data_source
self.batch_size = batch_size
self.drop_last = drop_last
self.data_len = len(self.data_source)
count = 0
for i in range(self.data_len):
if self.data_source.imgs[i][1] == 1:
break
else:
count += 1
print('Total Images: %d [Class 0: %d, Class 1: %d]\n' % (self.
data_len, count, self.data_len - count))
self.first_size = count
if random.uniform(0, 1) > 0.5:
self.turn = 0
else:
self.turn = 1
def __iter__(self):
second_size = self.data_len - self.first_size
self.first_iter = iter(torch.randperm(self.first_size))
self.second_iter = iter(torch.randperm(second_size) + self.first_size)
i = 0
count_first = 0
count_second = 0
batch = []
while count_first + count_second < self.data_len:
if self.turn == 0:
if count_first == self.first_size:
self.turn = 1
if len(batch) > 0 and not self.drop_last:
yield batch
batch = []
else:
batch.append(next(self.first_iter))
count_first += 1
i += 1
elif count_second == self.data_len - self.first_size:
self.turn = 0
if len(batch) > 0 and not self.drop_last:
yield batch
batch = []
else:
batch.append(next(self.second_iter))
count_second += 1
i += 1
if i != 0 and i % self.batch_size == 0:
yield batch
batch = []
if (count_first != self.first_size and count_second !=
second_size and random.uniform(0, 1) > 0.5):
self.turn = (self.turn + 1) % 2
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
if self.drop_last:
            return self.first_size // self.batch_size + ((self.data_len - self.first_size) // self.batch_size)
else:
            return (self.first_size + self.batch_size - 1) // self.batch_size + ((self.data_len - self.first_size + self.batch_size - 1) // self.batch_size)
| from torch.utils.data.sampler import Sampler
import torch
import random
class SwitchingBatchSampler(Sampler):
def __init__(self, data_source, batch_size, drop_last=False):
self.data_source = data_source
self.batch_size = batch_size
self.drop_last = drop_last
# Divide the indices into two indices groups
self.data_len = len(self.data_source)
count = 0
for i in range(self.data_len):
if self.data_source.imgs[i][1] == 1:
break
else:
count += 1
print("Total Images: %d [Class 0: %d, Class 1: %d]\n"%(self.data_len, count, (self.data_len-count)))
self.first_size = count
if random.uniform(0, 1) > 0.5:
self.turn = 0
else:
self.turn = 1
def __iter__(self):
# Initialize both iters
second_size = self.data_len - self.first_size
self.first_iter = iter(torch.randperm(self.first_size))
self.second_iter = iter(torch.randperm(second_size) + self.first_size)
# Counting variables
i = 0
count_first = 0 # Counts how many imgs of first iter has been returned
count_second = 0 # Counts second iter
batch = []
# Until no data left, keep iterating
while count_first+count_second < self.data_len:
# Fill the batch
if self.turn == 0:
if count_first == self.first_size:
self.turn = 1
if len(batch) > 0 and not self.drop_last:
yield batch
batch = []
else:
batch.append(next(self.first_iter))
count_first += 1
i += 1
else:
if count_second == (self.data_len-self.first_size):
self.turn = 0
if len(batch) > 0 and not self.drop_last:
yield batch
batch = []
else:
batch.append(next(self.second_iter))
count_second += 1
i += 1
# Yield the batch and switch the turn randomly
if i != 0 and i % self.batch_size == 0:
yield batch
batch = []
if count_first != self.first_size and count_second != second_size and random.uniform(0, 1) > 0.5:
self.turn = (self.turn + 1) % 2
# If drop_last is False, return the rest
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
if self.drop_last:
            return (self.first_size // self.batch_size) + ((self.data_len - self.first_size) // self.batch_size)
else:
            return ((self.first_size + self.batch_size - 1) // self.batch_size) + ((self.data_len - self.first_size + self.batch_size - 1) // self.batch_size) | [
2,
3,
4,
5,
6
] |
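A minimal usage sketch for the sampler in the row above (it assumes an ImageFolder-style dataset with two classes, which is what the data_source.imgs access implies; the path is a placeholder):
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
dataset = datasets.ImageFolder('data/train', transform=transforms.ToTensor())
sampler = SwitchingBatchSampler(dataset, batch_size=32, drop_last=False)
loader = DataLoader(dataset, batch_sampler=sampler)  # batch_sampler yields lists of indices
for images, labels in loader:
    pass  # each batch is drawn from one class at a time, switching randomly between the two classes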
1,125 | 68d37421b71d595510a1439c06cc31d00c23c277 | import numpy
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.cross_validation import cross_val_score
lag = 10
print "Loading train_data..."
train_data = numpy.loadtxt("../KddJavaToolChain/train_timelines_final.csv",
delimiter=",")
print "Loading train_data completed..."
print "Loading test_data..."
test_data = numpy.loadtxt("../KddJavaToolChain/predict_timelines.csv",
delimiter=",")
print "Loading test_data completed..."
print "Loading truth_data..."
truth_data_lines = open("../KddJavaToolChain/truth_timelines_final.csv", "r").readlines()
truth_data = []
for truth_data_line in truth_data_lines:
truth_data.append(int(truth_data_line.replace("\n", "")))
print "Loading truth_data completed..."
for i in range(501,1001,50):
n = 1000
# i = 420
clf_rf = RandomForestClassifier(n_estimators=n, n_jobs=-1, max_depth=None, min_samples_split=i, verbose=False)
scores = cross_val_score(clf_rf, train_data, truth_data, n_jobs=1, cv=2, verbose=False)
print i, ":", scores.mean()
# clf = GradientBoostingClassifier(verbose=False)
# clf.fit(train_data, truth_data)
#
# index = [i for i in range(lag)]
# zipped = zip(index, clf.feature_importances_)
# zipped.sort(key = lambda t: t[1], reverse=True)
#
# for i, j in zipped:
# print i, ":", j
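# Note (assumption about the runtime environment): on scikit-learn >= 0.20 the
# sklearn.cross_validation module used above has been removed; the equivalent import is
#   from sklearn.model_selection import cross_val_score
# and the cross_val_score(...) call signature used here stays the same.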
| null | null | null | null | [
0
] |
1,126 | c6502d6b589fa75dfbd5946a1097e77fc0b472c4 | <mask token>
class DatabaseConnection:
<mask token>
<mask token>
def connect(self):
self.conn = MySQLdb.connect(host=self.address, port=3306, user=self
.user, passwd=self.password, db=self.database)
c = self.conn.cursor()
return c, self.conn
def disconnect(self):
self.conn.close()
def addEmail(self, email, number):
try:
c, conn = self.connect()
c.execute(
'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',
(thwart(email), thwart(number)))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def removeEmail(self, email):
try:
c, conn = self.connect()
c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))
conn.commit()
self.disconnect()
return True
except Exception:
return False
<mask token>
<mask token>
| <mask token>
class DatabaseConnection:
<mask token>
<mask token>
def connect(self):
self.conn = MySQLdb.connect(host=self.address, port=3306, user=self
.user, passwd=self.password, db=self.database)
c = self.conn.cursor()
return c, self.conn
def disconnect(self):
self.conn.close()
def addEmail(self, email, number):
try:
c, conn = self.connect()
c.execute(
'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',
(thwart(email), thwart(number)))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def removeEmail(self, email):
try:
c, conn = self.connect()
c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))
conn.commit()
self.disconnect()
return True
except Exception:
return False
<mask token>
def getMostClicked(self):
try:
c, conn = self.connect()
c.execute(
'SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1'
)
data = c.fetchone()
print(data)
self.disconnect()
return [data[0], data[1], data[2]]
except:
return []
| <mask token>
class DatabaseConnection:
<mask token>
<mask token>
def connect(self):
self.conn = MySQLdb.connect(host=self.address, port=3306, user=self
.user, passwd=self.password, db=self.database)
c = self.conn.cursor()
return c, self.conn
def disconnect(self):
self.conn.close()
def addEmail(self, email, number):
try:
c, conn = self.connect()
c.execute(
'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',
(thwart(email), thwart(number)))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def removeEmail(self, email):
try:
c, conn = self.connect()
c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def updateSpamTable(self, mailID, repo):
try:
c, conn = self.connect()
no = c.execute('SELECT * FROM spammail WHERE idEmail = %s', (
thwart(mailID),))
print(no)
if no == 0:
c.execute(
'INSERT INTO spammail (numClicked, repo, idEmail) VALUES (%s, %s, %s)'
, (1, thwart(repo), thwart(mailID)))
else:
c.execute('SELECT numClicked FROM spammail WHERE idEmail = %s',
(thwart(mailID),))
no = c.fetchone()[0]
print(no)
c.execute(
'UPDATE spammail SET numClicked = %s WHERE idEmail = %s',
(no + 1, thwart(mailID)))
conn.commit()
self.disconnect()
print('here')
return True
except:
return False
def getMostClicked(self):
try:
c, conn = self.connect()
c.execute(
'SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1'
)
data = c.fetchone()
print(data)
self.disconnect()
return [data[0], data[1], data[2]]
except:
return []
| <mask token>
class DatabaseConnection:
def __init__(self, address, user, password, database):
self.address = address
self.user = user
self.password = password
self.database = database
<mask token>
def connect(self):
self.conn = MySQLdb.connect(host=self.address, port=3306, user=self
.user, passwd=self.password, db=self.database)
c = self.conn.cursor()
return c, self.conn
def disconnect(self):
self.conn.close()
def addEmail(self, email, number):
try:
c, conn = self.connect()
c.execute(
'INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)',
(thwart(email), thwart(number)))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def removeEmail(self, email):
try:
c, conn = self.connect()
c.execute('DELETE from User WHERE email = (%s)', (thwart(email),))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def updateSpamTable(self, mailID, repo):
try:
c, conn = self.connect()
no = c.execute('SELECT * FROM spammail WHERE idEmail = %s', (
thwart(mailID),))
print(no)
if no == 0:
c.execute(
'INSERT INTO spammail (numClicked, repo, idEmail) VALUES (%s, %s, %s)'
, (1, thwart(repo), thwart(mailID)))
else:
c.execute('SELECT numClicked FROM spammail WHERE idEmail = %s',
(thwart(mailID),))
no = c.fetchone()[0]
print(no)
c.execute(
'UPDATE spammail SET numClicked = %s WHERE idEmail = %s',
(no + 1, thwart(mailID)))
conn.commit()
self.disconnect()
print('here')
return True
except:
return False
def getMostClicked(self):
try:
c, conn = self.connect()
c.execute(
'SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1'
)
data = c.fetchone()
print(data)
self.disconnect()
return [data[0], data[1], data[2]]
except:
return []
| import MySQLdb
from MySQLdb import escape_string as thwart
"""
"""
class DatabaseConnection:
def __init__(self, address, user, password, database):
self.address = address
self.user = user
self.password = password
self.database = database
"""
"""
def connect(self):
self.conn = MySQLdb.connect(host=self.address,
port=3306,
user=self.user,
passwd=self.password,
db=self.database)
c = self.conn.cursor()
return c, self.conn
def disconnect(self):
self.conn.close()
def addEmail(self, email, number):
try:
c, conn = self.connect()
c.execute("INSERT INTO User (email, maxEmailsPerMonth) VALUES (%s, %s)", (thwart(email), thwart(number),))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def removeEmail(self, email):
try:
c, conn = self.connect()
c.execute("DELETE from User WHERE email = (%s)", (thwart(email),))
conn.commit()
self.disconnect()
return True
except Exception:
return False
def updateSpamTable(self, mailID, repo):
try:
c, conn = self.connect()
no = c.execute("SELECT * FROM spammail WHERE idEmail = %s", (thwart(mailID),))
print(no)
if no == 0:
c.execute("INSERT INTO spammail (numClicked, repo, idEmail) VALUES (%s, %s, %s)", (1, thwart(repo), thwart(mailID),))
else:
c.execute("SELECT numClicked FROM spammail WHERE idEmail = %s", (thwart(mailID),))
no = c.fetchone()[0]
print(no)
c.execute("UPDATE spammail SET numClicked = %s WHERE idEmail = %s", (no+1, thwart(mailID),))
conn.commit()
self.disconnect()
print("here")
return True
except:
return False
def getMostClicked(self):
try:
c, conn = self.connect()
c.execute("SELECT idEmail, repo, numClicked FROM SpamMail ORDER BY numClicked DESC LIMIT 1")
data = c.fetchone()
print(data)
self.disconnect()
return [data[0], data[1], data[2]]
except:
return []
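# Illustrative usage (host, credentials and database name are placeholder assumptions):
#   db = DatabaseConnection("127.0.0.1", "webapp", "secret", "maildb")
#   db.addEmail("user@example.com", "100")
#   print(db.getMostClicked())  # -> [idEmail, repo, numClicked] on success, [] on failure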
| [
5,
6,
7,
8,
11
] |
1,127 | d2972fb7cff08e15957f9baeaa6fd9a6f5bbb006 | class Calculator:
<mask token>
def Subtract(self, num1, num2):
return num1 - num2
<mask token>
def Divide(self, num1, num2):
return num1 / num2
<mask token>
| class Calculator:
def Add(self, num1, num2):
return num1 + num2
def Subtract(self, num1, num2):
return num1 - num2
<mask token>
def Divide(self, num1, num2):
return num1 / num2
<mask token>
| class Calculator:
def Add(self, num1, num2):
return num1 + num2
def Subtract(self, num1, num2):
return num1 - num2
def Multiply(self, num1, num2):
return num1 * num2
def Divide(self, num1, num2):
return num1 / num2
<mask token>
| class Calculator:
def Add(self, num1, num2):
return num1 + num2
def Subtract(self, num1, num2):
return num1 - num2
def Multiply(self, num1, num2):
return num1 * num2
def Divide(self, num1, num2):
return num1 / num2
if __name__ == '__main__':
calc = Calculator()
print(calc.Add(1, 2))
print(calc.Subtract(1, 2))
print(calc.Multiply(1, 2))
print(calc.Divide(1, 2))
| # This file is part of the functional_calculator_oop.py Task
# Create a class called Calculator
class Calculator:
def Add(self, num1, num2):
return num1 + num2
def Subtract(self, num1, num2):
return num1 - num2
def Multiply(self, num1, num2):
return num1 * num2
def Divide(self, num1, num2):
return num1 / num2
# We need this conditional check so that the code doesn't run automatically when we import it on another file
if __name__ == "__main__":
# Create calculator object
calc = Calculator()
# Use object to call methods
print(calc.Add(1, 2))
print(calc.Subtract(1, 2))
print(calc.Multiply(1, 2))
print(calc.Divide(1, 2))
# Here we can see that __name__ is main when ran from here directly, but calculator_oop when imported on another file
# print(__name__)
| [
3,
4,
5,
6,
7
] |
1,128 | 2a5d498a386190bdd2c05bc2b14db0fecd707162 | <mask token>
| from .slinklist import SingleLinkedList
| null | null | null | [
0,
1
] |
1,129 | 6b45541c54f1a4ce94d6bd457701ecd1b90a4c4c | #!/usr/bin/env python
#_*_coding:utf-8_*_
# Author: Paul哥
from fabric.api import settings,run,cd,env,hosts
from fabric.colors import *
env.hosts=['192.168.75.130:22']
env.password='hello123'
env.user='root'
def test():
with cd('/home'):
print yellow(run('ls -l'))
test()
| null | null | null | null | [
0
] |
1,130 | 37580939a0e58bdffb8cfad8252f339a7da4446e | <mask token>
| <mask token>
for t in sorted(list(permutations(s, int(k)))):
print(*t, sep='')
| <mask token>
s, space, k = raw_input().partition(' ')
for t in sorted(list(permutations(s, int(k)))):
print(*t, sep='')
| from __future__ import print_function
from itertools import permutations
s, space, k = raw_input().partition(' ')
for t in sorted(list(permutations(s, int(k)))):
print(*t, sep='')
| null | [
0,
1,
2,
3
] |
1,131 | 8197d918b86f0e38fb4320434b61aa4186853af9 | <mask token>
@register_command('sig gallery-application version show')
class Show(AAZCommand):
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
class GalleryApplicationVersionsGet(AAZHttpOperation):
CLIENT_TYPE = 'MgmtClient'
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=
False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'
, **self.url_parameters)
@property
def method(self):
return 'GET'
@property
def error_format(self):
return 'ODataV4Format'
@property
def url_parameters(self):
parameters = {**self.serialize_url_param(
'galleryApplicationName', self.ctx.args.
gallery_application_name, required=True), **self.
serialize_url_param('galleryApplicationVersionName', self.
ctx.args.gallery_application_version_name, required=True),
**self.serialize_url_param('galleryName', self.ctx.args.
gallery_name, required=True), **self.serialize_url_param(
'resourceGroupName', self.ctx.args.resource_group, required
=True), **self.serialize_url_param('subscriptionId', self.
ctx.subscription_id, required=True)}
return parameters
@property
def query_parameters(self):
parameters = {**self.serialize_query_param('$expand', self.ctx.
args.expand), **self.serialize_query_param('api-version',
'2022-01-03', required=True)}
return parameters
@property
def header_parameters(self):
parameters = {**self.serialize_header_param('Accept',
'application/json')}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var('instance', data, schema_builder=self.
_build_schema_on_200)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(flags={'read_only': True})
_schema_on_200.location = AAZStrType(flags={'required': True})
_schema_on_200.name = AAZStrType(flags={'read_only': True})
_schema_on_200.properties = AAZObjectType(flags={
'client_flatten': True})
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(flags={'read_only': True})
properties = cls._schema_on_200.properties
properties.provisioning_state = AAZStrType(serialized_name=
'provisioningState', flags={'read_only': True})
properties.publishing_profile = AAZObjectType(serialized_name=
'publishingProfile', flags={'required': True})
properties.replication_status = AAZObjectType(serialized_name=
'replicationStatus')
publishing_profile = (cls._schema_on_200.properties.
publishing_profile)
publishing_profile.advanced_settings = AAZDictType(serialized_name
='advancedSettings')
publishing_profile.enable_health_check = AAZBoolType(
serialized_name='enableHealthCheck')
publishing_profile.end_of_life_date = AAZStrType(serialized_name
='endOfLifeDate')
publishing_profile.exclude_from_latest = AAZBoolType(
serialized_name='excludeFromLatest')
publishing_profile.manage_actions = AAZObjectType(serialized_name
='manageActions')
publishing_profile.published_date = AAZStrType(serialized_name=
'publishedDate', flags={'read_only': True})
publishing_profile.replica_count = AAZIntType(serialized_name=
'replicaCount')
publishing_profile.replication_mode = AAZStrType(serialized_name
='replicationMode')
publishing_profile.settings = AAZObjectType()
publishing_profile.source = AAZObjectType(flags={'required': True})
publishing_profile.storage_account_type = AAZStrType(
serialized_name='storageAccountType')
publishing_profile.target_extended_locations = AAZListType(
serialized_name='targetExtendedLocations')
publishing_profile.target_regions = AAZListType(serialized_name
='targetRegions')
advanced_settings = (cls._schema_on_200.properties.
publishing_profile.advanced_settings)
advanced_settings.Element = AAZStrType()
manage_actions = (cls._schema_on_200.properties.
publishing_profile.manage_actions)
manage_actions.install = AAZStrType(flags={'required': True})
manage_actions.remove = AAZStrType(flags={'required': True})
manage_actions.update = AAZStrType()
settings = (cls._schema_on_200.properties.publishing_profile.
settings)
settings.config_file_name = AAZStrType(serialized_name=
'configFileName')
settings.package_file_name = AAZStrType(serialized_name=
'packageFileName')
source = cls._schema_on_200.properties.publishing_profile.source
source.default_configuration_link = AAZStrType(serialized_name=
'defaultConfigurationLink')
source.media_link = AAZStrType(serialized_name='mediaLink',
flags={'required': True})
target_extended_locations = (cls._schema_on_200.properties.
publishing_profile.target_extended_locations)
target_extended_locations.Element = AAZObjectType()
_element = (cls._schema_on_200.properties.publishing_profile.
target_extended_locations.Element)
_element.encryption = AAZObjectType()
_ShowHelper._build_schema_encryption_images_read(_element.
encryption)
_element.extended_location = AAZObjectType(serialized_name=
'extendedLocation')
_element.extended_location_replica_count = AAZIntType(
serialized_name='extendedLocationReplicaCount')
_element.name = AAZStrType()
_element.storage_account_type = AAZStrType(serialized_name=
'storageAccountType')
extended_location = (cls._schema_on_200.properties.
publishing_profile.target_extended_locations.Element.
extended_location)
extended_location.name = AAZStrType()
extended_location.type = AAZStrType()
target_regions = (cls._schema_on_200.properties.
publishing_profile.target_regions)
target_regions.Element = AAZObjectType()
_element = (cls._schema_on_200.properties.publishing_profile.
target_regions.Element)
_element.encryption = AAZObjectType()
_ShowHelper._build_schema_encryption_images_read(_element.
encryption)
_element.name = AAZStrType(flags={'required': True})
_element.regional_replica_count = AAZIntType(serialized_name=
'regionalReplicaCount')
_element.storage_account_type = AAZStrType(serialized_name=
'storageAccountType')
replication_status = (cls._schema_on_200.properties.
replication_status)
replication_status.aggregated_state = AAZStrType(serialized_name
='aggregatedState', flags={'read_only': True})
replication_status.summary = AAZListType(flags={'read_only': True})
summary = cls._schema_on_200.properties.replication_status.summary
summary.Element = AAZObjectType()
_element = (cls._schema_on_200.properties.replication_status.
summary.Element)
_element.details = AAZStrType(flags={'read_only': True})
_element.progress = AAZIntType(flags={'read_only': True})
_element.region = AAZStrType(flags={'read_only': True})
_element.state = AAZStrType(flags={'read_only': True})
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
_schema_encryption_images_read = None
@classmethod
def _build_schema_encryption_images_read(cls, _schema):
if cls._schema_encryption_images_read is not None:
_schema.data_disk_images = (cls._schema_encryption_images_read.
data_disk_images)
_schema.os_disk_image = (cls._schema_encryption_images_read.
os_disk_image)
return
(cls._schema_encryption_images_read) = (_schema_encryption_images_read
) = AAZObjectType()
encryption_images_read = _schema_encryption_images_read
encryption_images_read.data_disk_images = AAZListType(serialized_name
='dataDiskImages')
encryption_images_read.os_disk_image = AAZObjectType(serialized_name
='osDiskImage')
data_disk_images = _schema_encryption_images_read.data_disk_images
data_disk_images.Element = AAZObjectType()
_element = _schema_encryption_images_read.data_disk_images.Element
_element.disk_encryption_set_id = AAZStrType(serialized_name=
'diskEncryptionSetId')
_element.lun = AAZIntType(flags={'required': True})
os_disk_image = _schema_encryption_images_read.os_disk_image
os_disk_image.disk_encryption_set_id = AAZStrType(serialized_name=
'diskEncryptionSetId')
os_disk_image.security_profile = AAZObjectType(serialized_name=
'securityProfile')
security_profile = (_schema_encryption_images_read.os_disk_image.
security_profile)
security_profile.confidential_vm_encryption_type = AAZStrType(
serialized_name='confidentialVMEncryptionType')
security_profile.secure_vm_disk_encryption_set_id = AAZStrType(
serialized_name='secureVMDiskEncryptionSetId')
_schema.data_disk_images = (cls._schema_encryption_images_read.
data_disk_images)
_schema.os_disk_image = (cls._schema_encryption_images_read.
os_disk_image)
<mask token>
| <mask token>
@register_command('sig gallery-application version show')
class Show(AAZCommand):
<mask token>
<mask token>
<mask token>
<mask token>
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
_args_schema = cls._args_schema
_args_schema.gallery_application_name = AAZStrArg(options=[
'--application-name', '--gallery-application-name'], help=
'The name of the gallery application.', required=True, id_part=
'child_name_1')
_args_schema.gallery_application_version_name = AAZStrArg(options=[
'-n', '--name', '--version-name',
'--gallery-application-version-name'], help=
'The name of the gallery application version.', required=True,
id_part='child_name_2')
_args_schema.gallery_name = AAZStrArg(options=['-r',
'--gallery-name'], help='Gallery name.', required=True, id_part
='name')
_args_schema.resource_group = AAZResourceGroupNameArg(help=
'Name of resource group. You can configure the default group using `az configure --defaults group=<name>`.'
, required=True)
_args_schema.expand = AAZStrArg(options=['--expand'], help=
'The expand expression to apply on the operation. "ReplicationStatus" Default value is None.'
, enum={'ReplicationStatus': 'ReplicationStatus'})
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.GalleryApplicationVersionsGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
<mask token>
<mask token>
class GalleryApplicationVersionsGet(AAZHttpOperation):
CLIENT_TYPE = 'MgmtClient'
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=
False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'
, **self.url_parameters)
@property
def method(self):
return 'GET'
@property
def error_format(self):
return 'ODataV4Format'
@property
def url_parameters(self):
parameters = {**self.serialize_url_param(
'galleryApplicationName', self.ctx.args.
gallery_application_name, required=True), **self.
serialize_url_param('galleryApplicationVersionName', self.
ctx.args.gallery_application_version_name, required=True),
**self.serialize_url_param('galleryName', self.ctx.args.
gallery_name, required=True), **self.serialize_url_param(
'resourceGroupName', self.ctx.args.resource_group, required
=True), **self.serialize_url_param('subscriptionId', self.
ctx.subscription_id, required=True)}
return parameters
@property
def query_parameters(self):
parameters = {**self.serialize_query_param('$expand', self.ctx.
args.expand), **self.serialize_query_param('api-version',
'2022-01-03', required=True)}
return parameters
@property
def header_parameters(self):
parameters = {**self.serialize_header_param('Accept',
'application/json')}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var('instance', data, schema_builder=self.
_build_schema_on_200)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(flags={'read_only': True})
_schema_on_200.location = AAZStrType(flags={'required': True})
_schema_on_200.name = AAZStrType(flags={'read_only': True})
_schema_on_200.properties = AAZObjectType(flags={
'client_flatten': True})
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(flags={'read_only': True})
properties = cls._schema_on_200.properties
properties.provisioning_state = AAZStrType(serialized_name=
'provisioningState', flags={'read_only': True})
properties.publishing_profile = AAZObjectType(serialized_name=
'publishingProfile', flags={'required': True})
properties.replication_status = AAZObjectType(serialized_name=
'replicationStatus')
publishing_profile = (cls._schema_on_200.properties.
publishing_profile)
publishing_profile.advanced_settings = AAZDictType(serialized_name
='advancedSettings')
publishing_profile.enable_health_check = AAZBoolType(
serialized_name='enableHealthCheck')
publishing_profile.end_of_life_date = AAZStrType(serialized_name
='endOfLifeDate')
publishing_profile.exclude_from_latest = AAZBoolType(
serialized_name='excludeFromLatest')
publishing_profile.manage_actions = AAZObjectType(serialized_name
='manageActions')
publishing_profile.published_date = AAZStrType(serialized_name=
'publishedDate', flags={'read_only': True})
publishing_profile.replica_count = AAZIntType(serialized_name=
'replicaCount')
publishing_profile.replication_mode = AAZStrType(serialized_name
='replicationMode')
publishing_profile.settings = AAZObjectType()
publishing_profile.source = AAZObjectType(flags={'required': True})
publishing_profile.storage_account_type = AAZStrType(
serialized_name='storageAccountType')
publishing_profile.target_extended_locations = AAZListType(
serialized_name='targetExtendedLocations')
publishing_profile.target_regions = AAZListType(serialized_name
='targetRegions')
advanced_settings = (cls._schema_on_200.properties.
publishing_profile.advanced_settings)
advanced_settings.Element = AAZStrType()
manage_actions = (cls._schema_on_200.properties.
publishing_profile.manage_actions)
manage_actions.install = AAZStrType(flags={'required': True})
manage_actions.remove = AAZStrType(flags={'required': True})
manage_actions.update = AAZStrType()
settings = (cls._schema_on_200.properties.publishing_profile.
settings)
settings.config_file_name = AAZStrType(serialized_name=
'configFileName')
settings.package_file_name = AAZStrType(serialized_name=
'packageFileName')
source = cls._schema_on_200.properties.publishing_profile.source
source.default_configuration_link = AAZStrType(serialized_name=
'defaultConfigurationLink')
source.media_link = AAZStrType(serialized_name='mediaLink',
flags={'required': True})
target_extended_locations = (cls._schema_on_200.properties.
publishing_profile.target_extended_locations)
target_extended_locations.Element = AAZObjectType()
_element = (cls._schema_on_200.properties.publishing_profile.
target_extended_locations.Element)
_element.encryption = AAZObjectType()
_ShowHelper._build_schema_encryption_images_read(_element.
encryption)
_element.extended_location = AAZObjectType(serialized_name=
'extendedLocation')
_element.extended_location_replica_count = AAZIntType(
serialized_name='extendedLocationReplicaCount')
_element.name = AAZStrType()
_element.storage_account_type = AAZStrType(serialized_name=
'storageAccountType')
extended_location = (cls._schema_on_200.properties.
publishing_profile.target_extended_locations.Element.
extended_location)
extended_location.name = AAZStrType()
extended_location.type = AAZStrType()
target_regions = (cls._schema_on_200.properties.
publishing_profile.target_regions)
target_regions.Element = AAZObjectType()
_element = (cls._schema_on_200.properties.publishing_profile.
target_regions.Element)
_element.encryption = AAZObjectType()
_ShowHelper._build_schema_encryption_images_read(_element.
encryption)
_element.name = AAZStrType(flags={'required': True})
_element.regional_replica_count = AAZIntType(serialized_name=
'regionalReplicaCount')
_element.storage_account_type = AAZStrType(serialized_name=
'storageAccountType')
replication_status = (cls._schema_on_200.properties.
replication_status)
replication_status.aggregated_state = AAZStrType(serialized_name
='aggregatedState', flags={'read_only': True})
replication_status.summary = AAZListType(flags={'read_only': True})
summary = cls._schema_on_200.properties.replication_status.summary
summary.Element = AAZObjectType()
_element = (cls._schema_on_200.properties.replication_status.
summary.Element)
_element.details = AAZStrType(flags={'read_only': True})
_element.progress = AAZIntType(flags={'read_only': True})
_element.region = AAZStrType(flags={'read_only': True})
_element.state = AAZStrType(flags={'read_only': True})
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
_schema_encryption_images_read = None
@classmethod
def _build_schema_encryption_images_read(cls, _schema):
if cls._schema_encryption_images_read is not None:
_schema.data_disk_images = (cls._schema_encryption_images_read.
data_disk_images)
_schema.os_disk_image = (cls._schema_encryption_images_read.
os_disk_image)
return
(cls._schema_encryption_images_read) = (_schema_encryption_images_read
) = AAZObjectType()
encryption_images_read = _schema_encryption_images_read
encryption_images_read.data_disk_images = AAZListType(serialized_name
='dataDiskImages')
encryption_images_read.os_disk_image = AAZObjectType(serialized_name
='osDiskImage')
data_disk_images = _schema_encryption_images_read.data_disk_images
data_disk_images.Element = AAZObjectType()
_element = _schema_encryption_images_read.data_disk_images.Element
_element.disk_encryption_set_id = AAZStrType(serialized_name=
'diskEncryptionSetId')
_element.lun = AAZIntType(flags={'required': True})
os_disk_image = _schema_encryption_images_read.os_disk_image
os_disk_image.disk_encryption_set_id = AAZStrType(serialized_name=
'diskEncryptionSetId')
os_disk_image.security_profile = AAZObjectType(serialized_name=
'securityProfile')
security_profile = (_schema_encryption_images_read.os_disk_image.
security_profile)
security_profile.confidential_vm_encryption_type = AAZStrType(
serialized_name='confidentialVMEncryptionType')
security_profile.secure_vm_disk_encryption_set_id = AAZStrType(
serialized_name='secureVMDiskEncryptionSetId')
_schema.data_disk_images = (cls._schema_encryption_images_read.
data_disk_images)
_schema.os_disk_image = (cls._schema_encryption_images_read.
os_disk_image)
<mask token>
| <mask token>
@register_command('sig gallery-application version show')
class Show(AAZCommand):
<mask token>
<mask token>
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
<mask token>
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
_args_schema = cls._args_schema
_args_schema.gallery_application_name = AAZStrArg(options=[
'--application-name', '--gallery-application-name'], help=
'The name of the gallery application.', required=True, id_part=
'child_name_1')
_args_schema.gallery_application_version_name = AAZStrArg(options=[
'-n', '--name', '--version-name',
'--gallery-application-version-name'], help=
'The name of the gallery application version.', required=True,
id_part='child_name_2')
_args_schema.gallery_name = AAZStrArg(options=['-r',
'--gallery-name'], help='Gallery name.', required=True, id_part
='name')
_args_schema.resource_group = AAZResourceGroupNameArg(help=
'Name of resource group. You can configure the default group using `az configure --defaults group=<name>`.'
, required=True)
_args_schema.expand = AAZStrArg(options=['--expand'], help=
'The expand expression to apply on the operation. "ReplicationStatus" Default value is None.'
, enum={'ReplicationStatus': 'ReplicationStatus'})
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.GalleryApplicationVersionsGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
<mask token>
<mask token>
class GalleryApplicationVersionsGet(AAZHttpOperation):
CLIENT_TYPE = 'MgmtClient'
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=
False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'
, **self.url_parameters)
@property
def method(self):
return 'GET'
@property
def error_format(self):
return 'ODataV4Format'
@property
def url_parameters(self):
parameters = {**self.serialize_url_param(
'galleryApplicationName', self.ctx.args.
gallery_application_name, required=True), **self.
serialize_url_param('galleryApplicationVersionName', self.
ctx.args.gallery_application_version_name, required=True),
**self.serialize_url_param('galleryName', self.ctx.args.
gallery_name, required=True), **self.serialize_url_param(
'resourceGroupName', self.ctx.args.resource_group, required
=True), **self.serialize_url_param('subscriptionId', self.
ctx.subscription_id, required=True)}
return parameters
@property
def query_parameters(self):
parameters = {**self.serialize_query_param('$expand', self.ctx.
args.expand), **self.serialize_query_param('api-version',
'2022-01-03', required=True)}
return parameters
@property
def header_parameters(self):
parameters = {**self.serialize_header_param('Accept',
'application/json')}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var('instance', data, schema_builder=self.
_build_schema_on_200)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(flags={'read_only': True})
_schema_on_200.location = AAZStrType(flags={'required': True})
_schema_on_200.name = AAZStrType(flags={'read_only': True})
_schema_on_200.properties = AAZObjectType(flags={
'client_flatten': True})
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(flags={'read_only': True})
properties = cls._schema_on_200.properties
properties.provisioning_state = AAZStrType(serialized_name=
'provisioningState', flags={'read_only': True})
properties.publishing_profile = AAZObjectType(serialized_name=
'publishingProfile', flags={'required': True})
properties.replication_status = AAZObjectType(serialized_name=
'replicationStatus')
publishing_profile = (cls._schema_on_200.properties.
publishing_profile)
publishing_profile.advanced_settings = AAZDictType(serialized_name
='advancedSettings')
publishing_profile.enable_health_check = AAZBoolType(
serialized_name='enableHealthCheck')
publishing_profile.end_of_life_date = AAZStrType(serialized_name
='endOfLifeDate')
publishing_profile.exclude_from_latest = AAZBoolType(
serialized_name='excludeFromLatest')
publishing_profile.manage_actions = AAZObjectType(serialized_name
='manageActions')
publishing_profile.published_date = AAZStrType(serialized_name=
'publishedDate', flags={'read_only': True})
publishing_profile.replica_count = AAZIntType(serialized_name=
'replicaCount')
publishing_profile.replication_mode = AAZStrType(serialized_name
='replicationMode')
publishing_profile.settings = AAZObjectType()
publishing_profile.source = AAZObjectType(flags={'required': True})
publishing_profile.storage_account_type = AAZStrType(
serialized_name='storageAccountType')
publishing_profile.target_extended_locations = AAZListType(
serialized_name='targetExtendedLocations')
publishing_profile.target_regions = AAZListType(serialized_name
='targetRegions')
advanced_settings = (cls._schema_on_200.properties.
publishing_profile.advanced_settings)
advanced_settings.Element = AAZStrType()
manage_actions = (cls._schema_on_200.properties.
publishing_profile.manage_actions)
manage_actions.install = AAZStrType(flags={'required': True})
manage_actions.remove = AAZStrType(flags={'required': True})
manage_actions.update = AAZStrType()
settings = (cls._schema_on_200.properties.publishing_profile.
settings)
settings.config_file_name = AAZStrType(serialized_name=
'configFileName')
settings.package_file_name = AAZStrType(serialized_name=
'packageFileName')
source = cls._schema_on_200.properties.publishing_profile.source
source.default_configuration_link = AAZStrType(serialized_name=
'defaultConfigurationLink')
source.media_link = AAZStrType(serialized_name='mediaLink',
flags={'required': True})
target_extended_locations = (cls._schema_on_200.properties.
publishing_profile.target_extended_locations)
target_extended_locations.Element = AAZObjectType()
_element = (cls._schema_on_200.properties.publishing_profile.
target_extended_locations.Element)
_element.encryption = AAZObjectType()
_ShowHelper._build_schema_encryption_images_read(_element.
encryption)
_element.extended_location = AAZObjectType(serialized_name=
'extendedLocation')
_element.extended_location_replica_count = AAZIntType(
serialized_name='extendedLocationReplicaCount')
_element.name = AAZStrType()
_element.storage_account_type = AAZStrType(serialized_name=
'storageAccountType')
extended_location = (cls._schema_on_200.properties.
publishing_profile.target_extended_locations.Element.
extended_location)
extended_location.name = AAZStrType()
extended_location.type = AAZStrType()
target_regions = (cls._schema_on_200.properties.
publishing_profile.target_regions)
target_regions.Element = AAZObjectType()
_element = (cls._schema_on_200.properties.publishing_profile.
target_regions.Element)
_element.encryption = AAZObjectType()
_ShowHelper._build_schema_encryption_images_read(_element.
encryption)
_element.name = AAZStrType(flags={'required': True})
_element.regional_replica_count = AAZIntType(serialized_name=
'regionalReplicaCount')
_element.storage_account_type = AAZStrType(serialized_name=
'storageAccountType')
replication_status = (cls._schema_on_200.properties.
replication_status)
replication_status.aggregated_state = AAZStrType(serialized_name
='aggregatedState', flags={'read_only': True})
replication_status.summary = AAZListType(flags={'read_only': True})
summary = cls._schema_on_200.properties.replication_status.summary
summary.Element = AAZObjectType()
_element = (cls._schema_on_200.properties.replication_status.
summary.Element)
_element.details = AAZStrType(flags={'read_only': True})
_element.progress = AAZIntType(flags={'read_only': True})
_element.region = AAZStrType(flags={'read_only': True})
_element.state = AAZStrType(flags={'read_only': True})
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
_schema_encryption_images_read = None
@classmethod
def _build_schema_encryption_images_read(cls, _schema):
if cls._schema_encryption_images_read is not None:
_schema.data_disk_images = (cls._schema_encryption_images_read.
data_disk_images)
_schema.os_disk_image = (cls._schema_encryption_images_read.
os_disk_image)
return
(cls._schema_encryption_images_read) = (_schema_encryption_images_read
) = AAZObjectType()
encryption_images_read = _schema_encryption_images_read
encryption_images_read.data_disk_images = AAZListType(serialized_name
='dataDiskImages')
encryption_images_read.os_disk_image = AAZObjectType(serialized_name
='osDiskImage')
data_disk_images = _schema_encryption_images_read.data_disk_images
data_disk_images.Element = AAZObjectType()
_element = _schema_encryption_images_read.data_disk_images.Element
_element.disk_encryption_set_id = AAZStrType(serialized_name=
'diskEncryptionSetId')
_element.lun = AAZIntType(flags={'required': True})
os_disk_image = _schema_encryption_images_read.os_disk_image
os_disk_image.disk_encryption_set_id = AAZStrType(serialized_name=
'diskEncryptionSetId')
os_disk_image.security_profile = AAZObjectType(serialized_name=
'securityProfile')
security_profile = (_schema_encryption_images_read.os_disk_image.
security_profile)
security_profile.confidential_vm_encryption_type = AAZStrType(
serialized_name='confidentialVMEncryptionType')
security_profile.secure_vm_disk_encryption_set_id = AAZStrType(
serialized_name='secureVMDiskEncryptionSetId')
_schema.data_disk_images = (cls._schema_encryption_images_read.
data_disk_images)
_schema.os_disk_image = (cls._schema_encryption_images_read.
os_disk_image)
<mask token>
| <mask token>
@register_command('sig gallery-application version show')
class Show(AAZCommand):
<mask token>
<mask token>
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
<mask token>
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
_args_schema = cls._args_schema
_args_schema.gallery_application_name = AAZStrArg(options=[
'--application-name', '--gallery-application-name'], help=
'The name of the gallery application.', required=True, id_part=
'child_name_1')
_args_schema.gallery_application_version_name = AAZStrArg(options=[
'-n', '--name', '--version-name',
'--gallery-application-version-name'], help=
'The name of the gallery application version.', required=True,
id_part='child_name_2')
_args_schema.gallery_name = AAZStrArg(options=['-r',
'--gallery-name'], help='Gallery name.', required=True, id_part
='name')
_args_schema.resource_group = AAZResourceGroupNameArg(help=
'Name of resource group. You can configure the default group using `az configure --defaults group=<name>`.'
, required=True)
_args_schema.expand = AAZStrArg(options=['--expand'], help=
'The expand expression to apply on the operation. "ReplicationStatus" Default value is None.'
, enum={'ReplicationStatus': 'ReplicationStatus'})
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.GalleryApplicationVersionsGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
<mask token>
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance,
client_flatten=True)
return result
class GalleryApplicationVersionsGet(AAZHttpOperation):
CLIENT_TYPE = 'MgmtClient'
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=
False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}'
, **self.url_parameters)
@property
def method(self):
return 'GET'
@property
def error_format(self):
return 'ODataV4Format'
@property
def url_parameters(self):
parameters = {**self.serialize_url_param(
'galleryApplicationName', self.ctx.args.
gallery_application_name, required=True), **self.
serialize_url_param('galleryApplicationVersionName', self.
ctx.args.gallery_application_version_name, required=True),
**self.serialize_url_param('galleryName', self.ctx.args.
gallery_name, required=True), **self.serialize_url_param(
'resourceGroupName', self.ctx.args.resource_group, required
=True), **self.serialize_url_param('subscriptionId', self.
ctx.subscription_id, required=True)}
return parameters
@property
def query_parameters(self):
parameters = {**self.serialize_query_param('$expand', self.ctx.
args.expand), **self.serialize_query_param('api-version',
'2022-01-03', required=True)}
return parameters
@property
def header_parameters(self):
parameters = {**self.serialize_header_param('Accept',
'application/json')}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var('instance', data, schema_builder=self.
_build_schema_on_200)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(flags={'read_only': True})
_schema_on_200.location = AAZStrType(flags={'required': True})
_schema_on_200.name = AAZStrType(flags={'read_only': True})
_schema_on_200.properties = AAZObjectType(flags={
'client_flatten': True})
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(flags={'read_only': True})
properties = cls._schema_on_200.properties
properties.provisioning_state = AAZStrType(serialized_name=
'provisioningState', flags={'read_only': True})
properties.publishing_profile = AAZObjectType(serialized_name=
'publishingProfile', flags={'required': True})
properties.replication_status = AAZObjectType(serialized_name=
'replicationStatus')
publishing_profile = (cls._schema_on_200.properties.
publishing_profile)
publishing_profile.advanced_settings = AAZDictType(serialized_name
='advancedSettings')
publishing_profile.enable_health_check = AAZBoolType(
serialized_name='enableHealthCheck')
publishing_profile.end_of_life_date = AAZStrType(serialized_name
='endOfLifeDate')
publishing_profile.exclude_from_latest = AAZBoolType(
serialized_name='excludeFromLatest')
publishing_profile.manage_actions = AAZObjectType(serialized_name
='manageActions')
publishing_profile.published_date = AAZStrType(serialized_name=
'publishedDate', flags={'read_only': True})
publishing_profile.replica_count = AAZIntType(serialized_name=
'replicaCount')
publishing_profile.replication_mode = AAZStrType(serialized_name
='replicationMode')
publishing_profile.settings = AAZObjectType()
publishing_profile.source = AAZObjectType(flags={'required': True})
publishing_profile.storage_account_type = AAZStrType(
serialized_name='storageAccountType')
publishing_profile.target_extended_locations = AAZListType(
serialized_name='targetExtendedLocations')
publishing_profile.target_regions = AAZListType(serialized_name
='targetRegions')
advanced_settings = (cls._schema_on_200.properties.
publishing_profile.advanced_settings)
advanced_settings.Element = AAZStrType()
manage_actions = (cls._schema_on_200.properties.
publishing_profile.manage_actions)
manage_actions.install = AAZStrType(flags={'required': True})
manage_actions.remove = AAZStrType(flags={'required': True})
manage_actions.update = AAZStrType()
settings = (cls._schema_on_200.properties.publishing_profile.
settings)
settings.config_file_name = AAZStrType(serialized_name=
'configFileName')
settings.package_file_name = AAZStrType(serialized_name=
'packageFileName')
source = cls._schema_on_200.properties.publishing_profile.source
source.default_configuration_link = AAZStrType(serialized_name=
'defaultConfigurationLink')
source.media_link = AAZStrType(serialized_name='mediaLink',
flags={'required': True})
target_extended_locations = (cls._schema_on_200.properties.
publishing_profile.target_extended_locations)
target_extended_locations.Element = AAZObjectType()
_element = (cls._schema_on_200.properties.publishing_profile.
target_extended_locations.Element)
_element.encryption = AAZObjectType()
_ShowHelper._build_schema_encryption_images_read(_element.
encryption)
_element.extended_location = AAZObjectType(serialized_name=
'extendedLocation')
_element.extended_location_replica_count = AAZIntType(
serialized_name='extendedLocationReplicaCount')
_element.name = AAZStrType()
_element.storage_account_type = AAZStrType(serialized_name=
'storageAccountType')
extended_location = (cls._schema_on_200.properties.
publishing_profile.target_extended_locations.Element.
extended_location)
extended_location.name = AAZStrType()
extended_location.type = AAZStrType()
target_regions = (cls._schema_on_200.properties.
publishing_profile.target_regions)
target_regions.Element = AAZObjectType()
_element = (cls._schema_on_200.properties.publishing_profile.
target_regions.Element)
_element.encryption = AAZObjectType()
_ShowHelper._build_schema_encryption_images_read(_element.
encryption)
_element.name = AAZStrType(flags={'required': True})
_element.regional_replica_count = AAZIntType(serialized_name=
'regionalReplicaCount')
_element.storage_account_type = AAZStrType(serialized_name=
'storageAccountType')
replication_status = (cls._schema_on_200.properties.
replication_status)
replication_status.aggregated_state = AAZStrType(serialized_name
='aggregatedState', flags={'read_only': True})
replication_status.summary = AAZListType(flags={'read_only': True})
summary = cls._schema_on_200.properties.replication_status.summary
summary.Element = AAZObjectType()
_element = (cls._schema_on_200.properties.replication_status.
summary.Element)
_element.details = AAZStrType(flags={'read_only': True})
_element.progress = AAZIntType(flags={'read_only': True})
_element.region = AAZStrType(flags={'read_only': True})
_element.state = AAZStrType(flags={'read_only': True})
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
_schema_encryption_images_read = None
@classmethod
def _build_schema_encryption_images_read(cls, _schema):
if cls._schema_encryption_images_read is not None:
_schema.data_disk_images = (cls._schema_encryption_images_read.
data_disk_images)
_schema.os_disk_image = (cls._schema_encryption_images_read.
os_disk_image)
return
(cls._schema_encryption_images_read) = (_schema_encryption_images_read
) = AAZObjectType()
encryption_images_read = _schema_encryption_images_read
encryption_images_read.data_disk_images = AAZListType(serialized_name
='dataDiskImages')
encryption_images_read.os_disk_image = AAZObjectType(serialized_name
='osDiskImage')
data_disk_images = _schema_encryption_images_read.data_disk_images
data_disk_images.Element = AAZObjectType()
_element = _schema_encryption_images_read.data_disk_images.Element
_element.disk_encryption_set_id = AAZStrType(serialized_name=
'diskEncryptionSetId')
_element.lun = AAZIntType(flags={'required': True})
os_disk_image = _schema_encryption_images_read.os_disk_image
os_disk_image.disk_encryption_set_id = AAZStrType(serialized_name=
'diskEncryptionSetId')
os_disk_image.security_profile = AAZObjectType(serialized_name=
'securityProfile')
security_profile = (_schema_encryption_images_read.os_disk_image.
security_profile)
security_profile.confidential_vm_encryption_type = AAZStrType(
serialized_name='confidentialVMEncryptionType')
security_profile.secure_vm_disk_encryption_set_id = AAZStrType(
serialized_name='secureVMDiskEncryptionSetId')
_schema.data_disk_images = (cls._schema_encryption_images_read.
data_disk_images)
_schema.os_disk_image = (cls._schema_encryption_images_read.
os_disk_image)
<mask token>
| # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"sig gallery-application version show",
)
class Show(AAZCommand):
"""Get information about a gallery application version.
"""
_aaz_info = {
"version": "2022-01-03",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/galleries/{}/applications/{}/versions/{}", "2022-01-03"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.gallery_application_name = AAZStrArg(
options=["--application-name", "--gallery-application-name"],
help="The name of the gallery application.",
required=True,
id_part="child_name_1",
)
_args_schema.gallery_application_version_name = AAZStrArg(
options=["-n", "--name", "--version-name", "--gallery-application-version-name"],
help="The name of the gallery application version.",
required=True,
id_part="child_name_2",
)
_args_schema.gallery_name = AAZStrArg(
options=["-r", "--gallery-name"],
help="Gallery name.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
help="Name of resource group. You can configure the default group using `az configure --defaults group=<name>`.",
required=True,
)
_args_schema.expand = AAZStrArg(
options=["--expand"],
help="The expand expression to apply on the operation. \"ReplicationStatus\" Default value is None.",
enum={"ReplicationStatus": "ReplicationStatus"},
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.GalleryApplicationVersionsGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class GalleryApplicationVersionsGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}/versions/{galleryApplicationVersionName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"galleryApplicationName", self.ctx.args.gallery_application_name,
required=True,
),
**self.serialize_url_param(
"galleryApplicationVersionName", self.ctx.args.gallery_application_version_name,
required=True,
),
**self.serialize_url_param(
"galleryName", self.ctx.args.gallery_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"$expand", self.ctx.args.expand,
),
**self.serialize_query_param(
"api-version", "2022-01-03",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.location = AAZStrType(
flags={"required": True},
)
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.publishing_profile = AAZObjectType(
serialized_name="publishingProfile",
flags={"required": True},
)
properties.replication_status = AAZObjectType(
serialized_name="replicationStatus",
)
publishing_profile = cls._schema_on_200.properties.publishing_profile
publishing_profile.advanced_settings = AAZDictType(
serialized_name="advancedSettings",
)
publishing_profile.enable_health_check = AAZBoolType(
serialized_name="enableHealthCheck",
)
publishing_profile.end_of_life_date = AAZStrType(
serialized_name="endOfLifeDate",
)
publishing_profile.exclude_from_latest = AAZBoolType(
serialized_name="excludeFromLatest",
)
publishing_profile.manage_actions = AAZObjectType(
serialized_name="manageActions",
)
publishing_profile.published_date = AAZStrType(
serialized_name="publishedDate",
flags={"read_only": True},
)
publishing_profile.replica_count = AAZIntType(
serialized_name="replicaCount",
)
publishing_profile.replication_mode = AAZStrType(
serialized_name="replicationMode",
)
publishing_profile.settings = AAZObjectType()
publishing_profile.source = AAZObjectType(
flags={"required": True},
)
publishing_profile.storage_account_type = AAZStrType(
serialized_name="storageAccountType",
)
publishing_profile.target_extended_locations = AAZListType(
serialized_name="targetExtendedLocations",
)
publishing_profile.target_regions = AAZListType(
serialized_name="targetRegions",
)
advanced_settings = cls._schema_on_200.properties.publishing_profile.advanced_settings
advanced_settings.Element = AAZStrType()
manage_actions = cls._schema_on_200.properties.publishing_profile.manage_actions
manage_actions.install = AAZStrType(
flags={"required": True},
)
manage_actions.remove = AAZStrType(
flags={"required": True},
)
manage_actions.update = AAZStrType()
settings = cls._schema_on_200.properties.publishing_profile.settings
settings.config_file_name = AAZStrType(
serialized_name="configFileName",
)
settings.package_file_name = AAZStrType(
serialized_name="packageFileName",
)
source = cls._schema_on_200.properties.publishing_profile.source
source.default_configuration_link = AAZStrType(
serialized_name="defaultConfigurationLink",
)
source.media_link = AAZStrType(
serialized_name="mediaLink",
flags={"required": True},
)
target_extended_locations = cls._schema_on_200.properties.publishing_profile.target_extended_locations
target_extended_locations.Element = AAZObjectType()
_element = cls._schema_on_200.properties.publishing_profile.target_extended_locations.Element
_element.encryption = AAZObjectType()
_ShowHelper._build_schema_encryption_images_read(_element.encryption)
_element.extended_location = AAZObjectType(
serialized_name="extendedLocation",
)
_element.extended_location_replica_count = AAZIntType(
serialized_name="extendedLocationReplicaCount",
)
_element.name = AAZStrType()
_element.storage_account_type = AAZStrType(
serialized_name="storageAccountType",
)
extended_location = cls._schema_on_200.properties.publishing_profile.target_extended_locations.Element.extended_location
extended_location.name = AAZStrType()
extended_location.type = AAZStrType()
target_regions = cls._schema_on_200.properties.publishing_profile.target_regions
target_regions.Element = AAZObjectType()
_element = cls._schema_on_200.properties.publishing_profile.target_regions.Element
_element.encryption = AAZObjectType()
_ShowHelper._build_schema_encryption_images_read(_element.encryption)
_element.name = AAZStrType(
flags={"required": True},
)
_element.regional_replica_count = AAZIntType(
serialized_name="regionalReplicaCount",
)
_element.storage_account_type = AAZStrType(
serialized_name="storageAccountType",
)
replication_status = cls._schema_on_200.properties.replication_status
replication_status.aggregated_state = AAZStrType(
serialized_name="aggregatedState",
flags={"read_only": True},
)
replication_status.summary = AAZListType(
flags={"read_only": True},
)
summary = cls._schema_on_200.properties.replication_status.summary
summary.Element = AAZObjectType()
_element = cls._schema_on_200.properties.replication_status.summary.Element
_element.details = AAZStrType(
flags={"read_only": True},
)
_element.progress = AAZIntType(
flags={"read_only": True},
)
_element.region = AAZStrType(
flags={"read_only": True},
)
_element.state = AAZStrType(
flags={"read_only": True},
)
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
_schema_encryption_images_read = None
@classmethod
def _build_schema_encryption_images_read(cls, _schema):
if cls._schema_encryption_images_read is not None:
_schema.data_disk_images = cls._schema_encryption_images_read.data_disk_images
_schema.os_disk_image = cls._schema_encryption_images_read.os_disk_image
return
cls._schema_encryption_images_read = _schema_encryption_images_read = AAZObjectType()
encryption_images_read = _schema_encryption_images_read
encryption_images_read.data_disk_images = AAZListType(
serialized_name="dataDiskImages",
)
encryption_images_read.os_disk_image = AAZObjectType(
serialized_name="osDiskImage",
)
data_disk_images = _schema_encryption_images_read.data_disk_images
data_disk_images.Element = AAZObjectType()
_element = _schema_encryption_images_read.data_disk_images.Element
_element.disk_encryption_set_id = AAZStrType(
serialized_name="diskEncryptionSetId",
)
_element.lun = AAZIntType(
flags={"required": True},
)
os_disk_image = _schema_encryption_images_read.os_disk_image
os_disk_image.disk_encryption_set_id = AAZStrType(
serialized_name="diskEncryptionSetId",
)
os_disk_image.security_profile = AAZObjectType(
serialized_name="securityProfile",
)
security_profile = _schema_encryption_images_read.os_disk_image.security_profile
security_profile.confidential_vm_encryption_type = AAZStrType(
serialized_name="confidentialVMEncryptionType",
)
security_profile.secure_vm_disk_encryption_set_id = AAZStrType(
serialized_name="secureVMDiskEncryptionSetId",
)
_schema.data_disk_images = cls._schema_encryption_images_read.data_disk_images
_schema.os_disk_image = cls._schema_encryption_images_read.os_disk_image
__all__ = ["Show"]
| [
5,
8,
9,
10,
16
] |
1,132 | c10e1cf2f1ce5b11d19ddddbfc3dc9652d830a3c | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('web', '0005_remove_product_image')]
operations = [migrations.CreateModel(name='Subscription', fields=[('id',
models.AutoField(primary_key=True, serialize=False)), ('price',
models.FloatField()), ('duration_till', models.DateField()), (
'total_amount', models.FloatField()), ('buyer', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name=
'consumer', to=settings.AUTH_USER_MODEL)), ('seller', models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings
.AUTH_USER_MODEL))])]
| from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('web', '0005_remove_product_image')]
operations = [migrations.CreateModel(name='Subscription', fields=[('id',
models.AutoField(primary_key=True, serialize=False)), ('price',
models.FloatField()), ('duration_till', models.DateField()), (
'total_amount', models.FloatField()), ('buyer', models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, related_name=
'consumer', to=settings.AUTH_USER_MODEL)), ('seller', models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings
.AUTH_USER_MODEL))])]
| # Generated by Django 3.0.4 on 2020-03-27 11:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('web', '0005_remove_product_image'),
]
operations = [
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('price', models.FloatField()),
('duration_till', models.DateField()),
('total_amount', models.FloatField()),
('buyer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='consumer', to=settings.AUTH_USER_MODEL)),
('seller', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
0,
1,
2,
3,
4
] |
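For reference, a model definition consistent with the migration above would look roughly like the following sketch (field names and options are read off the CreateModel operation; the explicit id AutoField is Django's default and could be omitted; the app label 'web' is assumed):

from django.conf import settings
from django.db import models


class Subscription(models.Model):
    # fields mirror the CreateModel operation in the migration above
    price = models.FloatField()
    duration_till = models.DateField()
    total_amount = models.FloatField()
    buyer = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
                              related_name='consumer')
    seller = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)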
1,133 | 763c0baf919b48ff135f7aa18974da5b85ee40f5 | class Odwroc:
<mask token>
<mask token>
<mask token>
<mask token>
| class Odwroc:
def __init__(self, dane):
self.dane = dane
self.indeks = len(dane)
<mask token>
def __next__(self):
if self.indeks == 0:
raise StopIteration
self.indeks -= 1
return self.dane[self.indeks]
<mask token>
| class Odwroc:
def __init__(self, dane):
self.dane = dane
self.indeks = len(dane)
def __iter__(self):
return self
def __next__(self):
if self.indeks == 0:
raise StopIteration
self.indeks -= 1
return self.dane[self.indeks]
<mask token>
| class Odwroc:
def __init__(self, dane):
self.dane = dane
self.indeks = len(dane)
def __iter__(self):
return self
def __next__(self):
if self.indeks == 0:
raise StopIteration
self.indeks -= 1
return self.dane[self.indeks]
for i in Odwroc('Martusia'):
print(i, end='')
| class Odwroc():
def __init__(self,dane):
self.dane = dane
self.indeks = len(dane)
def __iter__(self):
return self
def __next__(self):
if self.indeks == 0:
raise StopIteration
self.indeks -= 1
return self.dane[self.indeks]
for i in Odwroc('Martusia'):
print(i,end = '')
| [
1,
3,
4,
5,
6
] |
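The Odwroc class above is a hand-rolled reverse iterator; for comparison, a minimal sketch of the same traversal using Python's built-in reversed(), which works on any sequence:

for ch in reversed('Martusia'):
    print(ch, end='')  # prints 'aisutraM', the same output as the class-based loop above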
1,134 | ea646068d48a9a4b5a578a5fb1399d83a4812b02 | <mask token>
| <mask token>
for file in file_list_excel:
"""遍历所有excel文件,删除空行"""
file_path = os.path.join(file_dir, file)
df = pd.read_excel(file_path)
data = pd.DataFrame(df.iloc[:, :]).dropna(axis=0, how='any')
new_list.append(data)
<mask token>
df_all.to_excel('new_file.xlsx', index=False)
print('Ok, exiting in 3 seconds.')
time.sleep(3)
| <mask token>
file_dir = os.getcwd()
file_list_all = os.listdir(file_dir)
file_list_excel = [item for item in file_list_all if '.xlsx' in item or
'.xls' in item]
new_list = []
for file in file_list_excel:
"""遍历所有excel文件,删除空行"""
file_path = os.path.join(file_dir, file)
df = pd.read_excel(file_path)
data = pd.DataFrame(df.iloc[:, :]).dropna(axis=0, how='any')
new_list.append(data)
df_all = pd.concat(new_list)
df_all.to_excel('new_file.xlsx', index=False)
print('Ok, exiting in 3 seconds.')
time.sleep(3)
| import os
import time
import pandas as pd
file_dir = os.getcwd()
file_list_all = os.listdir(file_dir)
file_list_excel = [item for item in file_list_all if '.xlsx' in item or
'.xls' in item]
new_list = []
for file in file_list_excel:
"""遍历所有excel文件,删除空行"""
file_path = os.path.join(file_dir, file)
df = pd.read_excel(file_path)
data = pd.DataFrame(df.iloc[:, :]).dropna(axis=0, how='any')
new_list.append(data)
df_all = pd.concat(new_list)
df_all.to_excel('new_file.xlsx', index=False)
print('Ok, exiting in 3 seconds.')
time.sleep(3)
| # -*- coding: utf-8 -*-
import os
import time
import pandas as pd
file_dir = os.getcwd() # get the current working directory
file_list_all = os.listdir(file_dir) # list all file names in the directory
file_list_excel = [item for item in file_list_all if ('.xlsx' in item) or ('.xls' in item)] # keep only Excel files
new_list = [] # empty list to hold each cleaned table below
for file in file_list_excel:
    '''Iterate over all Excel files and drop empty rows'''
    file_path = os.path.join(file_dir, file) # build the full path of the current file
    df = pd.read_excel(file_path) # read the current Excel file
    data = pd.DataFrame(df.iloc[:, :]).dropna(axis=0, how='any') # drop empty rows
    new_list.append(data) # append the cleaned frame to the list
df_all = pd.concat(new_list) # concatenate all cleaned tables
df_all.to_excel('new_file.xlsx', index=False) # write the merged data to one file
print('Ok, exiting in 3 seconds.')
time.sleep(3)
| [
0,
1,
2,
3,
4
] |
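A minimal alternative sketch of the same Excel-merging idea, using glob instead of filtering os.listdir; it assumes pandas with an Excel engine such as openpyxl is installed and that the .xlsx files sit in the current directory:

import glob
import pandas as pd

# read each workbook, drop rows containing empty cells, then merge and write one file
frames = [pd.read_excel(path).dropna(how='any') for path in glob.glob('*.xlsx')]
if frames:  # avoid pd.concat on an empty list
    # note: a previously written new_file.xlsx would also be picked up on re-runs
    pd.concat(frames).to_excel('new_file.xlsx', index=False)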
1,135 | 7be54b2bd99680beed3e8e9cb14225756a71a4ea | <mask token>
class AppData:
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
<mask token>
| <mask token>
class AppData:
def __init__(self, app, backend, moduleRefs, locations, modules,
version, checkout, silent):
"""Collects TF data according to specifications.
The specifications are passed as arguments when the object is initialized.
Parameters
----------
backend: string
`github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.
app: obj
The high-level API object
moduleRefs: tuple
Each member consists of a module ref, which is a tuple of information
that defines a module.
locations: string|tuple
One or more directory paths. They will be combined with the `modules`
argument and used as locations to search for TF data files.
modules: string|tuple
One or more directory path segments. They will be appended to the
paths given by the `locations` argument to form search locations
for TF data files.
version: string
            The version of TF data that should be retrieved. Version is a directory
level just below the search locations.
checkout: string
A specifier to use a specific release or commit of a data repository.
silent: string, optional tf.core.timestamp.SILENT_D
See `tf.core.timestamp.Timestamp`
"""
self.backend = backend
self.app = app
self.moduleRefs = [] if moduleRefs is None else moduleRefs.split(','
) if type(moduleRefs) is str else list(moduleRefs)
self.locationsArg = locations
self.modulesArg = modules
self.version = version
self.checkout = checkout
self.silent = silent
def getMain(self):
"""Get the main data of the corpus.
This is specified by the `org`, `repo` and `relative` settings under
`provenanceSpec` in `config.yaml`.
See Also
--------
tf.advanced.settings: options allowed in `config.yaml`
"""
app = self.app
checkout = self.checkout
aContext = app.context
org = aContext.org
repo = aContext.repo
relative = prefixSlash(aContext.relative)
appPath = aContext.appPath
appName = aContext.appName
if appName.startswith('app:'):
appParent = appPath.rsplit('/', 1)[0]
relative = f'{appParent}{relative}'
elif org is None or repo is None:
appPathRep = f'{appPath}/' if appPath else ''
relative = f'{appPathRep}{appName}'
self.checkout = 'local'
if not self.getModule(org, repo, prefixSlash(relative), checkout,
isBase=True):
self.good = False
<mask token>
def getRefs(self):
"""Get data from additional modules.
These are specified in the `moduleRefs` parameter of `AppData`.
We store the set of special modules in order to skip them
later when we are loading the standard modules.
"""
backend = self.backend
refs = self.moduleRefs
for ref in refs:
refPure = ref.rsplit(':', 1)[0]
if refPure in self.seen:
continue
parts = splitModRef(ref)
if not parts:
self.good = False
continue
parts[2] = prefixSlash(normpath(parts[2]))
theBackend = None if parts[-1] is None or parts[-1
] == backend else parts[-1]
if not self.getModule(*parts[0:-1], backend=theBackend):
self.good = False
def getModules(self):
"""Get data from additional local directories.
These are specified in the `locations` and `modules` parameters of `AppData`.
"""
self.provenance = []
provenance = self.provenance
self.mLocations = []
mLocations = self.mLocations
self.locations = None
self.modules = None
self.good = True
self.seen = set()
self.getMain()
self.getRefs()
self.getStandard()
version = self.version
good = self.good
app = self.app
if good:
app.mLocations = mLocations
app.provenance = provenance
else:
return
mModules = []
if mLocations:
mModules.append(version or '')
locations = self.locationsArg
modules = self.modulesArg
givenLocations = [] if locations is None else [expandDir(app, x.
strip()) for x in itemize(locations, '\n')] if type(locations
) is str else [str(x) for x in locations]
givenModules = [] if modules is None else [normpath(x.strip()) for
x in itemize(modules, '\n')] if type(modules) is str else [normpath
(str(x)) for x in modules]
self.locations = mLocations + givenLocations
self.modules = mModules + givenModules
<mask token>
<mask token>
| <mask token>
class AppData:
def __init__(self, app, backend, moduleRefs, locations, modules,
version, checkout, silent):
"""Collects TF data according to specifications.
The specifications are passed as arguments when the object is initialized.
Parameters
----------
backend: string
`github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.
app: obj
The high-level API object
moduleRefs: tuple
Each member consists of a module ref, which is a tuple of information
that defines a module.
locations: string|tuple
One or more directory paths. They will be combined with the `modules`
argument and used as locations to search for TF data files.
modules: string|tuple
One or more directory path segments. They will be appended to the
paths given by the `locations` argument to form search locations
for TF data files.
version: string
            The version of TF data that should be retrieved. Version is a directory
level just below the search locations.
checkout: string
A specifier to use a specific release or commit of a data repository.
silent: string, optional tf.core.timestamp.SILENT_D
See `tf.core.timestamp.Timestamp`
"""
self.backend = backend
self.app = app
self.moduleRefs = [] if moduleRefs is None else moduleRefs.split(','
) if type(moduleRefs) is str else list(moduleRefs)
self.locationsArg = locations
self.modulesArg = modules
self.version = version
self.checkout = checkout
self.silent = silent
def getMain(self):
"""Get the main data of the corpus.
This is specified by the `org`, `repo` and `relative` settings under
`provenanceSpec` in `config.yaml`.
See Also
--------
tf.advanced.settings: options allowed in `config.yaml`
"""
app = self.app
checkout = self.checkout
aContext = app.context
org = aContext.org
repo = aContext.repo
relative = prefixSlash(aContext.relative)
appPath = aContext.appPath
appName = aContext.appName
if appName.startswith('app:'):
appParent = appPath.rsplit('/', 1)[0]
relative = f'{appParent}{relative}'
elif org is None or repo is None:
appPathRep = f'{appPath}/' if appPath else ''
relative = f'{appPathRep}{appName}'
self.checkout = 'local'
if not self.getModule(org, repo, prefixSlash(relative), checkout,
isBase=True):
self.good = False
def getStandard(self):
"""Get the data of the standard modules specified by the settings of the corpus.
These are specified in the `moduleSpecs` setting under
`provenanceSpecs` in `config.yaml`.
They will be loaded *after* the extra modules specified in the **mod**
        parameter, and only insofar as they have not been specified in the
**mod** parameter. In this way you can pass overriding
checkout specifiers to the standard modules.
See Also
--------
tf.advanced.settings: options allowed in `config.yaml`
"""
app = self.app
loadData = app.loadData
if not loadData or loadData == 'core':
return
aContext = app.context
moduleSpecs = aContext.moduleSpecs
seen = self.seen
checkout = self.checkout
backend = self.backend
for m in (moduleSpecs or []):
org = m['org']
repo = m['repo']
relative = m['relative']
theCheckout = m.get('checkout', checkout)
theBackend = m.get('backend', backend)
bRep = backendRep(theBackend, 'spec', default=backend)
ref = f'{bRep}{org}/{repo}{relative}'
if ref in seen:
continue
if not self.getModule(org, repo, relative, theCheckout, backend
=theBackend, specs=m):
self.good = False
def getRefs(self):
"""Get data from additional modules.
These are specified in the `moduleRefs` parameter of `AppData`.
We store the set of special modules in order to skip them
later when we are loading the standard modules.
"""
backend = self.backend
refs = self.moduleRefs
for ref in refs:
refPure = ref.rsplit(':', 1)[0]
if refPure in self.seen:
continue
parts = splitModRef(ref)
if not parts:
self.good = False
continue
parts[2] = prefixSlash(normpath(parts[2]))
theBackend = None if parts[-1] is None or parts[-1
] == backend else parts[-1]
if not self.getModule(*parts[0:-1], backend=theBackend):
self.good = False
def getModules(self):
"""Get data from additional local directories.
These are specified in the `locations` and `modules` parameters of `AppData`.
"""
self.provenance = []
provenance = self.provenance
self.mLocations = []
mLocations = self.mLocations
self.locations = None
self.modules = None
self.good = True
self.seen = set()
self.getMain()
self.getRefs()
self.getStandard()
version = self.version
good = self.good
app = self.app
if good:
app.mLocations = mLocations
app.provenance = provenance
else:
return
mModules = []
if mLocations:
mModules.append(version or '')
locations = self.locationsArg
modules = self.modulesArg
givenLocations = [] if locations is None else [expandDir(app, x.
strip()) for x in itemize(locations, '\n')] if type(locations
) is str else [str(x) for x in locations]
givenModules = [] if modules is None else [normpath(x.strip()) for
x in itemize(modules, '\n')] if type(modules) is str else [normpath
(str(x)) for x in modules]
self.locations = mLocations + givenLocations
self.modules = mModules + givenModules
def getModule(self, org, repo, relative, checkout, backend=None, isBase
=False, specs=None):
"""Prepare to load a single module.
        Eventually, all TF data will be downloaded from local directories, based
on a list of location paths and module paths.
This function computes the contribution of a single module to both the
location paths and the module paths.
Parameters
----------
org: string
GitHub organization or GitLab group of the module
repo: string:
GitHub repository or GitLab project of the module
relative: string
Path within the repository of the module
checkout: string
A specifier to use a specific release or commit of a data repository.
backend: string
The backend if different from the backend of the main module
isBase: boolean, optional False
Whether this module is the main data of the corpus.
specs: dict, optional False
Additional informational attributes of the module, e.g. a DOI
"""
backend = self.backend if backend is None else backendRep(backend,
'norm')
bRep = backendRep(backend, 'spec', default=self.backend)
version = self.version
silent = self.silent
mLocations = self.mLocations
provenance = self.provenance
seen = self.seen
app = self.app
_browse = app._browse
aContext = app.context
branch = aContext.provenanceSpec['branch']
relative = prefixSlash(normpath(relative))
moduleRef = f'{bRep}{org}/{repo}{relative}'
if moduleRef in self.seen:
return True
if org is None or repo is None:
relativeBare = relative.removeprefix('/')
repoLocation = relativeBare
mLocations.append(relativeBare)
commit, local, release = None, None, None
else:
commit, release, local, localBase, localDir = checkoutRepo(backend,
_browse=_browse, org=org, repo=repo, folder=relative,
version=version, checkout=checkout, withPaths=False, keep=
False, silent=silent)
if not localBase:
return False
repoLocation = f'{localBase}/{org}/{repo}'
mLocations.append(f'{localBase}/{localDir}')
seen.add(moduleRef)
if isBase:
app.repoLocation = repoLocation
info = {}
for item in (('doi', None), ('corpus', f'{org}/{repo}{relative}')):
key, default = item
info[key] = getattr(aContext, key) if isBase else specs[key
] if specs and key in specs else default
provenance.append((('corpus', info['corpus']), ('version', version),
('commit', commit or '??'), ('release', release or 'none'), (
'live', provenanceLink(backend, org, repo, version, branch,
commit, local, release, relative)), ('doi', info['doi'])))
return True
def getModulesData(*args):
"""Retrieve all data for a corpus.
Parameters
----------
args: list
All parameters needed to retrieve all associated data.
They are the same as are needed to construct an `AppData` object.
"""
mData = AppData(*args)
mData.getModules()
if not mData.good or mData.locations is None:
return None
return mData.locations, mData.modules
| from ..core.helpers import itemize
from ..core.files import backendRep, expandDir, prefixSlash, normpath
from .helpers import splitModRef
from .repo import checkoutRepo
from .links import provenanceLink
class AppData:
def __init__(self, app, backend, moduleRefs, locations, modules,
version, checkout, silent):
"""Collects TF data according to specifications.
The specifications are passed as arguments when the object is initialized.
Parameters
----------
backend: string
`github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.
app: obj
The high-level API object
moduleRefs: tuple
Each member consists of a module ref, which is a tuple of information
that defines a module.
locations: string|tuple
One or more directory paths. They will be combined with the `modules`
argument and used as locations to search for TF data files.
modules: string|tuple
One or more directory path segments. They will be appended to the
paths given by the `locations` argument to form search locations
for TF data files.
version: string
            The version of TF data that should be retrieved. Version is a directory
level just below the search locations.
checkout: string
A specifier to use a specific release or commit of a data repository.
silent: string, optional tf.core.timestamp.SILENT_D
See `tf.core.timestamp.Timestamp`
"""
self.backend = backend
self.app = app
self.moduleRefs = [] if moduleRefs is None else moduleRefs.split(','
) if type(moduleRefs) is str else list(moduleRefs)
self.locationsArg = locations
self.modulesArg = modules
self.version = version
self.checkout = checkout
self.silent = silent
def getMain(self):
"""Get the main data of the corpus.
This is specified by the `org`, `repo` and `relative` settings under
`provenanceSpec` in `config.yaml`.
See Also
--------
tf.advanced.settings: options allowed in `config.yaml`
"""
app = self.app
checkout = self.checkout
aContext = app.context
org = aContext.org
repo = aContext.repo
relative = prefixSlash(aContext.relative)
appPath = aContext.appPath
appName = aContext.appName
if appName.startswith('app:'):
appParent = appPath.rsplit('/', 1)[0]
relative = f'{appParent}{relative}'
elif org is None or repo is None:
appPathRep = f'{appPath}/' if appPath else ''
relative = f'{appPathRep}{appName}'
self.checkout = 'local'
if not self.getModule(org, repo, prefixSlash(relative), checkout,
isBase=True):
self.good = False
def getStandard(self):
"""Get the data of the standard modules specified by the settings of the corpus.
These are specified in the `moduleSpecs` setting under
`provenanceSpecs` in `config.yaml`.
They will be loaded *after* the extra modules specified in the **mod**
        parameter, and only insofar as they have not been specified in the
**mod** parameter. In this way you can pass overriding
checkout specifiers to the standard modules.
See Also
--------
tf.advanced.settings: options allowed in `config.yaml`
"""
app = self.app
loadData = app.loadData
if not loadData or loadData == 'core':
return
aContext = app.context
moduleSpecs = aContext.moduleSpecs
seen = self.seen
checkout = self.checkout
backend = self.backend
for m in (moduleSpecs or []):
org = m['org']
repo = m['repo']
relative = m['relative']
theCheckout = m.get('checkout', checkout)
theBackend = m.get('backend', backend)
bRep = backendRep(theBackend, 'spec', default=backend)
ref = f'{bRep}{org}/{repo}{relative}'
if ref in seen:
continue
if not self.getModule(org, repo, relative, theCheckout, backend
=theBackend, specs=m):
self.good = False
def getRefs(self):
"""Get data from additional modules.
These are specified in the `moduleRefs` parameter of `AppData`.
We store the set of special modules in order to skip them
later when we are loading the standard modules.
"""
backend = self.backend
refs = self.moduleRefs
for ref in refs:
refPure = ref.rsplit(':', 1)[0]
if refPure in self.seen:
continue
parts = splitModRef(ref)
if not parts:
self.good = False
continue
parts[2] = prefixSlash(normpath(parts[2]))
theBackend = None if parts[-1] is None or parts[-1
] == backend else parts[-1]
if not self.getModule(*parts[0:-1], backend=theBackend):
self.good = False
def getModules(self):
"""Get data from additional local directories.
These are specified in the `locations` and `modules` parameters of `AppData`.
"""
self.provenance = []
provenance = self.provenance
self.mLocations = []
mLocations = self.mLocations
self.locations = None
self.modules = None
self.good = True
self.seen = set()
self.getMain()
self.getRefs()
self.getStandard()
version = self.version
good = self.good
app = self.app
if good:
app.mLocations = mLocations
app.provenance = provenance
else:
return
mModules = []
if mLocations:
mModules.append(version or '')
locations = self.locationsArg
modules = self.modulesArg
givenLocations = [] if locations is None else [expandDir(app, x.
strip()) for x in itemize(locations, '\n')] if type(locations
) is str else [str(x) for x in locations]
givenModules = [] if modules is None else [normpath(x.strip()) for
x in itemize(modules, '\n')] if type(modules) is str else [normpath
(str(x)) for x in modules]
self.locations = mLocations + givenLocations
self.modules = mModules + givenModules
def getModule(self, org, repo, relative, checkout, backend=None, isBase
=False, specs=None):
"""Prepare to load a single module.
        Eventually, all TF data will be downloaded from local directories, based
on a list of location paths and module paths.
This function computes the contribution of a single module to both the
location paths and the module paths.
Parameters
----------
org: string
GitHub organization or GitLab group of the module
repo: string:
GitHub repository or GitLab project of the module
relative: string
Path within the repository of the module
checkout: string
A specifier to use a specific release or commit of a data repository.
backend: string
The backend if different from the backend of the main module
isBase: boolean, optional False
Whether this module is the main data of the corpus.
specs: dict, optional False
Additional informational attributes of the module, e.g. a DOI
"""
backend = self.backend if backend is None else backendRep(backend,
'norm')
bRep = backendRep(backend, 'spec', default=self.backend)
version = self.version
silent = self.silent
mLocations = self.mLocations
provenance = self.provenance
seen = self.seen
app = self.app
_browse = app._browse
aContext = app.context
branch = aContext.provenanceSpec['branch']
relative = prefixSlash(normpath(relative))
moduleRef = f'{bRep}{org}/{repo}{relative}'
if moduleRef in self.seen:
return True
if org is None or repo is None:
relativeBare = relative.removeprefix('/')
repoLocation = relativeBare
mLocations.append(relativeBare)
commit, local, release = None, None, None
else:
commit, release, local, localBase, localDir = checkoutRepo(backend,
_browse=_browse, org=org, repo=repo, folder=relative,
version=version, checkout=checkout, withPaths=False, keep=
False, silent=silent)
if not localBase:
return False
repoLocation = f'{localBase}/{org}/{repo}'
mLocations.append(f'{localBase}/{localDir}')
seen.add(moduleRef)
if isBase:
app.repoLocation = repoLocation
info = {}
for item in (('doi', None), ('corpus', f'{org}/{repo}{relative}')):
key, default = item
info[key] = getattr(aContext, key) if isBase else specs[key
] if specs and key in specs else default
provenance.append((('corpus', info['corpus']), ('version', version),
('commit', commit or '??'), ('release', release or 'none'), (
'live', provenanceLink(backend, org, repo, version, branch,
commit, local, release, relative)), ('doi', info['doi'])))
return True
def getModulesData(*args):
"""Retrieve all data for a corpus.
Parameters
----------
args: list
All parameters needed to retrieve all associated data.
They are the same as are needed to construct an `AppData` object.
"""
mData = AppData(*args)
mData.getModules()
if not mData.good or mData.locations is None:
return None
return mData.locations, mData.modules
| from ..core.helpers import itemize
from ..core.files import backendRep, expandDir, prefixSlash, normpath
from .helpers import splitModRef
from .repo import checkoutRepo
from .links import provenanceLink
# GET DATA FOR MAIN SOURCE AND ALL MODULES
class AppData:
def __init__(
self, app, backend, moduleRefs, locations, modules, version, checkout, silent
):
"""Collects TF data according to specifications.
The specifications are passed as arguments when the object is initialized.
Parameters
----------
backend: string
`github` or `gitlab` or a GitLab instance such as `gitlab.huc.knaw.nl`.
app: obj
The high-level API object
moduleRefs: tuple
Each member consists of a module ref, which is a tuple of information
that defines a module.
locations: string|tuple
One or more directory paths. They will be combined with the `modules`
argument and used as locations to search for TF data files.
modules: string|tuple
One or more directory path segments. They will be appended to the
paths given by the `locations` argument to form search locations
for TF data files.
version: string
            The version of TF data that should be retrieved. Version is a directory
level just below the search locations.
checkout: string
A specifier to use a specific release or commit of a data repository.
silent: string, optional tf.core.timestamp.SILENT_D
See `tf.core.timestamp.Timestamp`
"""
self.backend = backend
self.app = app
self.moduleRefs = (
[]
if moduleRefs is None
else moduleRefs.split(",")
if type(moduleRefs) is str
else list(moduleRefs)
)
self.locationsArg = locations
self.modulesArg = modules
self.version = version
self.checkout = checkout
self.silent = silent
def getMain(self):
"""Get the main data of the corpus.
This is specified by the `org`, `repo` and `relative` settings under
`provenanceSpec` in `config.yaml`.
See Also
--------
tf.advanced.settings: options allowed in `config.yaml`
"""
app = self.app
checkout = self.checkout
aContext = app.context
org = aContext.org
repo = aContext.repo
relative = prefixSlash(aContext.relative)
appPath = aContext.appPath
appName = aContext.appName
if appName.startswith("app:"):
appParent = appPath.rsplit("/", 1)[0]
relative = f"{appParent}{relative}"
elif org is None or repo is None:
appPathRep = f"{appPath}/" if appPath else ""
relative = f"{appPathRep}{appName}"
self.checkout = "local"
if not self.getModule(org, repo, prefixSlash(relative), checkout, isBase=True):
self.good = False
def getStandard(self):
"""Get the data of the standard modules specified by the settings of the corpus.
These are specified in the `moduleSpecs` setting under
`provenanceSpecs` in `config.yaml`.
They will be loaded *after* the extra modules specified in the **mod**
        parameter, and only insofar as they have not been specified in the
**mod** parameter. In this way you can pass overriding
checkout specifiers to the standard modules.
See Also
--------
tf.advanced.settings: options allowed in `config.yaml`
"""
app = self.app
loadData = app.loadData
if not loadData or loadData == "core":
return
aContext = app.context
moduleSpecs = aContext.moduleSpecs
seen = self.seen
checkout = self.checkout
backend = self.backend
for m in moduleSpecs or []:
org = m["org"]
repo = m["repo"]
relative = m["relative"]
theCheckout = m.get("checkout", checkout)
theBackend = m.get("backend", backend)
bRep = backendRep(theBackend, "spec", default=backend)
ref = f"{bRep}{org}/{repo}{relative}"
if ref in seen:
continue
if not self.getModule(
org,
repo,
relative,
theCheckout,
backend=theBackend,
specs=m,
):
self.good = False
def getRefs(self):
"""Get data from additional modules.
These are specified in the `moduleRefs` parameter of `AppData`.
We store the set of special modules in order to skip them
later when we are loading the standard modules.
"""
backend = self.backend
refs = self.moduleRefs
for ref in refs:
refPure = ref.rsplit(":", 1)[0]
if refPure in self.seen:
continue
parts = splitModRef(ref)
if not parts:
self.good = False
continue
parts[2] = prefixSlash(normpath(parts[2])) # the relative bit
theBackend = (
None if parts[-1] is None or parts[-1] == backend else parts[-1]
)
if not self.getModule(*parts[0:-1], backend=theBackend):
self.good = False
def getModules(self):
"""Get data from additional local directories.
These are specified in the `locations` and `modules` parameters of `AppData`.
"""
self.provenance = []
provenance = self.provenance
self.mLocations = []
mLocations = self.mLocations
self.locations = None
self.modules = None
self.good = True
self.seen = set()
self.getMain()
self.getRefs()
self.getStandard()
version = self.version
good = self.good
app = self.app
if good:
app.mLocations = mLocations
app.provenance = provenance
else:
return
mModules = []
if mLocations:
mModules.append(version or "")
locations = self.locationsArg
modules = self.modulesArg
givenLocations = (
[]
if locations is None
else [expandDir(app, x.strip()) for x in itemize(locations, "\n")]
if type(locations) is str
else [str(x) for x in locations]
)
givenModules = (
[]
if modules is None
else [normpath(x.strip()) for x in itemize(modules, "\n")]
if type(modules) is str
else [normpath(str(x)) for x in modules]
)
self.locations = mLocations + givenLocations
self.modules = mModules + givenModules
def getModule(
self, org, repo, relative, checkout, backend=None, isBase=False, specs=None
):
"""Prepare to load a single module.
        Eventually, all TF data will be downloaded from local directories, based
on a list of location paths and module paths.
This function computes the contribution of a single module to both the
location paths and the module paths.
Parameters
----------
org: string
GitHub organization or GitLab group of the module
repo: string:
GitHub repository or GitLab project of the module
relative: string
Path within the repository of the module
checkout: string
A specifier to use a specific release or commit of a data repository.
backend: string
The backend if different from the backend of the main module
isBase: boolean, optional False
Whether this module is the main data of the corpus.
specs: dict, optional False
Additional informational attributes of the module, e.g. a DOI
"""
backend = self.backend if backend is None else backendRep(backend, "norm")
bRep = backendRep(backend, "spec", default=self.backend)
version = self.version
silent = self.silent
mLocations = self.mLocations
provenance = self.provenance
seen = self.seen
app = self.app
_browse = app._browse
aContext = app.context
branch = aContext.provenanceSpec["branch"]
relative = prefixSlash(normpath(relative))
moduleRef = f"{bRep}{org}/{repo}{relative}"
if moduleRef in self.seen:
return True
if org is None or repo is None:
relativeBare = relative.removeprefix("/")
repoLocation = relativeBare
mLocations.append(relativeBare)
(commit, local, release) = (None, None, None)
else:
(commit, release, local, localBase, localDir) = checkoutRepo(
backend,
_browse=_browse,
org=org,
repo=repo,
folder=relative,
version=version,
checkout=checkout,
withPaths=False,
keep=False,
silent=silent,
)
if not localBase:
return False
repoLocation = f"{localBase}/{org}/{repo}"
mLocations.append(f"{localBase}/{localDir}")
seen.add(moduleRef)
if isBase:
app.repoLocation = repoLocation
info = {}
for item in (
("doi", None),
("corpus", f"{org}/{repo}{relative}"),
):
(key, default) = item
info[key] = (
getattr(aContext, key)
if isBase
else specs[key]
if specs and key in specs
else default
)
provenance.append(
(
("corpus", info["corpus"]),
("version", version),
("commit", commit or "??"),
("release", release or "none"),
(
"live",
provenanceLink(
backend, org, repo, version, branch, commit, local, release, relative
),
),
("doi", info["doi"]),
)
)
return True
def getModulesData(*args):
"""Retrieve all data for a corpus.
Parameters
----------
args: list
All parameters needed to retrieve all associated data.
They are the same as are needed to construct an `AppData` object.
"""
mData = AppData(*args)
mData.getModules()
if not mData.good or mData.locations is None:
return None
return (mData.locations, mData.modules)
| [
1,
5,
8,
9,
10
] |
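A small standalone sketch of the moduleRefs normalization performed in AppData.__init__ above (the helper name here is illustrative, not part of the library):

def normalize_module_refs(module_refs):
    # None -> [], comma-separated string -> list of refs, any other iterable -> list
    if module_refs is None:
        return []
    if isinstance(module_refs, str):
        return module_refs.split(',')
    return list(module_refs)

print(normalize_module_refs('org/repo/tf,other/repo/tf'))  # ['org/repo/tf', 'other/repo/tf']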
1,136 | 8109fcc136b967e0ed4ca06077b32612605d5e5f | <mask token>
class NeuralNetwork:
def __init__(self, layer1, layer2):
self.layer1 = layer1
self.layer2 = layer2
<mask token>
<mask token>
<mask token>
def think(self, inputs):
output_from_layer1 = self.__sigmoid(dot(inputs, self.layer1))
output_from_layer2 = self.__sigmoid(dot(output_from_layer1, self.
layer2))
return output_from_layer1, output_from_layer2
<mask token>
<mask token>
| <mask token>
class NeuralNetwork:
def __init__(self, layer1, layer2):
self.layer1 = layer1
self.layer2 = layer2
<mask token>
def __sigmoid_derivative(self, x):
return x * (1 - x)
def train(self, training_set_inputs, training_set_outputs,
number_of_training_iterations):
for iteration in range(number_of_training_iterations):
output_from_layer_1, output_from_layer_2 = self.think(
training_set_inputs)
layer2_error = training_set_outputs - output_from_layer_2
layer2_delta = layer2_error * self.__sigmoid_derivative(
output_from_layer_2)
layer1_error = layer2_delta.dot(self.layer2.T)
layer1_delta = layer1_error * self.__sigmoid_derivative(
output_from_layer_1)
layer1_adjustment = training_set_inputs.T.dot(layer1_delta)
layer2_adjustment = output_from_layer_1.T.dot(layer2_delta)
self.layer1 += layer1_adjustment
self.layer2 += layer2_adjustment
def think(self, inputs):
output_from_layer1 = self.__sigmoid(dot(inputs, self.layer1))
output_from_layer2 = self.__sigmoid(dot(output_from_layer1, self.
layer2))
return output_from_layer1, output_from_layer2
def print_weights(self):
print(self.layer1)
print(self.layer2)
<mask token>
| <mask token>
class NeuralNetwork:
def __init__(self, layer1, layer2):
self.layer1 = layer1
self.layer2 = layer2
def __sigmoid(self, x):
return 1 / (1 + exp(-x))
def __sigmoid_derivative(self, x):
return x * (1 - x)
def train(self, training_set_inputs, training_set_outputs,
number_of_training_iterations):
for iteration in range(number_of_training_iterations):
output_from_layer_1, output_from_layer_2 = self.think(
training_set_inputs)
layer2_error = training_set_outputs - output_from_layer_2
layer2_delta = layer2_error * self.__sigmoid_derivative(
output_from_layer_2)
layer1_error = layer2_delta.dot(self.layer2.T)
layer1_delta = layer1_error * self.__sigmoid_derivative(
output_from_layer_1)
layer1_adjustment = training_set_inputs.T.dot(layer1_delta)
layer2_adjustment = output_from_layer_1.T.dot(layer2_delta)
self.layer1 += layer1_adjustment
self.layer2 += layer2_adjustment
def think(self, inputs):
output_from_layer1 = self.__sigmoid(dot(inputs, self.layer1))
output_from_layer2 = self.__sigmoid(dot(output_from_layer1, self.
layer2))
return output_from_layer1, output_from_layer2
def print_weights(self):
print(self.layer1)
print(self.layer2)
if __name__ == '__main__':
layer1 = array([[0.2, 0.1], [0.3, 0.1], [0.2, 0.1]])
layer2 = array([[0.5, 0.1]]).T
neural_network = NeuralNetwork(layer1, layer2)
neural_network.print_weights()
training_set_inputs = array([[normalized_set['input1'][0],
normalized_set['input2'][0], normalized_set['input3'][0]], [
normalized_set['input1'][1], normalized_set['input2'][1],
normalized_set['input3'][1]], [normalized_set['input1'][2],
normalized_set['input2'][2], normalized_set['input3'][2]], [
normalized_set['input1'][3], normalized_set['input2'][3],
normalized_set['input3'][3]], [normalized_set['input1'][4],
normalized_set['input2'][4], normalized_set['input3'][4]], [
normalized_set['input1'][5], normalized_set['input2'][5],
normalized_set['input3'][5]]])
training_set_outputs = array([[normalized_set['output'][0],
normalized_set['output'][1], normalized_set['output'][2],
normalized_set['output'][3], normalized_set['output'][4],
normalized_set['output'][5]]]).T
print('Inputs', training_set_inputs)
print('Output', training_set_outputs)
neural_network.train(training_set_inputs, training_set_outputs, 60000)
print('Weights ')
neural_network.print_weights()
output = neural_network.think(array([0.5, 0.6, 0.1]))
print('Weights', output[0])
print('Out ', output[1])
| from numpy import exp, array, dot
from read import normalized
class NeuralNetwork:
def __init__(self, layer1, layer2):
self.layer1 = layer1
self.layer2 = layer2
def __sigmoid(self, x):
return 1 / (1 + exp(-x))
def __sigmoid_derivative(self, x):
return x * (1 - x)
def train(self, training_set_inputs, training_set_outputs,
number_of_training_iterations):
for iteration in range(number_of_training_iterations):
output_from_layer_1, output_from_layer_2 = self.think(
training_set_inputs)
layer2_error = training_set_outputs - output_from_layer_2
layer2_delta = layer2_error * self.__sigmoid_derivative(
output_from_layer_2)
layer1_error = layer2_delta.dot(self.layer2.T)
layer1_delta = layer1_error * self.__sigmoid_derivative(
output_from_layer_1)
layer1_adjustment = training_set_inputs.T.dot(layer1_delta)
layer2_adjustment = output_from_layer_1.T.dot(layer2_delta)
self.layer1 += layer1_adjustment
self.layer2 += layer2_adjustment
def think(self, inputs):
output_from_layer1 = self.__sigmoid(dot(inputs, self.layer1))
output_from_layer2 = self.__sigmoid(dot(output_from_layer1, self.
layer2))
return output_from_layer1, output_from_layer2
def print_weights(self):
print(self.layer1)
print(self.layer2)
if __name__ == '__main__':
layer1 = array([[0.2, 0.1], [0.3, 0.1], [0.2, 0.1]])
layer2 = array([[0.5, 0.1]]).T
neural_network = NeuralNetwork(layer1, layer2)
neural_network.print_weights()
training_set_inputs = array([[normalized_set['input1'][0],
normalized_set['input2'][0], normalized_set['input3'][0]], [
normalized_set['input1'][1], normalized_set['input2'][1],
normalized_set['input3'][1]], [normalized_set['input1'][2],
normalized_set['input2'][2], normalized_set['input3'][2]], [
normalized_set['input1'][3], normalized_set['input2'][3],
normalized_set['input3'][3]], [normalized_set['input1'][4],
normalized_set['input2'][4], normalized_set['input3'][4]], [
normalized_set['input1'][5], normalized_set['input2'][5],
normalized_set['input3'][5]]])
training_set_outputs = array([[normalized_set['output'][0],
normalized_set['output'][1], normalized_set['output'][2],
normalized_set['output'][3], normalized_set['output'][4],
normalized_set['output'][5]]]).T
print('Inputs', training_set_inputs)
print('Output', training_set_outputs)
neural_network.train(training_set_inputs, training_set_outputs, 60000)
print('Weights ')
neural_network.print_weights()
output = neural_network.think(array([0.5, 0.6, 0.1]))
print('Weights', output[0])
print('Out ', output[1])
| from numpy import exp, array, dot
from read import normalized
class NeuralNetwork():
def __init__(self, layer1, layer2):
self.layer1 = layer1
self.layer2 = layer2
def __sigmoid(self, x):
return 1 / (1 + exp(-x))
def __sigmoid_derivative(self, x):
return x * (1 - x)
def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):
for iteration in range(number_of_training_iterations):
output_from_layer_1, output_from_layer_2 = self.think(training_set_inputs)
layer2_error = training_set_outputs - output_from_layer_2
layer2_delta = layer2_error * self.__sigmoid_derivative(output_from_layer_2)
layer1_error = layer2_delta.dot(self.layer2.T)
layer1_delta = layer1_error * self.__sigmoid_derivative(output_from_layer_1)
layer1_adjustment = training_set_inputs.T.dot(layer1_delta)
layer2_adjustment = output_from_layer_1.T.dot(layer2_delta)
self.layer1 += layer1_adjustment
self.layer2 += layer2_adjustment
def think(self, inputs):
output_from_layer1 = self.__sigmoid(dot(inputs, self.layer1))
output_from_layer2 = self.__sigmoid(dot(output_from_layer1, self.layer2))
return output_from_layer1, output_from_layer2
def print_weights(self):
print(self.layer1)
print(self.layer2)
if __name__ == "__main__":
layer1 = array([[0.2, 0.1], [0.3, 0.1], [0.2, 0.1]])
layer2 = array([[0.5, 0.1]]).T
neural_network = NeuralNetwork(layer1, layer2)
neural_network.print_weights()
training_set_inputs = array(
[
[normalized_set['input1'][0], normalized_set['input2'][0], normalized_set['input3'][0]],
[normalized_set['input1'][1], normalized_set['input2'][1], normalized_set['input3'][1]],
[normalized_set['input1'][2], normalized_set['input2'][2], normalized_set['input3'][2]],
[normalized_set['input1'][3], normalized_set['input2'][3], normalized_set['input3'][3]],
[normalized_set['input1'][4], normalized_set['input2'][4], normalized_set['input3'][4]],
[normalized_set['input1'][5], normalized_set['input2'][5], normalized_set['input3'][5]]
])
training_set_outputs = array(
[[
normalized_set['output'][0],
normalized_set['output'][1],
normalized_set['output'][2],
normalized_set['output'][3],
normalized_set['output'][4],
normalized_set['output'][5]
]]).T
print("Inputs", training_set_inputs)
print("Output", training_set_outputs)
neural_network.train(training_set_inputs, training_set_outputs, 60000)
print("Weights ")
neural_network.print_weights()
output = neural_network.think(array([0.5, 0.6, 0.1]))
print("Weights", output[0])
print("Out ", output[1])
| [
3,
6,
8,
9,
10
] |
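The network above depends on a local read module (imported as normalized but used as normalized_set), so it is not runnable on its own; a minimal self-contained sketch of the same two-layer forward pass with the inline weights from the script and an assumed, already-normalized input vector:

from numpy import exp, array, dot

def sigmoid(x):
    return 1 / (1 + exp(-x))

layer1 = array([[0.2, 0.1], [0.3, 0.1], [0.2, 0.1]])  # 3 inputs -> 2 hidden units
layer2 = array([[0.5, 0.1]]).T                        # 2 hidden units -> 1 output
inputs = array([0.5, 0.6, 0.1])                       # assumed input, not from the original read module
hidden = sigmoid(dot(inputs, layer1))
output = sigmoid(dot(hidden, layer2))
print(hidden, output)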
1,137 | 10a9437453371bd7472e93af1026c778b7983cf8 | <mask token>
class BubbleTypes(Enum):
USER = auto()
SYSTEM = auto()
STATUS = auto()
INFO = auto()
def __str__(self):
return str(self.value)
class Relations(Enum):
UNDERMINE = 'undermine'
UNDERCUT = 'undercut'
REBUT = 'rebut'
SUPPORT = 'support'
def __str__(self):
return str(self.value)
class Attitudes(Enum):
AGREE = 'agree'
DISAGREE = 'disagree'
DONT_KNOW = 'dontknow'
def __str__(self):
return str(self.value)
<mask token>
def escape_string(text):
"""
Escapes all html special chars.
:param text: string
:return: html.escape(text)
"""
return escape(text)
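# Illustrative call (html.escape semantics): escape_string('<b>&</b>') returns '&lt;b&gt;&amp;&lt;/b&gt;'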
def get_discussion_language(matchdict, params, session, current_issue_uid=None
):
"""
Returns Language.ui_locales
CALL AFTER issue_handler.get_id_of_slug(..)!
:param matchdict: matchdict of the current request
:param params: params of the current request
:param session: session of the current request
:param current_issue_uid: uid
:return:
"""
if not current_issue_uid:
current_issue = DBDiscussionSession.query(Issue).filter(Issue.
is_disabled == False, Issue.is_private == False).first()
current_issue_uid = current_issue.uid if current_issue else None
issue = matchdict['issue'] if 'issue' in matchdict else params['issue'
] if 'issue' in params else session['issue'
] if 'issue' in session else current_issue_uid
db_issue = DBDiscussionSession.query(Issue).get(issue)
return db_issue.lang if db_issue else 'en'
def python_datetime_pretty_print(ts, lang):
"""
Pretty print of a locale
:param ts: Timestamp
:param lang: ui_locales
:return: String
"""
formatter = '%b. %d.'
if lang == 'de':
try:
locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')
formatter = '%d. %b.'
except locale.Error:
locale.setlocale(locale.LC_TIME, 'en_US.UTF8')
return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)
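# Illustrative calls (outputs assume a C/en_US locale and, for 'de', that de_DE.UTF-8 is installed):
# python_datetime_pretty_print('2020-03-27', 'en')  ->  'Mar. 27.'
# python_datetime_pretty_print('2020-03-27', 'de')  ->  '27. Mär.'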
<mask token>
def __get_undercuts_of_argument(argument_uid, include_disabled):
"""
    Returns all undercuts of the given argument
:param argument_uid: Argument.uid
:param include_disabled: boolean
:return: list of Arguments
"""
db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid
=argument_uid)
if not include_disabled:
db_undercuts = db_undercuts.filter_by(is_disabled=False)
return db_undercuts.all() if db_undercuts else []
<mask token>
def get_all_arguments_with_text_by_statement_id(statement_uid):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param statement_uid: uid to a statement, which should be analyzed
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(statement_uid))
arguments = get_all_arguments_by_statement(statement_uid)
results = []
if arguments:
results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.
uid)} for arg in arguments]
return results
<mask token>
def get_slug_by_statement_uid(uid):
"""
Returns slug for the given Issue.uid
:param uid: Issue.uid
:return: String
"""
db_statement = DBDiscussionSession.query(Statement).get(uid)
return resolve_issue_uid_to_slug(db_statement.issue_uid)
def get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,
start_with_intro=False, first_arg_by_user=False, user_changed_opinion=
False, rearrange_intro=False, colored_position=False, attack_type=None,
minimize_on_undercut=False, is_users_opinion=True, anonymous_style=
False, support_counter_argument=False):
"""
Returns current argument as string like "conclusion, because premise1 and premise2"
:param uid: Integer
:param with_html_tag: Boolean
:param start_with_intro: Boolean
:param first_arg_by_user: Boolean
:param user_changed_opinion: Boolean
:param rearrange_intro: Boolean
:param colored_position: Boolean
:param attack_type: String
:param minimize_on_undercut: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:return: String
"""
logger('DBAS.LIB', 'main {}'.format(uid))
db_argument = DBDiscussionSession.query(Argument).get(uid)
if not db_argument:
return None
lang = db_argument.lang
_t = Translator(lang)
premisegroup_by_user = False
author_uid = None
db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)
).first()
if db_user:
author_uid = db_user.uid
pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.
premisegroup_uid)
marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(
argument_uid=uid, author_uid=db_user.uid).first()
premisegroup_by_user = (pgroup.author_uid == db_user.uid or
marked_argument is not None)
arg_array = [db_argument]
while db_argument.argument_uid:
db_argument = DBDiscussionSession.query(Argument).get(db_argument.
argument_uid)
arg_array.append(db_argument)
if attack_type == 'jump':
return __build_argument_for_jump(arg_array, with_html_tag)
if len(arg_array) == 1:
return __build_single_argument(arg_array[0], rearrange_intro,
with_html_tag, colored_position, attack_type, _t,
start_with_intro, is_users_opinion, anonymous_style,
support_counter_argument, author_uid)
else:
return __build_nested_argument(arg_array, first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)
<mask token>
def __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t
):
premises = db_argument.get_premisegroup_text()
if premises[-1] != '.':
premises += '.'
conclusion = db_argument.get_conclusion_text()
because = _t.get(_.because).lower()
conclusion = tag_conclusion + conclusion + tag_end
premises = tag_premise + premises + tag_end
intro = start_con + _t.get(_.isNotRight).lower(
) + end_tag if not db_argument.is_supportive else ''
ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)
if _t.get_lang() == 'de':
intro = _t.get(_.itIsTrueThatAnonymous
) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous
)
intro = intro[0:1].upper() + intro[1:]
intro = (start_pro if db_argument.is_supportive else start_con
) + intro + end_tag
ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)
return ret_value
<mask token>
def __build_nested_argument(arg_array: List[Argument], first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):
"""
:param arg_array:
:param first_arg_by_user:
:param user_changed_opinion:
:param with_html_tag:
:param start_with_intro:
:param minimize_on_undercut:
:param anonymous_style:
:param premisegroup_by_user:
:param _t:
:return:
"""
pgroups = []
supportive = []
arg_array = arg_array[::-1]
local_lang = arg_array[0].lang
for db_argument in arg_array:
text = db_argument.get_premisegroup_text()
pgroups.append(text)
supportive.append(db_argument.is_supportive)
conclusion = arg_array[0].get_conclusion_text()
sb = start_position if with_html_tag else ''
se = end_tag if with_html_tag else ''
because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(
) + ' '
if len(arg_array
        ) % 2 == 0 and not first_arg_by_user and not anonymous_style:
ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else
_.otherUsersSaidThat) + ' '
tmp_users_opinion = True
elif not anonymous_style:
ret_value = _t.get(_.soYourOpinionIsThat
) + ': ' if start_with_intro else ''
tmp_users_opinion = False
conclusion = se + conclusion[0:1].upper() + conclusion[1:]
else:
ret_value = _t.get(_.someoneArgued) + ' '
tmp_users_opinion = False
tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''
ret_value += tmp + conclusion + because + pgroups[0] + '.'
del pgroups[0]
if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:
return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[
len(pgroups) - 1] + se + '.'
for i, pgroup in enumerate(pgroups):
ret_value += ' '
if tmp_users_opinion and not anonymous_style:
tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else
_.butYouCounteredWithInterest)
ret_value += _t.get(_.otherParticipantsConvincedYouThat if
user_changed_opinion else tmp)
elif not anonymous_style:
ret_value += _t.get(_.youAgreeWithThatNow)
else:
ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_
.thenOtherUsersSaidThat)
ret_value += sb + ' ' + pgroups[i] + '.'
tmp_users_opinion = not tmp_users_opinion
    return ret_value.replace('  ', ' ')
def get_text_for_premisegroup_uid(uid):
"""
Returns joined text of the premise group and the premise ids
:param uid: premisegroup_uid
:return: text, uids
"""
warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid
=uid).join(Statement).all()
if len(db_premises) == 0:
return ''
texts = [premise.get_text() for premise in db_premises]
lang = DBDiscussionSession.query(Statement).get(db_premises[0].
statements.uid).lang
_t = Translator(lang)
return ' {} '.format(_t.get(_.aand)).join(texts)
<mask token>
def get_text_for_premise(uid: int, colored_position: bool=False):
"""
Returns text of premise with given uid
:param uid: Statement.uid
:param colored_position: Boolean
:return: String
"""
db_premise = DBDiscussionSession.query(Premise).get(uid)
if db_premise:
return db_premise.get_text(html=colored_position)
else:
return None
def get_text_for_conclusion(argument, start_with_intro=False,
rearrange_intro=False, is_users_opinion=True):
"""
    Check the argument's conclusion, whether it is a statement or an argument, and return the text
:param argument: Argument
:param start_with_intro: Boolean
:param rearrange_intro: Boolean
:return: String
"""
if argument.argument_uid:
return get_text_for_argument_uid(argument.argument_uid,
start_with_intro, rearrange_intro=rearrange_intro,
is_users_opinion=is_users_opinion)
else:
return argument.get_conclusion_text()
<mask token>
def get_user_by_private_or_public_nickname(nickname):
"""
Gets the user by his (public) nickname, based on the option, whether his nickname is public or not
:param nickname: Nickname of the user
:return: Current user or None
"""
db_user = get_user_by_case_insensitive_nickname(nickname)
db_public_user = get_user_by_case_insensitive_public_nickname(nickname)
uid = 0
if db_user:
uid = db_user.uid
elif db_public_user:
uid = db_public_user.uid
db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid
).first()
if not db_settings:
return None
if db_settings.should_show_public_nickname and db_user:
return db_user
elif not db_settings.should_show_public_nickname and db_public_user:
return db_public_user
return None
def get_user_by_case_insensitive_nickname(nickname):
"""
Returns user with given nickname
:param nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==
func.lower(nickname)).first()
<mask token>
def __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,
statement_uid, speech, lang):
"""
Builds the text for a bubble showing how many other participants share the same interest.
:param nickname: User.nickname
:param is_user: boolean
:param argument_uid: Argument.uid
:param statement_uid: Statement.uid
:param speech: dict()
:param lang: ui_locales
:return: [String]
"""
if not nickname:
nickname = 'anonymous'
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname
).first()
if not db_user:
db_user = DBDiscussionSession.query(User).filter_by(nickname=
'anonymous').first()
db_clicks, db_marks = __get_clicks_and_marks(argument_uid,
statement_uid, db_user)
_t = Translator(lang)
speech['votecounts'] = len(db_clicks) if db_clicks else 0
if db_marks:
speech['votecounts'] += len(db_marks)
votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[
'votecounts'], _t.get(_.voteCountTextMore)))
if is_user and db_user.gender == 'm':
gender_key = _.voteCountTextFirstM
elif is_user and db_user.gender == 'f':
gender_key = _.voteCountTextFirstF
else:
gender_key = _.voteCountTextFirst
votecount_keys[0] = '{}.'.format(_t.get(gender_key))
votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'
return votecount_keys
def __get_clicks_and_marks(argument_uid, statement_uid, db_user):
db_clicks = None
db_marks = None
if argument_uid:
db_clicks = DBDiscussionSession.query(ClickedArgument).filter(
ClickedArgument.argument_uid == argument_uid, ClickedArgument.
is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.
author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == argument_uid, MarkedArgument.
author_uid != db_user.uid).all()
elif statement_uid:
db_clicks = DBDiscussionSession.query(ClickedStatement).filter(
ClickedStatement.statement_uid == statement_uid,
ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,
ClickedStatement.author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedStatement).filter(
MarkedStatement.statement_uid == statement_uid, MarkedStatement
.author_uid != db_user.uid).all()
return db_clicks, db_marks
def is_argument_disabled_due_to_disabled_statements(argument):
"""
Returns true if any involved statement is disabled.
:param argument: Argument
:return: Boolean
"""
if argument.conclusion_uid is None:
db_argument = DBDiscussionSession.query(Argument).get(argument.
argument_uid)
conclusion = DBDiscussionSession.query(Statement).get(db_argument.
conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(db_argument)
for premise in premises:
if premise.statements.is_disabled:
return True
else:
print(argument.conclusion_uid)
conclusion = DBDiscussionSession.query(Statement).get(argument.
conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(argument)
for premise in premises:
if premise.statements.is_disabled:
return True
return False
def is_author_of_statement(db_user: User, statement_uid: int) ->bool:
"""
Is the user with given nickname author of the statement?
:param db_user: User
:param statement_uid: Statement.uid
:return: Boolean
"""
db_user = (db_user if db_user and db_user.nickname !=
nick_of_anonymous_user else None)
if not db_user:
return False
db_textversion = DBDiscussionSession.query(TextVersion).filter_by(
statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()
if not db_textversion:
return False
return db_textversion.author_uid == db_user.uid
<mask token>
def get_profile_picture(user: User, size: int=80, ignore_privacy_settings:
bool=False):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
:param user: User
:param size: Integer, default 80
:param ignore_privacy_settings:
:return: String
"""
additional_id = ''
if user and isinstance(user, User):
additional_id = ('' if user.settings.should_show_public_nickname or
ignore_privacy_settings else 'x')
return __get_gravatar(user, additional_id, size)
<mask token>
def get_author_data(uid, gravatar_on_right_side=True,
linked_with_users_page=True, profile_picture_size=20):
"""
Returns a-tag with gravatar of current author and users page as href
:param uid: Uid of the author
:param gravatar_on_right_side: True, if the gravatar is on the right of authors name
:param linked_with_users_page: True, if the text is a link to the authors site
:param profile_picture_size: Integer
:return: HTML-String
"""
db_user = DBDiscussionSession.query(User).get(int(uid))
if not db_user:
return None, 'Missing author with uid ' + str(uid), False
nick = db_user.global_nickname
img_src = get_profile_picture(db_user, profile_picture_size)
link_begin = ''
link_end = ''
if linked_with_users_page:
link_begin = '<a href="/user/{}" title="{}">'.format(db_user.uid, nick)
link_end = '</a>'
side = 'left' if gravatar_on_right_side else 'right'
img = '<img class="img-circle" src="{}" style="padding-{}: 0.3em">'.format(
img_src, side)
if gravatar_on_right_side:
return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end
), True
else:
return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end
), True
<mask token>
| <mask token>
class BubbleTypes(Enum):
USER = auto()
SYSTEM = auto()
STATUS = auto()
INFO = auto()
def __str__(self):
return str(self.value)
class Relations(Enum):
UNDERMINE = 'undermine'
UNDERCUT = 'undercut'
REBUT = 'rebut'
SUPPORT = 'support'
def __str__(self):
return str(self.value)
class Attitudes(Enum):
AGREE = 'agree'
DISAGREE = 'disagree'
DONT_KNOW = 'dontknow'
def __str__(self):
return str(self.value)
<mask token>
def escape_string(text):
"""
Escapes all html special chars.
:param text: string
:return: html.escape(text)
"""
return escape(text)
def get_discussion_language(matchdict, params, session, current_issue_uid=None
):
"""
Returns Language.ui_locales
CALL AFTER issue_handler.get_id_of_slug(..)!
:param matchdict: matchdict of the current request
:param params: params of the current request
:param session: session of the current request
:param current_issue_uid: uid
:return:
"""
if not current_issue_uid:
current_issue = DBDiscussionSession.query(Issue).filter(Issue.
is_disabled == False, Issue.is_private == False).first()
current_issue_uid = current_issue.uid if current_issue else None
issue = matchdict['issue'] if 'issue' in matchdict else params['issue'
] if 'issue' in params else session['issue'
] if 'issue' in session else current_issue_uid
db_issue = DBDiscussionSession.query(Issue).get(issue)
return db_issue.lang if db_issue else 'en'
def python_datetime_pretty_print(ts, lang):
"""
Pretty print of a locale
:param ts: Timestamp
:param lang: ui_locales
:return: String
"""
formatter = '%b. %d.'
if lang == 'de':
try:
locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')
formatter = '%d. %b.'
except locale.Error:
locale.setlocale(locale.LC_TIME, 'en_US.UTF8')
return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)
<mask token>
def __get_undercuts_of_argument(argument_uid, include_disabled):
"""
Returns all undercuts of the given argument
:param argument_uid: Argument.uid
:param include_disabled: boolean
:return: list of Arguments
"""
db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid
=argument_uid)
if not include_disabled:
db_undercuts = db_undercuts.filter_by(is_disabled=False)
return db_undercuts.all() if db_undercuts else []
<mask token>
def get_all_arguments_with_text_by_statement_id(statement_uid):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param statement_uid: uid to a statement, which should be analyzed
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(statement_uid))
arguments = get_all_arguments_by_statement(statement_uid)
results = []
if arguments:
results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.
uid)} for arg in arguments]
return results
<mask token>
def get_slug_by_statement_uid(uid):
"""
Returns the slug of the issue which the given statement belongs to
:param uid: Statement.uid
:return: String
"""
db_statement = DBDiscussionSession.query(Statement).get(uid)
return resolve_issue_uid_to_slug(db_statement.issue_uid)
def get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,
start_with_intro=False, first_arg_by_user=False, user_changed_opinion=
False, rearrange_intro=False, colored_position=False, attack_type=None,
minimize_on_undercut=False, is_users_opinion=True, anonymous_style=
False, support_counter_argument=False):
"""
Returns current argument as string like "conclusion, because premise1 and premise2"
:param uid: Integer
:param with_html_tag: Boolean
:param start_with_intro: Boolean
:param first_arg_by_user: Boolean
:param user_changed_opinion: Boolean
:param rearrange_intro: Boolean
:param colored_position: Boolean
:param attack_type: String
:param minimize_on_undercut: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:return: String
"""
logger('DBAS.LIB', 'main {}'.format(uid))
db_argument = DBDiscussionSession.query(Argument).get(uid)
if not db_argument:
return None
lang = db_argument.lang
_t = Translator(lang)
premisegroup_by_user = False
author_uid = None
db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)
).first()
if db_user:
author_uid = db_user.uid
pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.
premisegroup_uid)
marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(
argument_uid=uid, author_uid=db_user.uid).first()
premisegroup_by_user = (pgroup.author_uid == db_user.uid or
marked_argument is not None)
arg_array = [db_argument]
while db_argument.argument_uid:
db_argument = DBDiscussionSession.query(Argument).get(db_argument.
argument_uid)
arg_array.append(db_argument)
if attack_type == 'jump':
return __build_argument_for_jump(arg_array, with_html_tag)
if len(arg_array) == 1:
return __build_single_argument(arg_array[0], rearrange_intro,
with_html_tag, colored_position, attack_type, _t,
start_with_intro, is_users_opinion, anonymous_style,
support_counter_argument, author_uid)
else:
return __build_nested_argument(arg_array, first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)
<mask token>
def __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t
):
premises = db_argument.get_premisegroup_text()
if premises[-1] != '.':
premises += '.'
conclusion = db_argument.get_conclusion_text()
because = _t.get(_.because).lower()
conclusion = tag_conclusion + conclusion + tag_end
premises = tag_premise + premises + tag_end
intro = start_con + _t.get(_.isNotRight).lower(
) + end_tag if not db_argument.is_supportive else ''
ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)
if _t.get_lang() == 'de':
intro = _t.get(_.itIsTrueThatAnonymous
) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous
)
intro = intro[0:1].upper() + intro[1:]
intro = (start_pro if db_argument.is_supportive else start_con
) + intro + end_tag
ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)
return ret_value
<mask token>
def __get_tags_for_building_single_argument(with_html_tag, attack_type,
colored_position, premises, conclusion):
sb_none = start_tag if with_html_tag else ''
se = end_tag if with_html_tag else ''
if attack_type not in ['dont_know', 'jump']:
sb = start_tag if with_html_tag else ''
if colored_position:
sb = start_position if with_html_tag else ''
if attack_type == Relations.UNDERMINE:
premises = sb + premises + se
else:
conclusion = sb + conclusion + se
else:
sb = start_argument if with_html_tag else ''
sb_tmp = start_attack if with_html_tag else ''
premises = sb + premises + se
conclusion = sb_tmp + conclusion + se
return premises, conclusion, sb, sb_none, se
<mask token>
def __build_nested_argument(arg_array: List[Argument], first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):
"""
:param arg_array:
:param first_arg_by_user:
:param user_changed_opinion:
:param with_html_tag:
:param start_with_intro:
:param minimize_on_undercut:
:param anonymous_style:
:param premisegroup_by_user:
:param _t:
:return:
"""
pgroups = []
supportive = []
arg_array = arg_array[::-1]
local_lang = arg_array[0].lang
for db_argument in arg_array:
text = db_argument.get_premisegroup_text()
pgroups.append(text)
supportive.append(db_argument.is_supportive)
conclusion = arg_array[0].get_conclusion_text()
sb = start_position if with_html_tag else ''
se = end_tag if with_html_tag else ''
because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(
) + ' '
if len(arg_array
) % 2 == 0 and not first_arg_by_user and not anonymous_style:
ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else
_.otherUsersSaidThat) + ' '
tmp_users_opinion = True
elif not anonymous_style:
ret_value = _t.get(_.soYourOpinionIsThat
) + ': ' if start_with_intro else ''
tmp_users_opinion = False
conclusion = se + conclusion[0:1].upper() + conclusion[1:]
else:
ret_value = _t.get(_.someoneArgued) + ' '
tmp_users_opinion = False
tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''
ret_value += tmp + conclusion + because + pgroups[0] + '.'
del pgroups[0]
if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:
return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[
len(pgroups) - 1] + se + '.'
for i, pgroup in enumerate(pgroups):
ret_value += ' '
if tmp_users_opinion and not anonymous_style:
tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else
_.butYouCounteredWithInterest)
ret_value += _t.get(_.otherParticipantsConvincedYouThat if
user_changed_opinion else tmp)
elif not anonymous_style:
ret_value += _t.get(_.youAgreeWithThatNow)
else:
ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_
.thenOtherUsersSaidThat)
ret_value += sb + ' ' + pgroups[i] + '.'
tmp_users_opinion = not tmp_users_opinion
return ret_value.replace('  ', ' ')
def get_text_for_premisegroup_uid(uid):
"""
Returns the joined text of the premise group
:param uid: premisegroup_uid
:return: text
"""
warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid
=uid).join(Statement).all()
if len(db_premises) == 0:
return ''
texts = [premise.get_text() for premise in db_premises]
lang = DBDiscussionSession.query(Statement).get(db_premises[0].
statements.uid).lang
_t = Translator(lang)
return ' {} '.format(_t.get(_.aand)).join(texts)
<mask token>
def get_text_for_premise(uid: int, colored_position: bool=False):
"""
Returns text of premise with given uid
:param uid: Statement.uid
:param colored_position: Boolean
:return: String
"""
db_premise = DBDiscussionSession.query(Premise).get(uid)
if db_premise:
return db_premise.get_text(html=colored_position)
else:
return None
def get_text_for_conclusion(argument, start_with_intro=False,
rearrange_intro=False, is_users_opinion=True):
"""
Checks whether the argument's conclusion is a statement or an argument and returns the corresponding text
:param argument: Argument
:param start_with_intro: Boolean
:param rearrange_intro: Boolean
:return: String
"""
if argument.argument_uid:
return get_text_for_argument_uid(argument.argument_uid,
start_with_intro, rearrange_intro=rearrange_intro,
is_users_opinion=is_users_opinion)
else:
return argument.get_conclusion_text()
<mask token>
def get_user_by_private_or_public_nickname(nickname):
"""
Gets the user by his (public) nickname, depending on whether his nickname is public or not
:param nickname: Nickname of the user
:return: Current user or None
"""
db_user = get_user_by_case_insensitive_nickname(nickname)
db_public_user = get_user_by_case_insensitive_public_nickname(nickname)
uid = 0
if db_user:
uid = db_user.uid
elif db_public_user:
uid = db_public_user.uid
db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid
).first()
if not db_settings:
return None
if db_settings.should_show_public_nickname and db_user:
return db_user
elif not db_settings.should_show_public_nickname and db_public_user:
return db_public_user
return None
def get_user_by_case_insensitive_nickname(nickname):
"""
Returns user with given nickname
:param nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==
func.lower(nickname)).first()
<mask token>
def __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,
statement_uid, speech, lang):
"""
Builds the text for a bubble showing how many other participants share the same interest.
:param nickname: User.nickname
:param is_user: boolean
:param argument_uid: Argument.uid
:param statement_uid: Statement.uid
:param speech: dict()
:param lang: ui_locales
:return: [String]
"""
if not nickname:
nickname = 'anonymous'
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname
).first()
if not db_user:
db_user = DBDiscussionSession.query(User).filter_by(nickname=
'anonymous').first()
db_clicks, db_marks = __get_clicks_and_marks(argument_uid,
statement_uid, db_user)
_t = Translator(lang)
speech['votecounts'] = len(db_clicks) if db_clicks else 0
if db_marks:
speech['votecounts'] += len(db_marks)
votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[
'votecounts'], _t.get(_.voteCountTextMore)))
if is_user and db_user.gender == 'm':
gender_key = _.voteCountTextFirstM
elif is_user and db_user.gender == 'f':
gender_key = _.voteCountTextFirstF
else:
gender_key = _.voteCountTextFirst
votecount_keys[0] = '{}.'.format(_t.get(gender_key))
votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'
return votecount_keys
def __get_clicks_and_marks(argument_uid, statement_uid, db_user):
db_clicks = None
db_marks = None
if argument_uid:
db_clicks = DBDiscussionSession.query(ClickedArgument).filter(
ClickedArgument.argument_uid == argument_uid, ClickedArgument.
is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.
author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == argument_uid, MarkedArgument.
author_uid != db_user.uid).all()
elif statement_uid:
db_clicks = DBDiscussionSession.query(ClickedStatement).filter(
ClickedStatement.statement_uid == statement_uid,
ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,
ClickedStatement.author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedStatement).filter(
MarkedStatement.statement_uid == statement_uid, MarkedStatement
.author_uid != db_user.uid).all()
return db_clicks, db_marks
def is_argument_disabled_due_to_disabled_statements(argument):
"""
Returns true if any involved statement is disabled.
:param argument: Argument
:return: Boolean
"""
if argument.conclusion_uid is None:
db_argument = DBDiscussionSession.query(Argument).get(argument.
argument_uid)
conclusion = DBDiscussionSession.query(Statement).get(db_argument.
conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(db_argument)
for premise in premises:
if premise.statements.is_disabled:
return True
else:
print(argument.conclusion_uid)
conclusion = DBDiscussionSession.query(Statement).get(argument.
conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(argument)
for premise in premises:
if premise.statements.is_disabled:
return True
return False
def is_author_of_statement(db_user: User, statement_uid: int) ->bool:
"""
Is the user with given nickname author of the statement?
:param db_user: User
:param statement_uid: Statement.uid
:return: Boolean
"""
db_user = (db_user if db_user and db_user.nickname !=
nick_of_anonymous_user else None)
if not db_user:
return False
db_textversion = DBDiscussionSession.query(TextVersion).filter_by(
statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()
if not db_textversion:
return False
return db_textversion.author_uid == db_user.uid
<mask token>
def get_profile_picture(user: User, size: int=80, ignore_privacy_settings:
bool=False):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
:param user: User
:param size: Integer, default 80
:param ignore_privacy_settings:
:return: String
"""
additional_id = ''
if user and isinstance(user, User):
additional_id = ('' if user.settings.should_show_public_nickname or
ignore_privacy_settings else 'x')
return __get_gravatar(user, additional_id, size)
<mask token>
def __get_gravatar(user, additional_id, size):
if user:
if str(user.email) == 'None':
email = (user.nickname + additional_id).encode('utf-8')
else:
email = (user.email + additional_id).encode('utf-8')
else:
email = 'unknown'.encode('utf-8')
gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.
md5(email.lower()).hexdigest())
gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})
return gravatar_url
def get_author_data(uid, gravatar_on_right_side=True,
linked_with_users_page=True, profile_picture_size=20):
"""
Returns a-tag with gravatar of current author and users page as href
:param uid: Uid of the author
:param gravatar_on_right_side: True, if the gravatar is on the right of authors name
:param linked_with_users_page: True, if the text is a link to the authors site
:param profile_picture_size: Integer
:return: HTML-String
"""
db_user = DBDiscussionSession.query(User).get(int(uid))
if not db_user:
return None, 'Missing author with uid ' + str(uid), False
nick = db_user.global_nickname
img_src = get_profile_picture(db_user, profile_picture_size)
link_begin = ''
link_end = ''
if linked_with_users_page:
link_begin = '<a href="/user/{}" title="{}">'.format(db_user.uid, nick)
link_end = '</a>'
side = 'left' if gravatar_on_right_side else 'right'
img = '<img class="img-circle" src="{}" style="padding-{}: 0.3em">'.format(
img_src, side)
if gravatar_on_right_side:
return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end
), True
else:
return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end
), True
<mask token>
| <mask token>
class BubbleTypes(Enum):
USER = auto()
SYSTEM = auto()
STATUS = auto()
INFO = auto()
def __str__(self):
return str(self.value)
class Relations(Enum):
UNDERMINE = 'undermine'
UNDERCUT = 'undercut'
REBUT = 'rebut'
SUPPORT = 'support'
def __str__(self):
return str(self.value)
class Attitudes(Enum):
AGREE = 'agree'
DISAGREE = 'disagree'
DONT_KNOW = 'dontknow'
def __str__(self):
return str(self.value)
<mask token>
def get_global_url():
"""
Returns the global url of the project, based on the ENV
:return: String
"""
return os.environ.get('URL', '')
def get_changelog(no):
"""
Returns the 'no' last entries from the changelog
:param no: int
:return: list
"""
path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))
lines = [line.rstrip('\n').strip() for line in open(path) if len(line.
rstrip('\n').strip()) > 0]
changelog = []
title = ''
body = []
for l in lines:
if l.startswith('#'):
if len(title) > 0:
changelog.append({'title': title, 'body': body})
body = []
title = l.replace('### ', '')
else:
body.append(l.replace('- ', ''))
return changelog[0:no]
<mask token>
def usage_of_matomo(registry):
"""
Returns true, if matomo is set in the current ini file.
:param registry: request.registry
:return: Boolean
"""
if 'usage_of_matomo' in registry.settings:
return registry.settings['usage_of_matomo'].lower() == 'true'
return False
def escape_string(text):
"""
Escapes all html special chars.
:param text: string
:return: html.escape(text)
"""
return escape(text)
def get_discussion_language(matchdict, params, session, current_issue_uid=None
):
"""
Returns Language.ui_locales
CALL AFTER issue_handler.get_id_of_slug(..)!
:param matchdict: matchdict of the current request
:param params: params of the current request
:param session: session of the current request
:param current_issue_uid: uid
:return:
"""
if not current_issue_uid:
current_issue = DBDiscussionSession.query(Issue).filter(Issue.
is_disabled == False, Issue.is_private == False).first()
current_issue_uid = current_issue.uid if current_issue else None
issue = matchdict['issue'] if 'issue' in matchdict else params['issue'
] if 'issue' in params else session['issue'
] if 'issue' in session else current_issue_uid
db_issue = DBDiscussionSession.query(Issue).get(issue)
return db_issue.lang if db_issue else 'en'
def python_datetime_pretty_print(ts, lang):
"""
Pretty print of a locale
:param ts: Timestamp
:param lang: ui_locales
:return: String
"""
formatter = '%b. %d.'
if lang == 'de':
try:
locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')
formatter = '%d. %b.'
except locale.Error:
locale.setlocale(locale.LC_TIME, 'en_US.UTF8')
return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)
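# Illustrative sketch (not part of the original module): the formatter switches
# between an English and a German day/month order, so the same timestamp is
# rendered differently depending on the ui_locales value. Assuming the locale
# data is installed on the host:
#
#     python_datetime_pretty_print('2018-01-30', 'en')  # -> 'Jan. 30.'
#     python_datetime_pretty_print('2018-01-30', 'de')  # -> '30. Jan.'
#
# The exact month abbreviation depends on the installed locale.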
def get_all_arguments_by_statement(statement_uid, include_disabled=False):
"""
Returns a list of all arguments where the statement is a conclusion or member of the premisegroup
:param statement_uid: Statement.uid
:param include_disabled: Boolean
:return: [Arguments]
"""
logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid,
include_disabled))
db_arguments = __get_arguments_of_conclusion(statement_uid,
include_disabled)
arg_array = [arg for arg in db_arguments] if db_arguments else []
premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=
statement_uid)
if not include_disabled:
premises = premises.filter_by(is_disabled=False)
premises = premises.all()
for premise in premises:
arg_array += __get_argument_of_premisegroup(premise.
premisegroup_uid, include_disabled)
db_undercuts = []
for arg in arg_array:
db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)
db_undercutted_undercuts = []
for arg in db_undercuts:
db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid,
include_disabled)
arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts))
logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in
arg_array]))
return arg_array if len(arg_array) > 0 else None
def __get_argument_of_premisegroup(premisegroup_uid, include_disabled):
"""
Returns all arguments with the given premisegroup
:param premisegroup_uid: PremiseGroup.uid
:param include_disabled: Boolean
:return: list of Arguments
"""
db_arguments = DBDiscussionSession.query(Argument).filter_by(
premisegroup_uid=premisegroup_uid)
if not include_disabled:
db_arguments = db_arguments.filter_by(is_disabled=False)
return db_arguments.all() if db_arguments else []
def __get_undercuts_of_argument(argument_uid, include_disabled):
"""
Returns all undercuts of the given argument
:param argument_uid: Argument.uid
:param include_disabled: boolean
:return: list of Arguments
"""
db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid
=argument_uid)
if not include_disabled:
db_undercuts = db_undercuts.filter_by(is_disabled=False)
return db_undercuts.all() if db_undercuts else []
def __get_arguments_of_conclusion(statement_uid, include_disabled):
"""
Returns all arguments, where the statement is set as conclusion
:param statement_uid: Statement.uid
:param include_disabled: Boolean
:return: list of arguments
"""
db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid
=statement_uid)
if not include_disabled:
db_arguments = db_arguments.filter_by(is_disabled=False)
return db_arguments.all() if db_arguments else []
def get_all_arguments_with_text_by_statement_id(statement_uid):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param statement_uid: uid to a statement, which should be analyzed
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(statement_uid))
arguments = get_all_arguments_by_statement(statement_uid)
results = []
if arguments:
results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.
uid)} for arg in arguments]
return results
def get_all_arguments_with_text_and_url_by_statement_id(db_statement,
urlmanager, color_statement=False, is_jump=False):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param db_statement: Statement
:param urlmanager:
:param color_statement: True, if the statement (specified by the ID) should be colored
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(db_statement.uid))
arguments = get_all_arguments_by_statement(db_statement.uid)
uids = [arg.uid for arg in arguments] if arguments else None
results = list()
sb = '<{} data-argumentation-type="position">'.format(tag_type
) if color_statement else ''
se = '</{}>'.format(tag_type) if color_statement else ''
if not uids:
return []
uids.sort()
for uid in uids:
statement_text = db_statement.get_text()
attack_type = 'jump' if is_jump else ''
argument_text = get_text_for_argument_uid(uid, anonymous_style=True,
attack_type=attack_type)
pos = argument_text.lower().find(statement_text.lower())
argument_text = argument_text[:pos] + sb + argument_text[pos:]
pos += len(statement_text) + len(sb)
argument_text = argument_text[:pos] + se + argument_text[pos:]
results.append({'uid': uid, 'text': argument_text, 'url':
urlmanager.get_url_for_jump(uid)})
return results
def get_slug_by_statement_uid(uid):
"""
Returns the slug of the issue which the given statement belongs to
:param uid: Statement.uid
:return: String
"""
db_statement = DBDiscussionSession.query(Statement).get(uid)
return resolve_issue_uid_to_slug(db_statement.issue_uid)
def get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,
start_with_intro=False, first_arg_by_user=False, user_changed_opinion=
False, rearrange_intro=False, colored_position=False, attack_type=None,
minimize_on_undercut=False, is_users_opinion=True, anonymous_style=
False, support_counter_argument=False):
"""
Returns current argument as string like "conclusion, because premise1 and premise2"
:param uid: Integer
:param with_html_tag: Boolean
:param start_with_intro: Boolean
:param first_arg_by_user: Boolean
:param user_changed_opinion: Boolean
:param rearrange_intro: Boolean
:param colored_position: Boolean
:param attack_type: String
:param minimize_on_undercut: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:return: String
"""
logger('DBAS.LIB', 'main {}'.format(uid))
db_argument = DBDiscussionSession.query(Argument).get(uid)
if not db_argument:
return None
lang = db_argument.lang
_t = Translator(lang)
premisegroup_by_user = False
author_uid = None
db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)
).first()
if db_user:
author_uid = db_user.uid
pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.
premisegroup_uid)
marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(
argument_uid=uid, author_uid=db_user.uid).first()
premisegroup_by_user = (pgroup.author_uid == db_user.uid or
marked_argument is not None)
arg_array = [db_argument]
while db_argument.argument_uid:
db_argument = DBDiscussionSession.query(Argument).get(db_argument.
argument_uid)
arg_array.append(db_argument)
if attack_type == 'jump':
return __build_argument_for_jump(arg_array, with_html_tag)
if len(arg_array) == 1:
return __build_single_argument(arg_array[0], rearrange_intro,
with_html_tag, colored_position, attack_type, _t,
start_with_intro, is_users_opinion, anonymous_style,
support_counter_argument, author_uid)
else:
return __build_nested_argument(arg_array, first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)
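# Hedged usage note (not part of the original module): the function walks the
# chain of argument_uid references upwards, so an undercut of an undercut ends
# up as a list of arguments that is rendered by __build_nested_argument, while
# a plain premise/conclusion pair is rendered by __build_single_argument. A
# minimal call with illustrative values could look like this:
#
#     text = get_text_for_argument_uid(42, nickname='alice')
#     # -> e.g. "We should close the park at night because it is too loud."
#
# The uid, the nickname and the resulting sentence are placeholders; the
# concrete wording depends on the stored premises, the language and the flags.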
<mask token>
def __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t
):
premises = db_argument.get_premisegroup_text()
if premises[-1] != '.':
premises += '.'
conclusion = db_argument.get_conclusion_text()
because = _t.get(_.because).lower()
conclusion = tag_conclusion + conclusion + tag_end
premises = tag_premise + premises + tag_end
intro = start_con + _t.get(_.isNotRight).lower(
) + end_tag if not db_argument.is_supportive else ''
ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)
if _t.get_lang() == 'de':
intro = _t.get(_.itIsTrueThatAnonymous
) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous
)
intro = intro[0:1].upper() + intro[1:]
intro = (start_pro if db_argument.is_supportive else start_con
) + intro + end_tag
ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)
return ret_value
<mask token>
def __build_val_for_undercutted_undercut(arg_array: List[Argument],
tag_premise, tag_conclusion, tag_end, _t):
premise1 = arg_array[0].get_premisegroup_text()
premise2 = arg_array[1].get_premisegroup_text()
premise3 = arg_array[2].get_premisegroup_text()
conclusion = arg_array[2].get_conclusion_text()
bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag
because = _t.get(_.because)
seperator = ',' if _t.get_lang() == 'de' else ''
premise1 = tag_premise + premise1 + tag_end
premise2 = tag_conclusion + premise2 + tag_end
argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(),
premise3)
argument = tag_conclusion + argument + tag_end
ret_value = '{} {} {}. {} {}'.format(premise2, bind, argument, because,
premise1)
return ret_value
def __build_single_argument(db_argument: Argument, rearrange_intro: bool,
with_html_tag: bool, colored_position: bool, attack_type: str, _t:
Translator, start_with_intro: bool, is_users_opinion: bool,
anonymous_style: bool, support_counter_argument: bool=False, author_uid
=None):
"""
Build up argument text for a single argument
Please, do not touch this!
:param db_argument: Argument
:param rearrange_intro: Boolean
:param with_html_tag: Boolean
:param colored_position: Boolean
:param attack_type: String
:param _t: Translator
:param start_with_intro: Boolean
:param is_users_opinion: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:param author_uid: User.uid
:return: String
"""
premises_text = db_argument.get_premisegroup_text()
conclusion_text = db_argument.get_conclusion_text()
lang = db_argument.lang
if lang != 'de':
premises_text = premises_text[0:1].lower() + premises_text[1:]
premises_text, conclusion_text, sb, sb_none, se = (
__get_tags_for_building_single_argument(with_html_tag, attack_type,
colored_position, premises_text, conclusion_text))
marked_element = False
if author_uid:
db_marked = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == db_argument.uid, MarkedArgument.
author_uid == author_uid).first()
marked_element = db_marked is not None
you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format(''
).strip()
if lang == 'de':
ret_value = __build_single_argument_for_de(_t, sb, se,
you_have_the_opinion_that, start_with_intro, anonymous_style,
rearrange_intro, db_argument, attack_type, sb_none,
marked_element, lang, premises_text, conclusion_text,
is_users_opinion, support_counter_argument)
else:
ret_value = __build_single_argument_for_en(_t, sb, se,
you_have_the_opinion_that, marked_element, conclusion_text,
premises_text, db_argument)
return ret_value.replace('  ', ' ')
def __get_tags_for_building_single_argument(with_html_tag, attack_type,
colored_position, premises, conclusion):
sb_none = start_tag if with_html_tag else ''
se = end_tag if with_html_tag else ''
if attack_type not in ['dont_know', 'jump']:
sb = start_tag if with_html_tag else ''
if colored_position:
sb = start_position if with_html_tag else ''
if attack_type == Relations.UNDERMINE:
premises = sb + premises + se
else:
conclusion = sb + conclusion + se
else:
sb = start_argument if with_html_tag else ''
sb_tmp = start_attack if with_html_tag else ''
premises = sb + premises + se
conclusion = sb_tmp + conclusion + se
return premises, conclusion, sb, sb_none, se
def __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that,
start_with_intro, anonymous_style, rearrange_intro, db_argument,
attack_type, sb_none, marked_element, lang, premises, conclusion,
is_users_opinion, support_counter_argument):
if start_with_intro and not anonymous_style:
intro = _t.get(_.itIsTrueThat
) if db_argument.is_supportive else _t.get(_.itIsFalseThat)
if rearrange_intro:
intro = _t.get(_.itTrueIsThat
) if db_argument.is_supportive else _t.get(_.itFalseIsThat)
ret_value = (sb_none if attack_type in ['dont_know'] else sb
) + intro + se + ' '
elif is_users_opinion and not anonymous_style:
ret_value = sb_none
if support_counter_argument:
ret_value += _t.get(_.youAgreeWithThecounterargument)
elif marked_element:
ret_value += you_have_the_opinion_that
else:
ret_value += _t.get(_.youArgue)
ret_value += se + ' '
else:
tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else
_.itIsFalseThatAnonymous)
ret_value = sb_none + sb + tmp + se + ' '
ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se
) if not db_argument.is_supportive else ''
ret_value += conclusion
ret_value += ', ' if lang == 'de' else ' '
ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises
return ret_value
<mask token>
def __build_nested_argument(arg_array: List[Argument], first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):
"""
:param arg_array:
:param first_arg_by_user:
:param user_changed_opinion:
:param with_html_tag:
:param start_with_intro:
:param minimize_on_undercut:
:param anonymous_style:
:param premisegroup_by_user:
:param _t:
:return:
"""
pgroups = []
supportive = []
arg_array = arg_array[::-1]
local_lang = arg_array[0].lang
for db_argument in arg_array:
text = db_argument.get_premisegroup_text()
pgroups.append(text)
supportive.append(db_argument.is_supportive)
conclusion = arg_array[0].get_conclusion_text()
sb = start_position if with_html_tag else ''
se = end_tag if with_html_tag else ''
because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(
) + ' '
if len(arg_array
) % 2 == 0 and not first_arg_by_user and not anonymous_style:
ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else
_.otherUsersSaidThat) + ' '
tmp_users_opinion = True
elif not anonymous_style:
ret_value = _t.get(_.soYourOpinionIsThat
) + ': ' if start_with_intro else ''
tmp_users_opinion = False
conclusion = se + conclusion[0:1].upper() + conclusion[1:]
else:
ret_value = _t.get(_.someoneArgued) + ' '
tmp_users_opinion = False
tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''
ret_value += tmp + conclusion + because + pgroups[0] + '.'
del pgroups[0]
if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:
return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[
len(pgroups) - 1] + se + '.'
for i, pgroup in enumerate(pgroups):
ret_value += ' '
if tmp_users_opinion and not anonymous_style:
tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else
_.butYouCounteredWithInterest)
ret_value += _t.get(_.otherParticipantsConvincedYouThat if
user_changed_opinion else tmp)
elif not anonymous_style:
ret_value += _t.get(_.youAgreeWithThatNow)
else:
ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_
.thenOtherUsersSaidThat)
ret_value += sb + ' ' + pgroups[i] + '.'
tmp_users_opinion = not tmp_users_opinion
return ret_value.replace('  ', ' ')
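# Descriptive note (not part of the original module): the builder reverses the
# argument chain, prints the oldest conclusion together with its first premise
# group and then alternates between "you countered with ..." and "others said
# ..." style sentences, flipping tmp_users_opinion after every premise group.
# With minimize_on_undercut set, only the newest premise group is returned
# instead of the full back-and-forth.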
def get_text_for_premisegroup_uid(uid):
"""
Returns the joined text of the premise group
:param uid: premisegroup_uid
:return: text
"""
warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid
=uid).join(Statement).all()
if len(db_premises) == 0:
return ''
texts = [premise.get_text() for premise in db_premises]
lang = DBDiscussionSession.query(Statement).get(db_premises[0].
statements.uid).lang
_t = Translator(lang)
return ' {} '.format(_t.get(_.aand)).join(texts)
<mask token>
def get_text_for_premise(uid: int, colored_position: bool=False):
"""
Returns text of premise with given uid
:param uid: Statement.uid
:param colored_position: Boolean
:return: String
"""
db_premise = DBDiscussionSession.query(Premise).get(uid)
if db_premise:
return db_premise.get_text(html=colored_position)
else:
return None
def get_text_for_conclusion(argument, start_with_intro=False,
rearrange_intro=False, is_users_opinion=True):
"""
Checks whether the argument's conclusion is a statement or an argument and returns the corresponding text
:param argument: Argument
:param start_with_intro: Boolean
:param rearrange_intro: Boolean
:return: String
"""
if argument.argument_uid:
return get_text_for_argument_uid(argument.argument_uid,
start_with_intro, rearrange_intro=rearrange_intro,
is_users_opinion=is_users_opinion)
else:
return argument.get_conclusion_text()
def resolve_issue_uid_to_slug(uid):
"""
Given the issue uid query database and return the correct slug of the issue.
:param uid: issue_uid
:type uid: int
:return: Slug of issue
:rtype: str
"""
issue = DBDiscussionSession.query(Issue).get(uid)
return issue.slug if issue else None
<mask token>
def get_user_by_private_or_public_nickname(nickname):
"""
Gets the user by his (public) nickname, depending on whether his nickname is public or not
:param nickname: Nickname of the user
:return: Current user or None
"""
db_user = get_user_by_case_insensitive_nickname(nickname)
db_public_user = get_user_by_case_insensitive_public_nickname(nickname)
uid = 0
if db_user:
uid = db_user.uid
elif db_public_user:
uid = db_public_user.uid
db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid
).first()
if not db_settings:
return None
if db_settings.should_show_public_nickname and db_user:
return db_user
elif not db_settings.should_show_public_nickname and db_public_user:
return db_public_user
return None
def get_user_by_case_insensitive_nickname(nickname):
"""
Returns user with given nickname
:param nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==
func.lower(nickname)).first()
def get_user_by_case_insensitive_public_nickname(public_nickname):
"""
Returns user with given public nickname
:param public_nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(func.lower(User.
public_nickname) == func.lower(public_nickname)).first()
def pretty_print_options(message):
"""
Some modifications for pretty printing.
Use uppercase for first letter in text and a single dot for the end if there isn't one already.
:param message: String
:return: String
"""
if message[0:1] == '<':
pos = message.index('>')
message = message[0:pos + 1] + message[pos + 1:pos + 2].upper(
) + message[pos + 2:]
else:
message = message[0:1].upper() + message[1:]
if message[-1] == '>':
pos = message.rfind('<')
if message[pos - 1:pos] not in ['.', '?', '!']:
message = message[0:pos] + '.' + message[pos:]
elif not message.endswith(tuple(['.', '?', '!'])):
message += '.'
return message
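# Hedged examples (not part of the original module) for the two branches above,
# assuming plain text and a single surrounding tag respectively:
#
#     pretty_print_options('cats are great')
#     # -> 'Cats are great.'
#     pretty_print_options('<em>cats are great</em>')
#     # -> '<em>Cats are great.</em>'
#
# The first visible letter is uppercased and a dot is inserted before a
# trailing closing tag if the sentence does not already end with '.', '?' or '!'.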
def create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool=
False, is_author: bool=False, uid: str='', bubble_url: str='', content:
str='', omit_bubble_url: bool=False, omit_vote_info: bool=False,
argument_uid: int=None, statement_uid: int=None, is_supportive: bool=
False, nickname: str='anonymous', lang: str='en', is_users_opinion:
bool=False, other_author: User=None):
"""
Creates a dictionary which includes all information needed for a bubble.
:param bubble_type: BubbleTypes
:param is_markable: True if the content itself could be flagged
:param is_author: True if the current user is author of the content
:param uid: Identifier for the bubble
:param bubble_url: URL for the click event of the bubble
:param content: Text of the bubble
:param omit_bubble_url: True if the bubble should not have a link
:param omit_vote_info: True if the bubble should not have the little, grey information text
:param argument_uid: Argument.uid
:param statement_uid: Statement.uid
:param is_supportive: Boolean
:param nickname: String
:param lang: ui_locales
:param other_author: User which is rendered as the author of a SYSTEM bubble
:param is_users_opinion: Boolean
:return: dict()
"""
gravatar_link = get_global_url() + '/static/images/icon.png'
profile = None
if uid != 'now':
content = pretty_print_options(content)
if bubble_type is BubbleTypes.SYSTEM and other_author is not None:
gravatar_link = get_profile_picture(other_author, 25)
profile = '/user/{}'.format(other_author.uid)
if bubble_type is BubbleTypes.USER and nickname != 'anonymous':
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname
).first()
db_marked = None
gravatar_link = get_profile_picture(db_user, 25)
if argument_uid is not None and db_user is not None:
db_marked = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == argument_uid, MarkedArgument
.author_uid == db_user.uid).first()
if statement_uid is not None and db_user is not None:
db_marked = DBDiscussionSession.query(MarkedStatement).filter(
MarkedStatement.statement_uid == statement_uid,
MarkedStatement.author_uid == db_user.uid).first()
is_users_opinion = db_marked is not None
speech = {'is_user': bubble_type is BubbleTypes.USER, 'is_system':
bubble_type is BubbleTypes.SYSTEM, 'is_status': bubble_type is
BubbleTypes.STATUS, 'is_info': bubble_type is BubbleTypes.INFO,
'is_markable': is_markable, 'is_author': is_author, 'id': uid if
len(str(uid)) > 0 else uuid4().hex, 'bubble_url': bubble_url,
'message': content, 'omit_bubble_url': omit_bubble_url,
'omit_vote_info': omit_vote_info, 'data_type': 'argument' if
argument_uid else 'statement' if statement_uid else 'None',
'data_argument_uid': argument_uid, 'data_statement_uid':
statement_uid, 'data_is_supportive': is_supportive,
'is_users_opinion': is_users_opinion, 'enemy': {'avatar':
gravatar_link, 'profile': profile, 'available': profile is not None}}
votecount_keys = __get_text_for_click_and_mark_count(nickname,
bubble_type is BubbleTypes.USER, argument_uid, statement_uid,
speech, lang)
speech['votecounts_message'] = votecount_keys[speech['votecounts']]
return speech
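# Minimal usage sketch (not part of the original module). The values below are
# illustrative placeholders; in the application the uids come from the current
# discussion step and the nickname from the authenticated session. The call
# still needs a configured DBDiscussionSession, so this is only a sketch:
#
#     bubble = create_speechbubble_dict(BubbleTypes.USER,
#                                       is_markable=True,
#                                       uid='bubble_example',
#                                       content='we should reduce parking fees',
#                                       statement_uid=1,
#                                       nickname='anonymous',
#                                       lang='en')
#     # bubble['message']    -> 'We should reduce parking fees.'
#     # bubble['data_type']  -> 'statement'
#     # bubble['votecounts'] -> number of other participants who clicked it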
def __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,
statement_uid, speech, lang):
"""
Builds the text for a bubble showing how many other participants share the same interest.
:param nickname: User.nickname
:param is_user: boolean
:param argument_uid: Argument.uid
:param statement_uid: Statement.uid
:param speech: dict()
:param lang: ui_locales
:return: [String]
"""
if not nickname:
nickname = 'anonymous'
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname
).first()
if not db_user:
db_user = DBDiscussionSession.query(User).filter_by(nickname=
'anonymous').first()
db_clicks, db_marks = __get_clicks_and_marks(argument_uid,
statement_uid, db_user)
_t = Translator(lang)
speech['votecounts'] = len(db_clicks) if db_clicks else 0
if db_marks:
speech['votecounts'] += len(db_marks)
votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[
'votecounts'], _t.get(_.voteCountTextMore)))
if is_user and db_user.gender == 'm':
gender_key = _.voteCountTextFirstM
elif is_user and db_user.gender == 'f':
gender_key = _.voteCountTextFirstF
else:
gender_key = _.voteCountTextFirst
votecount_keys[0] = '{}.'.format(_t.get(gender_key))
votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'
return votecount_keys
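# Descriptive note (not part of the original module): votecount_keys is a
# defaultdict, so any count other than 0 or 1 falls back to the generic
# "<n> other participants ..." message, while the keys 0 and 1 are overridden
# with explicit texts (key 0 is chosen based on the user's gender). The caller
# then simply looks up votecount_keys[speech['votecounts']].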
def __get_clicks_and_marks(argument_uid, statement_uid, db_user):
db_clicks = None
db_marks = None
if argument_uid:
db_clicks = DBDiscussionSession.query(ClickedArgument).filter(
ClickedArgument.argument_uid == argument_uid, ClickedArgument.
is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.
author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == argument_uid, MarkedArgument.
author_uid != db_user.uid).all()
elif statement_uid:
db_clicks = DBDiscussionSession.query(ClickedStatement).filter(
ClickedStatement.statement_uid == statement_uid,
ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,
ClickedStatement.author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedStatement).filter(
MarkedStatement.statement_uid == statement_uid, MarkedStatement
.author_uid != db_user.uid).all()
return db_clicks, db_marks
def is_argument_disabled_due_to_disabled_statements(argument):
"""
Returns true if any involved statement is disabled.
:param argument: Argument
:return: Boolean
"""
if argument.conclusion_uid is None:
db_argument = DBDiscussionSession.query(Argument).get(argument.
argument_uid)
conclusion = DBDiscussionSession.query(Statement).get(db_argument.
conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(db_argument)
for premise in premises:
if premise.statements.is_disabled:
return True
else:
print(argument.conclusion_uid)
conclusion = DBDiscussionSession.query(Statement).get(argument.
conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(argument)
for premise in premises:
if premise.statements.is_disabled:
return True
return False
def is_author_of_statement(db_user: User, statement_uid: int) ->bool:
"""
Is the user with given nickname author of the statement?
:param db_user: User
:param statement_uid: Statement.uid
:return: Boolean
"""
db_user = (db_user if db_user and db_user.nickname !=
nick_of_anonymous_user else None)
if not db_user:
return False
db_textversion = DBDiscussionSession.query(TextVersion).filter_by(
statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()
if not db_textversion:
return False
return db_textversion.author_uid == db_user.uid
def is_author_of_argument(db_user: User, argument_uid: int) ->bool:
"""
Is the user with given nickname author of the argument?
:param db_user: User
:param argument_uid: Argument.uid
:return: Boolean
"""
db_user = (db_user if db_user and db_user.nickname !=
nick_of_anonymous_user else None)
if not db_user:
return False
db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid ==
argument_uid, Argument.author_uid == db_user.uid).first()
return True if db_argument else False
<mask token>
def get_profile_picture(user: User, size: int=80, ignore_privacy_settings:
bool=False):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
:param user: User
:param size: Integer, default 80
:param ignore_privacy_settings:
:return: String
"""
additional_id = ''
if user and isinstance(user, User):
additional_id = ('' if user.settings.should_show_public_nickname or
ignore_privacy_settings else 'x')
return __get_gravatar(user, additional_id, size)
def get_public_profile_picture(user: User, size: int=80):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
If the user doesn't want a public profile, an anonymous image will be returned
:param user: User
:param size: Integer, default 80
:return: String
"""
additional_id = ''
if user.settings.should_show_public_nickname:
additional_id = 'x'
if len(str(user.oauth_provider)) > 0:
additional_id = '{}{}'.format(user.oauth_provider, user.
oauth_provider_id)
return __get_gravatar(user, additional_id, size)
def __get_gravatar(user, additional_id, size):
if user:
if str(user.email) == 'None':
email = (user.nickname + additional_id).encode('utf-8')
else:
email = (user.email + additional_id).encode('utf-8')
else:
email = 'unknown'.encode('utf-8')
gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.
md5(email.lower()).hexdigest())
gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})
return gravatar_url
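# Illustrative sketch (not part of the original module): the avatar URL is the
# md5 hash of the lower-cased mail address plus the query parameters set above,
# e.g. for a hypothetical user with the email 'alice@example.com':
#
#     https://secure.gravatar.com/avatar/<md5 of 'alice@example.com'>?d=wavatar&s=80
#
# Users without a stored mail address are hashed under their nickname plus the
# additional_id suffix instead.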
def get_author_data(uid, gravatar_on_right_side=True,
linked_with_users_page=True, profile_picture_size=20):
"""
Returns a-tag with gravatar of current author and users page as href
:param uid: Uid of the author
:param gravatar_on_right_side: True, if the gravatar is on the right of authors name
:param linked_with_users_page: True, if the text is a link to the authors site
:param profile_picture_size: Integer
:return: HTML-String
"""
db_user = DBDiscussionSession.query(User).get(int(uid))
if not db_user:
return None, 'Missing author with uid ' + str(uid), False
nick = db_user.global_nickname
img_src = get_profile_picture(db_user, profile_picture_size)
link_begin = ''
link_end = ''
if linked_with_users_page:
link_begin = '<a href="/user/{}" title="{}">'.format(db_user.uid, nick)
link_end = '</a>'
side = 'left' if gravatar_on_right_side else 'right'
img = '<img class="img-circle" src="{}" style="padding-{}: 0.3em">'.format(
img_src, side)
if gravatar_on_right_side:
return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end
), True
else:
return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end
), True
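# Hedged usage sketch (not part of the original module): the helper returns a
# (User, html, success) triple, so callers typically unpack it like this:
#
#     db_user, author_html, okay = get_author_data(3, profile_picture_size=20)
#     # author_html -> roughly '<a href="/user/3" title="nick">nick<img class="img-circle" ...></a>'
#
# The uid 3 and the nickname are placeholders; for an unknown uid the first
# element is None and the string carries the error message instead.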
<mask token>
| <mask token>
class BubbleTypes(Enum):
USER = auto()
SYSTEM = auto()
STATUS = auto()
INFO = auto()
def __str__(self):
return str(self.value)
class Relations(Enum):
UNDERMINE = 'undermine'
UNDERCUT = 'undercut'
REBUT = 'rebut'
SUPPORT = 'support'
def __str__(self):
return str(self.value)
class Attitudes(Enum):
AGREE = 'agree'
DISAGREE = 'disagree'
DONT_KNOW = 'dontknow'
def __str__(self):
return str(self.value)
<mask token>
def get_global_url():
"""
Returns the global url of the project, based on the ENV
:return: String
"""
return os.environ.get('URL', '')
def get_changelog(no):
"""
Returns the 'no' last entries from the changelog
:param no: int
:return: list
"""
path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))
lines = [line.rstrip('\n').strip() for line in open(path) if len(line.
rstrip('\n').strip()) > 0]
changelog = []
title = ''
body = []
for l in lines:
if l.startswith('#'):
if len(title) > 0:
changelog.append({'title': title, 'body': body})
body = []
title = l.replace('### ', '')
else:
body.append(l.replace('- ', ''))
return changelog[0:no]
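# Illustrative sketch (not part of the original module): for a CHANGELOG.md of
# the form
#
#     ### 1.2.0
#     - added feature A
#     ### 1.1.0
#     - fixed bug B
#
# get_changelog(1) would yield something like
#
#     [{'title': '1.2.0', 'body': ['added feature A']}]
#
# i.e. a list of the newest 'no' sections with the leading '### ' and '- '
# markers stripped.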
def is_development_mode(registry):
"""
Returns true, if mode is set to development in current ini file.
:param registry: request.registry
:return: Boolean
"""
if 'mode' in registry.settings:
return registry.settings['mode'].lower() == 'development'
return False
def usage_of_modern_bubbles(registry):
"""
Returns true, if modern bubbles are set in the current ini file.
:param registry: request.registry
:return: Boolean
"""
if 'modern_bubbles' in registry.settings:
return registry.settings['modern_bubbles'].lower() == 'true'
return False
def usage_of_matomo(registry):
"""
Returns true, if matomo is set in the current ini file.
:param registry: request.registry
:return: Boolean
"""
if 'usage_of_matomo' in registry.settings:
return registry.settings['usage_of_matomo'].lower() == 'true'
return False
def escape_string(text):
"""
Escapes all html special chars.
:param text: string
:return: html.escape(text)
"""
return escape(text)
def get_discussion_language(matchdict, params, session, current_issue_uid=None
):
"""
Returns Language.ui_locales
CALL AFTER issue_handler.get_id_of_slug(..)!
:param matchdict: matchdict of the current request
:param params: params of the current request
:param session: session of the current request
:param current_issue_uid: uid
:return:
"""
if not current_issue_uid:
current_issue = DBDiscussionSession.query(Issue).filter(Issue.
is_disabled == False, Issue.is_private == False).first()
current_issue_uid = current_issue.uid if current_issue else None
issue = matchdict['issue'] if 'issue' in matchdict else params['issue'
] if 'issue' in params else session['issue'
] if 'issue' in session else current_issue_uid
db_issue = DBDiscussionSession.query(Issue).get(issue)
return db_issue.lang if db_issue else 'en'
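# Descriptive note (not part of the original module): the issue uid is resolved
# with the precedence matchdict > params > session > first enabled public
# issue, which is why the docstring asks callers to resolve the slug via
# issue_handler.get_id_of_slug(..) first. If the resolved issue does not exist,
# 'en' is returned as the fallback language.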
def python_datetime_pretty_print(ts, lang):
"""
Pretty print of a locale
:param ts: Timestamp
:param lang: ui_locales
:return: String
"""
formatter = '%b. %d.'
if lang == 'de':
try:
locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')
formatter = '%d. %b.'
except locale.Error:
locale.setlocale(locale.LC_TIME, 'en_US.UTF8')
return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)
def get_all_arguments_by_statement(statement_uid, include_disabled=False):
"""
Returns a list of all arguments where the statement is a conclusion or member of the premisegroup
:param statement_uid: Statement.uid
:param include_disabled: Boolean
:return: [Arguments]
"""
logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid,
include_disabled))
db_arguments = __get_arguments_of_conclusion(statement_uid,
include_disabled)
arg_array = [arg for arg in db_arguments] if db_arguments else []
premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=
statement_uid)
if not include_disabled:
premises = premises.filter_by(is_disabled=False)
premises = premises.all()
for premise in premises:
arg_array += __get_argument_of_premisegroup(premise.
premisegroup_uid, include_disabled)
db_undercuts = []
for arg in arg_array:
db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)
db_undercutted_undercuts = []
for arg in db_undercuts:
db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid,
include_disabled)
arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts))
logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in
arg_array]))
return arg_array if len(arg_array) > 0 else None
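# Summary of the lookup above: the result set merges four relations around the statement:
#   1. arguments that use the statement as their conclusion,
#   2. arguments whose premisegroup contains the statement,
#   3. undercuts of any argument found in 1. or 2.,
#   4. undercuts of those undercuts.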
def __get_argument_of_premisegroup(premisegroup_uid, include_disabled):
"""
Returns all arguments with the given premisegroup
:param premisegroup_uid: PremisgGroup.uid
:param include_disabled: Boolean
:return: list of Arguments
"""
db_arguments = DBDiscussionSession.query(Argument).filter_by(
premisegroup_uid=premisegroup_uid)
if not include_disabled:
db_arguments = db_arguments.filter_by(is_disabled=False)
return db_arguments.all() if db_arguments else []
def __get_undercuts_of_argument(argument_uid, include_disabled):
"""
    Returns all undercuts of the given argument
:param argument_uid: Argument.uid
:param include_disabled: boolean
:return: list of Arguments
"""
db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid
=argument_uid)
if not include_disabled:
db_undercuts = db_undercuts.filter_by(is_disabled=False)
return db_undercuts.all() if db_undercuts else []
def __get_arguments_of_conclusion(statement_uid, include_disabled):
"""
Returns all arguments, where the statement is set as conclusion
:param statement_uid: Statement.uid
:param include_disabled: Boolean
:return: list of arguments
"""
db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid
=statement_uid)
if not include_disabled:
db_arguments = db_arguments.filter_by(is_disabled=False)
return db_arguments.all() if db_arguments else []
def get_all_arguments_with_text_by_statement_id(statement_uid):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param statement_uid: uid to a statement, which should be analyzed
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(statement_uid))
arguments = get_all_arguments_by_statement(statement_uid)
results = []
if arguments:
results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.
uid)} for arg in arguments]
return results
def get_all_arguments_with_text_and_url_by_statement_id(db_statement,
urlmanager, color_statement=False, is_jump=False):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param db_statement: Statement
:param urlmanager:
:param color_statement: True, if the statement (specified by the ID) should be colored
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(db_statement.uid))
arguments = get_all_arguments_by_statement(db_statement.uid)
uids = [arg.uid for arg in arguments] if arguments else None
results = list()
sb = '<{} data-argumentation-type="position">'.format(tag_type
) if color_statement else ''
se = '</{}>'.format(tag_type) if color_statement else ''
if not uids:
return []
uids.sort()
for uid in uids:
statement_text = db_statement.get_text()
attack_type = 'jump' if is_jump else ''
argument_text = get_text_for_argument_uid(uid, anonymous_style=True,
attack_type=attack_type)
pos = argument_text.lower().find(statement_text.lower())
argument_text = argument_text[:pos] + sb + argument_text[pos:]
pos += len(statement_text) + len(sb)
argument_text = argument_text[:pos] + se + argument_text[pos:]
results.append({'uid': uid, 'text': argument_text, 'url':
urlmanager.get_url_for_jump(uid)})
return results
def get_slug_by_statement_uid(uid):
"""
Returns slug for the given Issue.uid
:param uid: Issue.uid
:return: String
"""
db_statement = DBDiscussionSession.query(Statement).get(uid)
return resolve_issue_uid_to_slug(db_statement.issue_uid)
def get_text_for_argument_uid(uid, nickname=None, with_html_tag=False,
start_with_intro=False, first_arg_by_user=False, user_changed_opinion=
False, rearrange_intro=False, colored_position=False, attack_type=None,
minimize_on_undercut=False, is_users_opinion=True, anonymous_style=
False, support_counter_argument=False):
"""
Returns current argument as string like "conclusion, because premise1 and premise2"
:param uid: Integer
:param with_html_tag: Boolean
:param start_with_intro: Boolean
:param first_arg_by_user: Boolean
:param user_changed_opinion: Boolean
:param rearrange_intro: Boolean
:param colored_position: Boolean
:param attack_type: String
:param minimize_on_undercut: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:return: String
"""
logger('DBAS.LIB', 'main {}'.format(uid))
db_argument = DBDiscussionSession.query(Argument).get(uid)
if not db_argument:
return None
lang = db_argument.lang
_t = Translator(lang)
premisegroup_by_user = False
author_uid = None
db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)
).first()
if db_user:
author_uid = db_user.uid
pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.
premisegroup_uid)
marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(
argument_uid=uid, author_uid=db_user.uid).first()
premisegroup_by_user = (pgroup.author_uid == db_user.uid or
marked_argument is not None)
arg_array = [db_argument]
while db_argument.argument_uid:
db_argument = DBDiscussionSession.query(Argument).get(db_argument.
argument_uid)
arg_array.append(db_argument)
if attack_type == 'jump':
return __build_argument_for_jump(arg_array, with_html_tag)
if len(arg_array) == 1:
return __build_single_argument(arg_array[0], rearrange_intro,
with_html_tag, colored_position, attack_type, _t,
start_with_intro, is_users_opinion, anonymous_style,
support_counter_argument, author_uid)
else:
return __build_nested_argument(arg_array, first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t)
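# Illustrative output shape (all texts hypothetical): for a single supportive
# argument the result reads like
#   'We should do X because Y is the case.'
# while a chain of undercuts yields a nested narrative such as
#   'Other users said that we should do X because Y. You agreed with that now ...'
# The exact wording depends on the Translator strings and the flags passed in.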
def __build_argument_for_jump(arg_array: List[Argument], with_html_tag):
"""
    Build the text for an argument, if we jump to this argument
:param arg_array: [Argument]
:param with_html_tag: Boolean
:return: String
"""
tag_premise = ('<' + tag_type + ' data-argumentation-type="attack">' if
with_html_tag else '')
tag_conclusion = ('<' + tag_type +
' data-argumentation-type="argument">' if with_html_tag else '')
tag_end = '</' + tag_type + '>' if with_html_tag else ''
lang = arg_array[0].lang
_t = Translator(lang)
if len(arg_array) == 1:
ret_value = __build_val_for_jump(arg_array[0], tag_premise,
tag_conclusion, tag_end, _t)
elif len(arg_array) == 2:
ret_value = __build_val_for_undercut(arg_array, tag_premise,
tag_conclusion, tag_end, _t)
else:
ret_value = __build_val_for_undercutted_undercut(arg_array,
tag_premise, tag_conclusion, tag_end, _t)
    return ret_value.replace('  ', ' ')
def __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t
):
premises = db_argument.get_premisegroup_text()
if premises[-1] != '.':
premises += '.'
conclusion = db_argument.get_conclusion_text()
because = _t.get(_.because).lower()
conclusion = tag_conclusion + conclusion + tag_end
premises = tag_premise + premises + tag_end
intro = start_con + _t.get(_.isNotRight).lower(
) + end_tag if not db_argument.is_supportive else ''
ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)
if _t.get_lang() == 'de':
intro = _t.get(_.itIsTrueThatAnonymous
) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous
)
intro = intro[0:1].upper() + intro[1:]
intro = (start_pro if db_argument.is_supportive else start_con
) + intro + end_tag
ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)
return ret_value
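# Rough sketch of the produced sentence (hypothetical texts): for a supportive
# argument in English the parts are joined as
#   '<conclusion> because <premises>.'
# and for a non-supportive one an 'is not right' marker is injected:
#   '<conclusion> is not right because <premises>.'
# For German a leading 'Es ist richtig/falsch, dass ...' intro is prepended instead.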
def __build_val_for_undercut(arg_array: List[Argument], tag_premise,
tag_conclusion, tag_end, _t):
db_undercut = arg_array[0]
db_conclusion_argument = arg_array[1]
premise = db_undercut.get_premisegroup_text()
conclusion_premise = db_conclusion_argument.get_premisegroup_text()
conclusion_conclusion = db_conclusion_argument.get_conclusion_text()
premise = tag_premise + premise + tag_end
conclusion_premise = tag_conclusion + conclusion_premise + tag_end
conclusion_conclusion = tag_conclusion + conclusion_conclusion + tag_end
intro = _t.get(_.statementAbout) + ' ' if _t.get_lang() == 'de' else ''
bind = start_con + _t.get(_.isNotAGoodReasonFor) + end_tag
because = _t.get(_.because)
ret_value = '{}{} {} {}. {} {}.'.format(intro, conclusion_premise, bind,
conclusion_conclusion, because, premise)
return ret_value
def __build_val_for_undercutted_undercut(arg_array: List[Argument],
tag_premise, tag_conclusion, tag_end, _t):
premise1 = arg_array[0].get_premisegroup_text()
premise2 = arg_array[1].get_premisegroup_text()
premise3 = arg_array[2].get_premisegroup_text()
conclusion = arg_array[2].get_conclusion_text()
bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag
because = _t.get(_.because)
seperator = ',' if _t.get_lang() == 'de' else ''
premise1 = tag_premise + premise1 + tag_end
premise2 = tag_conclusion + premise2 + tag_end
argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(),
premise3)
argument = tag_conclusion + argument + tag_end
ret_value = '{} {} {}. {} {}'.format(premise2, bind, argument, because,
premise1)
return ret_value
def __build_single_argument(db_argument: Argument, rearrange_intro: bool,
with_html_tag: bool, colored_position: bool, attack_type: str, _t:
Translator, start_with_intro: bool, is_users_opinion: bool,
anonymous_style: bool, support_counter_argument: bool=False, author_uid
=None):
"""
Build up argument text for a single argument
Please, do not touch this!
    :param db_argument: Argument
:param rearrange_intro: Boolean
:param with_html_tag: Boolean
:param colored_position: Boolean
:param attack_type: String
:param _t: Translator
:param start_with_intro: Boolean
:param is_users_opinion: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:param author_uid: User.uid
:return: String
"""
premises_text = db_argument.get_premisegroup_text()
conclusion_text = db_argument.get_conclusion_text()
lang = db_argument.lang
if lang != 'de':
premises_text = premises_text[0:1].lower() + premises_text[1:]
premises_text, conclusion_text, sb, sb_none, se = (
__get_tags_for_building_single_argument(with_html_tag, attack_type,
colored_position, premises_text, conclusion_text))
marked_element = False
if author_uid:
db_marked = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == db_argument.uid, MarkedArgument.
author_uid == author_uid).first()
marked_element = db_marked is not None
you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format(''
).strip()
if lang == 'de':
ret_value = __build_single_argument_for_de(_t, sb, se,
you_have_the_opinion_that, start_with_intro, anonymous_style,
rearrange_intro, db_argument, attack_type, sb_none,
marked_element, lang, premises_text, conclusion_text,
is_users_opinion, support_counter_argument)
else:
ret_value = __build_single_argument_for_en(_t, sb, se,
you_have_the_opinion_that, marked_element, conclusion_text,
premises_text, db_argument)
    return ret_value.replace('  ', ' ')
def __get_tags_for_building_single_argument(with_html_tag, attack_type,
colored_position, premises, conclusion):
sb_none = start_tag if with_html_tag else ''
se = end_tag if with_html_tag else ''
if attack_type not in ['dont_know', 'jump']:
sb = start_tag if with_html_tag else ''
if colored_position:
sb = start_position if with_html_tag else ''
if attack_type == Relations.UNDERMINE:
premises = sb + premises + se
else:
conclusion = sb + conclusion + se
else:
sb = start_argument if with_html_tag else ''
sb_tmp = start_attack if with_html_tag else ''
premises = sb + premises + se
conclusion = sb_tmp + conclusion + se
return premises, conclusion, sb, sb_none, se
def __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that,
start_with_intro, anonymous_style, rearrange_intro, db_argument,
attack_type, sb_none, marked_element, lang, premises, conclusion,
is_users_opinion, support_counter_argument):
if start_with_intro and not anonymous_style:
intro = _t.get(_.itIsTrueThat
) if db_argument.is_supportive else _t.get(_.itIsFalseThat)
if rearrange_intro:
intro = _t.get(_.itTrueIsThat
) if db_argument.is_supportive else _t.get(_.itFalseIsThat)
ret_value = (sb_none if attack_type in ['dont_know'] else sb
) + intro + se + ' '
elif is_users_opinion and not anonymous_style:
ret_value = sb_none
if support_counter_argument:
ret_value += _t.get(_.youAgreeWithThecounterargument)
elif marked_element:
ret_value += you_have_the_opinion_that
else:
ret_value += _t.get(_.youArgue)
ret_value += se + ' '
else:
tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else
_.itIsFalseThatAnonymous)
ret_value = sb_none + sb + tmp + se + ' '
ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se
) if not db_argument.is_supportive else ''
ret_value += conclusion
ret_value += ', ' if lang == 'de' else ' '
ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises
return ret_value
<mask token>
def __build_nested_argument(arg_array: List[Argument], first_arg_by_user,
user_changed_opinion, with_html_tag, start_with_intro,
minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):
"""
:param arg_array:
:param first_arg_by_user:
:param user_changed_opinion:
:param with_html_tag:
:param start_with_intro:
:param minimize_on_undercut:
:param anonymous_style:
:param premisegroup_by_user:
:param _t:
:return:
"""
pgroups = []
supportive = []
arg_array = arg_array[::-1]
local_lang = arg_array[0].lang
for db_argument in arg_array:
text = db_argument.get_premisegroup_text()
pgroups.append(text)
supportive.append(db_argument.is_supportive)
conclusion = arg_array[0].get_conclusion_text()
sb = start_position if with_html_tag else ''
se = end_tag if with_html_tag else ''
because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower(
) + ' '
    if len(arg_array) % 2 == 0 and not first_arg_by_user and not anonymous_style:
ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else
_.otherUsersSaidThat) + ' '
tmp_users_opinion = True
elif not anonymous_style:
ret_value = _t.get(_.soYourOpinionIsThat
) + ': ' if start_with_intro else ''
tmp_users_opinion = False
conclusion = se + conclusion[0:1].upper() + conclusion[1:]
else:
ret_value = _t.get(_.someoneArgued) + ' '
tmp_users_opinion = False
tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''
ret_value += tmp + conclusion + because + pgroups[0] + '.'
del pgroups[0]
if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:
return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[
len(pgroups) - 1] + se + '.'
for i, pgroup in enumerate(pgroups):
ret_value += ' '
if tmp_users_opinion and not anonymous_style:
tmp = (_.butYouCounteredWithArgument if premisegroup_by_user else
_.butYouCounteredWithInterest)
ret_value += _t.get(_.otherParticipantsConvincedYouThat if
user_changed_opinion else tmp)
elif not anonymous_style:
ret_value += _t.get(_.youAgreeWithThatNow)
else:
ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_
.thenOtherUsersSaidThat)
ret_value += sb + ' ' + pgroups[i] + '.'
tmp_users_opinion = not tmp_users_opinion
    return ret_value.replace('  ', ' ')
def get_text_for_premisegroup_uid(uid):
"""
    Returns the joined text of the premise group
    :param uid: premisegroup_uid
    :return: text
"""
warnings.warn('Use PremiseGroup.get_text() instead.', DeprecationWarning)
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid
=uid).join(Statement).all()
if len(db_premises) == 0:
return ''
texts = [premise.get_text() for premise in db_premises]
lang = DBDiscussionSession.query(Statement).get(db_premises[0].
statements.uid).lang
_t = Translator(lang)
return ' {} '.format(_t.get(_.aand)).join(texts)
<mask token>
def get_text_for_premise(uid: int, colored_position: bool=False):
"""
Returns text of premise with given uid
:param uid: Statement.uid
:param colored_position: Boolean
:return: String
"""
db_premise = DBDiscussionSession.query(Premise).get(uid)
if db_premise:
return db_premise.get_text(html=colored_position)
else:
return None
def get_text_for_conclusion(argument, start_with_intro=False,
rearrange_intro=False, is_users_opinion=True):
"""
    Checks whether the argument's conclusion is a statement or an argument and returns the corresponding text
:param argument: Argument
:param start_with_intro: Boolean
:param rearrange_intro: Boolean
:return: String
"""
if argument.argument_uid:
return get_text_for_argument_uid(argument.argument_uid,
start_with_intro, rearrange_intro=rearrange_intro,
is_users_opinion=is_users_opinion)
else:
return argument.get_conclusion_text()
def resolve_issue_uid_to_slug(uid):
"""
    Given the issue uid, query the database and return the correct slug of the issue.
:param uid: issue_uid
:type uid: int
:return: Slug of issue
:rtype: str
"""
issue = DBDiscussionSession.query(Issue).get(uid)
return issue.slug if issue else None
def get_all_attacking_arg_uids_from_history(history):
"""
Returns all arguments of the history, which attacked the user
:param history: String
:return: [Arguments.uid]
:rtype: list
"""
try:
splitted_history = history.split('-')
uids = []
for part in splitted_history:
if 'reaction' in part:
parts = part.split('/')
pos = parts.index('reaction')
uids.append(part.split('/')[pos + 3])
return uids
except AttributeError:
return []
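# Example with a hypothetical history string: every 'reaction' step contributes the
# uid found three path segments after the 'reaction' keyword (the attacking argument):
#   get_all_attacking_arg_uids_from_history('/attitude/2-/justify/2/t-/reaction/12/undercut/13')
#   returns ['13']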
def get_user_by_private_or_public_nickname(nickname):
"""
    Gets the user by his nickname or public nickname, depending on whether the nickname is set to be public or not
:param nickname: Nickname of the user
:return: Current user or None
"""
db_user = get_user_by_case_insensitive_nickname(nickname)
db_public_user = get_user_by_case_insensitive_public_nickname(nickname)
uid = 0
if db_user:
uid = db_user.uid
elif db_public_user:
uid = db_public_user.uid
db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid
).first()
if not db_settings:
return None
if db_settings.should_show_public_nickname and db_user:
return db_user
elif not db_settings.should_show_public_nickname and db_public_user:
return db_public_user
return None
def get_user_by_case_insensitive_nickname(nickname):
"""
Returns user with given nickname
:param nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(func.lower(User.nickname) ==
func.lower(nickname)).first()
def get_user_by_case_insensitive_public_nickname(public_nickname):
"""
Returns user with given public nickname
:param public_nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(func.lower(User.
public_nickname) == func.lower(public_nickname)).first()
def pretty_print_options(message):
"""
Some modifications for pretty printing.
Use uppercase for first letter in text and a single dot for the end if there isn't one already.
:param message: String
:return: String
"""
if message[0:1] == '<':
pos = message.index('>')
message = message[0:pos + 1] + message[pos + 1:pos + 2].upper(
) + message[pos + 2:]
else:
message = message[0:1].upper() + message[1:]
if message[-1] == '>':
pos = message.rfind('<')
if message[pos - 1:pos] not in ['.', '?', '!']:
message = message[0:pos] + '.' + message[pos:]
    elif not message.endswith(('.', '?', '!')):
message += '.'
return message
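# Illustrative behaviour on plain and tagged input (hypothetical strings):
#   pretty_print_options('hello there')       ->  'Hello there.'
#   pretty_print_options('<span>hi</span>')   ->  '<span>Hi.</span>'
# The first letter inside a leading tag is uppercased and a trailing dot is
# placed before a closing tag if no sentence-ending punctuation is present.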
def create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool=
False, is_author: bool=False, uid: str='', bubble_url: str='', content:
str='', omit_bubble_url: bool=False, omit_vote_info: bool=False,
argument_uid: int=None, statement_uid: int=None, is_supportive: bool=
False, nickname: str='anonymous', lang: str='en', is_users_opinion:
bool=False, other_author: User=None):
"""
    Creates a dictionary which includes all the information needed for a bubble.
:param bubble_type: BubbleTypes
:param is_markable: True if the content itself could be flagged
:param is_author: True if the current user is author of the content
:param uid: Identifier for the bubble
:param bubble_url: URL for the click event of the bubble
:param content: Text of the bubble
    :param omit_bubble_url: True if the bubble should not have a link
    :param omit_vote_info: True if the bubble should omit the little, grey information text
    :param argument_uid: Argument.uid
    :param statement_uid: Statement.uid
    :param is_supportive: Boolean
    :param nickname: String
    :param lang: ui_locales
:param is_users_opinion: Boolean
:return: dict()
"""
gravatar_link = get_global_url() + '/static/images/icon.png'
profile = None
    if uid != 'now':
content = pretty_print_options(content)
if bubble_type is BubbleTypes.SYSTEM and other_author is not None:
gravatar_link = get_profile_picture(other_author, 25)
        profile = '/user/{}'.format(other_author.uid)
if bubble_type is BubbleTypes.USER and nickname != 'anonymous':
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname
).first()
db_marked = None
gravatar_link = get_profile_picture(db_user, 25)
if argument_uid is not None and db_user is not None:
db_marked = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == argument_uid, MarkedArgument
.author_uid == db_user.uid).first()
if statement_uid is not None and db_user is not None:
db_marked = DBDiscussionSession.query(MarkedStatement).filter(
MarkedStatement.statement_uid == statement_uid,
MarkedStatement.author_uid == db_user.uid).first()
is_users_opinion = db_marked is not None
speech = {'is_user': bubble_type is BubbleTypes.USER, 'is_system':
bubble_type is BubbleTypes.SYSTEM, 'is_status': bubble_type is
BubbleTypes.STATUS, 'is_info': bubble_type is BubbleTypes.INFO,
'is_markable': is_markable, 'is_author': is_author, 'id': uid if
len(str(uid)) > 0 else uuid4().hex, 'bubble_url': bubble_url,
'message': content, 'omit_bubble_url': omit_bubble_url,
'omit_vote_info': omit_vote_info, 'data_type': 'argument' if
argument_uid else 'statement' if statement_uid else 'None',
'data_argument_uid': argument_uid, 'data_statement_uid':
statement_uid, 'data_is_supportive': is_supportive,
'is_users_opinion': is_users_opinion, 'enemy': {'avatar':
gravatar_link, 'profile': profile, 'available': profile is not None}}
votecount_keys = __get_text_for_click_and_mark_count(nickname,
bubble_type is BubbleTypes.USER, argument_uid, statement_uid,
speech, lang)
speech['votecounts_message'] = votecount_keys[speech['votecounts']]
return speech
def __get_text_for_click_and_mark_count(nickname, is_user, argument_uid,
statement_uid, speech, lang):
"""
    Builds the text for a bubble stating how many other participants have the same interest.
:param nickname: User.nickname
:param is_user: boolean
:param argument_uid: Argument.uid
:param statement_uid: Statement.uid
:param speech: dict()
:param lang: ui_locales
:return: [String]
"""
if not nickname:
nickname = 'anonymous'
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname
).first()
if not db_user:
db_user = DBDiscussionSession.query(User).filter_by(nickname=
'anonymous').first()
db_clicks, db_marks = __get_clicks_and_marks(argument_uid,
statement_uid, db_user)
_t = Translator(lang)
speech['votecounts'] = len(db_clicks) if db_clicks else 0
if db_marks:
speech['votecounts'] += len(db_marks)
votecount_keys = defaultdict(lambda : '{} {}.'.format(speech[
'votecounts'], _t.get(_.voteCountTextMore)))
if is_user and db_user.gender == 'm':
gender_key = _.voteCountTextFirstM
elif is_user and db_user.gender == 'f':
gender_key = _.voteCountTextFirstF
else:
gender_key = _.voteCountTextFirst
votecount_keys[0] = '{}.'.format(_t.get(gender_key))
votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'
return votecount_keys
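# The returned defaultdict maps the click/mark count to a short info text; the
# wording comes from the Translator and is only sketched here:
#   votecount_keys[0] -> 'You are the first one with this opinion.'
#   votecount_keys[1] -> 'One other participant shares this opinion.'
#   votecount_keys[n] -> '<n> other participants share this opinion.' (default factory)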
def __get_clicks_and_marks(argument_uid, statement_uid, db_user):
db_clicks = None
db_marks = None
if argument_uid:
db_clicks = DBDiscussionSession.query(ClickedArgument).filter(
ClickedArgument.argument_uid == argument_uid, ClickedArgument.
is_up_vote == True, ClickedArgument.is_valid, ClickedArgument.
author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == argument_uid, MarkedArgument.
author_uid != db_user.uid).all()
elif statement_uid:
db_clicks = DBDiscussionSession.query(ClickedStatement).filter(
ClickedStatement.statement_uid == statement_uid,
ClickedStatement.is_up_vote == True, ClickedStatement.is_valid,
ClickedStatement.author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedStatement).filter(
MarkedStatement.statement_uid == statement_uid, MarkedStatement
.author_uid != db_user.uid).all()
return db_clicks, db_marks
def is_argument_disabled_due_to_disabled_statements(argument):
"""
Returns true if any involved statement is disabled.
:param argument: Argument
:return: Boolean
"""
if argument.conclusion_uid is None:
db_argument = DBDiscussionSession.query(Argument).get(argument.
argument_uid)
        conclusion = DBDiscussionSession.query(Statement).get(db_argument.conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(db_argument)
for premise in premises:
if premise.statements.is_disabled:
return True
else:
conclusion = DBDiscussionSession.query(Statement).get(argument.
conclusion_uid)
if conclusion.is_disabled:
return True
premises = __get_all_premises_of_argument(argument)
for premise in premises:
if premise.statements.is_disabled:
return True
return False
def is_author_of_statement(db_user: User, statement_uid: int) ->bool:
"""
Is the user with given nickname author of the statement?
:param db_user: User
:param statement_uid: Statement.uid
:return: Boolean
"""
db_user = (db_user if db_user and db_user.nickname !=
nick_of_anonymous_user else None)
if not db_user:
return False
db_textversion = DBDiscussionSession.query(TextVersion).filter_by(
statement_uid=statement_uid).order_by(TextVersion.uid.asc()).first()
if not db_textversion:
return False
return db_textversion.author_uid == db_user.uid
def is_author_of_argument(db_user: User, argument_uid: int) ->bool:
"""
Is the user with given nickname author of the argument?
:param db_user: User
:param argument_uid: Argument.uid
:return: Boolean
"""
db_user = (db_user if db_user and db_user.nickname !=
nick_of_anonymous_user else None)
if not db_user:
return False
db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid ==
argument_uid, Argument.author_uid == db_user.uid).first()
return True if db_argument else False
def __get_all_premises_of_argument(argument):
"""
Returns list with all premises of the argument.
:param argument: Argument
:return: list()
"""
ret_list = []
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid
=argument.premisegroup_uid).join(Statement).all()
for premise in db_premises:
ret_list.append(premise)
return ret_list
def get_profile_picture(user: User, size: int=80, ignore_privacy_settings:
bool=False):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
:param user: User
:param size: Integer, default 80
:param ignore_privacy_settings:
:return: String
"""
additional_id = ''
if user and isinstance(user, User):
additional_id = ('' if user.settings.should_show_public_nickname or
ignore_privacy_settings else 'x')
return __get_gravatar(user, additional_id, size)
def get_public_profile_picture(user: User, size: int=80):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
    If the user doesn't want a public profile, an anonymous image will be returned
:param user: User
:param size: Integer, default 80
:return: String
"""
additional_id = ''
if user.settings.should_show_public_nickname:
additional_id = 'x'
if len(str(user.oauth_provider)) > 0:
additional_id = '{}{}'.format(user.oauth_provider, user.
oauth_provider_id)
return __get_gravatar(user, additional_id, size)
def __get_gravatar(user, additional_id, size):
if user:
if str(user.email) == 'None':
email = (user.nickname + additional_id).encode('utf-8')
else:
email = (user.email + additional_id).encode('utf-8')
else:
email = 'unknown'.encode('utf-8')
gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.
md5(email.lower()).hexdigest())
gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})
return gravatar_url
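# The resulting link follows the standard Gravatar pattern (sketch; the hash depends
# on the lowercased e-mail or nickname plus the additional id):
#   https://secure.gravatar.com/avatar/<md5 hexdigest>?d=wavatar&s=<size>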
def get_author_data(uid, gravatar_on_right_side=True,
linked_with_users_page=True, profile_picture_size=20):
"""
Returns a-tag with gravatar of current author and users page as href
:param uid: Uid of the author
:param gravatar_on_right_side: True, if the gravatar is on the right of authors name
:param linked_with_users_page: True, if the text is a link to the authors site
:param profile_picture_size: Integer
:return: HTML-String
"""
db_user = DBDiscussionSession.query(User).get(int(uid))
if not db_user:
return None, 'Missing author with uid ' + str(uid), False
nick = db_user.global_nickname
img_src = get_profile_picture(db_user, profile_picture_size)
link_begin = ''
link_end = ''
if linked_with_users_page:
link_begin = '<a href="/user/{}" title="{}">'.format(db_user.uid, nick)
link_end = '</a>'
side = 'left' if gravatar_on_right_side else 'right'
img = '<img class="img-circle" src="{}" style="padding-{}: 0.3em">'.format(
img_src, side)
if gravatar_on_right_side:
return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end
), True
else:
return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end
), True
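# Shape of the generated markup with the default arguments (attribute values are hypothetical):
#   <a href="/user/42" title="SomeNick">SomeNick<img class="img-circle" src="<gravatar url>" style="padding-left: 0.3em"></a>
# With gravatar_on_right_side=False the image precedes the nickname and uses padding-right instead.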
def bubbles_already_last_in_list(bubble_list, bubbles):
"""
Are the given bubbles already at the end of the bubble list
:param bubble_list: list of Bubbles
:param bubbles: list of bubbles
:return: Boolean
"""
if isinstance(bubbles, list):
length = len(bubbles)
else:
length = 1
bubbles = [bubbles]
if len(bubble_list) < length:
return False
for bubble in bubbles:
if 'message' not in bubble:
return False
start_index = -length
is_already_in = False
for bubble in bubbles:
last = bubble_list[start_index]
if 'message' not in last or 'message' not in bubble:
return False
text1 = unhtmlify(last['message'].lower()).strip()
text2 = unhtmlify(bubble['message'].lower()).strip()
is_already_in = is_already_in or text1 == text2
start_index += 1
return is_already_in
def unhtmlify(html):
"""
Remove html-tags and unescape encoded html-entities.
:param html: Evil-string containing html
:return:
"""
return unescape(re.sub('<.*?>', '', html))
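# Example (illustrative): tags are stripped first, then entities are decoded:
#   unhtmlify('<span>Hello &amp; goodbye</span>')  ->  'Hello & goodbye'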
| """
Common, pure functions used by the D-BAS.
.. codeauthor:: Tobias Krauthoff <[email protected]>
"""
import hashlib
import locale
import os
import re
import warnings
from collections import defaultdict
from datetime import datetime
from enum import Enum, auto
from html import escape, unescape
from typing import List
from urllib import parse
from uuid import uuid4
from sqlalchemy import func
from dbas.database import DBDiscussionSession
from dbas.database.discussion_model import Argument, Premise, Statement, TextVersion, Issue, User, Settings, \
ClickedArgument, ClickedStatement, MarkedArgument, MarkedStatement, PremiseGroup
from dbas.logger import logger
from dbas.strings.keywords import Keywords as _
from dbas.strings.translator import Translator
nick_of_anonymous_user = 'anonymous'
fallback_lang = 'en'
tag_type = 'span'
start_attack = '<{} data-argumentation-type="attack">'.format(tag_type)
start_argument = '<{} data-argumentation-type="argument">'.format(tag_type)
start_position = '<{} data-argumentation-type="position">'.format(tag_type)
start_content = '<{} class="triangle-content-text">'.format(tag_type)
start_pro = '<{} data-attitude="pro">'.format(tag_type)
start_con = '<{} data-attitude="con">'.format(tag_type)
start_tag = '<{}>'.format(tag_type)
end_tag = '</{}>'.format(tag_type)
class BubbleTypes(Enum):
USER = auto()
SYSTEM = auto()
STATUS = auto()
INFO = auto()
def __str__(self):
return str(self.value)
class Relations(Enum):
UNDERMINE = 'undermine'
UNDERCUT = 'undercut'
REBUT = 'rebut'
SUPPORT = 'support'
def __str__(self):
return str(self.value)
class Attitudes(Enum):
AGREE = 'agree'
DISAGREE = 'disagree'
DONT_KNOW = 'dontknow'
def __str__(self):
return str(self.value)
relation_mapper = {relation.value: relation for relation in Relations}
attitude_mapper = {attitude.value: attitude for attitude in Attitudes}
def get_global_url():
"""
Returns the global url of the project, based on the ENV
:return: String
"""
return os.environ.get('URL', '')
def get_changelog(no):
"""
Returns the 'no' last entries from the changelog
:param no: int
:return: list
"""
path = str(os.path.realpath(__file__ + '/../../CHANGELOG.md'))
lines = [line.rstrip('\n').strip() for line in open(path) if len(line.rstrip('\n').strip()) > 0]
changelog = []
title = ''
body = []
for l in lines:
if l.startswith('#'):
if len(title) > 0:
changelog.append({'title': title, 'body': body})
body = []
title = l.replace('### ', '')
else:
body.append(l.replace('- ', ''))
return changelog[0:no]
def is_development_mode(registry):
"""
Returns true, if mode is set to development in current ini file.
:param registry: request.registry
:return: Boolean
"""
if 'mode' in registry.settings:
return registry.settings['mode'].lower() == 'development'
return False
def usage_of_modern_bubbles(registry):
"""
Returns true, if modern bubbles are set in the current ini file.
:param registry: request.registry
:return: Boolean
"""
if 'modern_bubbles' in registry.settings:
return registry.settings['modern_bubbles'].lower() == 'true'
return False
def usage_of_matomo(registry):
"""
Returns true, if matomo is set in the current ini file.
:param registry: request.registry
:return: Boolean
"""
    if 'usage_of_matomo' in registry.settings:
        return registry.settings['usage_of_matomo'].lower() == 'true'
return False
def escape_string(text):
"""
Escapes all html special chars.
:param text: string
:return: html.escape(text)
"""
return escape(text)
def get_discussion_language(matchdict, params, session, current_issue_uid=None):
"""
Returns Language.ui_locales
CALL AFTER issue_handler.get_id_of_slug(..)!
:param matchdict: matchdict of the current request
:param params: params of the current request
:param session: session of the current request
:param current_issue_uid: uid
:return:
"""
if not current_issue_uid:
current_issue = DBDiscussionSession.query(Issue).filter(Issue.is_disabled == False,
Issue.is_private == False).first()
current_issue_uid = current_issue.uid if current_issue else None
# first matchdict, then params, then session, afterwards fallback
issue = matchdict['issue'] if 'issue' in matchdict \
else params['issue'] if 'issue' in params \
else session['issue'] if 'issue' in session \
else current_issue_uid
db_issue = DBDiscussionSession.query(Issue).get(issue)
return db_issue.lang if db_issue else 'en'
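# Resolution order sketch (hypothetical request data): with
#   matchdict = {}, params = {'issue': 5}, session = {'issue': 3}
# the params value wins and the language of issue 5 is returned; only if neither
# matchdict, params nor session carry an 'issue' key does the fallback issue apply.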
def python_datetime_pretty_print(ts, lang):
"""
Pretty print of a locale
:param ts: Timestamp
:param lang: ui_locales
:return: String
"""
formatter = '%b. %d.'
if lang == 'de':
try:
locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')
formatter = '%d. %b.'
except locale.Error:
locale.setlocale(locale.LC_TIME, 'en_US.UTF8')
return datetime.strptime(str(ts), '%Y-%m-%d').strftime(formatter)
def get_all_arguments_by_statement(statement_uid, include_disabled=False):
"""
Returns a list of all arguments where the statement is a conclusion or member of the premisegroup
:param statement_uid: Statement.uid
:param include_disabled: Boolean
:return: [Arguments]
"""
logger('DBAS.LIB', 'main {}, include_disabled {}'.format(statement_uid, include_disabled))
db_arguments = __get_arguments_of_conclusion(statement_uid, include_disabled)
arg_array = [arg for arg in db_arguments] if db_arguments else []
premises = DBDiscussionSession.query(Premise).filter_by(statement_uid=statement_uid)
if not include_disabled:
premises = premises.filter_by(is_disabled=False)
premises = premises.all()
for premise in premises:
arg_array += __get_argument_of_premisegroup(premise.premisegroup_uid, include_disabled)
db_undercuts = []
for arg in arg_array:
db_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)
db_undercutted_undercuts = []
for arg in db_undercuts:
db_undercutted_undercuts += __get_undercuts_of_argument(arg.uid, include_disabled)
arg_array = list(set(arg_array + db_undercuts + db_undercutted_undercuts))
logger('DBAS.LIB', 'returning arguments {}'.format([arg.uid for arg in arg_array]))
return arg_array if len(arg_array) > 0 else None
def __get_argument_of_premisegroup(premisegroup_uid, include_disabled):
"""
Returns all arguments with the given premisegroup
:param premisegroup_uid: PremisgGroup.uid
:param include_disabled: Boolean
:return: list of Arguments
"""
db_arguments = DBDiscussionSession.query(Argument).filter_by(premisegroup_uid=premisegroup_uid)
if not include_disabled:
db_arguments = db_arguments.filter_by(is_disabled=False)
return db_arguments.all() if db_arguments else []
def __get_undercuts_of_argument(argument_uid, include_disabled):
"""
    Returns all undercuts of the given argument
:param argument_uid: Argument.uid
:param include_disabled: boolean
:return: list of Arguments
"""
db_undercuts = DBDiscussionSession.query(Argument).filter_by(argument_uid=argument_uid)
if not include_disabled:
db_undercuts = db_undercuts.filter_by(is_disabled=False)
return db_undercuts.all() if db_undercuts else []
def __get_arguments_of_conclusion(statement_uid, include_disabled):
"""
Returns all arguments, where the statement is set as conclusion
:param statement_uid: Statement.uid
:param include_disabled: Boolean
:return: list of arguments
"""
db_arguments = DBDiscussionSession.query(Argument).filter_by(conclusion_uid=statement_uid)
if not include_disabled:
db_arguments = db_arguments.filter_by(is_disabled=False)
return db_arguments.all() if db_arguments else []
def get_all_arguments_with_text_by_statement_id(statement_uid):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param statement_uid: uid to a statement, which should be analyzed
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(statement_uid))
arguments = get_all_arguments_by_statement(statement_uid)
results = []
if arguments:
results = [{'uid': arg.uid, 'text': get_text_for_argument_uid(arg.uid)} for arg in arguments]
return results
def get_all_arguments_with_text_and_url_by_statement_id(db_statement, urlmanager, color_statement=False,
is_jump=False):
"""
Given a statement_uid, it returns all arguments, which use this statement and adds
the corresponding text to it, which normally appears in the bubbles. The resulting
text depends on the provided language.
:param db_statement: Statement
:param urlmanager:
:param color_statement: True, if the statement (specified by the ID) should be colored
:return: list of dictionaries containing some properties of these arguments
:rtype: list
"""
logger('DBAS.LIB', 'main ' + str(db_statement.uid))
arguments = get_all_arguments_by_statement(db_statement.uid)
uids = [arg.uid for arg in arguments] if arguments else None
results = list()
sb = '<{} data-argumentation-type="position">'.format(tag_type) if color_statement else ''
se = '</{}>'.format(tag_type) if color_statement else ''
if not uids:
return []
uids.sort()
for uid in uids:
statement_text = db_statement.get_text()
attack_type = 'jump' if is_jump else ''
argument_text = get_text_for_argument_uid(uid, anonymous_style=True, attack_type=attack_type)
pos = argument_text.lower().find(statement_text.lower())
argument_text = argument_text[:pos] + sb + argument_text[pos:]
pos += len(statement_text) + len(sb)
argument_text = argument_text[:pos] + se + argument_text[pos:]
results.append({
'uid': uid,
'text': argument_text,
'url': urlmanager.get_url_for_jump(uid)
})
return results
def get_slug_by_statement_uid(uid):
"""
Returns slug for the given Issue.uid
:param uid: Issue.uid
:return: String
"""
db_statement = DBDiscussionSession.query(Statement).get(uid)
return resolve_issue_uid_to_slug(db_statement.issue_uid)
def get_text_for_argument_uid(uid, nickname=None, with_html_tag=False, start_with_intro=False, first_arg_by_user=False,
user_changed_opinion=False, rearrange_intro=False, colored_position=False,
attack_type=None, minimize_on_undercut=False, is_users_opinion=True,
anonymous_style=False, support_counter_argument=False):
"""
Returns current argument as string like "conclusion, because premise1 and premise2"
:param uid: Integer
:param with_html_tag: Boolean
:param start_with_intro: Boolean
:param first_arg_by_user: Boolean
:param user_changed_opinion: Boolean
:param rearrange_intro: Boolean
:param colored_position: Boolean
:param attack_type: String
:param minimize_on_undercut: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:return: String
"""
logger('DBAS.LIB', 'main {}'.format(uid))
db_argument = DBDiscussionSession.query(Argument).get(uid)
if not db_argument:
return None
lang = db_argument.lang
_t = Translator(lang)
premisegroup_by_user = False
author_uid = None
db_user = DBDiscussionSession.query(User).filter_by(nickname=str(nickname)).first()
if db_user:
author_uid = db_user.uid
pgroup = DBDiscussionSession.query(PremiseGroup).get(db_argument.premisegroup_uid)
marked_argument = DBDiscussionSession.query(MarkedArgument).filter_by(
argument_uid=uid,
author_uid=db_user.uid).first()
premisegroup_by_user = pgroup.author_uid == db_user.uid or marked_argument is not None
# getting all argument id
arg_array = [db_argument]
while db_argument.argument_uid:
db_argument = DBDiscussionSession.query(Argument).get(db_argument.argument_uid)
arg_array.append(db_argument)
if attack_type == 'jump':
return __build_argument_for_jump(arg_array, with_html_tag)
if len(arg_array) == 1:
# build one argument only
return __build_single_argument(arg_array[0], rearrange_intro, with_html_tag, colored_position, attack_type, _t,
start_with_intro, is_users_opinion, anonymous_style, support_counter_argument,
author_uid)
else:
# get all pgroups and at last, the conclusion
return __build_nested_argument(arg_array, first_arg_by_user, user_changed_opinion, with_html_tag,
start_with_intro, minimize_on_undercut, anonymous_style, premisegroup_by_user,
_t)
def __build_argument_for_jump(arg_array: List[Argument], with_html_tag):
"""
    Build the text for an argument, if we jump to this argument
:param arg_array: [Argument]
:param with_html_tag: Boolean
:return: String
"""
tag_premise = ('<' + tag_type + ' data-argumentation-type="attack">') if with_html_tag else ''
tag_conclusion = ('<' + tag_type + ' data-argumentation-type="argument">') if with_html_tag else ''
tag_end = ('</' + tag_type + '>') if with_html_tag else ''
lang = arg_array[0].lang
_t = Translator(lang)
if len(arg_array) == 1:
ret_value = __build_val_for_jump(arg_array[0], tag_premise, tag_conclusion, tag_end, _t)
elif len(arg_array) == 2:
ret_value = __build_val_for_undercut(arg_array, tag_premise, tag_conclusion, tag_end, _t)
else:
ret_value = __build_val_for_undercutted_undercut(arg_array, tag_premise, tag_conclusion, tag_end, _t)
    return ret_value.replace('  ', ' ')
def __build_val_for_jump(db_argument, tag_premise, tag_conclusion, tag_end, _t):
premises = db_argument.get_premisegroup_text()
if premises[-1] != '.':
premises += '.'
conclusion = db_argument.get_conclusion_text()
because = _t.get(_.because).lower()
conclusion = tag_conclusion + conclusion + tag_end
premises = tag_premise + premises + tag_end
intro = (start_con + _t.get(_.isNotRight).lower() + end_tag) if not db_argument.is_supportive else ''
ret_value = '{} {} {} {}'.format(conclusion, intro, because, premises)
if _t.get_lang() == 'de':
intro = _t.get(_.itIsTrueThatAnonymous) if db_argument.is_supportive else _t.get(_.itIsFalseThatAnonymous)
intro = intro[0:1].upper() + intro[1:]
intro = (start_pro if db_argument.is_supportive else start_con) + intro + end_tag
ret_value = '{} {}, {} {}'.format(intro, conclusion, because, premises)
return ret_value
def __build_val_for_undercut(arg_array: List[Argument], tag_premise, tag_conclusion, tag_end, _t):
db_undercut = arg_array[0]
db_conclusion_argument = arg_array[1]
premise = db_undercut.get_premisegroup_text()
conclusion_premise = db_conclusion_argument.get_premisegroup_text()
conclusion_conclusion = db_conclusion_argument.get_conclusion_text()
premise = tag_premise + premise + tag_end
conclusion_premise = tag_conclusion + conclusion_premise + tag_end
conclusion_conclusion = tag_conclusion + conclusion_conclusion + tag_end
intro = (_t.get(_.statementAbout) + ' ') if _t.get_lang() == 'de' else ''
bind = start_con + _t.get(_.isNotAGoodReasonFor) + end_tag
because = _t.get(_.because)
ret_value = '{}{} {} {}. {} {}.'.format(intro, conclusion_premise, bind, conclusion_conclusion, because, premise)
return ret_value
def __build_val_for_undercutted_undercut(arg_array: List[Argument], tag_premise, tag_conclusion, tag_end, _t):
premise1 = arg_array[0].get_premisegroup_text()
premise2 = arg_array[1].get_premisegroup_text()
premise3 = arg_array[2].get_premisegroup_text()
conclusion = arg_array[2].get_conclusion_text()
bind = start_con + _t.get(_.isNotAGoodReasonAgainstArgument) + end_tag
because = _t.get(_.because)
seperator = ',' if _t.get_lang() == 'de' else ''
premise1 = tag_premise + premise1 + tag_end
premise2 = tag_conclusion + premise2 + tag_end
argument = '{}{} {} {}'.format(conclusion, seperator, because.lower(), premise3)
argument = tag_conclusion + argument + tag_end
    # P2 is not a good reason against the argument that C, because P3. Because P1
ret_value = '{} {} {}. {} {}'.format(premise2, bind, argument, because, premise1)
return ret_value
def __build_single_argument(db_argument: Argument, rearrange_intro: bool, with_html_tag: bool, colored_position: bool,
attack_type: str, _t: Translator, start_with_intro: bool, is_users_opinion: bool,
anonymous_style: bool, support_counter_argument: bool=False, author_uid=None):
"""
Build up argument text for a single argument
Please, do not touch this!
    :param db_argument: Argument
:param rearrange_intro: Boolean
:param with_html_tag: Boolean
:param colored_position: Boolean
:param attack_type: String
:param _t: Translator
:param start_with_intro: Boolean
:param is_users_opinion: Boolean
:param anonymous_style: Boolean
:param support_counter_argument: Boolean
:param author_uid: User.uid
:return: String
"""
premises_text = db_argument.get_premisegroup_text()
conclusion_text = db_argument.get_conclusion_text()
lang = db_argument.lang
if lang != 'de':
premises_text = premises_text[0:1].lower() + premises_text[1:] # pretty print
premises_text, conclusion_text, sb, sb_none, se = __get_tags_for_building_single_argument(with_html_tag,
attack_type,
colored_position,
premises_text,
conclusion_text)
marked_element = False
if author_uid:
db_marked = DBDiscussionSession.query(MarkedArgument).filter(MarkedArgument.argument_uid == db_argument.uid,
MarkedArgument.author_uid == author_uid).first()
marked_element = db_marked is not None
you_have_the_opinion_that = _t.get(_.youHaveTheOpinionThat).format('').strip()
if lang == 'de':
ret_value = __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that, start_with_intro,
anonymous_style, rearrange_intro, db_argument, attack_type, sb_none,
marked_element, lang, premises_text, conclusion_text,
is_users_opinion,
support_counter_argument)
else:
ret_value = __build_single_argument_for_en(_t, sb, se, you_have_the_opinion_that, marked_element,
conclusion_text,
premises_text, db_argument)
    return ret_value.replace('  ', ' ')
def __get_tags_for_building_single_argument(with_html_tag, attack_type, colored_position, premises, conclusion):
sb_none = start_tag if with_html_tag else ''
se = end_tag if with_html_tag else ''
if attack_type not in ['dont_know', 'jump']:
sb = start_tag if with_html_tag else ''
if colored_position:
sb = start_position if with_html_tag else ''
if attack_type == Relations.UNDERMINE:
premises = sb + premises + se
else:
conclusion = sb + conclusion + se
else:
sb = start_argument if with_html_tag else ''
sb_tmp = start_attack if with_html_tag else ''
premises = sb + premises + se
conclusion = sb_tmp + conclusion + se
return premises, conclusion, sb, sb_none, se
def __build_single_argument_for_de(_t, sb, se, you_have_the_opinion_that, start_with_intro, anonymous_style,
rearrange_intro, db_argument, attack_type, sb_none, marked_element, lang,
premises, conclusion, is_users_opinion, support_counter_argument):
if start_with_intro and not anonymous_style:
intro = _t.get(_.itIsTrueThat) if db_argument.is_supportive else _t.get(_.itIsFalseThat)
if rearrange_intro:
intro = _t.get(_.itTrueIsThat) if db_argument.is_supportive else _t.get(_.itFalseIsThat)
ret_value = (sb_none if attack_type in ['dont_know'] else sb) + intro + se + ' '
elif is_users_opinion and not anonymous_style:
ret_value = sb_none
if support_counter_argument:
ret_value += _t.get(_.youAgreeWithThecounterargument)
elif marked_element:
ret_value += you_have_the_opinion_that
else:
ret_value += _t.get(_.youArgue)
ret_value += se + ' '
else:
tmp = _t.get(_.itIsTrueThatAnonymous if db_argument.is_supportive else _.itIsFalseThatAnonymous)
ret_value = sb_none + sb + tmp + se + ' '
ret_value += ' {}{}{} '.format(sb, _t.get(_.itIsNotRight), se) if not db_argument.is_supportive else ''
ret_value += conclusion
ret_value += ', ' if lang == 'de' else ' '
ret_value += sb_none + _t.get(_.because).lower() + se + ' ' + premises
return ret_value
def __build_single_argument_for_en(_t, sb, se, you_have_the_opinion_that, marked_element, conclusion, premises, db_arg):
tmp = sb + ' ' + _t.get(_.isNotRight).lower() + se + ', ' + _t.get(_.because).lower() + ' '
ret_value = (you_have_the_opinion_that + ' ' if marked_element else '') + conclusion + ' '
ret_value += _t.get(_.because).lower() if db_arg.is_supportive else tmp
ret_value += ' ' + premises
return ret_value
def __build_nested_argument(arg_array: List[Argument], first_arg_by_user, user_changed_opinion, with_html_tag,
start_with_intro, minimize_on_undercut, anonymous_style, premisegroup_by_user, _t):
"""
:param arg_array:
:param first_arg_by_user:
:param user_changed_opinion:
:param with_html_tag:
:param start_with_intro:
:param minimize_on_undercut:
:param anonymous_style:
:param premisegroup_by_user:
:param _t:
:return:
"""
# get all pgroups and at last, the conclusion
pgroups = []
supportive = []
arg_array = arg_array[::-1]
local_lang = arg_array[0].lang
# grepping all arguments in the chain
for db_argument in arg_array:
text = db_argument.get_premisegroup_text()
pgroups.append(text)
supportive.append(db_argument.is_supportive)
conclusion = arg_array[0].get_conclusion_text()
# html tags for framing
sb = start_position if with_html_tag else ''
se = end_tag if with_html_tag else ''
because = (', ' if local_lang == 'de' else ' ') + _t.get(_.because).lower() + ' '
    if len(arg_array) % 2 == 0 and not first_arg_by_user and not anonymous_style:  # system starts
ret_value = _t.get(_.earlierYouArguedThat if user_changed_opinion else _.otherUsersSaidThat) + ' '
tmp_users_opinion = True # user after system
elif not anonymous_style: # user starts
ret_value = (_t.get(_.soYourOpinionIsThat) + ': ') if start_with_intro else ''
tmp_users_opinion = False # system after user
conclusion = se + conclusion[0:1].upper() + conclusion[1:] # pretty print
else:
ret_value = _t.get(_.someoneArgued) + ' '
tmp_users_opinion = False
tmp = _t.get(_.itFalseIsThat) + ' ' if not supportive[0] else ''
ret_value += tmp + conclusion + because + pgroups[0] + '.'
del pgroups[0]
# just display the last premise group on undercuts, because the story is always saved in all bubbles
if minimize_on_undercut and not user_changed_opinion and len(pgroups) > 2:
return _t.get(_.butYouCounteredWith).strip() + ' ' + sb + pgroups[len(pgroups) - 1] + se + '.'
for i, pgroup in enumerate(pgroups):
ret_value += ' '
if tmp_users_opinion and not anonymous_style:
tmp = _.butYouCounteredWithArgument if premisegroup_by_user else _.butYouCounteredWithInterest
ret_value += _t.get(_.otherParticipantsConvincedYouThat if user_changed_opinion else tmp)
elif not anonymous_style:
ret_value += _t.get(_.youAgreeWithThatNow)
else:
ret_value += _t.get(_.otherUsersSaidThat) if i == 0 else _t.get(_.thenOtherUsersSaidThat)
ret_value += sb + ' ' + pgroups[i] + '.'
tmp_users_opinion = not tmp_users_opinion
    return ret_value.replace('  ', ' ')
def get_text_for_premisegroup_uid(uid):
"""
    Returns the joined text of the premise group
    :param uid: premisegroup_uid
    :return: text
"""
warnings.warn("Use PremiseGroup.get_text() instead.", DeprecationWarning)
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=uid).join(Statement).all()
if len(db_premises) == 0:
return ''
texts = [premise.get_text() for premise in db_premises]
lang = DBDiscussionSession.query(Statement).get(db_premises[0].statements.uid).lang
_t = Translator(lang)
return ' {} '.format(_t.get(_.aand)).join(texts)
def get_text_for_statement_uid(uid: int, colored_position=False):
"""
Returns text of statement with given uid
:param uid: Statement.uid
:param colored_position: Boolean
:return: String
"""
warnings.warn("Use Statement.get_text() or Statement.get_html() instead.", DeprecationWarning)
if not isinstance(uid, int):
return None
db_statement = DBDiscussionSession.query(Statement).get(uid)
if not db_statement:
return None
db_textversion = DBDiscussionSession.query(TextVersion).order_by(TextVersion.uid.desc()).get(
db_statement.textversion_uid)
content = db_textversion.content
while content.endswith(('.', '?', '!')):
content = content[:-1]
sb, se = '', ''
if colored_position:
sb = '<{} data-argumentation-type="position">'.format(tag_type)
se = '</{}>'.format(tag_type)
return sb + content + se
def get_text_for_premise(uid: int, colored_position: bool = False):
"""
Returns text of premise with given uid
:param uid: Statement.uid
:param colored_position: Boolean
:return: String
"""
db_premise = DBDiscussionSession.query(Premise).get(uid)
if db_premise:
return db_premise.get_text(html=colored_position)
else:
return None
def get_text_for_conclusion(argument, start_with_intro=False, rearrange_intro=False, is_users_opinion=True):
"""
    Checks whether the argument's conclusion is a statement or an argument and returns the corresponding text
:param argument: Argument
:param start_with_intro: Boolean
:param rearrange_intro: Boolean
:return: String
"""
if argument.argument_uid:
return get_text_for_argument_uid(argument.argument_uid, start_with_intro, rearrange_intro=rearrange_intro,
is_users_opinion=is_users_opinion)
else:
return argument.get_conclusion_text()
def resolve_issue_uid_to_slug(uid):
"""
    Given the issue uid, query the database and return the correct slug of the issue.
:param uid: issue_uid
:type uid: int
:return: Slug of issue
:rtype: str
"""
issue = DBDiscussionSession.query(Issue).get(uid)
return issue.slug if issue else None
def get_all_attacking_arg_uids_from_history(history):
"""
Returns all arguments of the history, which attacked the user
:param history: String
:return: [Arguments.uid]
:rtype: list
"""
try:
splitted_history = history.split('-')
uids = []
for part in splitted_history:
if 'reaction' in part:
parts = part.split('/')
pos = parts.index('reaction')
uids.append(part.split('/')[pos + 3])
return uids
except AttributeError:
return []
def get_user_by_private_or_public_nickname(nickname):
"""
    Gets the user by his nickname or public nickname, depending on whether the nickname is set to be public or not
:param nickname: Nickname of the user
:return: Current user or None
"""
db_user = get_user_by_case_insensitive_nickname(nickname)
db_public_user = get_user_by_case_insensitive_public_nickname(nickname)
uid = 0
if db_user:
uid = db_user.uid
elif db_public_user:
uid = db_public_user.uid
db_settings = DBDiscussionSession.query(Settings).filter_by(author_uid=uid).first()
if not db_settings:
return None
if db_settings.should_show_public_nickname and db_user:
return db_user
elif not db_settings.should_show_public_nickname and db_public_user:
return db_public_user
return None
def get_user_by_case_insensitive_nickname(nickname):
"""
Returns user with given nickname
:param nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(func.lower(User.nickname) == func.lower(nickname)).first()
def get_user_by_case_insensitive_public_nickname(public_nickname):
"""
Returns user with given public nickname
:param public_nickname: String
:return: User or None
"""
return DBDiscussionSession.query(User).filter(
func.lower(User.public_nickname) == func.lower(public_nickname)).first()
def pretty_print_options(message):
"""
Some modifications for pretty printing.
Use uppercase for first letter in text and a single dot for the end if there isn't one already.
:param message: String
:return: String
"""
# check for html
if message[0:1] == '<':
pos = message.index('>')
message = message[0:pos + 1] + message[pos + 1:pos + 2].upper() + message[pos + 2:]
else:
message = message[0:1].upper() + message[1:]
# check for html
if message[-1] == '>':
pos = message.rfind('<')
if message[pos - 1:pos] not in ['.', '?', '!']:
message = message[0:pos] + '.' + message[pos:]
    elif not message.endswith(('.', '?', '!')):
message += '.'
return message
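# Illustrative examples (hypothetical inputs):
#   pretty_print_options('we should do X')
#       -> 'We should do X.'
#   pretty_print_options('<span data-argumentation-type="position">we should do X</span>')
#       -> '<span data-argumentation-type="position">We should do X.</span>'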
def create_speechbubble_dict(bubble_type: BubbleTypes, is_markable: bool = False, is_author: bool = False, uid: str = '',
                             bubble_url: str = '', content: str = '', omit_bubble_url: bool = False, omit_vote_info: bool = False,
                             argument_uid: int = None, statement_uid: int = None, is_supportive: bool = False,
                             nickname: str = 'anonymous', lang: str = 'en', is_users_opinion: bool = False, other_author: User = None):
"""
Creates an dictionary which includes every information needed for a bubble.
:param bubble_type: BubbleTypes
:param is_markable: True if the content itself could be flagged
:param is_author: True if the current user is author of the content
:param uid: Identifier for the bubble
:param bubble_url: URL for the click event of the bubble
:param content: Text of the bubble
    :param omit_bubble_url: True if the bubble should not have a link
    :param omit_vote_info: True if the bubble should omit the little, grey information text
:param argument_uid: Argument.uid
:param statement_uid: Statement.uid
:param is_supportive: Boolean
:param nickname: String
    :param lang: Language (ui_locales) used for the bubble text
    :param other_author: Author of the content if it is not the current user
:param is_users_opinion: Boolean
:return: dict()
"""
gravatar_link = get_global_url() + '/static/images/icon.png'
profile = None
    if uid != 'now':
content = pretty_print_options(content)
if bubble_type is BubbleTypes.SYSTEM and other_author is not None:
gravatar_link = get_profile_picture(other_author, 25)
        profile = '/user/{}'.format(other_author.uid)
# check for users opinion
if bubble_type is BubbleTypes.USER and nickname != 'anonymous':
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname).first()
db_marked = None
gravatar_link = get_profile_picture(db_user, 25)
if argument_uid is not None and db_user is not None:
db_marked = DBDiscussionSession.query(MarkedArgument).filter(
MarkedArgument.argument_uid == argument_uid,
MarkedArgument.author_uid == db_user.uid).first()
if statement_uid is not None and db_user is not None:
db_marked = DBDiscussionSession.query(MarkedStatement).filter(
MarkedStatement.statement_uid == statement_uid,
MarkedStatement.author_uid == db_user.uid).first()
is_users_opinion = db_marked is not None
speech = {
'is_user': bubble_type is BubbleTypes.USER,
'is_system': bubble_type is BubbleTypes.SYSTEM,
'is_status': bubble_type is BubbleTypes.STATUS,
'is_info': bubble_type is BubbleTypes.INFO,
'is_markable': is_markable,
'is_author': is_author,
'id': uid if len(str(uid)) > 0 else uuid4().hex,
'bubble_url': bubble_url,
'message': content,
'omit_bubble_url': omit_bubble_url,
'omit_vote_info': omit_vote_info,
'data_type': 'argument' if argument_uid else 'statement' if statement_uid else 'None',
'data_argument_uid': argument_uid,
'data_statement_uid': statement_uid,
'data_is_supportive': is_supportive,
'is_users_opinion': is_users_opinion,
'enemy': {
'avatar': gravatar_link,
'profile': profile,
'available': profile is not None
}
}
votecount_keys = __get_text_for_click_and_mark_count(nickname, bubble_type is BubbleTypes.USER, argument_uid,
statement_uid, speech, lang)
speech['votecounts_message'] = votecount_keys[speech['votecounts']]
return speech
def __get_text_for_click_and_mark_count(nickname, is_user, argument_uid, statement_uid, speech, lang):
"""
Build text for a bubble, how many other participants have the same interest?
:param nickname: User.nickname
:param is_user: boolean
:param argument_uid: Argument.uid
:param statement_uid: Statement.uid
:param speech: dict()
:param lang: ui_locales
:return: [String]
"""
if not nickname:
nickname = 'anonymous'
db_user = DBDiscussionSession.query(User).filter_by(nickname=nickname).first()
if not db_user:
db_user = DBDiscussionSession.query(User).filter_by(nickname='anonymous').first()
db_clicks, db_marks = __get_clicks_and_marks(argument_uid, statement_uid, db_user)
_t = Translator(lang)
speech['votecounts'] = len(db_clicks) if db_clicks else 0
if db_marks:
speech['votecounts'] += len(db_marks)
votecount_keys = defaultdict(lambda: "{} {}.".format(speech['votecounts'], _t.get(_.voteCountTextMore)))
if is_user and db_user.gender == 'm':
gender_key = _.voteCountTextFirstM
elif is_user and db_user.gender == 'f':
gender_key = _.voteCountTextFirstF
else:
gender_key = _.voteCountTextFirst
votecount_keys[0] = '{}.'.format(_t.get(gender_key))
votecount_keys[1] = _t.get(_.voteCountTextOneOther) + '.'
return votecount_keys
def __get_clicks_and_marks(argument_uid, statement_uid, db_user):
db_clicks = None
db_marks = None
if argument_uid:
db_clicks = DBDiscussionSession.query(ClickedArgument). \
filter(ClickedArgument.argument_uid == argument_uid,
ClickedArgument.is_up_vote == True,
ClickedArgument.is_valid,
ClickedArgument.author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedArgument). \
filter(MarkedArgument.argument_uid == argument_uid,
MarkedArgument.author_uid != db_user.uid).all()
elif statement_uid:
db_clicks = DBDiscussionSession.query(ClickedStatement). \
filter(ClickedStatement.statement_uid == statement_uid,
ClickedStatement.is_up_vote == True,
ClickedStatement.is_valid,
ClickedStatement.author_uid != db_user.uid).all()
db_marks = DBDiscussionSession.query(MarkedStatement). \
filter(MarkedStatement.statement_uid == statement_uid,
MarkedStatement.author_uid != db_user.uid).all()
return db_clicks, db_marks
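# Note: both result lists deliberately exclude the given user's own activity
# (author_uid != db_user.uid); clicks are additionally restricted to valid up-votes.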
def is_argument_disabled_due_to_disabled_statements(argument):
"""
Returns true if any involved statement is disabled.
:param argument: Argument
:return: Boolean
"""
if argument.conclusion_uid is None:
# check conclusion of given arguments conclusion
db_argument = DBDiscussionSession.query(Argument).get(argument.argument_uid)
        conclusion = DBDiscussionSession.query(Statement).get(db_argument.conclusion_uid)
if conclusion.is_disabled:
return True
# check premisegroup of given arguments conclusion
premises = __get_all_premises_of_argument(db_argument)
for premise in premises:
if premise.statements.is_disabled:
return True
else:
        # check conclusion of given argument
        conclusion = DBDiscussionSession.query(Statement).get(argument.conclusion_uid)
if conclusion.is_disabled:
return True
# check premisegroup of given argument
premises = __get_all_premises_of_argument(argument)
for premise in premises:
if premise.statements.is_disabled:
return True
return False
def is_author_of_statement(db_user: User, statement_uid: int) -> bool:
"""
Is the user with given nickname author of the statement?
:param db_user: User
:param statement_uid: Statement.uid
:return: Boolean
"""
db_user = db_user if db_user and db_user.nickname != nick_of_anonymous_user else None
if not db_user:
return False
db_textversion = DBDiscussionSession.query(TextVersion).filter_by(statement_uid=statement_uid).order_by(
TextVersion.uid.asc()).first() # TODO #432
if not db_textversion:
return False
return db_textversion.author_uid == db_user.uid
def is_author_of_argument(db_user: User, argument_uid: int) -> bool:
"""
Is the user with given nickname author of the argument?
:param db_user: User
:param argument_uid: Argument.uid
:return: Boolean
"""
db_user = db_user if db_user and db_user.nickname != nick_of_anonymous_user else None
if not db_user:
return False
db_argument = DBDiscussionSession.query(Argument).filter(Argument.uid == argument_uid,
Argument.author_uid == db_user.uid).first()
return True if db_argument else False
def __get_all_premises_of_argument(argument):
"""
Returns list with all premises of the argument.
:param argument: Argument
:return: list()
"""
ret_list = []
db_premises = DBDiscussionSession.query(Premise).filter_by(premisegroup_uid=argument.premisegroup_uid).join(
Statement).all()
for premise in db_premises:
ret_list.append(premise)
return ret_list
def get_profile_picture(user: User, size: int = 80, ignore_privacy_settings: bool = False):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
:param user: User
:param size: Integer, default 80
:param ignore_privacy_settings:
:return: String
"""
additional_id = ''
if user and isinstance(user, User):
additional_id = '' if user.settings.should_show_public_nickname or ignore_privacy_settings else 'x'
return __get_gravatar(user, additional_id, size)
def get_public_profile_picture(user: User, size: int = 80):
"""
Returns the url to a https://secure.gravatar.com picture, with the option wavatar and size of 80px
If the user doesn't want an public profile, an anonymous image will be returned
:param user: User
:param size: Integer, default 80
:return: String
"""
additional_id = ''
if user.settings.should_show_public_nickname:
additional_id = 'x'
if len(str(user.oauth_provider)) > 0:
additional_id = '{}{}'.format(user.oauth_provider, user.oauth_provider_id)
return __get_gravatar(user, additional_id, size)
def __get_gravatar(user, additional_id, size):
if user:
if str(user.email) == 'None':
email = (user.nickname + additional_id).encode('utf-8')
else:
email = (user.email + additional_id).encode('utf-8')
else:
email = 'unknown'.encode('utf-8')
gravatar_url = 'https://secure.gravatar.com/avatar/{}?'.format(hashlib.md5(email.lower()).hexdigest())
gravatar_url += parse.urlencode({'d': 'wavatar', 's': str(size)})
return gravatar_url
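# Example of the generated URL (illustrative, for some email 'user@example.com'
# and the default size of 80):
#   https://secure.gravatar.com/avatar/<md5 of the lowercased email>?d=wavatar&s=80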
def get_author_data(uid, gravatar_on_right_side=True, linked_with_users_page=True, profile_picture_size=20):
"""
Returns a-tag with gravatar of current author and users page as href
:param uid: Uid of the author
:param gravatar_on_right_side: True, if the gravatar is on the right of authors name
:param linked_with_users_page: True, if the text is a link to the authors site
:param profile_picture_size: Integer
:return: HTML-String
"""
db_user = DBDiscussionSession.query(User).get(int(uid))
if not db_user:
return None, 'Missing author with uid ' + str(uid), False
nick = db_user.global_nickname
img_src = get_profile_picture(db_user, profile_picture_size)
link_begin = ''
link_end = ''
if linked_with_users_page:
link_begin = '<a href="/user/{}" title="{}">'.format(db_user.uid, nick)
link_end = '</a>'
side = 'left' if gravatar_on_right_side else 'right'
img = '<img class="img-circle" src="{}" style="padding-{}: 0.3em">'.format(img_src, side)
if gravatar_on_right_side:
return db_user, '{}{}{}{}'.format(link_begin, nick, img, link_end), True
else:
return db_user, '{}{}{}{}'.format(link_begin, img, nick, link_end), True
def bubbles_already_last_in_list(bubble_list, bubbles):
"""
Are the given bubbles already at the end of the bubble list
:param bubble_list: list of Bubbles
:param bubbles: list of bubbles
:return: Boolean
"""
if isinstance(bubbles, list):
length = len(bubbles)
else:
length = 1
bubbles = [bubbles]
if len(bubble_list) < length:
return False
for bubble in bubbles:
if 'message' not in bubble:
return False
start_index = - length
is_already_in = False
for bubble in bubbles:
last = bubble_list[start_index]
if 'message' not in last or 'message' not in bubble:
return False
text1 = unhtmlify(last['message'].lower()).strip()
text2 = unhtmlify(bubble['message'].lower()).strip()
is_already_in = is_already_in or (text1 == text2)
start_index += 1
return is_already_in
def unhtmlify(html):
"""
Remove html-tags and unescape encoded html-entities.
:param html: Evil-string containing html
:return:
"""
return unescape(re.sub(r'<.*?>', '', html))
| [
29,
31,
47,
55,
60
] |
1,138 | 850251338e8af841a5214b37610d1b6fba572aa5 | <mask token>
def setup():
size(500, 800)
rectMode(CENTER)
global atStartUp
atStartUp = True
global startTimeMs
startTimeMs = millis()
global bg, go, sb
bg = loadImage('assets\\background.png')
bg.resize(width, height)
go = loadImage('assets\\gameover.jpg')
go.resize(width, height)
sb = loadImage('assets\\start.png')
sb.resize(width, height)
global startOfGame
startOfGame = False
global platforms
platforms = []
starter_platform = platform([100, 700])
platforms.append(starter_platform)
global p1
p1 = player()
def draw():
global atStartUp
if atStartUp:
currentTimeMs = millis()
startUpTimeRemaining = delay - (currentTimeMs - startTimeMs)
startScreen(startUpTimeRemaining)
atStartUp = startUpTimeRemaining > 0
return
frameRate(30)
background(bg)
for platform in platforms:
platform.display()
p1.update(platforms)
platform_manager(platforms)
if p1.ypos > height + 25:
background(go)
fill(255, 255, 255)
textAlign(CENTER, CENTER)
textSize(80)
text('GAME', width / 2, 2 * height / 10)
text('OVER', width / 2, 3 * height / 10)
textSize(30)
fill(240, 225, 48)
text('Score: ' + str(p1.score / 100), width / 2, 0.5 * height / 10)
textSize(20)
fill(255, 255, 255)
text('Click anywhere on the screen to RETRY', width / 2, 8 * height /
10)
text('Press ESC to exit', width / 2, 8.5 * height / 10)
textSize(10)
fill(240, 225, 48)
text('Made by Priyam Sahoo', width / 2, 9.5 * height / 10)
textAlign(LEFT)
noLoop()
def startScreen(remainingTime):
background(sb)
fill(0)
textAlign(CENTER, CENTER)
textSize(40)
fill(240, 225, 48)
text("Welcome to Fallin't", width / 2, 0.25 * height / 2)
textSize(100)
fill(50, 50, 50)
text(ceil(remainingTime / 1000.0), width / 2, 1.65 * height / 2)
| <mask token>
def mousePressed():
global platforms
platforms = []
starter_platform = platform([100, 700])
platforms.append(starter_platform)
global p1
p1 = player()
loop()
def setup():
size(500, 800)
rectMode(CENTER)
global atStartUp
atStartUp = True
global startTimeMs
startTimeMs = millis()
global bg, go, sb
bg = loadImage('assets\\background.png')
bg.resize(width, height)
go = loadImage('assets\\gameover.jpg')
go.resize(width, height)
sb = loadImage('assets\\start.png')
sb.resize(width, height)
global startOfGame
startOfGame = False
global platforms
platforms = []
starter_platform = platform([100, 700])
platforms.append(starter_platform)
global p1
p1 = player()
def draw():
global atStartUp
if atStartUp:
currentTimeMs = millis()
startUpTimeRemaining = delay - (currentTimeMs - startTimeMs)
startScreen(startUpTimeRemaining)
atStartUp = startUpTimeRemaining > 0
return
frameRate(30)
background(bg)
for platform in platforms:
platform.display()
p1.update(platforms)
platform_manager(platforms)
if p1.ypos > height + 25:
background(go)
fill(255, 255, 255)
textAlign(CENTER, CENTER)
textSize(80)
text('GAME', width / 2, 2 * height / 10)
text('OVER', width / 2, 3 * height / 10)
textSize(30)
fill(240, 225, 48)
text('Score: ' + str(p1.score / 100), width / 2, 0.5 * height / 10)
textSize(20)
fill(255, 255, 255)
text('Click anywhere on the screen to RETRY', width / 2, 8 * height /
10)
text('Press ESC to exit', width / 2, 8.5 * height / 10)
textSize(10)
fill(240, 225, 48)
text('Made by Priyam Sahoo', width / 2, 9.5 * height / 10)
textAlign(LEFT)
noLoop()
def startScreen(remainingTime):
background(sb)
fill(0)
textAlign(CENTER, CENTER)
textSize(40)
fill(240, 225, 48)
text("Welcome to Fallin't", width / 2, 0.25 * height / 2)
textSize(100)
fill(50, 50, 50)
text(ceil(remainingTime / 1000.0), width / 2, 1.65 * height / 2)
| <mask token>
delay = 3000
startOfGame = False
def mousePressed():
global platforms
platforms = []
starter_platform = platform([100, 700])
platforms.append(starter_platform)
global p1
p1 = player()
loop()
def setup():
size(500, 800)
rectMode(CENTER)
global atStartUp
atStartUp = True
global startTimeMs
startTimeMs = millis()
global bg, go, sb
bg = loadImage('assets\\background.png')
bg.resize(width, height)
go = loadImage('assets\\gameover.jpg')
go.resize(width, height)
sb = loadImage('assets\\start.png')
sb.resize(width, height)
global startOfGame
startOfGame = False
global platforms
platforms = []
starter_platform = platform([100, 700])
platforms.append(starter_platform)
global p1
p1 = player()
def draw():
global atStartUp
if atStartUp:
currentTimeMs = millis()
startUpTimeRemaining = delay - (currentTimeMs - startTimeMs)
startScreen(startUpTimeRemaining)
atStartUp = startUpTimeRemaining > 0
return
frameRate(30)
background(bg)
for platform in platforms:
platform.display()
p1.update(platforms)
platform_manager(platforms)
if p1.ypos > height + 25:
background(go)
fill(255, 255, 255)
textAlign(CENTER, CENTER)
textSize(80)
text('GAME', width / 2, 2 * height / 10)
text('OVER', width / 2, 3 * height / 10)
textSize(30)
fill(240, 225, 48)
text('Score: ' + str(p1.score / 100), width / 2, 0.5 * height / 10)
textSize(20)
fill(255, 255, 255)
text('Click anywhere on the screen to RETRY', width / 2, 8 * height /
10)
text('Press ESC to exit', width / 2, 8.5 * height / 10)
textSize(10)
fill(240, 225, 48)
text('Made by Priyam Sahoo', width / 2, 9.5 * height / 10)
textAlign(LEFT)
noLoop()
def startScreen(remainingTime):
background(sb)
fill(0)
textAlign(CENTER, CENTER)
textSize(40)
fill(240, 225, 48)
text("Welcome to Fallin't", width / 2, 0.25 * height / 2)
textSize(100)
fill(50, 50, 50)
text(ceil(remainingTime / 1000.0), width / 2, 1.65 * height / 2)
| from platform_class import *
from player_class import *
from functions import *
delay = 3000
startOfGame = False
def mousePressed():
global platforms
platforms = []
starter_platform = platform([100, 700])
platforms.append(starter_platform)
global p1
p1 = player()
loop()
def setup():
size(500, 800)
rectMode(CENTER)
global atStartUp
atStartUp = True
global startTimeMs
startTimeMs = millis()
global bg, go, sb
bg = loadImage('assets\\background.png')
bg.resize(width, height)
go = loadImage('assets\\gameover.jpg')
go.resize(width, height)
sb = loadImage('assets\\start.png')
sb.resize(width, height)
global startOfGame
startOfGame = False
global platforms
platforms = []
starter_platform = platform([100, 700])
platforms.append(starter_platform)
global p1
p1 = player()
def draw():
global atStartUp
if atStartUp:
currentTimeMs = millis()
startUpTimeRemaining = delay - (currentTimeMs - startTimeMs)
startScreen(startUpTimeRemaining)
atStartUp = startUpTimeRemaining > 0
return
frameRate(30)
background(bg)
for platform in platforms:
platform.display()
p1.update(platforms)
platform_manager(platforms)
if p1.ypos > height + 25:
background(go)
fill(255, 255, 255)
textAlign(CENTER, CENTER)
textSize(80)
text('GAME', width / 2, 2 * height / 10)
text('OVER', width / 2, 3 * height / 10)
textSize(30)
fill(240, 225, 48)
text('Score: ' + str(p1.score / 100), width / 2, 0.5 * height / 10)
textSize(20)
fill(255, 255, 255)
text('Click anywhere on the screen to RETRY', width / 2, 8 * height /
10)
text('Press ESC to exit', width / 2, 8.5 * height / 10)
textSize(10)
fill(240, 225, 48)
text('Made by Priyam Sahoo', width / 2, 9.5 * height / 10)
textAlign(LEFT)
noLoop()
def startScreen(remainingTime):
background(sb)
fill(0)
textAlign(CENTER, CENTER)
textSize(40)
fill(240, 225, 48)
text("Welcome to Fallin't", width / 2, 0.25 * height / 2)
textSize(100)
fill(50, 50, 50)
text(ceil(remainingTime / 1000.0), width / 2, 1.65 * height / 2)
| from platform_class import *
from player_class import *
from functions import *
delay = 3000
startOfGame = False
# def keyPressed():
# startOfGame = True
# print(startOfGame)
# if (keyCode == 'B'):
# print("I am pressed")
# startOfGame = True
def mousePressed():
global platforms
platforms = []
starter_platform = platform([100, 700])
platforms.append(starter_platform)
global p1
p1 = player()
loop()
def setup():
#global setup options
size(500, 800)
rectMode(CENTER)
# sb = loadImage("assets\\gameover.jpg")
# sb.resize(width, height)
# background(sb)
global atStartUp
atStartUp = True
global startTimeMs
startTimeMs = millis()
global bg, go, sb
bg = loadImage("assets\\background.png")
bg.resize(width, height)
go = loadImage("assets\\gameover.jpg")
go.resize(width, height)
sb = loadImage("assets\\start.png")
sb.resize(width, height)
global startOfGame
startOfGame = False
#list of platforms
global platforms
platforms = []
starter_platform = platform([100, 700])
platforms.append(starter_platform)
global p1
p1 = player()
def draw():
global atStartUp
if (atStartUp):
currentTimeMs = millis()
startUpTimeRemaining = delay - (currentTimeMs - startTimeMs)
startScreen(startUpTimeRemaining)
atStartUp = startUpTimeRemaining > 0
return
frameRate(30)
background(bg)
for platform in platforms:
# print (len(platforms))
platform.display()
p1.update(platforms)
platform_manager(platforms)
#this ends the game if the player falls off the screen
if p1.ypos > height+25:
background(go)
fill(255, 255, 255)
textAlign(CENTER, CENTER)
textSize(80)
text("GAME", width/2, 2*height/10)
text("OVER", width/2, 3*height/10)
textSize(30)
fill(240,225,48)
text("Score: "+str(p1.score/100), width/2, 0.5*height/10)
textSize(20)
fill(255, 255, 255)
text("Click anywhere on the screen to RETRY", width/2, 8*height/10)
text("Press ESC to exit", width/2, 8.5*height/10)
textSize(10)
fill(240,225,48)
text("Made by Priyam Sahoo", width/2, 9.5*height/10)
textAlign(LEFT)
noLoop()
def startScreen(remainingTime):
background(sb)
fill(0)
textAlign(CENTER, CENTER)
textSize(40)
fill(240,225,48)
text("Welcome to Fallin't", width/2, 0.25*height/2)
textSize(100)
fill(50, 50, 50)
text(ceil(remainingTime / 1000.0), width/2, 1.65*height/2)
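# The start screen is shown until `delay` (3000 ms) has passed since startTimeMs;
# the large number drawn here is the remaining time rounded up to whole seconds.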
| [
3,
4,
5,
6,
7
] |
1,139 | 93ac8a1f795f7809a3e88b56ce90bf1d31706554 | <mask token>
class DengueInfection(BasedDataset):
<mask token>
def cyclic_encoder(self, col, max_val):
self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)
self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)
return self.df
def fill_nan(self, col):
table = pd.pivot_table(self.df, values=col, index=['year', 'month'],
columns=['city'], aggfunc=np.mean)
self.df[col + '_no_nans'] = self.df[col]
for index, row in self.df.iterrows():
if math.isnan(row[col]):
query = table.query(
f'year == "{row[\'year\']}" & month =="{row[\'month\']}"'
).reset_index()
city = row['city']
value = query[city]
if value.empty:
value = self.df.loc[self.df['year'] == row['year']][col
].mean()
self.df.loc[index, [col + '_no_nans']] = value
continue
self.df.loc[index, [col + '_no_nans']] = value[0]
<mask token>
<mask token>
def week_split(self):
self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if
x < 25 else 1)
    def season_of_date(self, date):
year = str(date.year)
seasons = {'spring': pd.date_range(start='21/03/' + year, end=
'20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,
end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +
year, end='20/12/' + year)}
if date in seasons['spring']:
return 'spring'
if date in seasons['summer']:
return 'summer'
if date in seasons['autumn']:
return 'autumn'
else:
return 'winter'
def kelvin_to_celsius(self, kelvin):
if kelvin is None:
return kelvin
return kelvin - 273.15
<mask token>
def week_of_year(self):
pass
<mask token>
<mask token>
<mask token>
<mask token>
def ncep_avg_temp_k(self):
self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda
k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_avg_temp_c')
<mask token>
def ncep_max_air_temp_k(self):
self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_max_air_temp_c')
def ncep_min_air_temp_k(self):
self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_min_air_temp_c')
<mask token>
<mask token>
def ncep_precip_mm(self):
self.fill_nan(col='NCEP_precip_mm')
def ncep_humidity_g_per_kg(self):
self.fill_nan(col='NCEP_humidity_g_per_kg')
def ncep_diur_temp_rng_k(self):
self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'
].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_diur_temp_rng_c')
def avg_temp_c(self):
self.fill_nan(col='avg_temp_c')
<mask token>
<mask token>
<mask token>
def precip_mm(self):
self.fill_nan(col='precip_mm')
<mask token>
def city(self):
self.df = self.df[self.df['city'] != 'sj']
| <mask token>
class DengueInfection(BasedDataset):
def __init__(self, cfg, development):
super(DengueInfection, self).__init__(cfg=cfg, dataset_type=
FileTypes.TSV, development=development)
if development:
self.total_cases()
self.extract_month()
self.extract_quarter()
self.week_start_date()
self.city()
self.cyclic_encoder(col='weekofyear', max_val=53)
self.cyclic_encoder(col='month', max_val=12)
self.persiann_precip_mm()
self.ncep_avg_temp_k()
self.ncep_diur_temp_rng_k()
self.ncep_max_air_temp_k()
self.ncep_min_air_temp_k()
self.ncep_air_temp_k()
self.ncep_dew_point_temp_k()
self.avg_temp_c()
self.diur_temp_rng_c()
self.max_temp_c()
self.min_temp_c()
self.precip_mm()
def cyclic_encoder(self, col, max_val):
self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)
self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)
return self.df
def fill_nan(self, col):
table = pd.pivot_table(self.df, values=col, index=['year', 'month'],
columns=['city'], aggfunc=np.mean)
self.df[col + '_no_nans'] = self.df[col]
for index, row in self.df.iterrows():
if math.isnan(row[col]):
query = table.query(
f'year == "{row[\'year\']}" & month =="{row[\'month\']}"'
).reset_index()
city = row['city']
value = query[city]
if value.empty:
value = self.df.loc[self.df['year'] == row['year']][col
].mean()
self.df.loc[index, [col + '_no_nans']] = value
continue
self.df.loc[index, [col + '_no_nans']] = value[0]
<mask token>
<mask token>
def week_split(self):
self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if
x < 25 else 1)
    def season_of_date(self, date):
year = str(date.year)
seasons = {'spring': pd.date_range(start='21/03/' + year, end=
'20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,
end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +
year, end='20/12/' + year)}
if date in seasons['spring']:
return 'spring'
if date in seasons['summer']:
return 'summer'
if date in seasons['autumn']:
return 'autumn'
else:
return 'winter'
def kelvin_to_celsius(self, kelvin):
if kelvin is None:
return kelvin
return kelvin - 273.15
<mask token>
def week_of_year(self):
pass
<mask token>
<mask token>
<mask token>
def ncep_air_temp_k(self):
self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda
k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_air_temp_c')
def ncep_avg_temp_k(self):
self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda
k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_avg_temp_c')
<mask token>
def ncep_max_air_temp_k(self):
self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_max_air_temp_c')
def ncep_min_air_temp_k(self):
self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_min_air_temp_c')
<mask token>
<mask token>
def ncep_precip_mm(self):
self.fill_nan(col='NCEP_precip_mm')
def ncep_humidity_g_per_kg(self):
self.fill_nan(col='NCEP_humidity_g_per_kg')
def ncep_diur_temp_rng_k(self):
self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'
].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_diur_temp_rng_c')
def avg_temp_c(self):
self.fill_nan(col='avg_temp_c')
<mask token>
<mask token>
<mask token>
def precip_mm(self):
self.fill_nan(col='precip_mm')
<mask token>
def city(self):
self.df = self.df[self.df['city'] != 'sj']
| <mask token>
class DengueInfection(BasedDataset):
def __init__(self, cfg, development):
super(DengueInfection, self).__init__(cfg=cfg, dataset_type=
FileTypes.TSV, development=development)
if development:
self.total_cases()
self.extract_month()
self.extract_quarter()
self.week_start_date()
self.city()
self.cyclic_encoder(col='weekofyear', max_val=53)
self.cyclic_encoder(col='month', max_val=12)
self.persiann_precip_mm()
self.ncep_avg_temp_k()
self.ncep_diur_temp_rng_k()
self.ncep_max_air_temp_k()
self.ncep_min_air_temp_k()
self.ncep_air_temp_k()
self.ncep_dew_point_temp_k()
self.avg_temp_c()
self.diur_temp_rng_c()
self.max_temp_c()
self.min_temp_c()
self.precip_mm()
def cyclic_encoder(self, col, max_val):
self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)
self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)
return self.df
def fill_nan(self, col):
table = pd.pivot_table(self.df, values=col, index=['year', 'month'],
columns=['city'], aggfunc=np.mean)
self.df[col + '_no_nans'] = self.df[col]
for index, row in self.df.iterrows():
if math.isnan(row[col]):
query = table.query(
f'year == "{row[\'year\']}" & month =="{row[\'month\']}"'
).reset_index()
city = row['city']
value = query[city]
if value.empty:
value = self.df.loc[self.df['year'] == row['year']][col
].mean()
self.df.loc[index, [col + '_no_nans']] = value
continue
self.df.loc[index, [col + '_no_nans']] = value[0]
def extract_month(self):
self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])
self.df['month'] = self.df['week_start_date'].dt.month
<mask token>
def week_split(self):
self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if
x < 25 else 1)
    def season_of_date(self, date):
year = str(date.year)
seasons = {'spring': pd.date_range(start='21/03/' + year, end=
'20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,
end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +
year, end='20/12/' + year)}
if date in seasons['spring']:
return 'spring'
if date in seasons['summer']:
return 'summer'
if date in seasons['autumn']:
return 'autumn'
else:
return 'winter'
def kelvin_to_celsius(self, kelvin):
if kelvin is None:
return kelvin
return kelvin - 273.15
<mask token>
def week_of_year(self):
pass
<mask token>
<mask token>
<mask token>
def ncep_air_temp_k(self):
self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda
k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_air_temp_c')
def ncep_avg_temp_k(self):
self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda
k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_avg_temp_c')
def ncep_dew_point_temp_k(self):
"""
dew point temperature in Kelvin degrees measured by NCEP CFSR;
:rtype: object
"""
self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'
].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_dew_point_temp_c')
def ncep_max_air_temp_k(self):
self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_max_air_temp_c')
def ncep_min_air_temp_k(self):
self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_min_air_temp_c')
def ncep_precip_kg_per_m2(self):
self.fill_nan(col='NCEP_precip_kg_per_m2')
<mask token>
def ncep_precip_mm(self):
self.fill_nan(col='NCEP_precip_mm')
def ncep_humidity_g_per_kg(self):
self.fill_nan(col='NCEP_humidity_g_per_kg')
def ncep_diur_temp_rng_k(self):
self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'
].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_diur_temp_rng_c')
def avg_temp_c(self):
self.fill_nan(col='avg_temp_c')
<mask token>
<mask token>
def min_temp_c(self):
self.fill_nan(col='min_temp_c')
def precip_mm(self):
self.fill_nan(col='precip_mm')
<mask token>
def city(self):
self.df = self.df[self.df['city'] != 'sj']
| <mask token>
class DengueInfection(BasedDataset):
def __init__(self, cfg, development):
super(DengueInfection, self).__init__(cfg=cfg, dataset_type=
FileTypes.TSV, development=development)
if development:
self.total_cases()
self.extract_month()
self.extract_quarter()
self.week_start_date()
self.city()
self.cyclic_encoder(col='weekofyear', max_val=53)
self.cyclic_encoder(col='month', max_val=12)
self.persiann_precip_mm()
self.ncep_avg_temp_k()
self.ncep_diur_temp_rng_k()
self.ncep_max_air_temp_k()
self.ncep_min_air_temp_k()
self.ncep_air_temp_k()
self.ncep_dew_point_temp_k()
self.avg_temp_c()
self.diur_temp_rng_c()
self.max_temp_c()
self.min_temp_c()
self.precip_mm()
def cyclic_encoder(self, col, max_val):
self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)
self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)
return self.df
def fill_nan(self, col):
table = pd.pivot_table(self.df, values=col, index=['year', 'month'],
columns=['city'], aggfunc=np.mean)
self.df[col + '_no_nans'] = self.df[col]
for index, row in self.df.iterrows():
if math.isnan(row[col]):
query = table.query(
f'year == "{row[\'year\']}" & month =="{row[\'month\']}"'
).reset_index()
city = row['city']
value = query[city]
if value.empty:
value = self.df.loc[self.df['year'] == row['year']][col
].mean()
self.df.loc[index, [col + '_no_nans']] = value
continue
self.df.loc[index, [col + '_no_nans']] = value[0]
def extract_month(self):
self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])
self.df['month'] = self.df['week_start_date'].dt.month
<mask token>
def week_split(self):
self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if
x < 25 else 1)
    def season_of_date(self, date):
year = str(date.year)
seasons = {'spring': pd.date_range(start='21/03/' + year, end=
'20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,
end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +
year, end='20/12/' + year)}
if date in seasons['spring']:
return 'spring'
if date in seasons['summer']:
return 'summer'
if date in seasons['autumn']:
return 'autumn'
else:
return 'winter'
def kelvin_to_celsius(self, kelvin):
if kelvin is None:
return kelvin
return kelvin - 273.15
<mask token>
def week_of_year(self):
pass
<mask token>
def six_month(self):
self.df['six'] = self.df['month'].apply(lambda x: 1 if x > 6 else 0)
def persiann_precip_mm(self):
self.fill_nan(col='PERSIANN_precip_mm')
def ncep_air_temp_k(self):
self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda
k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_air_temp_c')
def ncep_avg_temp_k(self):
self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda
k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_avg_temp_c')
def ncep_dew_point_temp_k(self):
"""
dew point temperature in Kelvin degrees measured by NCEP CFSR;
:rtype: object
"""
self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'
].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_dew_point_temp_c')
def ncep_max_air_temp_k(self):
self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_max_air_temp_c')
def ncep_min_air_temp_k(self):
self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_min_air_temp_c')
def ncep_precip_kg_per_m2(self):
self.fill_nan(col='NCEP_precip_kg_per_m2')
<mask token>
def ncep_precip_mm(self):
self.fill_nan(col='NCEP_precip_mm')
def ncep_humidity_g_per_kg(self):
self.fill_nan(col='NCEP_humidity_g_per_kg')
def ncep_diur_temp_rng_k(self):
self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'
].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_diur_temp_rng_c')
def avg_temp_c(self):
self.fill_nan(col='avg_temp_c')
def diur_temp_rng_c(self):
self.fill_nan(col='diur_temp_rng_c')
<mask token>
def min_temp_c(self):
self.fill_nan(col='min_temp_c')
def precip_mm(self):
self.fill_nan(col='precip_mm')
<mask token>
def city(self):
self.df = self.df[self.df['city'] != 'sj']
| # Copyright (c) 2021, Omid Erfanmanesh, All rights reserved.
import math
import numpy as np
import pandas as pd
from data.based.based_dataset import BasedDataset
from data.based.file_types import FileTypes
class DengueInfection(BasedDataset):
def __init__(self, cfg, development):
super(DengueInfection, self).__init__(cfg=cfg, dataset_type=FileTypes.TSV, development=development)
if development:
self.total_cases()
self.extract_month()
self.extract_quarter()
self.week_start_date()
# self.six_month()
# self.week_split()
self.city()
self.cyclic_encoder(col='weekofyear',max_val=53)
self.cyclic_encoder(col='month', max_val=12)
self.persiann_precip_mm()
self.ncep_avg_temp_k()
self.ncep_diur_temp_rng_k()
self.ncep_max_air_temp_k()
self.ncep_min_air_temp_k()
self.ncep_air_temp_k()
self.ncep_dew_point_temp_k()
self.avg_temp_c()
self.diur_temp_rng_c()
self.max_temp_c()
self.min_temp_c()
self.precip_mm()
def cyclic_encoder(self, col, max_val):
self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)
self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)
return self.df
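    # Illustrative values: with col='month' and max_val=12, December (12) maps to
    # (sin, cos) ~ (0.0, 1.0) and January (1) to ~ (0.5, 0.87), so the two adjacent
    # months end up close together in the encoded space.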
def fill_nan(self, col):
table = pd.pivot_table(self.df, values=col, index=['year', 'month'],
columns=['city'], aggfunc=np.mean)
self.df[col + '_no_nans'] = self.df[col]
for index, row in self.df.iterrows():
if math.isnan(row[col]):
query = table.query(f'year == "{row["year"]}" & month =="{row["month"]}"').reset_index()
city = row['city']
value = query[city]
if value.empty:
value = self.df.loc[self.df['year'] == row["year"]][col].mean()
self.df.loc[index, [col + '_no_nans']] = value
continue
self.df.loc[index, [col + '_no_nans']] = value[0]
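    # Imputation strategy: a missing value is replaced by the mean of the same
    # (year, month) cell for the row's city from the pivot table above; if that
    # cell is empty, the column's mean over the whole year is used instead.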
def extract_month(self):
self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])
self.df['month'] = self.df['week_start_date'].dt.month
def extract_quarter(self):
self.df['quarter'] = self.df['week_start_date'].dt.quarter
def week_split(self):
self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if x < 25 else 1)
    def season_of_date(self, date):
year = str(date.year)
seasons = {'spring': pd.date_range(start='21/03/' + year, end='20/06/' + year),
'summer': pd.date_range(start='21/06/' + year, end='22/09/' + year),
'autumn': pd.date_range(start='23/09/' + year, end='20/12/' + year)}
if date in seasons['spring']:
return 'spring'
if date in seasons['summer']:
return 'summer'
if date in seasons['autumn']:
return 'autumn'
else:
return 'winter'
def kelvin_to_celsius(self, kelvin):
if kelvin is None:
return kelvin
return kelvin - 273.15
def year(self):
pass
def week_of_year(self):
pass
def week_start_date(self):
pass
def six_month(self):
self.df['six'] = self.df['month'].apply(lambda x: 1 if x > 6 else 0)
def persiann_precip_mm(self):
self.fill_nan(col='PERSIANN_precip_mm')
def ncep_air_temp_k(self):
self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_air_temp_c')
def ncep_avg_temp_k(self):
self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_avg_temp_c')
def ncep_dew_point_temp_k(self):
"""
dew point temperature in Kelvin degrees measured by NCEP CFSR;
:rtype: object
"""
self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_dew_point_temp_c')
def ncep_max_air_temp_k(self):
self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_max_air_temp_c')
def ncep_min_air_temp_k(self):
self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_min_air_temp_c')
def ncep_precip_kg_per_m2(self):
self.fill_nan(col='NCEP_precip_kg_per_m2')
def ncep_humidity_percent(self):
self.fill_nan(col='NCEP_humidity_percent')
def ncep_precip_mm(self):
self.fill_nan(col='NCEP_precip_mm')
def ncep_humidity_g_per_kg(self):
self.fill_nan(col='NCEP_humidity_g_per_kg')
def ncep_diur_temp_rng_k(self):
self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_diur_temp_rng_c')
def avg_temp_c(self):
self.fill_nan(col='avg_temp_c')
def diur_temp_rng_c(self):
self.fill_nan(col='diur_temp_rng_c')
def max_temp_c(self):
self.fill_nan(col='max_temp_c')
def min_temp_c(self):
self.fill_nan(col='min_temp_c')
def precip_mm(self):
self.fill_nan(col='precip_mm')
def total_cases(self):
self.df = self.df[self.df['total_cases'] < 41]
def city(self):
self.df = self.df[self.df['city'] != 'sj']
| [
16,
18,
22,
25,
33
] |
1,140 | 22fe07a237f2c5f531d189c07596a22df191d038 | from vmgCommanderBase import CommanderBase
from vmgInstallerApt import InstallerApt
from vmgInstallerYum import InstallerYum
from vmgConfigLinux import ConfigLinux
from runCommands import *
import shutil
import os
import time
from vmgLogging import *
from writeFormat import *
from vmgControlVmware import *
from vmgUtils import *
""" Functions to write lines in a .vmx file. """
log = logging.getLogger("vmgen.vmgCommanderLxc")
""" The distribution used for container creation parameters. """
distro = {
"debian":{
"vm":"/home/vmgen/vmware/Debian (lxc)/Debian (lxc).vmx",
"hostname":"root@debian-lxc",
"script":"my-lxc-debian.sh",
"scripts-folder":"../scripts-lxc/debian/"},
"fedora":{
"vm":"/home/vmgen/vmware/Fedora 64-bit/Fedora 64-bit.vmx",
"hostname":"root@fedora-lxc",
"script":"my-lxc-fedora.sh",
"scripts-folder":"../scripts-lxc/fedora/"}
}
installer = {
'debian' : InstallerApt,
'ubuntu' : InstallerApt,
'fedora' : InstallerYum
}
""" Container operating system parameters. """
os_params = {
"fedora-64":{
"os":"fedora",
"version":"14",
"arch":"amd64"},
"fedora":{
"os":"fedora",
"version":"14",
"arch":"x86"},
"debian5-64":{
"os":"debian",
"version":"lenny",
"arch":"amd64"},
"debian5":{
"os":"debian",
"version":"lenny",
"arch":"x86"},
}
""" The path in the VMware machine where the container is created. """
path = "/lxc"
class CommanderLxc(CommanderBase):
def setupHardware(self):
log.info("Creating the hardware configuration...")
self.os = self.data.getSection("hardware").get("os")
self.id = self.data.getSection("hardware").get("vm_id")
# extract the os parameters from the config file
os_type = os_params[self.os]["os"]
ver = os_params[self.os]["version"]
arch = os_params[self.os]["arch"]
self.vm = distro[os_type]["vm"]
self.host = distro[os_type]["hostname"]
folder = distro[os_type]["scripts-folder"]
script = distro[os_type]["script"]
self.config = path + "/" + self.id + "/" + "config." + self.id
        self.rootfs = path + "/" + self.id + "/" + "rootfs." + self.id
self.fstab = path + "/" + self.id + "/" + "fstab." + self.id
# set the user and host used for the SSH connection
setUserHost(self.host)
# power on the auxiliary VMware machine
log.info("\tStarting the virtual machine...")
try_power_on_vm(self.vm)
# set default root password
passwd = "pass"
#self.data.getSection("config").get("root_passwd")
# copy the needed scripts to the virtual machine
log.info("\tCopying the scripts to the virtual machine...")
files = os.listdir(folder)
paths = [os.path.join(folder, f) for f in files]
copyFilesToVM(paths, self.host)
for f in files:
executeCommandSSH("chmod a+x " + f)
# create a temp file containing lines to be appended to the container
# config file
log.info("\tFilling up the network section in the config file...")
temp_file = "eth.tmp"
with open(temp_file, "w") as f:
log.info("\Setting memory and CPUs...")
section = self.data.getSection("hardware")
ram = section.get("ram") + "M"
num_cpu = int(section.get("num_cpu"))
if num_cpu == 1:
cpus = "0"
else:
cpus = "0" + "-" + str(num_cpu - 1)
# TODO: the kernel needs support for the memory controller
writeOption(f, "#lxc.cgroup.memory.limit_in_bytes", ram, False)
writeOption(f, "lxc.cgroup.cpuset.cpus", cpus, False)
# create network interfaces
log.info("\tCreating the network interfaces...")
self.eth_list = getSortedValues(section.get("eths").data)
eth_config = getSortedValues(
self.data.getSection("network").get("eths").data)
for i, eth_pair in enumerate(zip(self.eth_list, eth_config)):
i = str(i)
eth, eth_c = eth_pair
eth_name = eth.get("name")
writeOption(f, "lxc.network.type", "veth", False)
writeOption(f, "lxc.network.link", "br0", False)
writeOption(f, "lxc.network.name", eth_name, False)
writeOption(f, "lxc.network.mtu", "1500", False)
# set IP address
ip_type = eth_c.get("type")
if ip_type == "static":
ip = eth_c.get("address")
mask = getNetmaskCIDR(eth_c.get("network"))
else:
ip = "0.0.0.0"
mask = ""
writeOption(f, "lxc.network.ipv4", ip+mask, False)
if eth.contains("connected"):
writeOption(f, "lxc.network.flags", "up", False)
# set MAC address, if present
mac = eth.get("hw_address")
if mac:
writeOption(f, "lxc.network.hwaddr", mac)
# copy the temp file to the virtual machine
copyFileToVM(temp_file, self.host)
os.remove(temp_file)
# run the script on the virtual machine, to create the container
log.info("\tRun the container creation script...")
executeCommandSSH("./" + script + " " + path + " " + self.id + " " +
ver + " " + arch + " " + passwd)
def setupOperatingSystem(self):
pass
def startVM(self):
""" Start the container. """
log.info("\tStarting the container...")
executeCommandSSH("pushd " + path)
executeCommandSSH("lxc-create" + " -n " + self.id + " -f " + self.config)
# executeCommandSSH("lxc-start" + " -n " + self.id + " -f " + self.config)
def shutdownVM(self):
""" Shutdown the container and the virtual machine. """
log.info("\tStopping the container...")
# executeCommandSSH("lxc-stop" + " -n " + self.id)
executeCommandSSH("lxc-destroy" + " -n " + self.id)
executeCommandSSH("shutdown -h now")
def connectToVM(self):
print "\nEstablishing connection to the VM..."
def disconnectFromVM(self):
print "\nTerminating connection to the VM..."
def setupServices(self):
print "\nInstalling services..."
section = self.data.getSection("services")
self.installPrograms(section)
def setupDeveloperTools(self):
print "\nInstalling developer tools..."
section = self.data.getSection("devel")
self.installPrograms(section)
def setupGuiTools(self):
print "\nInstalling GUI tools..."
section = self.data.getSection("gui")
self.installPrograms(section)
def createArchive(self):
executeCommandSSH("cd " + path)
files = self.config + " " + self.fstab + " " + self.rootfs
arch_name = self.id + ".zip"
executeCommandSSH("zip -r " + arch_name + " " + files)
copyFileFromVM(path + "/" + arch_name, "./", self.host)
return [arch_name, ""]
def getModuleName(self):
return "lxc"
def getConfigInstance(self):
return ConfigLinux(self.data, self.communicator)
def getInstallerInstance(self):
vm_os = self.data.getSection("hardware").get("os")
for k in installer.keys():
if str(k) in vm_os:
return installer[k](self.communicator)
return None | null | null | null | null | [
0
] |
1,141 | af6dd7bde25453f25c0701e4ac246ff6bce29fa7 | <mask token>
| <mask token>
for x in range(100, 1000, 2):
x = str(x)
if x[0] == x[1] or x[0] == x[2] or x[1] == x[2]:
k += 1
print(k)
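# k counts the even three-digit numbers that contain at least one repeated digit.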
| k = 0
for x in range(100, 1000, 2):
x = str(x)
if x[0] == x[1] or x[0] == x[2] or x[1] == x[2]:
k += 1
print(k)
| null | null | [
0,
1,
2
] |
1,142 | 38fceb57977cb792be1a63e8571cd222facdf656 | <mask token>
| <mask token>
for i in red:
turtle.forward(200)
turtle.left(90)
turtle.done()
| <mask token>
red = range(4)
for i in red:
turtle.forward(200)
turtle.left(90)
turtle.done()
| import turtle
red = range(4)
for i in red:
turtle.forward(200)
turtle.left(90)
turtle.done()
| import turtle
red = range(4);
for i in red:
turtle.forward(200)
turtle.left(90)
turtle.done() | [
0,
1,
2,
3,
4
] |
1,143 | f0a3778e74d113a5de778fa17ec321c6680c56c2 | <mask token>
def test_burst_evolved():
"""Test burst() in EvolvedCluster"""
cluster = p22.EvolvedCluster('..#\n#..\n...')
assert cluster.infected[p22.Position(0, 2)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 0)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 1)] == p22.State.Clean
cluster.burst()
assert cluster.virus.direction == p22.Directions.left
assert cluster.virus.pos == p22.Position(1, 0)
assert cluster.infected[p22.Position(1, 1)] == p22.State.Weakened
assert cluster.infected[cluster.virus.pos] == p22.State.Infected
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.virus.direction == p22.Directions.up
assert cluster.virus.pos == p22.Position(0, 0)
assert cluster.infected[prev_pos] == p22.State.Flagged
assert cluster.infected[cluster.virus.pos] == p22.State.Clean
@pytest.mark.skip(reason='too slow to test')
def test_solve_b():
"""Tests for solve_b()"""
print('\nTesting solve_b()')
assert p22.solve_b(100, '..#\n#..\n...') == 26
assert p22.solve_b(10000000, '..#\n#..\n...') == 2511944
def test_solve_a0():
"""Tests for solve_a0()"""
print('\nTesting solve_a0()')
assert p22.solve_a0(7, '..#\n#..\n...') == 5
assert p22.solve_a0(70, '..#\n#..\n...') == 41
assert p22.solve_a0(10000, '..#\n#..\n...') == 5587
def test_solve_b0():
"""Tests for solve_b0()"""
print('\nTesting solve_b0()')
assert p22.solve_b0(100, '..#\n#..\n...') == 26
assert p22.solve_b0(10000000, '..#\n#..\n...') == 2511944
| <mask token>
def test_solve_a():
"""Tests for solve_b()"""
print('\nTesting solve_a()')
assert p22.solve_a(7, '..#\n#..\n...') == 5
assert p22.solve_a(70, '..#\n#..\n...') == 41
assert p22.solve_a(10000, '..#\n#..\n...') == 5587
def test_burst_evolved():
"""Test burst() in EvolvedCluster"""
cluster = p22.EvolvedCluster('..#\n#..\n...')
assert cluster.infected[p22.Position(0, 2)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 0)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 1)] == p22.State.Clean
cluster.burst()
assert cluster.virus.direction == p22.Directions.left
assert cluster.virus.pos == p22.Position(1, 0)
assert cluster.infected[p22.Position(1, 1)] == p22.State.Weakened
assert cluster.infected[cluster.virus.pos] == p22.State.Infected
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.virus.direction == p22.Directions.up
assert cluster.virus.pos == p22.Position(0, 0)
assert cluster.infected[prev_pos] == p22.State.Flagged
assert cluster.infected[cluster.virus.pos] == p22.State.Clean
@pytest.mark.skip(reason='too slow to test')
def test_solve_b():
"""Tests for solve_b()"""
print('\nTesting solve_b()')
assert p22.solve_b(100, '..#\n#..\n...') == 26
assert p22.solve_b(10000000, '..#\n#..\n...') == 2511944
def test_solve_a0():
"""Tests for solve_a0()"""
print('\nTesting solve_a0()')
assert p22.solve_a0(7, '..#\n#..\n...') == 5
assert p22.solve_a0(70, '..#\n#..\n...') == 41
assert p22.solve_a0(10000, '..#\n#..\n...') == 5587
def test_solve_b0():
"""Tests for solve_b0()"""
print('\nTesting solve_b0()')
assert p22.solve_b0(100, '..#\n#..\n...') == 26
assert p22.solve_b0(10000000, '..#\n#..\n...') == 2511944
| <mask token>
def test_burst():
"""Test burst() in Cluster"""
print('\nTesting burst()')
cluster = p22.Cluster('..#\n#..\n...')
assert cluster.infected[p22.Position(0, 2)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 0)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 1)] == p22.State.Clean
cluster.burst()
assert cluster.virus.direction == p22.Directions.left
assert cluster.virus.pos == p22.Position(1, 0)
assert cluster.infected[p22.Position(1, 1)] == p22.State.Infected
assert cluster.infected[cluster.virus.pos] == p22.State.Infected
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.virus.direction == p22.Directions.up
assert cluster.virus.pos == p22.Position(0, 0)
assert cluster.infected[prev_pos] == p22.State.Clean
for _ in range(4):
assert cluster.infected[cluster.virus.pos] == p22.State.Clean
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.infected[prev_pos] == p22.State.Infected
assert cluster.virus.pos == p22.Position(0, 0)
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.virus.direction == p22.Directions.right
assert cluster.virus.pos == p22.Position(0, 1)
assert cluster.infected[prev_pos] == p22.State.Clean
assert cluster.infections_caused == 5
def test_solve_a():
"""Tests for solve_b()"""
print('\nTesting solve_a()')
assert p22.solve_a(7, '..#\n#..\n...') == 5
assert p22.solve_a(70, '..#\n#..\n...') == 41
assert p22.solve_a(10000, '..#\n#..\n...') == 5587
def test_burst_evolved():
"""Test burst() in EvolvedCluster"""
cluster = p22.EvolvedCluster('..#\n#..\n...')
assert cluster.infected[p22.Position(0, 2)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 0)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 1)] == p22.State.Clean
cluster.burst()
assert cluster.virus.direction == p22.Directions.left
assert cluster.virus.pos == p22.Position(1, 0)
assert cluster.infected[p22.Position(1, 1)] == p22.State.Weakened
assert cluster.infected[cluster.virus.pos] == p22.State.Infected
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.virus.direction == p22.Directions.up
assert cluster.virus.pos == p22.Position(0, 0)
assert cluster.infected[prev_pos] == p22.State.Flagged
assert cluster.infected[cluster.virus.pos] == p22.State.Clean
@pytest.mark.skip(reason='too slow to test')
def test_solve_b():
"""Tests for solve_b()"""
print('\nTesting solve_b()')
assert p22.solve_b(100, '..#\n#..\n...') == 26
assert p22.solve_b(10000000, '..#\n#..\n...') == 2511944
def test_solve_a0():
"""Tests for solve_a0()"""
print('\nTesting solve_a0()')
assert p22.solve_a0(7, '..#\n#..\n...') == 5
assert p22.solve_a0(70, '..#\n#..\n...') == 41
assert p22.solve_a0(10000, '..#\n#..\n...') == 5587
def test_solve_b0():
"""Tests for solve_b0()"""
print('\nTesting solve_b0()')
assert p22.solve_b0(100, '..#\n#..\n...') == 26
assert p22.solve_b0(10000000, '..#\n#..\n...') == 2511944
| import pytest
import problem22 as p22
def test_burst():
"""Test burst() in Cluster"""
print('\nTesting burst()')
cluster = p22.Cluster('..#\n#..\n...')
assert cluster.infected[p22.Position(0, 2)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 0)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 1)] == p22.State.Clean
cluster.burst()
assert cluster.virus.direction == p22.Directions.left
assert cluster.virus.pos == p22.Position(1, 0)
assert cluster.infected[p22.Position(1, 1)] == p22.State.Infected
assert cluster.infected[cluster.virus.pos] == p22.State.Infected
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.virus.direction == p22.Directions.up
assert cluster.virus.pos == p22.Position(0, 0)
assert cluster.infected[prev_pos] == p22.State.Clean
for _ in range(4):
assert cluster.infected[cluster.virus.pos] == p22.State.Clean
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.infected[prev_pos] == p22.State.Infected
assert cluster.virus.pos == p22.Position(0, 0)
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.virus.direction == p22.Directions.right
assert cluster.virus.pos == p22.Position(0, 1)
assert cluster.infected[prev_pos] == p22.State.Clean
assert cluster.infections_caused == 5
def test_solve_a():
"""Tests for solve_b()"""
print('\nTesting solve_a()')
assert p22.solve_a(7, '..#\n#..\n...') == 5
assert p22.solve_a(70, '..#\n#..\n...') == 41
assert p22.solve_a(10000, '..#\n#..\n...') == 5587
def test_burst_evolved():
"""Test burst() in EvolvedCluster"""
cluster = p22.EvolvedCluster('..#\n#..\n...')
assert cluster.infected[p22.Position(0, 2)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 0)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 1)] == p22.State.Clean
cluster.burst()
assert cluster.virus.direction == p22.Directions.left
assert cluster.virus.pos == p22.Position(1, 0)
assert cluster.infected[p22.Position(1, 1)] == p22.State.Weakened
assert cluster.infected[cluster.virus.pos] == p22.State.Infected
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.virus.direction == p22.Directions.up
assert cluster.virus.pos == p22.Position(0, 0)
assert cluster.infected[prev_pos] == p22.State.Flagged
assert cluster.infected[cluster.virus.pos] == p22.State.Clean
@pytest.mark.skip(reason='too slow to test')
def test_solve_b():
"""Tests for solve_b()"""
print('\nTesting solve_b()')
assert p22.solve_b(100, '..#\n#..\n...') == 26
assert p22.solve_b(10000000, '..#\n#..\n...') == 2511944
def test_solve_a0():
"""Tests for solve_a0()"""
print('\nTesting solve_a0()')
assert p22.solve_a0(7, '..#\n#..\n...') == 5
assert p22.solve_a0(70, '..#\n#..\n...') == 41
assert p22.solve_a0(10000, '..#\n#..\n...') == 5587
def test_solve_b0():
"""Tests for solve_b0()"""
print('\nTesting solve_b0()')
assert p22.solve_b0(100, '..#\n#..\n...') == 26
assert p22.solve_b0(10000000, '..#\n#..\n...') == 2511944
| import pytest
import problem22 as p22
def test_burst():
"""Test burst() in Cluster"""
print('\nTesting burst()')
cluster = p22.Cluster('..#\n#..\n...')
assert cluster.infected[p22.Position(0, 2)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 0)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 1)] == p22.State.Clean
cluster.burst()
assert cluster.virus.direction == p22.Directions.left
assert cluster.virus.pos == p22.Position(1,0)
assert cluster.infected[p22.Position(1,1)] == p22.State.Infected
assert cluster.infected[cluster.virus.pos] == p22.State.Infected
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.virus.direction == p22.Directions.up # turned right
assert cluster.virus.pos == p22.Position(0, 0) # moved up
assert cluster.infected[prev_pos] == p22.State.Clean # cleaned
# four times in a row finds clean and infects
for _ in range(4):
assert cluster.infected[cluster.virus.pos] == p22.State.Clean
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.infected[prev_pos] == p22.State.Infected
assert cluster.virus.pos == p22.Position(0, 0)
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.virus.direction == p22.Directions.right
assert cluster.virus.pos == p22.Position(0, 1)
assert cluster.infected[prev_pos] == p22.State.Clean
assert cluster.infections_caused == 5
def test_solve_a():
"""Tests for solve_b()"""
print('\nTesting solve_a()')
assert p22.solve_a(7, '..#\n#..\n...') == 5
assert p22.solve_a(70, '..#\n#..\n...') == 41
assert p22.solve_a(10000, '..#\n#..\n...') == 5587
def test_burst_evolved():
"""Test burst() in EvolvedCluster"""
cluster = p22.EvolvedCluster('..#\n#..\n...')
assert cluster.infected[p22.Position(0, 2)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 0)] == p22.State.Infected
assert cluster.infected[p22.Position(1, 1)] == p22.State.Clean
cluster.burst()
assert cluster.virus.direction == p22.Directions.left
assert cluster.virus.pos == p22.Position(1,0)
assert cluster.infected[p22.Position(1,1)] == p22.State.Weakened
assert cluster.infected[cluster.virus.pos] == p22.State.Infected
prev_pos = cluster.virus.pos
cluster.burst()
assert cluster.virus.direction == p22.Directions.up
assert cluster.virus.pos == p22.Position(0,0)
assert cluster.infected[prev_pos] == p22.State.Flagged
assert cluster.infected[cluster.virus.pos] == p22.State.Clean
@pytest.mark.skip(reason="too slow to test")
def test_solve_b():
"""Tests for solve_b()"""
print('\nTesting solve_b()')
assert p22.solve_b(100, '..#\n#..\n...') == 26
assert p22.solve_b(10000000, '..#\n#..\n...') == 2511944
def test_solve_a0():
"""Tests for solve_a0()"""
print('\nTesting solve_a0()')
assert p22.solve_a0(7, '..#\n#..\n...') == 5
assert p22.solve_a0(70, '..#\n#..\n...') == 41
assert p22.solve_a0(10000, '..#\n#..\n...') == 5587
def test_solve_b0():
"""Tests for solve_b0()"""
print('\nTesting solve_b0()')
assert p22.solve_b0(100, '..#\n#..\n...') == 26
assert p22.solve_b0(10000000, '..#\n#..\n...') == 2511944 | [
4,
5,
6,
7,
8
] |
1,144 | 87df5481cf2dd5bb990a9b4bd5169d9293d6af79 | #%%
import numpy
import time
import scipy
import os
os.chdir('/home/bbales2/modal')
import pyximport
import seaborn
pyximport.install(reload_support = True)
import polybasisqu
reload(polybasisqu)
#from rotations import symmetry
#from rotations import quaternion
#from rotations import inv_rotations
# basis polynomials are x^n * y^m * z^l where n + m + l <= N
N = 14
density = 8700.0 #4401.695921#
# Dimensions -- watch the scaling
X = .011 #0.007753#
Y = .013 #0.009057#
Z = .019 #0.013199#
c11 = 2.6
anisotropic = 2.8421
c44 = 1.35
c12 = -(c44 * 2.0 / anisotropic - c11)
# Standard deviation around each mode prediction
std = 1.0
# Rotations
w = 1.0
x = 0.0
y = 0.0
z = 0.0
# These are the sampled modes in khz
# Frequencies from SXSA
data = numpy.array([
68.066,
87.434,
104.045,
105.770,
115.270,
122.850,
131.646,
137.702,
139.280,
149.730,
156.548,
156.790,
169.746,
172.139,
173.153,
178.047,
183.433,
188.288,
197.138,
197.869,
198.128,
203.813,
206.794,
212.173,
212.613,
214.528,
215.840,
221.452,
227.569,
232.430])
#%%
c12 = -(c44 * 2.0 / anisotropic - c11)
dp, pv, ddpdX, ddpdY, ddpdZ, dpvdX, dpvdY, dpvdZ = polybasisqu.build(N, X, Y, Z)
C = numpy.array([[c11, c12, c12, 0, 0, 0],
[c12, c11, c12, 0, 0, 0],
[c12, c12, c11, 0, 0, 0],
[0, 0, 0, c44, 0, 0],
[0, 0, 0, 0, c44, 0],
[0, 0, 0, 0, 0, c44]])
w, x, y, z = 0.594755820, -0.202874980, 0.640151553, 0.441942582
#w, x, y, z = 1.0, 0.0, 0.0, 0.0
#w, x, y, z = [0.87095, 0.17028, 0.03090, 0.45989]
#w, x, y, z = [0.93894, -0.09845, -0.14279, -0.29717]
C, dCdw, dCdx, dCdy, dCdz, Kt = polybasisqu.buildRot(C, w, x, y, z)
K, M = polybasisqu.buildKM(C, dp, pv, density)
eigs2, evecs = scipy.linalg.eigh(K, M, eigvals = (6, 6 + 30 - 1))
freqs = numpy.sqrt(eigs2 * 1e11) / (numpy.pi * 2000)
print "computed, accepted"
for e1, dat in zip(freqs, data):
print "{0:0.5f} {1:0.3f}".format(e1, dat)
#freqs + 0.25 * numpy.random.randn(len(freqs))
#%%
dCdc11 = numpy.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]], dtype = 'float64')
dCdc11 = Kt.dot(dCdc11).dot(Kt.T)
dCdc12 = numpy.array([[0, 1, 1, 0, 0, 0],
[1, 0, 1, 0, 0, 0],
[1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0]], dtype = 'float64')
dCdc12 = Kt.dot(dCdc12).dot(Kt.T)
dCdc44 = numpy.array([[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1]], dtype = 'float64')
dCdc44 = Kt.dot(dCdc44).dot(Kt.T)
if True:
dKdw, _ = polybasisqu.buildKM(dCdw, dp, pv, density)
dKdx, _ = polybasisqu.buildKM(dCdx, dp, pv, density)
dKdy, _ = polybasisqu.buildKM(dCdy, dp, pv, density)
dKdz, _ = polybasisqu.buildKM(dCdz, dp, pv, density)
dKdc11, _ = polybasisqu.buildKM(dCdc11, dp, pv, density)
dKdc12, _ = polybasisqu.buildKM(dCdc12, dp, pv, density)
dKdc44, _ = polybasisqu.buildKM(dCdc44, dp, pv, density)
dldw = numpy.array([evecs[:, i].T.dot(dKdw.dot(evecs[:, i])) for i in range(evecs.shape[1])])
dldx = numpy.array([evecs[:, i].T.dot(dKdx.dot(evecs[:, i])) for i in range(evecs.shape[1])])
dldy = numpy.array([evecs[:, i].T.dot(dKdy.dot(evecs[:, i])) for i in range(evecs.shape[1])])
dldz = numpy.array([evecs[:, i].T.dot(dKdz.dot(evecs[:, i])) for i in range(evecs.shape[1])])
dldc11 = numpy.array([evecs[:, i].T.dot(dKdc11.dot(evecs[:, i])) for i in range(evecs.shape[1])])
dldc12 = numpy.array([evecs[:, i].T.dot(dKdc12.dot(evecs[:, i])) for i in range(evecs.shape[1])])
dldc44 = numpy.array([evecs[:, i].T.dot(dKdc44.dot(evecs[:, i])) for i in range(evecs.shape[1])])
#%%
for a, b, c in zip(dldc11, dldc12, dldc44):
print a, b, c
#%%
for f1, f2, f3 in zip(freqs1, freqs2, freqs3[:30]):
print ", ".join(["{0:.2f}".format(a) for a in [f1, f2, f3]])
#%%
print "minimum (y = -0.015), y = 0.0, measured, error vs. y = -0.015, error vs. y = 0.0"
for e1, e2, dat in zip(eigs, eigs2, data):
print "{0:0.3f} {1:0.3f} {2:0.3f} {3:0.3f} {4:0.3f}".format(e1, e2, dat, numpy.abs(e1 - dat), numpy.abs(e2 - dat))
| null | null | null | null | [
0
] |
1,145 | 600691b87f7776e96bbf439d7195b870ed86090b | <mask token>
def configure_distro(distro='debian', arch='i386', release='unstable'):
if distro not in ['ubuntu', 'debian']:
print('Unknown Distro %s' % distro)
return False
if distro == 'ubuntu':
if arch in ['amd64', 'i386']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:
distro_conf['debootstrap_mirror'
] = 'http://ports.ubuntu.com/ubuntu-ports'
elif arch in ['powerpc']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
distro_conf['components'] = ['main', 'restricted', 'universe',
'multiverse']
distro_conf['keyring'
] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'
elif distro == 'debian':
distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'
distro_conf['components'] = ['main', 'non-free', 'contrib']
distro_conf['source_security_suites'] = 'RELEASE/updates'
distro_conf['source_security_url'] = 'http://security.debian.org/'
distro_conf['skip_updates'] = True
if release in ['unstable', 'sid']:
distro_conf['skip_security'] = True
distro_conf['keyring'
] = '/usr/share/keyrings/debian-archive-keyring.gpg'
def check_chroot_path(start_path, end_path):
if os.path.ismount(start_path):
print('%s is mounted' % start_path)
else:
print('%s is not mounted' % start_path)
exit()
complete_path = os.path.join(start_path, end_path)
cmd = 'btrfs subvolume list "%s" > /dev/null 2>&1' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
if not p.returncode:
        print('E: %s already exists!' % complete_path)
exit()
else:
cmd = 'btrfs subvolume create "%s"' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
<mask token>
| <mask token>
def configure_distro(distro='debian', arch='i386', release='unstable'):
if distro not in ['ubuntu', 'debian']:
print('Unknown Distro %s' % distro)
return False
if distro == 'ubuntu':
if arch in ['amd64', 'i386']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:
distro_conf['debootstrap_mirror'
] = 'http://ports.ubuntu.com/ubuntu-ports'
elif arch in ['powerpc']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
distro_conf['components'] = ['main', 'restricted', 'universe',
'multiverse']
distro_conf['keyring'
] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'
elif distro == 'debian':
distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'
distro_conf['components'] = ['main', 'non-free', 'contrib']
distro_conf['source_security_suites'] = 'RELEASE/updates'
distro_conf['source_security_url'] = 'http://security.debian.org/'
distro_conf['skip_updates'] = True
if release in ['unstable', 'sid']:
distro_conf['skip_security'] = True
distro_conf['keyring'
] = '/usr/share/keyrings/debian-archive-keyring.gpg'
def check_chroot_path(start_path, end_path):
if os.path.ismount(start_path):
print('%s is mounted' % start_path)
else:
print('%s is not mounted' % start_path)
exit()
complete_path = os.path.join(start_path, end_path)
cmd = 'btrfs subvolume list "%s" > /dev/null 2>&1' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
if not p.returncode:
        print('E: %s already exists!' % complete_path)
exit()
else:
cmd = 'btrfs subvolume create "%s"' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
if __name__ == '__main__':
if os.geteuid() != 0:
print('You must be root')
exit()
parser = argparse.ArgumentParser(description='Create a Sbuild Chroot',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d', '--distro', metavar='DISTRIBUTION', help=
'Install specific distro', default='debian')
parser.add_argument('-a', '--arch', metavar='ARCHITECTURE', help=
'What architecture to select', default='i386')
parser.add_argument('-r', '--release', help='What release to select',
default='unstable')
args = parser.parse_args()
chroot_end_path = os.path.join(args.distro, '-'.join([args.release,
args.arch]))
check_chroot_path(chroot_start_path, chroot_end_path)
configure_distro(args.distro, args.arch, args.release)
pprint(distro_conf)
cmd = ['sbuild-createchroot', '--verbose', '--keyring=%s' % distro_conf
['keyring'], '--arch=%s' % args.arch, '--include=%s' % include,
'--components=%s' % ','.join(distro_conf['components']), args.
release, os.path.join(chroot_start_path, chroot_end_path),
distro_conf['debootstrap_mirror']]
pprint(cmd)
p = subprocess.Popen(cmd, cwd='/')
p.wait()
| <mask token>
chroot_start_path = '/srv/chroot'
chroots_conf = '/etc/schroot/chroot.d'
build_pkgs = 'build-essential fakeroot devscripts apt-utils'
include = 'eatmydata,ccache,lintian'
distro_conf = {'debootstrap_mirror': None, 'components': None,
'source_security_suites': None, 'source_security_url': None,
'skip_updates': False, 'skip_security': False, 'keyring': None}
def configure_distro(distro='debian', arch='i386', release='unstable'):
if distro not in ['ubuntu', 'debian']:
print('Unknown Distro %s' % distro)
return False
if distro == 'ubuntu':
if arch in ['amd64', 'i386']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:
distro_conf['debootstrap_mirror'
] = 'http://ports.ubuntu.com/ubuntu-ports'
elif arch in ['powerpc']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
distro_conf['components'] = ['main', 'restricted', 'universe',
'multiverse']
distro_conf['keyring'
] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'
elif distro == 'debian':
distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'
distro_conf['components'] = ['main', 'non-free', 'contrib']
distro_conf['source_security_suites'] = 'RELEASE/updates'
distro_conf['source_security_url'] = 'http://security.debian.org/'
distro_conf['skip_updates'] = True
if release in ['unstable', 'sid']:
distro_conf['skip_security'] = True
distro_conf['keyring'
] = '/usr/share/keyrings/debian-archive-keyring.gpg'
def check_chroot_path(start_path, end_path):
if os.path.ismount(start_path):
print('%s is mounted' % start_path)
else:
print('%s is not mounted' % start_path)
exit()
complete_path = os.path.join(start_path, end_path)
cmd = 'btrfs subvolume list "%s" > /dev/null 2>&1' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
if not p.returncode:
        print('E: %s already exists!' % complete_path)
exit()
else:
cmd = 'btrfs subvolume create "%s"' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
if __name__ == '__main__':
if os.geteuid() != 0:
print('You must be root')
exit()
parser = argparse.ArgumentParser(description='Create a Sbuild Chroot',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d', '--distro', metavar='DISTRIBUTION', help=
'Install specific distro', default='debian')
parser.add_argument('-a', '--arch', metavar='ARCHITECTURE', help=
'What architecture to select', default='i386')
parser.add_argument('-r', '--release', help='What release to select',
default='unstable')
args = parser.parse_args()
chroot_end_path = os.path.join(args.distro, '-'.join([args.release,
args.arch]))
check_chroot_path(chroot_start_path, chroot_end_path)
configure_distro(args.distro, args.arch, args.release)
pprint(distro_conf)
cmd = ['sbuild-createchroot', '--verbose', '--keyring=%s' % distro_conf
['keyring'], '--arch=%s' % args.arch, '--include=%s' % include,
'--components=%s' % ','.join(distro_conf['components']), args.
release, os.path.join(chroot_start_path, chroot_end_path),
distro_conf['debootstrap_mirror']]
pprint(cmd)
p = subprocess.Popen(cmd, cwd='/')
p.wait()
| import sys, os
import argparse
import subprocess
from pprint import pprint
chroot_start_path = '/srv/chroot'
chroots_conf = '/etc/schroot/chroot.d'
build_pkgs = 'build-essential fakeroot devscripts apt-utils'
include = 'eatmydata,ccache,lintian'
distro_conf = {'debootstrap_mirror': None, 'components': None,
'source_security_suites': None, 'source_security_url': None,
'skip_updates': False, 'skip_security': False, 'keyring': None}
def configure_distro(distro='debian', arch='i386', release='unstable'):
if distro not in ['ubuntu', 'debian']:
print('Unknown Distro %s' % distro)
return False
if distro == 'ubuntu':
if arch in ['amd64', 'i386']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
elif arch in ['armel', 'hppa', 'ia64', 'lpia', 'sparc']:
distro_conf['debootstrap_mirror'
] = 'http://ports.ubuntu.com/ubuntu-ports'
elif arch in ['powerpc']:
distro_conf['debootstrap_mirror'
] = 'http://archive.ubuntu.com/ubuntu'
distro_conf['components'] = ['main', 'restricted', 'universe',
'multiverse']
distro_conf['keyring'
] = '/usr/share/keyrings/ubuntu-archive-keyring.gpg'
elif distro == 'debian':
distro_conf['debootstrap_mirror'] = 'http://ftp.debian.org/debian'
distro_conf['components'] = ['main', 'non-free', 'contrib']
distro_conf['source_security_suites'] = 'RELEASE/updates'
distro_conf['source_security_url'] = 'http://security.debian.org/'
distro_conf['skip_updates'] = True
if release in ['unstable', 'sid']:
distro_conf['skip_security'] = True
distro_conf['keyring'
] = '/usr/share/keyrings/debian-archive-keyring.gpg'
def check_chroot_path(start_path, end_path):
if os.path.ismount(start_path):
print('%s is mounted' % start_path)
else:
print('%s is not mounted' % start_path)
exit()
complete_path = os.path.join(start_path, end_path)
cmd = 'btrfs subvolume list "%s" > /dev/null 2>&1' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
if not p.returncode:
        print('E: %s already exists!' % complete_path)
exit()
else:
cmd = 'btrfs subvolume create "%s"' % complete_path
p = subprocess.Popen(cmd, cwd='/', shell=True)
p.wait()
print(p.returncode)
if __name__ == '__main__':
if os.geteuid() != 0:
print('You must be root')
exit()
parser = argparse.ArgumentParser(description='Create a Sbuild Chroot',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d', '--distro', metavar='DISTRIBUTION', help=
'Install specific distro', default='debian')
parser.add_argument('-a', '--arch', metavar='ARCHITECTURE', help=
'What architecture to select', default='i386')
parser.add_argument('-r', '--release', help='What release to select',
default='unstable')
args = parser.parse_args()
chroot_end_path = os.path.join(args.distro, '-'.join([args.release,
args.arch]))
check_chroot_path(chroot_start_path, chroot_end_path)
configure_distro(args.distro, args.arch, args.release)
pprint(distro_conf)
cmd = ['sbuild-createchroot', '--verbose', '--keyring=%s' % distro_conf
['keyring'], '--arch=%s' % args.arch, '--include=%s' % include,
'--components=%s' % ','.join(distro_conf['components']), args.
release, os.path.join(chroot_start_path, chroot_end_path),
distro_conf['debootstrap_mirror']]
pprint(cmd)
p = subprocess.Popen(cmd, cwd='/')
p.wait()
| #!/usr/bin/python
import sys,os
import argparse
import subprocess
from pprint import pprint
chroot_start_path="/srv/chroot"
chroots_conf="/etc/schroot/chroot.d"
build_pkgs = 'build-essential fakeroot devscripts apt-utils'
include = 'eatmydata,ccache,lintian'
distro_conf={
'debootstrap_mirror':None,
'components':None,
'source_security_suites':None,
'source_security_url':None,
'skip_updates':False,
'skip_security':False,
'keyring':None,
}
def configure_distro(distro="debian",arch="i386",release="unstable"):
if distro not in ['ubuntu','debian']:
print("Unknown Distro %s" % distro)
return False
if (distro == 'ubuntu'):
if ( arch in ['amd64','i386'] ):
distro_conf['debootstrap_mirror'] = "http://archive.ubuntu.com/ubuntu"
elif ( arch in ['armel', 'hppa', 'ia64' , 'lpia', 'sparc'] ):
distro_conf['debootstrap_mirror'] = "http://ports.ubuntu.com/ubuntu-ports"
elif ( arch in ['powerpc'] ):
distro_conf['debootstrap_mirror'] = "http://archive.ubuntu.com/ubuntu"
distro_conf['components'] = ['main','restricted', 'universe', 'multiverse']
distro_conf['keyring'] = "/usr/share/keyrings/ubuntu-archive-keyring.gpg"
elif (distro == 'debian'):
distro_conf['debootstrap_mirror'] = "http://ftp.debian.org/debian"
distro_conf['components'] = ['main','non-free','contrib']
distro_conf['source_security_suites'] = "RELEASE/updates"
distro_conf['source_security_url'] = "http://security.debian.org/"
#Debian only performs security updates
distro_conf['skip_updates'] = True
if (release in ['unstable','sid'] ):
distro_conf['skip_security'] = True
distro_conf['keyring'] = "/usr/share/keyrings/debian-archive-keyring.gpg"
def check_chroot_path(start_path,end_path):
if( os.path.ismount( start_path ) ) :
print("%s is mounted" % start_path)
else:
print("%s is not mounted" % start_path)
exit()
complete_path = os.path.join(start_path,end_path)
cmd = 'btrfs subvolume list "%s" > /dev/null 2>&1' % complete_path
p = subprocess.Popen(cmd,cwd='/',shell=True)
p.wait()
print(p.returncode)
if (not p.returncode):
print("E: %s already exist!"%complete_path)
exit()
else:
cmd = 'btrfs subvolume create "%s"' % complete_path
p = subprocess.Popen(cmd,cwd='/',shell=True)
p.wait()
print(p.returncode)
if __name__ == "__main__":
if os.geteuid() != 0:
print("You must be root")
exit()
parser = argparse.ArgumentParser(description="Create a Sbuild Chroot",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-d','--distro',metavar="DISTRIBUTION",help="Install specific distro",default="debian")
parser.add_argument('-a','--arch',metavar="ARCHITECTURE",help="What architecture to select",default="i386")
parser.add_argument('-r','--release',help="What release to select",default="unstable")
args = parser.parse_args()
chroot_end_path = os.path.join( args.distro , "-".join([args.release,args.arch]) )
check_chroot_path(chroot_start_path,chroot_end_path)
configure_distro(args.distro,args.arch,args.release)
pprint(distro_conf)
cmd = [ 'sbuild-createchroot' ,
'--verbose',
'--keyring=%s' % distro_conf['keyring'] ,
'--arch=%s' % args.arch ,
'--include=%s' % include,
'--components=%s' % ",".join(distro_conf['components']),
args.release ,
os.path.join(chroot_start_path,chroot_end_path),
distro_conf['debootstrap_mirror'],
]
pprint(cmd)
p = subprocess.Popen(cmd,cwd='/')
p.wait()
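# Illustrative note (not captured from a real run): with the defaults above
# (distro=debian, release=unstable, arch=i386) the command assembled by this
# script expands to roughly
#   sbuild-createchroot --verbose \
#     --keyring=/usr/share/keyrings/debian-archive-keyring.gpg \
#     --arch=i386 --include=eatmydata,ccache,lintian \
#     --components=main,non-free,contrib \
#     unstable /srv/chroot/debian/unstable-i386 http://ftp.debian.org/debian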
| [
2,
3,
4,
5,
6
] |
1,146 | b44f75db652b3a40cd9475bfe44027724e845252 | <mask token>
| <mask token>
ensurepip.bootstrap()
try:
import pip
except ImportError:
print(
'Error: Failed to install pip, make sure you are running this script as admin.'
)
sys.exit()
<mask token>
print('You are using Python' + str(sys.version_info[0]) + str(sys.
version_info[1]) + ' ' + arch + '.')
if sys.version_info[0] == 2 and sys.version_info[1] == 7:
if arch == '64bit':
wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win_amd64.whl'
else:
wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win32.whl'
elif sys.version_info[0] == 3 and sys.version_info[1] in (4, 5, 6):
if sys.version_info[1] == 4:
if arch == '64bit':
wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win_amd64.whl'
else:
wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win32.whl'
elif sys.version_info[1] == 5:
if arch == '64bit':
wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win_amd64.whl'
else:
wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win32.whl'
elif sys.version_info[1] == 6:
if arch == '64bit':
wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win_amd64.whl'
else:
wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win32.whl'
else:
print('Pygame only supports Python 27, 34, 35 and 36.')
sys.exit()
if pip.main(['install', wheelUrl]) == 0:
print('Pygame should now be installed.')
else:
print('Something went wrong during the installation of pygame.')
os.system('pause')
| <mask token>
ensurepip.bootstrap()
try:
import pip
except ImportError:
print(
'Error: Failed to install pip, make sure you are running this script as admin.'
)
sys.exit()
arch = platform.architecture()[0]
wheelUrl = (
'https://raw.githubusercontent.com/Starfox64/pygame-installer/master/wheels/'
)
print('You are using Python' + str(sys.version_info[0]) + str(sys.
version_info[1]) + ' ' + arch + '.')
if sys.version_info[0] == 2 and sys.version_info[1] == 7:
if arch == '64bit':
wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win_amd64.whl'
else:
wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win32.whl'
elif sys.version_info[0] == 3 and sys.version_info[1] in (4, 5, 6):
if sys.version_info[1] == 4:
if arch == '64bit':
wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win_amd64.whl'
else:
wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win32.whl'
elif sys.version_info[1] == 5:
if arch == '64bit':
wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win_amd64.whl'
else:
wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win32.whl'
elif sys.version_info[1] == 6:
if arch == '64bit':
wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win_amd64.whl'
else:
wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win32.whl'
else:
print('Pygame only supports Python 27, 34, 35 and 36.')
sys.exit()
if pip.main(['install', wheelUrl]) == 0:
print('Pygame should now be installed.')
else:
print('Something went wrong during the installation of pygame.')
os.system('pause')
| import platform, sys, os, ensurepip
ensurepip.bootstrap()
try:
import pip
except ImportError:
print(
'Error: Failed to install pip, make sure you are running this script as admin.'
)
sys.exit()
arch = platform.architecture()[0]
wheelUrl = (
'https://raw.githubusercontent.com/Starfox64/pygame-installer/master/wheels/'
)
print('You are using Python' + str(sys.version_info[0]) + str(sys.
version_info[1]) + ' ' + arch + '.')
if sys.version_info[0] == 2 and sys.version_info[1] == 7:
if arch == '64bit':
wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win_amd64.whl'
else:
wheelUrl += 'pygame-1.9.2b1-cp27-cp27m-win32.whl'
elif sys.version_info[0] == 3 and sys.version_info[1] in (4, 5, 6):
if sys.version_info[1] == 4:
if arch == '64bit':
wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win_amd64.whl'
else:
wheelUrl += 'pygame-1.9.2b1-cp34-cp34m-win32.whl'
elif sys.version_info[1] == 5:
if arch == '64bit':
wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win_amd64.whl'
else:
wheelUrl += 'pygame-1.9.2b1-cp35-cp35m-win32.whl'
elif sys.version_info[1] == 6:
if arch == '64bit':
wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win_amd64.whl'
else:
wheelUrl += 'pygame-1.9.2b8-cp36-cp36m-win32.whl'
else:
print('Pygame only supports Python 27, 34, 35 and 36.')
sys.exit()
if pip.main(['install', wheelUrl]) == 0:
print('Pygame should now be installed.')
else:
print('Something went wrong during the installation of pygame.')
os.system('pause')
| import platform, sys, os, ensurepip
ensurepip.bootstrap()
try:
import pip
except ImportError:
print("Error: Failed to install pip, make sure you are running this script as admin.")
sys.exit()
arch = platform.architecture()[0]
wheelUrl = "https://raw.githubusercontent.com/Starfox64/pygame-installer/master/wheels/"
print("You are using Python" + str(sys.version_info[0]) + str(sys.version_info[1]) + " " + arch + ".")
if sys.version_info[0] == 2 and sys.version_info[1] == 7:
if arch == "64bit":
wheelUrl += "pygame-1.9.2b1-cp27-cp27m-win_amd64.whl"
else:
wheelUrl += "pygame-1.9.2b1-cp27-cp27m-win32.whl"
elif sys.version_info[0] == 3 and sys.version_info[1] in (4, 5, 6):
if sys.version_info[1] == 4:
if arch == "64bit":
wheelUrl += "pygame-1.9.2b1-cp34-cp34m-win_amd64.whl"
else:
wheelUrl += "pygame-1.9.2b1-cp34-cp34m-win32.whl"
elif sys.version_info[1] == 5:
if arch == "64bit":
wheelUrl += "pygame-1.9.2b1-cp35-cp35m-win_amd64.whl"
else:
wheelUrl += "pygame-1.9.2b1-cp35-cp35m-win32.whl"
elif sys.version_info[1] == 6:
if arch == "64bit":
wheelUrl += "pygame-1.9.2b8-cp36-cp36m-win_amd64.whl"
else:
wheelUrl += "pygame-1.9.2b8-cp36-cp36m-win32.whl"
else:
print("Pygame only supports Python 27, 34, 35 and 36.")
sys.exit()
if pip.main(["install", wheelUrl]) == 0:
print("Pygame should now be installed.")
else:
print("Something went wrong during the installation of pygame.")
os.system("pause")
| [
0,
1,
2,
3,
4
] |
1,147 | 0cf5b009f384d2ca7162b5a88699afb3702ae1f6 | <mask token>
class Timer(object):
<mask token>
def reset(self):
self.time_ = 0.0
self.start_ = 0.0
def start(self):
self.start_ = time.clock()
def end(self):
self.time_ += time.clock() - self.start_
<mask token>
| <mask token>
class Timer(object):
def __init__(self):
self.time_ = 0.0
self.start_ = 0.0
def reset(self):
self.time_ = 0.0
self.start_ = 0.0
def start(self):
self.start_ = time.clock()
def end(self):
self.time_ += time.clock() - self.start_
<mask token>
| <mask token>
class Timer(object):
def __init__(self):
self.time_ = 0.0
self.start_ = 0.0
def reset(self):
self.time_ = 0.0
self.start_ = 0.0
def start(self):
self.start_ = time.clock()
def end(self):
self.time_ += time.clock() - self.start_
def timing(timer):
"""Decorator for timing.
Example:
timer = Timer()
@timing(timer)
def foo():
pass
:param timer:
"""
def real_timing(function):
def advice(*args, **kwargs):
timer.start()
result = function(*args, **kwargs)
timer.end()
return result
return advice
return real_timing
| import time
class Timer(object):
def __init__(self):
self.time_ = 0.0
self.start_ = 0.0
def reset(self):
self.time_ = 0.0
self.start_ = 0.0
def start(self):
self.start_ = time.clock()
def end(self):
self.time_ += time.clock() - self.start_
def timing(timer):
"""Decorator for timing.
Example:
timer = Timer()
@timing(timer)
def foo():
pass
:param timer:
"""
def real_timing(function):
def advice(*args, **kwargs):
timer.start()
result = function(*args, **kwargs)
timer.end()
return result
return advice
return real_timing
| #!/usr/bin/env python
# coding: utf-8
import time
class Timer(object):
def __init__(self):
self.time_ = 0.
self.start_ = 0.
def reset(self):
self.time_ = 0.
self.start_ = 0.
def start(self):
self.start_ = time.clock()
def end(self):
self.time_ += time.clock() - self.start_
def timing(timer):
"""Decorator for timing.
Example:
timer = Timer()
@timing(timer)
def foo():
pass
:param timer:
"""
def real_timing(function):
def advice(*args, **kwargs):
timer.start()
result = function(*args, **kwargs)
timer.end()
return result
return advice
return real_timing
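# Usage sketch for the Timer/timing pair above (illustrative only). Note that
# time.clock() was removed in Python 3.8, so on current interpreters you would
# swap in time.perf_counter() inside Timer.
timer = Timer()

@timing(timer)
def busy_work(n):
    return sum(i * i for i in range(n))

busy_work(100000)
busy_work(100000)
print(timer.time_)  # seconds accumulated across both calls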
| [
4,
5,
6,
7,
8
] |
1,148 | 65301be73bb56147609a103a932266013c3c0bd6 | <mask token>
| <mask token>
print(
'Bienvenido a este programa para que introduzcas una frase y un carácter, y decirte cuántas veces aparece ese carácter en tu frase.'
)
print(
"""----------------------------------------------------------------------------------------------------------------------------------
"""
)
<mask token>
while i < len(ourString):
if ourString[i] == ourChar:
counter += 1
i += 1
print(f"""
El carácter {ourChar} aparece {counter} veces.""")
| <mask token>
print(
'Bienvenido a este programa para que introduzcas una frase y un carácter, y decirte cuántas veces aparece ese carácter en tu frase.'
)
print(
"""----------------------------------------------------------------------------------------------------------------------------------
"""
)
ourString = input('Escribe lo que quieras: ')
ourChar = input('Escribe un solo carácter: ')
counter = 0
i = 0
while i < len(ourString):
if ourString[i] == ourChar:
counter += 1
i += 1
print(f"""
El carácter {ourChar} aparece {counter} veces.""")
| """
Asks for a string and a character from the keyboard and shows how many times the character appears in the string.
Author: David Galván Fontalba
Date: 27/10/2019
Algorithm:
Ask for a string
Ask for a character
counter at 0
Make a variable i that starts at 0
while i <= len(string)
if string[i] == character
counter +1
else
i +1
end
"""
print("Bienvenido a este programa para que introduzcas una frase y un carácter, y decirte cuántas veces aparece ese carácter en tu frase.")
print("----------------------------------------------------------------------------------------------------------------------------------\n")
ourString = input("Escribe lo que quieras: ")
ourChar = input("Escribe un solo carácter: ")
counter = 0
i = 0
while i < len(ourString) :
if ourString[i] == ourChar :
counter += 1
i += 1
print(f"\nEl carácter {ourChar} aparece {counter} veces.") | null | [
0,
1,
2,
3
] |
1,149 | 96065e7e61b63f915561f117d71092e4bfb9a5da | <mask token>
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
def test_invalid_regex():
exception = IntegrityError if connection.vendor == 'sqlite' else DataError
with pytest.raises(exception):
Page.objects.create(url='(?P<match>.*)')
| <mask token>
def test_match():
Page.objects.create(url='^/[A-Z]*/$')
assert Page.objects.filter(url__match='/PATH/')
assert not Page.objects.filter(url__match='/path/')
def test_imatch():
Page.objects.create(url='^/[a-z]*/$')
assert Page.objects.filter(url__imatch='/path/')
assert Page.objects.filter(url__imatch='/PATH/')
<mask token>
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
def test_invalid_regex():
exception = IntegrityError if connection.vendor == 'sqlite' else DataError
with pytest.raises(exception):
Page.objects.create(url='(?P<match>.*)')
| <mask token>
def test_match():
Page.objects.create(url='^/[A-Z]*/$')
assert Page.objects.filter(url__match='/PATH/')
assert not Page.objects.filter(url__match='/path/')
def test_imatch():
Page.objects.create(url='^/[a-z]*/$')
assert Page.objects.filter(url__imatch='/path/')
assert Page.objects.filter(url__imatch='/PATH/')
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
@pytest.mark.parametrize('regex', ('', '.*', '.?', '[\\w]*', '[\\w]?'))
def test_empty_regex(regex):
with pytest.raises(IntegrityError):
Page.objects.create(url=regex)
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
def test_invalid_regex():
exception = IntegrityError if connection.vendor == 'sqlite' else DataError
with pytest.raises(exception):
Page.objects.create(url='(?P<match>.*)')
| <mask token>
pytestmark = pytest.mark.django_db
MYSQL_REASON = 'MySQL parses check constraints but are ignored by all engines'
def test_match():
Page.objects.create(url='^/[A-Z]*/$')
assert Page.objects.filter(url__match='/PATH/')
assert not Page.objects.filter(url__match='/path/')
def test_imatch():
Page.objects.create(url='^/[a-z]*/$')
assert Page.objects.filter(url__imatch='/path/')
assert Page.objects.filter(url__imatch='/PATH/')
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
@pytest.mark.parametrize('regex', ('', '.*', '.?', '[\\w]*', '[\\w]?'))
def test_empty_regex(regex):
with pytest.raises(IntegrityError):
Page.objects.create(url=regex)
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
def test_invalid_regex():
exception = IntegrityError if connection.vendor == 'sqlite' else DataError
with pytest.raises(exception):
Page.objects.create(url='(?P<match>.*)')
| from __future__ import absolute_import, unicode_literals
from django.db import DataError, IntegrityError, connection
import pytest
from .models import Page
pytestmark = pytest.mark.django_db
MYSQL_REASON = 'MySQL parses check constraints but are ignored by all engines'
def test_match():
Page.objects.create(url='^/[A-Z]*/$')
assert Page.objects.filter(url__match='/PATH/')
assert not Page.objects.filter(url__match='/path/')
def test_imatch():
Page.objects.create(url='^/[a-z]*/$')
assert Page.objects.filter(url__imatch='/path/')
assert Page.objects.filter(url__imatch='/PATH/')
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
@pytest.mark.parametrize('regex', ('', '.*', '.?', '[\w]*', '[\w]?'))
def test_empty_regex(regex):
with pytest.raises(IntegrityError):
Page.objects.create(url=regex)
@pytest.mark.skipif('connection.vendor == "mysql"', reason=MYSQL_REASON)
def test_invalid_regex():
exception = IntegrityError if connection.vendor == 'sqlite' else DataError
with pytest.raises(exception):
Page.objects.create(url='(?P<match>.*)')
| [
1,
3,
4,
5,
7
] |
1,150 | 833c8234d829dfa1937392f0ad4952aeffa4e26d | <mask token>
| def is_balanced(tree_root):
if tree_root is None:
return True
nodeQ = [(tree_root, 0)]
depths = []
while len(nodeQ):
last_node, depth = nodeQ.pop()
if not last_node.left and not last_node.right:
if depth not in depths:
depths.append(depth)
if len(depths) > 1 and max(depths) - min(depths) > 1:
return False
else:
if last_node.left:
nodeQ.append((last_node.left, depth + 1))
if last_node.right:
nodeQ.append((last_node.right, depth + 1))
return True
| def is_balanced(tree_root):
# Determine if the tree is superbalanced
if tree_root is None:
return True
nodeQ = [(tree_root, 0)]
depths = []
while len(nodeQ):
last_node, depth = nodeQ.pop()
if( not last_node.left ) and (not last_node.right ):
if depth not in depths:
depths.append(depth)
if ((len(depths) > 1) and (max(depths) - min(depths) > 1)):
return False
else:
if(last_node.left):
nodeQ.append((last_node.left, depth + 1))
if(last_node.right):
nodeQ.append((last_node.right, depth + 1))
return True
# store node pointer and depth as tuples
# pop together and store in variables node, depth
# append node.right, node.left
# put in while loop until list is empty
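# Usage sketch for is_balanced() above. The original file ships no node class,
# so this minimal BinaryTreeNode with .left/.right attributes is an assumption
# made purely for illustration.
class BinaryTreeNode:
    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right

# Leaf depths are 1 and 2 -> difference <= 1, so this tree counts as balanced.
root = BinaryTreeNode(1,
                      left=BinaryTreeNode(2, left=BinaryTreeNode(4)),
                      right=BinaryTreeNode(3))
print(is_balanced(root))  # True

# One extra level on a single branch makes the leaf depths 1 and 3.
root.left.left.left = BinaryTreeNode(5)
print(is_balanced(root))  # False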
| null | null | [
0,
1,
2
] |
1,151 | 6d5acaa4a60b646432feb59f4d8eb9c9d0dceb0f | <mask token>
| <mask token>
def block(request, limit=None):
try:
links = cache.get_cache('sape', expire=3600).get(key='links',
createfunc=load_links)
except:
links = cache.get_cache('sape', expire=300).get(key='links',
createfunc=lambda : {})
if request.path in links:
if not hasattr(request, 'sape_links_shown'):
request.sape_links_shown = 0
slc = links[request.path][request.sape_links_shown:request.
sape_links_shown + limit if limit is not None else None]
request.sape_links_shown += len(slc)
if slc:
return {'class': 'sape', 'links': links['__sape_delimiter__'].
join(slc)}
return None
<mask token>
| <mask token>
def block(request, limit=None):
try:
links = cache.get_cache('sape', expire=3600).get(key='links',
createfunc=load_links)
except:
links = cache.get_cache('sape', expire=300).get(key='links',
createfunc=lambda : {})
if request.path in links:
if not hasattr(request, 'sape_links_shown'):
request.sape_links_shown = 0
slc = links[request.path][request.sape_links_shown:request.
sape_links_shown + limit if limit is not None else None]
request.sape_links_shown += len(slc)
if slc:
return {'class': 'sape', 'links': links['__sape_delimiter__'].
join(slc)}
return None
def load_links():
return dict(map(lambda path_links: (path_links[0], [link.decode(
'windows-1251') for link in path_links[1].values()] if isinstance(
path_links[1], dict) else path_links[1]), phpserialize.loads(
urllib2.urlopen(urllib2.Request(
'http://dispenser-01.sape.ru/code.php?user={0}&host={1}'.format(
config.sape_user_id, config.sape_host))).read()).items()))
| import phpserialize
import urllib2
from cache import cache
from config import config
def block(request, limit=None):
try:
links = cache.get_cache('sape', expire=3600).get(key='links',
createfunc=load_links)
except:
links = cache.get_cache('sape', expire=300).get(key='links',
createfunc=lambda : {})
if request.path in links:
if not hasattr(request, 'sape_links_shown'):
request.sape_links_shown = 0
slc = links[request.path][request.sape_links_shown:request.
sape_links_shown + limit if limit is not None else None]
request.sape_links_shown += len(slc)
if slc:
return {'class': 'sape', 'links': links['__sape_delimiter__'].
join(slc)}
return None
def load_links():
return dict(map(lambda path_links: (path_links[0], [link.decode(
'windows-1251') for link in path_links[1].values()] if isinstance(
path_links[1], dict) else path_links[1]), phpserialize.loads(
urllib2.urlopen(urllib2.Request(
'http://dispenser-01.sape.ru/code.php?user={0}&host={1}'.format(
config.sape_user_id, config.sape_host))).read()).items()))
| #!/usr/bin/python
# -*- coding: utf-8 -*-
import phpserialize
import urllib2
from cache import cache
from config import config
def block(request, limit=None):
try:
links = cache.get_cache("sape", expire=3600).get(key="links", createfunc=load_links)
except:
links = cache.get_cache("sape", expire=300).get(key="links", createfunc=lambda: {})
if request.path in links:
if not hasattr(request, "sape_links_shown"):
request.sape_links_shown = 0
slc = links[request.path][request.sape_links_shown : request.sape_links_shown + limit if limit is not None else None]
request.sape_links_shown += len(slc)
if slc:
return {
"class" : "sape",
"links" : links["__sape_delimiter__"].join(slc),
}
return None
def load_links():
return dict(
map(
lambda path_links: (path_links[0], [link.decode("windows-1251") for link in path_links[1].values()] if isinstance(path_links[1], dict) else path_links[1]),
phpserialize.loads(
urllib2.urlopen(urllib2.Request(
"http://dispenser-01.sape.ru/code.php?user={0}&host={1}".format(config.sape_user_id, config.sape_host)
)).read()
).items()
)
)
| [
0,
1,
2,
3,
4
] |
1,152 | aeab80e2d0006ffa938366ef046d2ab3d387f88c | <mask token>
def click():
i = 0
cal = 0
info = ''
for x in EntryArr:
if not x.get():
messagebox.showinfo('Error', 'Campos no llenos')
return
else:
info += f'{Label[i]}\t{x.get()}' + '\n'
cal = 40
i += 1
if Arr3.get() == 1:
cal += 20
if Arr4.get() == 2:
cal += 20
messagebox.showinfo('resultados', 'Tu calificaion es' + str(cal))
<mask token>
def edicion1():
indice = 0
for i in range(0, 2):
EntryArr.append(tk.StringVar())
grid(ttk.Entry(ventana, textvariable=EntryArr[indice]), 1, indice,
10, 10)
grid(ttk.Label(ventana, text=Label[i]), 0, indice, 10, 10)
indice += 1
grid(ttk.Label(ventana, text=Label[2]), 0, indice, 10, 10)
icol = 1
    # bind the radio buttons to the module-level Arr3; re-creating it here as a
    # local IntVar would shadow the global and click() would never see the answer
for i in range(0, 3):
grid(ttk.Radiobutton(ventana, text=opciones1[i], variable=Arr3,
value=i), icol, 2, 5, 5)
icol += 1
icol = 1
grid(ttk.Label(ventana, text=Label[3]), 0, 3, 10, 10)
for i in range(0, 4):
grid(ttk.Radiobutton(ventana, text=opciones2[i], variable=Arr4,
value=i), icol, 3, 5, 5)
icol += 1
grid(ttk.Label(ventana, text=Label[4]), 0, 4, 10, 10)
icol = 0
for key in respuesta:
respuesta[key] = tk.IntVar()
ttk.Checkbutton(ventana, text=key, variable=respuesta[key]).grid(row
=5, column=icol)
icol = icol + 1
Botton = tk.Button(ventana, text='Aceptar', command=click)
grid(Botton, 2, 10, 10, 10)
def main():
edicion1()
ventana.mainloop()
<mask token>
| <mask token>
def grid(Component, col, row1, padx1, pady1):
Component.grid(column=col, row=row1, padx=padx1, pady=pady1)
def click():
i = 0
cal = 0
info = ''
for x in EntryArr:
if not x.get():
messagebox.showinfo('Error', 'Campos no llenos')
return
else:
info += f'{Label[i]}\t{x.get()}' + '\n'
cal = 40
i += 1
if Arr3.get() == 1:
cal += 20
if Arr4.get() == 2:
cal += 20
messagebox.showinfo('resultados', 'Tu calificaion es' + str(cal))
<mask token>
def edicion1():
indice = 0
for i in range(0, 2):
EntryArr.append(tk.StringVar())
grid(ttk.Entry(ventana, textvariable=EntryArr[indice]), 1, indice,
10, 10)
grid(ttk.Label(ventana, text=Label[i]), 0, indice, 10, 10)
indice += 1
grid(ttk.Label(ventana, text=Label[2]), 0, indice, 10, 10)
icol = 1
    # bind the radio buttons to the module-level Arr3; re-creating it here as a
    # local IntVar would shadow the global and click() would never see the answer
for i in range(0, 3):
grid(ttk.Radiobutton(ventana, text=opciones1[i], variable=Arr3,
value=i), icol, 2, 5, 5)
icol += 1
icol = 1
grid(ttk.Label(ventana, text=Label[3]), 0, 3, 10, 10)
for i in range(0, 4):
grid(ttk.Radiobutton(ventana, text=opciones2[i], variable=Arr4,
value=i), icol, 3, 5, 5)
icol += 1
grid(ttk.Label(ventana, text=Label[4]), 0, 4, 10, 10)
icol = 0
for key in respuesta:
respuesta[key] = tk.IntVar()
ttk.Checkbutton(ventana, text=key, variable=respuesta[key]).grid(row
=5, column=icol)
icol = icol + 1
Botton = tk.Button(ventana, text='Aceptar', command=click)
grid(Botton, 2, 10, 10, 10)
def main():
edicion1()
ventana.mainloop()
main()
| <mask token>
ventana = tk.Tk()
EntryArr = []
Label = ['¿Que es la analisis psicologico?', '¿Como se lee la mente?',
'¿Cuantas persepciones psicologicas existen?',
'¿Padre de la Psicologia moderna?', 'Parte del cuerpo donde esta la psyco']
Arr3 = tk.IntVar()
opciones1 = ['1', '2', '5']
opciones2 = ['John Lenon', 'Leon Borrego', 'Jefry', 'mxrio']
opciones3 = ['Cabeza', 'mente', 'Pecho', 'corazon', 'Manos']
respuesta = dict.fromkeys(opciones3, None)
def grid(Component, col, row1, padx1, pady1):
Component.grid(column=col, row=row1, padx=padx1, pady=pady1)
def click():
i = 0
cal = 0
info = ''
for x in EntryArr:
if not x.get():
messagebox.showinfo('Error', 'Campos no llenos')
return
else:
info += f'{Label[i]}\t{x.get()}' + '\n'
cal = 40
i += 1
if Arr3.get() == 1:
cal += 20
if Arr4.get() == 2:
cal += 20
messagebox.showinfo('resultados', 'Tu calificaion es' + str(cal))
Arr3 = tk.IntVar()
Arr4 = tk.IntVar()
def edicion1():
indice = 0
for i in range(0, 2):
EntryArr.append(tk.StringVar())
grid(ttk.Entry(ventana, textvariable=EntryArr[indice]), 1, indice,
10, 10)
grid(ttk.Label(ventana, text=Label[i]), 0, indice, 10, 10)
indice += 1
grid(ttk.Label(ventana, text=Label[2]), 0, indice, 10, 10)
icol = 1
    # bind the radio buttons to the module-level Arr3; re-creating it here as a
    # local IntVar would shadow the global and click() would never see the answer
for i in range(0, 3):
grid(ttk.Radiobutton(ventana, text=opciones1[i], variable=Arr3,
value=i), icol, 2, 5, 5)
icol += 1
icol = 1
grid(ttk.Label(ventana, text=Label[3]), 0, 3, 10, 10)
for i in range(0, 4):
grid(ttk.Radiobutton(ventana, text=opciones2[i], variable=Arr4,
value=i), icol, 3, 5, 5)
icol += 1
grid(ttk.Label(ventana, text=Label[4]), 0, 4, 10, 10)
icol = 0
for key in respuesta:
respuesta[key] = tk.IntVar()
ttk.Checkbutton(ventana, text=key, variable=respuesta[key]).grid(row
=5, column=icol)
icol = icol + 1
Botton = tk.Button(ventana, text='Aceptar', command=click)
grid(Botton, 2, 10, 10, 10)
def main():
edicion1()
ventana.mainloop()
main()
| import tkinter as tk
from tkinter import ttk, messagebox, Menu
ventana = tk.Tk()
EntryArr = []
Label = ['¿Que es la analisis psicologico?', '¿Como se lee la mente?',
'¿Cuantas persepciones psicologicas existen?',
'¿Padre de la Psicologia moderna?', 'Parte del cuerpo donde esta la psyco']
Arr3 = tk.IntVar()
opciones1 = ['1', '2', '5']
opciones2 = ['John Lenon', 'Leon Borrego', 'Jefry', 'mxrio']
opciones3 = ['Cabeza', 'mente', 'Pecho', 'corazon', 'Manos']
respuesta = dict.fromkeys(opciones3, None)
def grid(Component, col, row1, padx1, pady1):
Component.grid(column=col, row=row1, padx=padx1, pady=pady1)
def click():
i = 0
cal = 0
info = ''
for x in EntryArr:
if not x.get():
messagebox.showinfo('Error', 'Campos no llenos')
return
else:
info += f'{Label[i]}\t{x.get()}' + '\n'
cal = 40
i += 1
if Arr3.get() == 1:
cal += 20
if Arr4.get() == 2:
cal += 20
messagebox.showinfo('resultados', 'Tu calificaion es' + str(cal))
Arr3 = tk.IntVar()
Arr4 = tk.IntVar()
def edicion1():
indice = 0
for i in range(0, 2):
EntryArr.append(tk.StringVar())
grid(ttk.Entry(ventana, textvariable=EntryArr[indice]), 1, indice,
10, 10)
grid(ttk.Label(ventana, text=Label[i]), 0, indice, 10, 10)
indice += 1
grid(ttk.Label(ventana, text=Label[2]), 0, indice, 10, 10)
icol = 1
    # bind the radio buttons to the module-level Arr3; re-creating it here as a
    # local IntVar would shadow the global and click() would never see the answer
for i in range(0, 3):
grid(ttk.Radiobutton(ventana, text=opciones1[i], variable=Arr3,
value=i), icol, 2, 5, 5)
icol += 1
icol = 1
grid(ttk.Label(ventana, text=Label[3]), 0, 3, 10, 10)
for i in range(0, 4):
grid(ttk.Radiobutton(ventana, text=opciones2[i], variable=Arr4,
value=i), icol, 3, 5, 5)
icol += 1
grid(ttk.Label(ventana, text=Label[4]), 0, 4, 10, 10)
icol = 0
for key in respuesta:
respuesta[key] = tk.IntVar()
ttk.Checkbutton(ventana, text=key, variable=respuesta[key]).grid(row
=5, column=icol)
icol = icol + 1
Botton = tk.Button(ventana, text='Aceptar', command=click)
grid(Botton, 2, 10, 10, 10)
def main():
edicion1()
ventana.mainloop()
main()
| import tkinter as tk
from tkinter import ttk, messagebox, Menu
ventana = tk.Tk()
EntryArr = []
Label = ["¿Que es la analisis psicologico?", "¿Como se lee la mente?", "¿Cuantas persepciones psicologicas existen?", "¿Padre de la Psicologia moderna?", "Parte del cuerpo donde esta la psyco"]
Arr3 = tk.IntVar()
opciones1 = ["1", "2","5"]
opciones2 = ["John Lenon", "Leon Borrego", "Jefry", "mxrio"]
opciones3 = ["Cabeza", "mente", "Pecho", "corazon", "Manos"]
respuesta = dict.fromkeys(opciones3, None)
def grid(Component, col, row1, padx1, pady1):
Component.grid(column=col, row=row1, padx=padx1, pady=pady1)
def click():
i = 0
cal = 0
info = ""
for x in EntryArr:
if not x.get():
messagebox.showinfo("Error","Campos no llenos")
return
else:
info += (f"{Label[i]}\t{x.get()}"+ "\n")
cal = 40
i+= 1
if(Arr3.get() == 1):
cal+= 20
if (Arr4.get() == 2):
cal+= 20
messagebox.showinfo("resultados","Tu calificaion es"+ str(cal) )
Arr3 = tk.IntVar()
Arr4 = tk.IntVar()
def edicion1():
indice = 0
for i in range(0,2):
EntryArr.append(tk.StringVar())
grid(
ttk.Entry(ventana, textvariable=EntryArr[indice]), 1, indice, 10, 10)
grid(ttk.Label(ventana, text=Label[i]), 0, indice, 10, 10)
indice += 1
grid(ttk.Label(ventana, text=Label[2]), 0, indice, 10, 10)
icol = 1
    # bind the radio buttons to the module-level Arr3; re-creating it here as a
    # local IntVar would shadow the global and click() would never see the answer
for i in range(0,3):
grid(ttk.Radiobutton(ventana, text = opciones1[i], variable=Arr3, value = i), icol, 2, 5, 5)
icol +=1
icol = 1
grid(ttk.Label(ventana, text=Label[3]), 0, 3, 10, 10)
for i in range(0,4):
grid(ttk.Radiobutton(ventana, text = opciones2[i], variable=Arr4, value = i), icol, 3, 5, 5)
icol +=1
# Botton
grid(ttk.Label(ventana, text=Label[4]), 0, 4, 10, 10)
icol = 0
for key in respuesta:
respuesta[key] = tk.IntVar()
ttk.Checkbutton(ventana, text = key, variable = respuesta[key]).grid(row = 5, column = icol)
icol = icol + 1
Botton = tk.Button(ventana, text="Aceptar", command = click)
grid(Botton, 2, 10, 10, 10)
def main():
edicion1()
ventana.mainloop()
main()
| [
3,
5,
6,
7,
8
] |
1,153 | a2871585ce36888cf89c4dc5a6a7de6b212412bb | def geo_avg(x, lat, dim=2):
"""
    geo_avg: to calculate the weighted average according to latitude
    input:
        x: variable
        lat: corresponding latitude
dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]
output:
result: 1d or 2d average result
"""
import numpy as np
s = x.shape
if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):
x = np.nanmean(x, axis=-1)
coslat = np.cos(lat / 180 * np.pi)
s = x.shape
if len(s) == 3:
result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1
) / np.nanmean(coslat)
if len(s) == 2:
result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(
coslat)
return result
<mask token>
def select_month(x, target_mon):
"""
select month or season from a monthly time series
input:
x: array, 1,2,3,4 dimension
target_mon:
1. number of month, from 1-12
2. name of month, e.g. Jan, Feb
3. season name: DJF: 1,2,12; JJA: 6,7,8 SON: 9,10,11, MAM: 3,4,5
4. phase name: dry: 1,2,3,12; wet: 6,7,8,9
output:
array with month selected or seasonal mean
"""
s = x.shape
n_mon = s[0]
if type(target_mon) != str:
i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]
return x[i_mon]
else:
import numpy as np
from datetime import datetime, timedelta
mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).
strftime('%b') for i in range(12)]
mon_dict = {mon_name_list[i]: i for i in range(12)}
season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9,
10], 'MAM': [2, 3, 4]}
phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}
if target_mon in mon_dict:
i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]
return x[i_mon]
elif target_mon in season_dict:
i_mon = [i for i in range(n_mon) if i % 12 in season_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'DJF':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)
else:
i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'dry':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)
def normalize(x):
"""
function to normalize data
"""
import numpy as np
return (x - np.nanmean(x)) / np.nanstd(x)
<mask token>
def moving_average(arr, n, method='nan'):
"""
calculate moving average values of 1-d array, and return an array with the same length
input:
arr: 1-d array
n: moving window length
method:
nan: fill in nan
avg: average from 0-1, 0-2, 0-3 ...
            diff: only use this when calculating the annual mean, n = 13
"""
import numpy as np
def moving_average_center(a, n):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
l1 = n // 2 - 1
l2 = n - l1
l = len(arr)
arr_new = np.zeros(l)
if method == 'nan':
arr_new[:l1] = np.nan
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
arr_new[l - l2 + 1:] = np.nan
if method == 'avg':
for i in range(l1):
arr_new[i] = np.nanmean(arr[:i + 1])
for i in range(l2):
arr_new[-i - 1] = np.nanmean(arr[-i - 1:])
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
if method == 'diff' and n == 13:
a2 = moving_average_center(arr, n)
diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]
).mean(axis=0)
a1 = arr[:6] - diff[6:]
a12 = np.append(a1, a2)
a3 = arr[-6:] - diff[:6]
arr_new = np.append(a12, a3)
return arr_new
def convert_cftime_to_int(t):
"""
convert cftime to integer
    input:
        t: a cftime datetime object
    output:
        the date as an integer in YYYYMMDD form
"""
from datetime import datetime
return int(datetime.strftime(datetime.strptime(t.isoformat(),
'%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))
<mask token>
| def geo_avg(x, lat, dim=2):
"""
geo_avg: to calculate weighting average according to latitude
input:
x: variable
lat: corresponding latittude
dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]
output:
result: 1d or 2d average result
"""
import numpy as np
s = x.shape
if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):
x = np.nanmean(x, axis=-1)
coslat = np.cos(lat / 180 * np.pi)
s = x.shape
if len(s) == 3:
result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1
) / np.nanmean(coslat)
if len(s) == 2:
result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(
coslat)
return result
<mask token>
def select_month(x, target_mon):
"""
select month or season from a monthly time series
input:
x: array, 1,2,3,4 dimension
target_mon:
1. number of month, from 1-12
2. name of month, e.g. Jan, Feb
3. season name: DJF: 1,2,12; JJA: 6,7,8 SON: 9,10,11, MAM: 3,4,5
4. phase name: dry: 1,2,3,12; wet: 6,7,8,9
output:
array with month selected or seasonal mean
"""
s = x.shape
n_mon = s[0]
if type(target_mon) != str:
i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]
return x[i_mon]
else:
import numpy as np
from datetime import datetime, timedelta
mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).
strftime('%b') for i in range(12)]
mon_dict = {mon_name_list[i]: i for i in range(12)}
season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9,
10], 'MAM': [2, 3, 4]}
phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}
if target_mon in mon_dict:
i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]
return x[i_mon]
elif target_mon in season_dict:
i_mon = [i for i in range(n_mon) if i % 12 in season_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'DJF':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)
else:
i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'dry':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)
def normalize(x):
"""
function to normalize data
"""
import numpy as np
return (x - np.nanmean(x)) / np.nanstd(x)
<mask token>
def moving_average(arr, n, method='nan'):
"""
calculate moving average values of 1-d array, and return an array with the same length
input:
arr: 1-d array
n: moving window length
method:
nan: fill in nan
avg: average from 0-1, 0-2, 0-3 ...
            diff: only use this when calculating the annual mean, n = 13
"""
import numpy as np
def moving_average_center(a, n):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
l1 = n // 2 - 1
l2 = n - l1
l = len(arr)
arr_new = np.zeros(l)
if method == 'nan':
arr_new[:l1] = np.nan
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
arr_new[l - l2 + 1:] = np.nan
if method == 'avg':
for i in range(l1):
arr_new[i] = np.nanmean(arr[:i + 1])
for i in range(l2):
arr_new[-i - 1] = np.nanmean(arr[-i - 1:])
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
if method == 'diff' and n == 13:
a2 = moving_average_center(arr, n)
diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]
).mean(axis=0)
a1 = arr[:6] - diff[6:]
a12 = np.append(a1, a2)
a3 = arr[-6:] - diff[:6]
arr_new = np.append(a12, a3)
return arr_new
def convert_cftime_to_int(t):
"""
convert cftime to integer
    input:
        t: a cftime datetime object
    output:
        the date as an integer in YYYYMMDD form
"""
from datetime import datetime
return int(datetime.strftime(datetime.strptime(t.isoformat(),
'%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))
def get_lat_lim(lat, lat_min, lat_max):
"""
calculate a range of latitude, in both hemispheres
"""
import numpy as np
i_lat_n = np.where((lat >= lat_min) & (lat <= lat_max))[0]
i_lat_s = np.where((lat <= -lat_min) & (lat >= -lat_max))[0]
i_lats = [i_lat_s, i_lat_n]
return i_lats
| def geo_avg(x, lat, dim=2):
"""
    geo_avg: to calculate the weighted average according to latitude
    input:
        x: variable
        lat: corresponding latitude
dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]
output:
result: 1d or 2d average result
"""
import numpy as np
s = x.shape
if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):
x = np.nanmean(x, axis=-1)
coslat = np.cos(lat / 180 * np.pi)
s = x.shape
if len(s) == 3:
result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1
) / np.nanmean(coslat)
if len(s) == 2:
result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(
coslat)
return result
def cal_anomaly(x):
"""
calculate anomaly of a numpy array
input: x: 1-d,2-d,3-d or 4d numpy array, !!! the first dimension must be month
output: x with seasonal cycle removed
"""
import numpy as np
s = x.shape
n_time = s[0]
monthly_mean = np.nanmean(x.reshape([n_time // 12, 12, *s[1:]]), axis=0
).reshape([1, 12, *s[1:]]).repeat(len(x) // 12, axis=0).reshape(s)
return x - monthly_mean
def select_month(x, target_mon):
"""
select month or season from a monthly time series
input:
x: array, 1,2,3,4 dimension
target_mon:
1. number of month, from 1-12
2. name of month, e.g. Jan, Feb
            3. season name: DJF: 1,2,12; JJA: 6,7,8; SON: 9,10,11; MAM: 3,4,5
4. phase name: dry: 1,2,3,12; wet: 6,7,8,9
output:
array with month selected or seasonal mean
"""
s = x.shape
n_mon = s[0]
if type(target_mon) != str:
i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]
return x[i_mon]
else:
import numpy as np
from datetime import datetime, timedelta
mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).
strftime('%b') for i in range(12)]
mon_dict = {mon_name_list[i]: i for i in range(12)}
season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9,
10], 'MAM': [2, 3, 4]}
phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}
if target_mon in mon_dict:
i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]
return x[i_mon]
elif target_mon in season_dict:
i_mon = [i for i in range(n_mon) if i % 12 in season_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'DJF':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)
else:
i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'dry':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)
def normalize(x):
"""
function to normalize data
"""
import numpy as np
return (x - np.nanmean(x)) / np.nanstd(x)
<mask token>
def moving_average(arr, n, method='nan'):
"""
calculate moving average values of 1-d array, and return an array with the same length
input:
arr: 1-d array
n: moving window length
    method:
        nan: pad the edges with nan
        avg: fill the edges with partial averages over the available points
        diff: only use this when calculating an annual mean, n = 13
"""
import numpy as np
def moving_average_center(a, n):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
l1 = n // 2 - 1
l2 = n - l1
l = len(arr)
arr_new = np.zeros(l)
if method == 'nan':
arr_new[:l1] = np.nan
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
arr_new[l - l2 + 1:] = np.nan
if method == 'avg':
for i in range(l1):
arr_new[i] = np.nanmean(arr[:i + 1])
for i in range(l2):
arr_new[-i - 1] = np.nanmean(arr[-i - 1:])
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
if method == 'diff' and n == 13:
a2 = moving_average_center(arr, n)
diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]
).mean(axis=0)
a1 = arr[:6] - diff[6:]
a12 = np.append(a1, a2)
a3 = arr[-6:] - diff[:6]
arr_new = np.append(a12, a3)
return arr_new
def convert_cftime_to_int(t):
"""
    convert a cftime datetime object to an integer date
    input:
        t: a cftime datetime object
    output:
        an integer in YYYYMMDD format, e.g. 20000131
"""
from datetime import datetime
return int(datetime.strftime(datetime.strptime(t.isoformat(),
'%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))
def get_lat_lim(lat, lat_min, lat_max):
"""
calculate a range of latitude, in both hemispheres
"""
import numpy as np
i_lat_n = np.where((lat >= lat_min) & (lat <= lat_max))[0]
i_lat_s = np.where((lat <= -lat_min) & (lat >= -lat_max))[0]
i_lats = [i_lat_s, i_lat_n]
return i_lats
| def geo_avg(x, lat, dim=2):
"""
    geo_avg: calculate a weighted average according to latitude (cosine weighting)
    input:
        x: variable
        lat: corresponding latitude
dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]
output:
result: 1d or 2d average result
"""
import numpy as np
s = x.shape
if (len(s) == 4) & (dim == 2) or (len(s) == 3) & (dim == 1):
x = np.nanmean(x, axis=-1)
coslat = np.cos(lat / 180 * np.pi)
s = x.shape
if len(s) == 3:
result = np.nanmean(x * coslat[np.newaxis, np.newaxis, :], axis=-1
) / np.nanmean(coslat)
if len(s) == 2:
result = np.nanmean(x * coslat[np.newaxis, :], axis=-1) / np.nanmean(
coslat)
return result
def cal_anomaly(x):
"""
calculate anomaly of a numpy array
input: x: 1-d,2-d,3-d or 4d numpy array, !!! the first dimension must be month
output: x with seasonal cycle removed
"""
import numpy as np
s = x.shape
n_time = s[0]
monthly_mean = np.nanmean(x.reshape([n_time // 12, 12, *s[1:]]), axis=0
).reshape([1, 12, *s[1:]]).repeat(len(x) // 12, axis=0).reshape(s)
return x - monthly_mean
def select_month(x, target_mon):
"""
select month or season from a monthly time series
input:
x: array, 1,2,3,4 dimension
target_mon:
1. number of month, from 1-12
2. name of month, e.g. Jan, Feb
            3. season name: DJF: 1,2,12; JJA: 6,7,8; SON: 9,10,11; MAM: 3,4,5
4. phase name: dry: 1,2,3,12; wet: 6,7,8,9
output:
array with month selected or seasonal mean
"""
s = x.shape
n_mon = s[0]
if type(target_mon) != str:
i_mon = [i for i in range(n_mon) if i % 12 == target_mon - 1]
return x[i_mon]
else:
import numpy as np
from datetime import datetime, timedelta
mon_name_list = [(datetime(2000, 1, 1) + timedelta(days=31 * i)).
strftime('%b') for i in range(12)]
mon_dict = {mon_name_list[i]: i for i in range(12)}
season_dict = {'DJF': [0, 1, 11], 'JJA': [5, 6, 7], 'SON': [8, 9,
10], 'MAM': [2, 3, 4]}
phase_dict = {'dry': [0, 1, 2, 11], 'wet': [5, 6, 7, 8]}
if target_mon in mon_dict:
i_mon = [i for i in range(n_mon) if i % 12 == mon_dict[target_mon]]
return x[i_mon]
elif target_mon in season_dict:
i_mon = [i for i in range(n_mon) if i % 12 in season_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'DJF':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 3, *s[1:]]), axis=1)
else:
i_mon = [i for i in range(n_mon) if i % 12 in phase_dict[
target_mon]]
x_mon = x[i_mon]
if target_mon == 'dry':
x_mon = np.append(np.nan, x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0] // 12, 4, *s[1:]]), axis=1)
def normalize(x):
"""
function to normalize data
"""
import numpy as np
return (x - np.nanmean(x)) / np.nanstd(x)
def find_index(arr, target, method='nearest'):
"""
    find the index of a target value in a monotonic 1-d array arr
"""
import numpy as np
if method == 'nearest':
return np.abs(arr - target).argmin()
else:
if arr[1] < arr[0]:
arr = arr[::-1]
if method == 'higher':
return np.where(arr >= target)[0][0]
if method == 'lower':
return np.where(arr <= target)[0][-1]
def moving_average(arr, n, method='nan'):
"""
calculate moving average values of 1-d array, and return an array with the same length
input:
arr: 1-d array
n: moving window length
    method:
        nan: pad the edges with nan
        avg: fill the edges with partial averages over the available points
        diff: only use this when calculating an annual mean, n = 13
"""
import numpy as np
def moving_average_center(a, n):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
l1 = n // 2 - 1
l2 = n - l1
l = len(arr)
arr_new = np.zeros(l)
if method == 'nan':
arr_new[:l1] = np.nan
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
arr_new[l - l2 + 1:] = np.nan
if method == 'avg':
for i in range(l1):
arr_new[i] = np.nanmean(arr[:i + 1])
for i in range(l2):
arr_new[-i - 1] = np.nanmean(arr[-i - 1:])
arr_new[l1:l - l2 + 1] = moving_average_center(arr, n)
if method == 'diff' and n == 13:
a2 = moving_average_center(arr, n)
diff = (arr[l1:l - l2 + 1] - a2).reshape([(len(arr) - n + 1) // 12, 12]
).mean(axis=0)
a1 = arr[:6] - diff[6:]
a12 = np.append(a1, a2)
a3 = arr[-6:] - diff[:6]
arr_new = np.append(a12, a3)
return arr_new
def convert_cftime_to_int(t):
"""
    convert a cftime datetime object to an integer date
    input:
        t: a cftime datetime object
    output:
        an integer in YYYYMMDD format, e.g. 20000131
"""
from datetime import datetime
return int(datetime.strftime(datetime.strptime(t.isoformat(),
'%Y-%m-%dT%H:%M:%S'), '%Y%m%d'))
def get_lat_lim(lat, lat_min, lat_max):
"""
calculate a range of latitude, in both hemispheres
"""
import numpy as np
i_lat_n = np.where((lat >= lat_min) & (lat <= lat_max))[0]
i_lat_s = np.where((lat <= -lat_min) & (lat >= -lat_max))[0]
i_lats = [i_lat_s, i_lat_n]
return i_lats
| def geo_avg(x,lat,dim=2):
'''
    geo_avg: calculate a weighted average according to latitude (cosine weighting)
    input:
        x: variable
        lat: corresponding latitude
dim: the order of the lat dimension, two cases: 2:[time,lev,lat,*lon],or 1:[time or lev, lat, *lon]
output:
result: 1d or 2d average result
'''
import numpy as np
s = x.shape
if ((len(s)==4) & (dim==2)) or ((len(s)==3) & (dim==1)):
x = np.nanmean(x,axis=-1)
coslat = np.cos(lat/180*np.pi)
s = x.shape
if len(s)==3:
result = np.nanmean(x*coslat[np.newaxis,np.newaxis,:],axis=-1)/np.nanmean(coslat)
if len(s)==2:
result = np.nanmean(x*coslat[np.newaxis,:],axis=-1)/np.nanmean(coslat)
return result
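# Illustrative usage sketch: a [time, lat, lon] field with dim=1; the longitude
# axis is averaged away first, then each latitude is weighted by cos(lat).
def _example_geo_avg():
    import numpy as np
    lat = np.linspace(-90, 90, 73)
    x = np.ones((24, 73, 144))      # 24 months, 73 latitudes, 144 longitudes
    return geo_avg(x, lat, dim=1)   # shape (24,), all ones for a constant field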
def cal_anomaly(x):
'''
calculate anomaly of a numpy array
input: x: 1-d,2-d,3-d or 4d numpy array, !!! the first dimension must be month
output: x with seasonal cycle removed
'''
import numpy as np
s = x.shape
n_time = s[0]
monthly_mean = np.nanmean(x.reshape([n_time//12,12,*s[1:]]),axis=0).\
reshape([1,12,*s[1:]]).repeat(len(x)//12,axis=0).reshape(s)
return x-monthly_mean
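# Illustrative usage sketch: a 12-month cycle repeated over several years comes
# back as (near-)zero anomalies once the mean seasonal cycle is removed.
def _example_cal_anomaly():
    import numpy as np
    cycle = np.sin(2 * np.pi * np.arange(12) / 12)
    x = np.tile(cycle, 10)          # 10 years of an identical seasonal cycle
    return cal_anomaly(x)           # ~zeros, length 120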
def select_month(x,target_mon):
'''
select month or season from a monthly time series
input:
x: array, 1,2,3,4 dimension
target_mon:
1. number of month, from 1-12
2. name of month, e.g. Jan, Feb
            3. season name: DJF: 1,2,12; JJA: 6,7,8; SON: 9,10,11; MAM: 3,4,5
4. phase name: dry: 1,2,3,12; wet: 6,7,8,9
output:
array with month selected or seasonal mean
'''
s = x.shape
n_mon = s[0]
if type(target_mon) != str:
i_mon = [i for i in range(n_mon) if i%12 == target_mon-1]
return x[i_mon]
else:
import numpy as np
from datetime import datetime,timedelta
mon_name_list = [(datetime(2000,1,1)+timedelta(days=31*i)).strftime("%b") for i in range(12)]
mon_dict = {mon_name_list[i]:i for i in range(12)}
season_dict = {'DJF':[0,1,11],'JJA':[5,6,7],'SON':[8,9,10],'MAM':[2,3,4]}
phase_dict = {'dry':[0,1,2,11],'wet':[5,6,7,8]}
if target_mon in mon_dict:
i_mon = [i for i in range(n_mon) if i%12 == mon_dict[target_mon]]
return x[i_mon]
elif target_mon in season_dict:
i_mon = [i for i in range(n_mon) if i%12 in season_dict[target_mon]]
x_mon = x[i_mon]
if target_mon == 'DJF':
x_mon = np.append(np.nan,x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0]//12,3,*s[1:]]),axis=1)
else:
i_mon = [i for i in range(n_mon) if i%12 in phase_dict[target_mon]]
x_mon = x[i_mon]
if target_mon == 'dry':
x_mon = np.append(np.nan,x_mon[:-1])
return np.nanmean(x_mon.reshape([s[0]//12,4,*s[1:]]),axis=1)
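# Illustrative usage sketch: target_mon may be a month number (1-12), a month
# name ('Jan'), a season ('DJF','MAM','JJA','SON') or a phase ('dry','wet');
# seasons and phases come back as one mean value per year.
def _example_select_month():
    import numpy as np
    x = np.arange(120, dtype=float)     # 10 years of monthly values
    jan = select_month(x, 1)            # every January, shape (10,)
    jja = select_month(x, 'JJA')        # Jun-Jul-Aug means, shape (10,)
    return jan, jja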
def normalize(x):
'''
function to normalize data
'''
import numpy as np
return (x-np.nanmean(x))/np.nanstd(x)
def find_index(arr,target,method='nearest'):
'''
    find the index of a target value in a monotonic 1-d array arr
'''
import numpy as np
if method == 'nearest':
return (np.abs(arr - target)).argmin()
else:
if arr[1]<arr[0]: ## if x is a decreasing array, reverse
arr = arr[::-1]
if method == 'higher':
return np.where(arr>=target)[0][0]
if method == 'lower':
return np.where(arr<=target)[0][-1]
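# Illustrative usage sketch. Note: for method='higher'/'lower' a decreasing
# array is reversed internally, so the returned index refers to the reversed
# (increasing) order.
def _example_find_index():
    import numpy as np
    lev = np.array([1000., 850., 500., 200., 100.])   # decreasing pressure levels
    return find_index(lev, 600)                       # -> 2 (500 hPa is closest)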
def moving_average(arr,n,method = 'nan'):
'''
calculate moving average values of 1-d array, and return an array with the same length
input:
arr: 1-d array
n: moving window length
    method:
        nan: pad the edges with nan
        avg: fill the edges with partial averages over the available points
        diff: only use this when calculating an annual mean, n = 13
'''
import numpy as np
def moving_average_center(a, n) :
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1:] / n
l1 = n//2-1
l2 = n-l1
l = len(arr)
arr_new = np.zeros(l)
if method == 'nan':
arr_new[:l1] = np.nan
arr_new[l1:l-l2+1] = moving_average_center(arr, n)
arr_new[l-l2+1:] = np.nan
if method == 'avg':
for i in range(l1):
arr_new[i] = np.nanmean(arr[:i+1])
for i in range(l2):
arr_new[-i-1] = np.nanmean(arr[-i-1:])
arr_new[l1:l-l2+1] = moving_average_center(arr, n)
if method == 'diff' and n==13:
a2 = moving_average_center(arr, n)
diff = (arr[l1:l-l2+1]-a2).reshape([(len(arr)-n+1)//12,12]).mean(axis=0) # monthly mean difference between arr and running mean
a1 = arr[:6] - diff[6:]
a12 = np.append(a1,a2)
a3 = arr[-6:] - diff[:6]
arr_new = np.append(a12,a3)
return arr_new
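# Illustrative usage sketch: method='nan' keeps the input length and pads the
# edges with nan; method='avg' fills the edges with partial averages instead.
def _example_moving_average():
    import numpy as np
    x = np.arange(24, dtype=float)
    return moving_average(x, 12, method='nan')   # same length as x, nan at the edges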
def convert_cftime_to_int(t):
'''
    convert a cftime datetime object to an integer date
    input:
        t: a cftime datetime object
    output:
        an integer in YYYYMMDD format, e.g. 20000131
'''
from datetime import datetime
return int(datetime.strftime(datetime.strptime(t.isoformat(),'%Y-%m-%dT%H:%M:%S'),
'%Y%m%d'))
def get_lat_lim(lat,lat_min,lat_max):
'''
calculate a range of latitude, in both hemispheres
'''
import numpy as np
i_lat_n = np.where((lat>=lat_min) & (lat<=lat_max))[0]
i_lat_s = np.where((lat<=-lat_min) & (lat>=-lat_max))[0]
i_lats = [i_lat_s,i_lat_n]
return i_lats
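# Illustrative usage sketch: indices for the 20-50 degree band in each hemisphere.
def _example_get_lat_lim():
    import numpy as np
    lat = np.arange(-90, 91, 10)
    i_s, i_n = get_lat_lim(lat, 20, 50)
    return lat[i_s], lat[i_n]             # [-50 ... -20] and [20 ... 50]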
| [
5,
6,
7,
8,
9
] |
1,154 | 3a6038cb80548b98fc7e4a328092f1dc1ffd6dfd | <mask token>
class ConfigLoader:
<mask token>
def __init__(self, level):
self._log = Logger('configloader', level)
self._log.info('ready.')
def configure(self, filename='config.yaml'):
"""
Read and return configuration from the specified YAML file.
Pretty-prints the configuration object if the log level is set to DEBUG.
"""
self._log.info('reading from yaml configuration file {}...'.format(
filename))
_config = yaml.safe_load(open(filename, 'r'))
if self._log.level == Level.DEBUG:
self._log.debug('YAML configuration as read:')
print(Fore.BLUE)
pp = pprint.PrettyPrinter(width=80, indent=2)
pp.pprint(_config)
print(Style.RESET_ALL)
self._log.info('configuration read.')
return _config
| <mask token>
class ConfigLoader:
"""
Has just one method: configure() reads a YAML file.
"""
def __init__(self, level):
self._log = Logger('configloader', level)
self._log.info('ready.')
def configure(self, filename='config.yaml'):
"""
Read and return configuration from the specified YAML file.
Pretty-prints the configuration object if the log level is set to DEBUG.
"""
self._log.info('reading from yaml configuration file {}...'.format(
filename))
_config = yaml.safe_load(open(filename, 'r'))
if self._log.level == Level.DEBUG:
self._log.debug('YAML configuration as read:')
print(Fore.BLUE)
pp = pprint.PrettyPrinter(width=80, indent=2)
pp.pprint(_config)
print(Style.RESET_ALL)
self._log.info('configuration read.')
return _config
| <mask token>
init()
try:
import yaml
except ImportError:
exit(
'This script requires the pyyaml module\nInstall with: pip3 install --user pyyaml'
)
<mask token>
class ConfigLoader:
"""
Has just one method: configure() reads a YAML file.
"""
def __init__(self, level):
self._log = Logger('configloader', level)
self._log.info('ready.')
def configure(self, filename='config.yaml'):
"""
Read and return configuration from the specified YAML file.
Pretty-prints the configuration object if the log level is set to DEBUG.
"""
self._log.info('reading from yaml configuration file {}...'.format(
filename))
_config = yaml.safe_load(open(filename, 'r'))
if self._log.level == Level.DEBUG:
self._log.debug('YAML configuration as read:')
print(Fore.BLUE)
pp = pprint.PrettyPrinter(width=80, indent=2)
pp.pprint(_config)
print(Style.RESET_ALL)
self._log.info('configuration read.')
return _config
| import pprint
from colorama import init, Fore, Style
init()
try:
import yaml
except ImportError:
exit(
'This script requires the pyyaml module\nInstall with: pip3 install --user pyyaml'
)
from core.logger import Level, Logger
class ConfigLoader:
"""
Has just one method: configure() reads a YAML file.
"""
def __init__(self, level):
self._log = Logger('configloader', level)
self._log.info('ready.')
def configure(self, filename='config.yaml'):
"""
Read and return configuration from the specified YAML file.
Pretty-prints the configuration object if the log level is set to DEBUG.
"""
self._log.info('reading from yaml configuration file {}...'.format(
filename))
_config = yaml.safe_load(open(filename, 'r'))
if self._log.level == Level.DEBUG:
self._log.debug('YAML configuration as read:')
print(Fore.BLUE)
pp = pprint.PrettyPrinter(width=80, indent=2)
pp.pprint(_config)
print(Style.RESET_ALL)
self._log.info('configuration read.')
return _config
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: Murray Altheim
# created: 2020-04-15
# modified: 2020-04-15
import pprint
from colorama import init, Fore, Style
init()
try:
import yaml
except ImportError:
exit("This script requires the pyyaml module\nInstall with: pip3 install --user pyyaml")
from core.logger import Level, Logger
class ConfigLoader():
'''
Has just one method: configure() reads a YAML file.
'''
def __init__(self, level):
self._log = Logger('configloader', level)
self._log.info('ready.')
# ..........................................................................
def configure(self, filename='config.yaml'):
'''
Read and return configuration from the specified YAML file.
Pretty-prints the configuration object if the log level is set to DEBUG.
'''
self._log.info('reading from yaml configuration file {}...'.format(filename))
_config = yaml.safe_load(open(filename, 'r'))
if self._log.level == Level.DEBUG:
self._log.debug('YAML configuration as read:')
print(Fore.BLUE)
pp = pprint.PrettyPrinter(width=80, indent=2)
pp.pprint(_config)
print(Style.RESET_ALL)
self._log.info('configuration read.')
return _config
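# Illustrative usage sketch (assumes a config.yaml in the working directory;
# Level.DEBUG is the only log level referenced in this file).
def _example_load_config():
    loader = ConfigLoader(Level.DEBUG)
    return loader.configure('config.yaml')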
#EOF
| [
3,
4,
5,
6,
7
] |
1,155 | 34d3eebf6ccb19f891ccbb16db47cd6412f1cb0f | <mask token>
| <mask token>
print(numbers)
print(numbers[1])
print(numbers[-1])
<mask token>
print(numbers)
del numbers[1]
print(numbers)
numbers.append(17)
print(numbers)
numbers.insert(2, 5)
print(numbers)
numbers.sort()
print(numbers)
| numbers = [3, 4, 6, 7]
print(numbers)
print(numbers[1])
print(numbers[-1])
numbers[1] = 3
print(numbers)
del numbers[1]
print(numbers)
numbers.append(17)
print(numbers)
numbers.insert(2, 5)
print(numbers)
numbers.sort()
print(numbers)
| numbers = [3,4,6,7]
# 0 1 2 3
print(numbers)
print(numbers[1])
print(numbers[-1])
numbers[1] = 3
print(numbers)
del numbers[1]
print(numbers)
numbers.append(17)
print(numbers)
numbers.insert(2,5)
print(numbers)
numbers.sort()
print(numbers) | null | [
0,
1,
2,
3
] |
1,156 | 05ced056bf2f59f85bef82e53803e7df7ff8c8df | <mask token>
def select_histograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in
current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
return use_this
<mask token>
def norm_to_first_bin(wrp):
histo = wrp.histo.Clone()
firstbin = histo.GetBinContent(1)
histo.Scale(1.0 / firstbin)
info = wrp.all_info()
info['lumi'] /= firstbin
return varial.wrappers.HistoWrapper(histo, **info)
<mask token>
def norm_histos_to_integral(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield varial.operations.norm_to_integral(wrp)
else:
yield wrp
def label_axes(wrps):
for w in wrps:
if 'TH1' in w.type and w.histo.GetXaxis().GetTitle() == '':
w.histo.GetXaxis().SetTitle(w.histo.GetTitle())
w.histo.GetYaxis().SetTitle('events')
w.histo.SetTitle('')
yield w
<mask token>
def for_stacked_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(
'.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: w.
sample, is_signal=lambda w: 'TpTp_M' in w.sample, lumi=lambda w: 1.0)
wrps = label_axes(wrps)
return wrps
<mask token>
def do_nothing_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = label_axes(wrps)
return wrps
<mask token>
def norm_cf_factory(**kws):
kws['hook_loaded_histos'] = norm_cf_hook
kws['save_lin_log_scale'] = True
kws['save_name_func'] = lambda w: w.name + '_norm'
return varial.tools.Plotter(**kws)
def do_nothing_factory(**kws):
kws['hook_loaded_histos'] = do_nothing_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def for_eff_factory(**kws):
kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = for_eff_plots_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def create_name(name):
return name + 'v' + varial.settings.git_tag
<mask token>
| <mask token>
def select_histograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in
current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
return use_this
def select_splithistograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in
current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
return use_this
def norm_to_first_bin(wrp):
histo = wrp.histo.Clone()
firstbin = histo.GetBinContent(1)
histo.Scale(1.0 / firstbin)
info = wrp.all_info()
info['lumi'] /= firstbin
return varial.wrappers.HistoWrapper(histo, **info)
<mask token>
def norm_histos_to_integral(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield varial.operations.norm_to_integral(wrp)
else:
yield wrp
def label_axes(wrps):
for w in wrps:
if 'TH1' in w.type and w.histo.GetXaxis().GetTitle() == '':
w.histo.GetXaxis().SetTitle(w.histo.GetTitle())
w.histo.GetYaxis().SetTitle('events')
w.histo.SetTitle('')
yield w
def norm_cf_plots(wrps):
for w in wrps:
if w.name.startswith('cf_') and isinstance(w, varial.wrappers.
HistoWrapper):
yield varial.operations.norm_to_integral(w)
else:
yield w
def for_stacked_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(
'.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: w.
sample, is_signal=lambda w: 'TpTp_M' in w.sample, lumi=lambda w: 1.0)
wrps = label_axes(wrps)
return wrps
<mask token>
def do_nothing_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = label_axes(wrps)
return wrps
def for_eff_plots_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(
'.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: (
'100* ' if 'TpTp_M' in w.sample else '') + w.sample, is_signal=lambda
w: 'TpTp_M' in w.sample, lumi=lambda w: 0.01 if 'TpTp_M' in w.
sample else 1.0)
wrps = gen.gen_make_eff_graphs(wrps)
wrps = label_axes(wrps)
return wrps
<mask token>
def norm_cf_factory(**kws):
kws['hook_loaded_histos'] = norm_cf_hook
kws['save_lin_log_scale'] = True
kws['save_name_func'] = lambda w: w.name + '_norm'
return varial.tools.Plotter(**kws)
def do_nothing_factory(**kws):
kws['hook_loaded_histos'] = do_nothing_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def for_eff_factory(**kws):
kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = for_eff_plots_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def create_name(name):
return name + 'v' + varial.settings.git_tag
<mask token>
| <mask token>
def select_histograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in
current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
return use_this
def select_splithistograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in
current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
return use_this
def norm_to_first_bin(wrp):
histo = wrp.histo.Clone()
firstbin = histo.GetBinContent(1)
histo.Scale(1.0 / firstbin)
info = wrp.all_info()
info['lumi'] /= firstbin
return varial.wrappers.HistoWrapper(histo, **info)
def norm_histos_to_first_bin(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield norm_to_first_bin(wrp)
else:
yield wrp
def norm_histos_to_integral(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield varial.operations.norm_to_integral(wrp)
else:
yield wrp
def label_axes(wrps):
for w in wrps:
if 'TH1' in w.type and w.histo.GetXaxis().GetTitle() == '':
w.histo.GetXaxis().SetTitle(w.histo.GetTitle())
w.histo.GetYaxis().SetTitle('events')
w.histo.SetTitle('')
yield w
def norm_cf_plots(wrps):
for w in wrps:
if w.name.startswith('cf_') and isinstance(w, varial.wrappers.
HistoWrapper):
yield varial.operations.norm_to_integral(w)
else:
yield w
def for_stacked_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(
'.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: w.
sample, is_signal=lambda w: 'TpTp_M' in w.sample, lumi=lambda w: 1.0)
wrps = label_axes(wrps)
return wrps
<mask token>
def do_nothing_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = label_axes(wrps)
return wrps
def for_eff_plots_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(
'.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: (
'100* ' if 'TpTp_M' in w.sample else '') + w.sample, is_signal=lambda
w: 'TpTp_M' in w.sample, lumi=lambda w: 0.01 if 'TpTp_M' in w.
sample else 1.0)
wrps = gen.gen_make_eff_graphs(wrps)
wrps = label_axes(wrps)
return wrps
<mask token>
def norm_cf_factory(**kws):
kws['hook_loaded_histos'] = norm_cf_hook
kws['save_lin_log_scale'] = True
kws['save_name_func'] = lambda w: w.name + '_norm'
return varial.tools.Plotter(**kws)
def do_nothing_factory(**kws):
kws['hook_loaded_histos'] = do_nothing_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def for_eff_factory(**kws):
kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = for_eff_plots_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def create_name(name):
return name + 'v' + varial.settings.git_tag
<mask token>
| <mask token>
ROOT.gROOT.SetBatch()
ROOT.gROOT.ProcessLine('gErrorIgnoreLevel = kError;')
<mask token>
dirname = 'VLQToHiggsPairProd'
varial.settings.rootfile_postfixes = ['.png', '.pdf']
varial.settings.git_tag = varial.settings.readgittag('./GITTAGGER_LOG.txt')
current_tag = varial.settings.git_tag
smpls = list()
smpls.append(Sample(name='QCD', legend='QCD'))
smpls.append(Sample(name='TTJets', legend='TTJets'))
smpls.append(Sample(name='WJets', legend='WJets'))
smpls.append(Sample(name='ZJets', legend='ZJets'))
analysis.all_samples = dict((s.name, s) for s in smpls)
varial.settings.defaults_Legend['x_pos'] = 0.8
varial.settings.defaults_Legend['label_width'] = 0.36
varial.settings.defaults_Legend['label_height'] = 0.03
varial.settings.box_text_size = 0.03
varial.settings.colors = {'TTJets': 632, 'WJets': 878, 'ZJets': 596,
'TpTp_M1000': 870}
current_cuts = ['AfterPresel', 'FullSelection']
current_hists = ['/EventHists', '/MuonHists']
use_cuts = False
use_histos = False
varial.settings.stacking_order = ['ZJets', 'WJets', 'TTJets']
def select_histograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in
current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
return use_this
def select_splithistograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-' + c not in wrp.in_file_path for c in
current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
return use_this
def norm_to_first_bin(wrp):
histo = wrp.histo.Clone()
firstbin = histo.GetBinContent(1)
histo.Scale(1.0 / firstbin)
info = wrp.all_info()
info['lumi'] /= firstbin
return varial.wrappers.HistoWrapper(histo, **info)
def norm_histos_to_first_bin(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield norm_to_first_bin(wrp)
else:
yield wrp
def norm_histos_to_integral(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield varial.operations.norm_to_integral(wrp)
else:
yield wrp
def label_axes(wrps):
for w in wrps:
if 'TH1' in w.type and w.histo.GetXaxis().GetTitle() == '':
w.histo.GetXaxis().SetTitle(w.histo.GetTitle())
w.histo.GetYaxis().SetTitle('events')
w.histo.SetTitle('')
yield w
def norm_cf_plots(wrps):
for w in wrps:
if w.name.startswith('cf_') and isinstance(w, varial.wrappers.
HistoWrapper):
yield varial.operations.norm_to_integral(w)
else:
yield w
def for_stacked_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(
'.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: w.
sample, is_signal=lambda w: 'TpTp_M' in w.sample, lumi=lambda w: 1.0)
wrps = label_axes(wrps)
return wrps
def norm_cf_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = norm_histos_to_first_bin(wrps)
wrps = label_axes(wrps)
return wrps
def do_nothing_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = label_axes(wrps)
return wrps
def for_eff_plots_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(wrps, sample=lambda w: w.file_path.split(
'.')[-2], analyzer=lambda w: w.in_file_path[0], legend=lambda w: (
'100* ' if 'TpTp_M' in w.sample else '') + w.sample, is_signal=lambda
w: 'TpTp_M' in w.sample, lumi=lambda w: 0.01 if 'TpTp_M' in w.
sample else 1.0)
wrps = gen.gen_make_eff_graphs(wrps)
wrps = label_axes(wrps)
return wrps
def stack_histos_factory(**kws):
kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = for_stacked_hook
kws['plot_setup'] = gen.mc_stack_n_data_sum
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def norm_cf_factory(**kws):
kws['hook_loaded_histos'] = norm_cf_hook
kws['save_lin_log_scale'] = True
kws['save_name_func'] = lambda w: w.name + '_norm'
return varial.tools.Plotter(**kws)
def do_nothing_factory(**kws):
kws['hook_loaded_histos'] = do_nothing_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def for_eff_factory(**kws):
kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = for_eff_plots_hook
kws['save_lin_log_scale'] = True
return varial.tools.Plotter(**kws)
def create_name(name):
return name + 'v' + varial.settings.git_tag
tagger = varial.tools.GitTagger('./GITTAGGER_LOG.txt')
tagger.run()
p1 = varial.tools.mk_rootfile_plotter(name=create_name(dirname),
filter_keyfunc=select_histograms, plotter_factory=stack_histos_factory,
combine_files=True)
p2 = varial.tools.mk_rootfile_plotter(name=create_name(dirname),
filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.
endswith('raw'), plotter_factory=norm_cf_factory, combine_files=True)
p3 = varial.tools.mk_rootfile_plotter(name=create_name(dirname),
filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.
endswith('raw'), plotter_factory=do_nothing_factory, combine_files=True)
p4 = varial.tools.mk_rootfile_plotter(name=create_name(dirname) + 'split',
pattern='v1.19_unmerged_files/*.root', filter_keyfunc=
select_splithistograms, plotter_factory=for_eff_factory, combine_files=
False)
p5 = varial.tools.mk_rootfile_plotter(name=create_name(dirname) + 'split',
filter_keyfunc=select_splithistograms, plotter_factory=for_eff_factory,
combine_files=True)
p6 = varial.tools.mk_rootfile_plotter(name=create_name(dirname) + 'split',
pattern='v1.19_unmerged_files/*.root', filter_keyfunc=lambda w: w.name.
startswith('cf_') and not w.name.endswith('raw'), plotter_factory=
norm_cf_factory, combine_files=False)
p7 = varial.tools.mk_rootfile_plotter(name=create_name(dirname) + 'split',
pattern='v1.19_unmerged_files/*.root', filter_keyfunc=lambda w: w.name.
startswith('cf_') and not w.name.endswith('raw'), plotter_factory=
do_nothing_factory, combine_files=False)
time.sleep(1)
p1.run()
p2.run()
p3.run()
p5.run()
varial.tools.WebCreator().run()
| #!/usr/bin/env python
import ROOT
ROOT.gROOT.SetBatch()
ROOT.gROOT.ProcessLine('gErrorIgnoreLevel = kError;')
import os
import time
import varial.tools
import varial.generators as gen
import itertools
from varial.sample import Sample
import varial.analysis as analysis
# import varial.toolinterface
dirname = 'VLQToHiggsPairProd'
varial.settings.rootfile_postfixes = ['.png','.pdf']
varial.settings.git_tag = varial.settings.readgittag('./GITTAGGER_LOG.txt')
current_tag = varial.settings.git_tag
# sample definitions
smpls = list()
smpls.append(Sample(
name='QCD',
legend='QCD'
))
smpls.append(Sample(
name='TTJets',
legend='TTJets'
))
smpls.append(Sample(
name='WJets',
legend='WJets'
))
smpls.append(Sample(
name='ZJets',
legend='ZJets'
))
analysis.all_samples = dict((s.name, s) for s in smpls)
varial.settings.defaults_Legend['x_pos'] = 0.80
varial.settings.defaults_Legend['label_width'] = 0.36
varial.settings.defaults_Legend['label_height'] = 0.03
# varial.settings.debug_mode = True
varial.settings.box_text_size = 0.03
varial.settings.colors = {
'TTJets': 632,
'WJets': 878,
'ZJets': 596,
'TpTp_M1000': 870,
# 'TpJ_TH_M800_NonTlep': 434,
}
# SELECT HISTOGRAMS TO PLOT HERE!
# use these functions to specifically select histograms for plotting
current_cuts = ['AfterPresel', 'FullSelection'] # 'Nminus1-MuonPtCut', 'OneCut-HTCut', 'FullSelection', 'Nminus1-6OneHiggsTagCut'
current_hists = ['/EventHists', '/MuonHists'] # "/ElectronHists", '/MuonHists', '/JetHists', '/TopJetHists', '/EventHists', '/GenHists/w_decay_lin', '/GenHists/w_decay_log'
use_cuts = False
use_histos = False
varial.settings.stacking_order = ['ZJets', 'WJets', 'TTJets']
def select_histograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-'+c not in wrp.in_file_path for c in current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
# if ('GenHists' in wrp.in_file_path and not (wrp.name.startswith('mu_') or wrp.name.startswith('genjet_'))):
# use_this = False
# if 'GenHists' in wrp.in_file_path and ('NoCuts' not in wrp.in_file_path and 'Nminus1-BTagCut' not in wrp.in_file_path):
# use_this = False
return use_this
def select_splithistograms(wrp):
use_this = True
if use_cuts and all('NoGenSel-'+c not in wrp.in_file_path for c in current_cuts):
use_this = False
if wrp.name.startswith('cf_'):
use_this = False
if use_histos and all(c not in wrp.in_file_path for c in current_hists):
use_this = False
# if ('GenHists' in wrp.in_file_path and not (wrp.name.startswith('mu_') or wrp.name.startswith('genjet_'))):
# use_this = False
# if 'GenHists' in wrp.in_file_path and ('NoCuts' not in wrp.in_file_path and 'Nminus1-BTagCut' not in wrp.in_file_path):
# use_this = False
return use_this
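# Illustrative sketch: the selectors above only look at wrp.name and
# wrp.in_file_path, so a plain stand-in object can exercise them without a
# real varial wrapper.
def _example_select_histograms():
    from collections import namedtuple
    FakeWrp = namedtuple('FakeWrp', ['name', 'in_file_path'])
    w = FakeWrp('pt_mu', 'NoGenSel-FullSelection/MuonHists/pt_mu')
    return select_histograms(w)   # True while use_cuts and use_histos are False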
# SOME FUNCTIONS TO MANIPULATE HISTOGRAMS
def norm_to_first_bin(wrp):
histo = wrp.histo.Clone()
firstbin = histo.GetBinContent(1)
histo.Scale(1. / firstbin)
info = wrp.all_info()
info["lumi"] /= firstbin
return varial.wrappers.HistoWrapper(histo, **info)
def norm_histos_to_first_bin(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield norm_to_first_bin(wrp)
else:
yield wrp
def norm_histos_to_integral(wrps):
for wrp in wrps:
if isinstance(wrp, varial.wrappers.HistoWrapper):
yield varial.operations.norm_to_integral(wrp)
else:
yield wrp
def label_axes(wrps):
for w in wrps:
if 'TH1' in w.type and w.histo.GetXaxis().GetTitle() == '':
w.histo.GetXaxis().SetTitle(w.histo.GetTitle())
w.histo.GetYaxis().SetTitle('events')
w.histo.SetTitle('')
yield w
def norm_cf_plots(wrps):
for w in wrps:
if w.name.startswith('cf_') and isinstance(w, varial.wrappers.HistoWrapper):
yield varial.operations.norm_to_integral(w)
else:
yield w
# HOOK FUNCTIONS FOR PLOTTER_FACTORIES; manipulate histograms here
def for_stacked_hook(wrps):
# wrps = norm_cf_plots(wrps)
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(
wrps,
sample=lambda w: w.file_path.split('.')[-2],
analyzer=lambda w: w.in_file_path[0],
legend=lambda w: w.sample,
is_signal=lambda w: 'TpTp_M' in w.sample,
lumi=lambda w: 1.
)
# wrps = gen.imap_conditional(wrps, lambda w: 'TpJ_TH_M800' in w.sample, gen.op.norm_to_lumi)
wrps = label_axes(wrps)
return wrps
def norm_cf_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = norm_histos_to_first_bin(wrps)
wrps = label_axes(wrps)
return wrps
def do_nothing_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = label_axes(wrps)
return wrps
def for_eff_plots_hook(wrps):
wrps = itertools.ifilter(lambda w: w.histo.Integral(), wrps)
wrps = gen.gen_add_wrp_info(
wrps,
sample=lambda w: w.file_path.split('.')[-2],
analyzer=lambda w: w.in_file_path[0],
legend=lambda w: ('100* ' if 'TpTp_M' in w.sample else '') + w.sample,
is_signal=lambda w: 'TpTp_M' in w.sample,
lumi=lambda w: 0.01 if 'TpTp_M' in w.sample else 1.
)
wrps = gen.gen_make_eff_graphs(wrps)
wrps = label_axes(wrps)
return wrps
# def calc_stack_order(wrps):
# for w in wrps:
# def stack_by_max(wrps):
# wrps = calc_stack_order(wrps)
# wrps = gen.mc_stack_n_data_sum(wrps)
# return wrps
# PLOTTER FACTORIES; select here in general which histograms to plot, how to manipulate them, and so on.
def stack_histos_factory(**kws):
kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = for_stacked_hook
kws['plot_setup'] = gen.mc_stack_n_data_sum
kws['save_lin_log_scale'] = True
# kws['save_log_scale'] = True
# kws['hook_canvas_pre_build'] = canvas_hook
# kws['hook_canvas_post_build'] = canvas_hook
return varial.tools.Plotter(**kws)
def norm_cf_factory(**kws):
# kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = norm_cf_hook
kws['save_lin_log_scale'] = True
kws['save_name_func'] = lambda w : w.name + '_norm'
# kws['save_log_scale'] = True
# kws['hook_canvas_pre_build'] = canvas_hook
# kws['hook_canvas_post_build'] = canvas_hook
return varial.tools.Plotter(**kws)
def do_nothing_factory(**kws):
# kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = do_nothing_hook
kws['save_lin_log_scale'] = True
# kws['save_log_scale'] = True
# kws['hook_canvas_pre_build'] = canvas_hook
# kws['hook_canvas_post_build'] = canvas_hook
return varial.tools.Plotter(**kws)
def for_eff_factory(**kws):
kws['filter_keyfunc'] = lambda w: 'TH1' in w.type
kws['hook_loaded_histos'] = for_eff_plots_hook
kws['save_lin_log_scale'] = True
# kws['save_log_scale'] = True
# kws['hook_canvas_pre_build'] = canvas_hook
# kws['hook_canvas_post_build'] = canvas_hook
return varial.tools.Plotter(**kws)
def create_name(name):
return name+'v'+varial.settings.git_tag
tagger = varial.tools.GitTagger('./GITTAGGER_LOG.txt')
tagger.run()
p1 = varial.tools.mk_rootfile_plotter(
name=create_name(dirname),
# filter_keyfunc=lambda w: not w.name.startswith('cf_'),
filter_keyfunc=select_histograms,
plotter_factory=stack_histos_factory,
combine_files=True
)
p2 = varial.tools.mk_rootfile_plotter(
name=create_name(dirname),
filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.endswith('raw'),
plotter_factory=norm_cf_factory,
combine_files=True
)
p3 = varial.tools.mk_rootfile_plotter(
name=create_name(dirname),
filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.endswith('raw'),
plotter_factory=do_nothing_factory,
combine_files=True
)
p4 = varial.tools.mk_rootfile_plotter(
name=create_name(dirname)+'split',
pattern='v1.19_unmerged_files/*.root',
filter_keyfunc=select_splithistograms,
plotter_factory=for_eff_factory,
combine_files=False
)
p5 = varial.tools.mk_rootfile_plotter(
name=create_name(dirname)+'split',
# filter_keyfunc=lambda w: not w.name.startswith('cf_'),
filter_keyfunc=select_splithistograms,
plotter_factory=for_eff_factory,
combine_files=True
)
p6 = varial.tools.mk_rootfile_plotter(
name=create_name(dirname)+'split',
pattern='v1.19_unmerged_files/*.root',
filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.endswith('raw'),
plotter_factory=norm_cf_factory,
combine_files=False
)
p7 = varial.tools.mk_rootfile_plotter(
name=create_name(dirname)+'split',
pattern='v1.19_unmerged_files/*.root',
filter_keyfunc=lambda w: w.name.startswith('cf_') and not w.name.endswith('raw'),
plotter_factory=do_nothing_factory,
combine_files=False
)
time.sleep(1)
p1.run()
p2.run()
p3.run()
# p4.run()
p5.run()
# p6.run()
# p7.run()
varial.tools.WebCreator().run()
# os.system('rm -r ~/www/TprimeAnalysis/%s' % create_name(dirname))
# os.system('cp -r %s ~/www/TprimeAnalysis/' % create_name(dirname))
| [
10,
13,
14,
18,
20
] |
1,157 | 7c63abacce07ee9d4c2b3941d05f951b75c8d0ff | <mask token>
class PlayerRecord:
<mask token>
character: str
<mask token>
@property
def team(self) ->ba.SessionTeam:
"""The ba.SessionTeam the last associated player was last on.
This can still return a valid result even if the player is gone.
Raises a ba.SessionTeamNotFoundError if the team no longer exists.
"""
assert self._sessionteam is not None
team = self._sessionteam()
if team is None:
raise SessionTeamNotFoundError()
return team
<mask token>
def getname(self, full: bool=False) ->str:
"""Return the player entry's name."""
return self.name_full if full else self.name
def get_icon(self) ->Dict[str, Any]:
"""Get the icon for this instance's player."""
player = self._last_sessionplayer
assert player is not None
return player.get_icon()
<mask token>
<mask token>
def associate_with_sessionplayer(self, sessionplayer: ba.SessionPlayer
) ->None:
"""Associate this entry with a ba.SessionPlayer."""
self._sessionteam = weakref.ref(sessionplayer.sessionteam)
self.character = sessionplayer.character
self._last_sessionplayer = sessionplayer
self._sessionplayer = sessionplayer
self.streak = 0
<mask token>
<mask token>
<mask token>
class Stats:
"""Manages scores and statistics for a ba.Session.
category: Gameplay Classes
"""
def __init__(self) ->None:
self._activity: Optional[ReferenceType[ba.Activity]] = None
self._player_records: Dict[str, PlayerRecord] = {}
self.orchestrahitsound1: Optional[ba.Sound] = None
self.orchestrahitsound2: Optional[ba.Sound] = None
self.orchestrahitsound3: Optional[ba.Sound] = None
self.orchestrahitsound4: Optional[ba.Sound] = None
def setactivity(self, activity: Optional[ba.Activity]) ->None:
"""Set the current activity for this instance."""
self._activity = None if activity is None else weakref.ref(activity)
if activity is not None:
if activity.expired:
print_error('unexpected finalized activity')
else:
with _ba.Context(activity):
self._load_activity_media()
def getactivity(self) ->Optional[ba.Activity]:
"""Get the activity associated with this instance.
May return None.
"""
if self._activity is None:
return None
return self._activity()
def _load_activity_media(self) ->None:
self.orchestrahitsound1 = _ba.getsound('orchestraHit')
self.orchestrahitsound2 = _ba.getsound('orchestraHit2')
self.orchestrahitsound3 = _ba.getsound('orchestraHit3')
self.orchestrahitsound4 = _ba.getsound('orchestraHit4')
def reset(self) ->None:
"""Reset the stats instance completely."""
for p_entry in list(self._player_records.values()):
p_entry.cancel_multi_kill_timer()
self._player_records = {}
def reset_accum(self) ->None:
"""Reset per-sound sub-scores."""
for s_player in list(self._player_records.values()):
s_player.cancel_multi_kill_timer()
s_player.accumscore = 0
s_player.accum_kill_count = 0
s_player.accum_killed_count = 0
s_player.streak = 0
def register_sessionplayer(self, player: ba.SessionPlayer) ->None:
"""Register a ba.SessionPlayer with this score-set."""
assert player.exists()
name = player.getname()
if name in self._player_records:
self._player_records[name].associate_with_sessionplayer(player)
else:
name_full = player.getname(full=True)
self._player_records[name] = PlayerRecord(name, name_full,
player, self)
def get_records(self) ->Dict[str, ba.PlayerRecord]:
"""Get PlayerRecord corresponding to still-existing players."""
records = {}
for record_id, record in self._player_records.items():
lastplayer = record.get_last_sessionplayer()
if lastplayer and lastplayer.getname() == record_id:
records[record_id] = record
return records
def player_scored(self, player: ba.Player, base_points: int=1, target:
Sequence[float]=None, kill: bool=False, victim_player: ba.Player=
None, scale: float=1.0, color: Sequence[float]=None, title: Union[
str, ba.Lstr]=None, screenmessage: bool=True, display: bool=True,
importance: int=1, showpoints: bool=True, big_message: bool=False
) ->int:
"""Register a score for the player.
Return value is actual score with multipliers and such factored in.
"""
from bastd.actor.popuptext import PopupText
from ba import _math
from ba._gameactivity import GameActivity
from ba._lang import Lstr
del victim_player
name = player.getname()
s_player = self._player_records[name]
if kill:
s_player.submit_kill(showpoints=showpoints)
display_color: Sequence[float] = (1.0, 1.0, 1.0, 1.0)
if color is not None:
display_color = color
elif importance != 1:
display_color = 1.0, 1.0, 0.4, 1.0
points = base_points
if display and big_message:
try:
assert self._activity is not None
activity = self._activity()
if isinstance(activity, GameActivity):
name_full = player.getname(full=True, icon=False)
activity.show_zoom_message(Lstr(resource=
'nameScoresText', subs=[('${NAME}', name_full)]),
color=_math.normalized_color(player.team.color))
except Exception:
print_exception('error showing big_message')
if display and showpoints:
our_pos = player.node.position if player.node else None
if our_pos is not None:
if target is None:
target = our_pos
display_pos = target[0], max(target[1], our_pos[1] - 2.0), min(
target[2], our_pos[2] + 2.0)
activity = self.getactivity()
if activity is not None:
if title is not None:
sval = Lstr(value='+${A} ${B}', subs=[('${A}', str(
points)), ('${B}', title)])
else:
sval = Lstr(value='+${A}', subs=[('${A}', str(points))]
)
PopupText(sval, color=display_color, scale=1.2 * scale,
position=display_pos).autoretain()
if kill:
s_player.accum_kill_count += 1
s_player.kill_count += 1
try:
if screenmessage and not kill:
_ba.screenmessage(Lstr(resource='nameScoresText', subs=[(
'${NAME}', name)]), top=True, color=player.color, image
=player.get_icon())
except Exception:
print_exception('error announcing score')
s_player.score += points
s_player.accumscore += points
if points != 0:
activity = self._activity() if self._activity is not None else None
if activity is not None:
activity.handlemessage(PlayerScoredMessage(score=points))
return points
def player_was_killed(self, player: ba.Player, killed: bool=False,
killer: ba.Player=None) ->None:
"""Should be called when a player is killed."""
from ba._lang import Lstr
name = player.getname()
prec = self._player_records[name]
prec.streak = 0
if killed:
prec.accum_killed_count += 1
prec.killed_count += 1
try:
if killed and _ba.getactivity().announce_player_deaths:
if killer is player:
_ba.screenmessage(Lstr(resource='nameSuicideText', subs
=[('${NAME}', name)]), top=True, color=player.color,
image=player.get_icon())
elif killer is not None:
if killer.team is player.team:
_ba.screenmessage(Lstr(resource='nameBetrayedText',
subs=[('${NAME}', killer.getname()), (
'${VICTIM}', name)]), top=True, color=killer.
color, image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameKilledText',
subs=[('${NAME}', killer.getname()), (
'${VICTIM}', name)]), top=True, color=killer.
color, image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameDiedText', subs=[(
'${NAME}', name)]), top=True, color=player.color,
image=player.get_icon())
except Exception:
print_exception('error announcing kill')
| <mask token>
class PlayerRecord:
<mask token>
character: str
def __init__(self, name: str, name_full: str, sessionplayer: ba.
SessionPlayer, stats: ba.Stats):
self.name = name
self.name_full = name_full
self.score = 0
self.accumscore = 0
self.kill_count = 0
self.accum_kill_count = 0
self.killed_count = 0
self.accum_killed_count = 0
self._multi_kill_timer: Optional[ba.Timer] = None
self._multi_kill_count = 0
self._stats = weakref.ref(stats)
self._last_sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionteam: Optional[ReferenceType[ba.SessionTeam]] = None
self.streak = 0
self.associate_with_sessionplayer(sessionplayer)
@property
def team(self) ->ba.SessionTeam:
"""The ba.SessionTeam the last associated player was last on.
This can still return a valid result even if the player is gone.
Raises a ba.SessionTeamNotFoundError if the team no longer exists.
"""
assert self._sessionteam is not None
team = self._sessionteam()
if team is None:
raise SessionTeamNotFoundError()
return team
@property
def player(self) ->ba.SessionPlayer:
"""Return the instance's associated ba.SessionPlayer.
Raises a ba.SessionPlayerNotFoundError if the player
no longer exists.
"""
if not self._sessionplayer:
raise SessionPlayerNotFoundError()
return self._sessionplayer
def getname(self, full: bool=False) ->str:
"""Return the player entry's name."""
return self.name_full if full else self.name
def get_icon(self) ->Dict[str, Any]:
"""Get the icon for this instance's player."""
player = self._last_sessionplayer
assert player is not None
return player.get_icon()
def cancel_multi_kill_timer(self) ->None:
"""Cancel any multi-kill timer for this player entry."""
self._multi_kill_timer = None
def getactivity(self) ->Optional[ba.Activity]:
"""Return the ba.Activity this instance is currently associated with.
Returns None if the activity no longer exists."""
stats = self._stats()
if stats is not None:
return stats.getactivity()
return None
def associate_with_sessionplayer(self, sessionplayer: ba.SessionPlayer
) ->None:
"""Associate this entry with a ba.SessionPlayer."""
self._sessionteam = weakref.ref(sessionplayer.sessionteam)
self.character = sessionplayer.character
self._last_sessionplayer = sessionplayer
self._sessionplayer = sessionplayer
self.streak = 0
def _end_multi_kill(self) ->None:
self._multi_kill_timer = None
self._multi_kill_count = 0
def get_last_sessionplayer(self) ->ba.SessionPlayer:
"""Return the last ba.Player we were associated with."""
assert self._last_sessionplayer is not None
return self._last_sessionplayer
<mask token>
class Stats:
"""Manages scores and statistics for a ba.Session.
category: Gameplay Classes
"""
def __init__(self) ->None:
self._activity: Optional[ReferenceType[ba.Activity]] = None
self._player_records: Dict[str, PlayerRecord] = {}
self.orchestrahitsound1: Optional[ba.Sound] = None
self.orchestrahitsound2: Optional[ba.Sound] = None
self.orchestrahitsound3: Optional[ba.Sound] = None
self.orchestrahitsound4: Optional[ba.Sound] = None
def setactivity(self, activity: Optional[ba.Activity]) ->None:
"""Set the current activity for this instance."""
self._activity = None if activity is None else weakref.ref(activity)
if activity is not None:
if activity.expired:
print_error('unexpected finalized activity')
else:
with _ba.Context(activity):
self._load_activity_media()
def getactivity(self) ->Optional[ba.Activity]:
"""Get the activity associated with this instance.
May return None.
"""
if self._activity is None:
return None
return self._activity()
def _load_activity_media(self) ->None:
self.orchestrahitsound1 = _ba.getsound('orchestraHit')
self.orchestrahitsound2 = _ba.getsound('orchestraHit2')
self.orchestrahitsound3 = _ba.getsound('orchestraHit3')
self.orchestrahitsound4 = _ba.getsound('orchestraHit4')
def reset(self) ->None:
"""Reset the stats instance completely."""
for p_entry in list(self._player_records.values()):
p_entry.cancel_multi_kill_timer()
self._player_records = {}
def reset_accum(self) ->None:
"""Reset per-sound sub-scores."""
for s_player in list(self._player_records.values()):
s_player.cancel_multi_kill_timer()
s_player.accumscore = 0
s_player.accum_kill_count = 0
s_player.accum_killed_count = 0
s_player.streak = 0
def register_sessionplayer(self, player: ba.SessionPlayer) ->None:
"""Register a ba.SessionPlayer with this score-set."""
assert player.exists()
name = player.getname()
if name in self._player_records:
self._player_records[name].associate_with_sessionplayer(player)
else:
name_full = player.getname(full=True)
self._player_records[name] = PlayerRecord(name, name_full,
player, self)
def get_records(self) ->Dict[str, ba.PlayerRecord]:
"""Get PlayerRecord corresponding to still-existing players."""
records = {}
for record_id, record in self._player_records.items():
lastplayer = record.get_last_sessionplayer()
if lastplayer and lastplayer.getname() == record_id:
records[record_id] = record
return records
def player_scored(self, player: ba.Player, base_points: int=1, target:
Sequence[float]=None, kill: bool=False, victim_player: ba.Player=
None, scale: float=1.0, color: Sequence[float]=None, title: Union[
str, ba.Lstr]=None, screenmessage: bool=True, display: bool=True,
importance: int=1, showpoints: bool=True, big_message: bool=False
) ->int:
"""Register a score for the player.
Return value is actual score with multipliers and such factored in.
"""
from bastd.actor.popuptext import PopupText
from ba import _math
from ba._gameactivity import GameActivity
from ba._lang import Lstr
del victim_player
name = player.getname()
s_player = self._player_records[name]
if kill:
s_player.submit_kill(showpoints=showpoints)
display_color: Sequence[float] = (1.0, 1.0, 1.0, 1.0)
if color is not None:
display_color = color
elif importance != 1:
display_color = 1.0, 1.0, 0.4, 1.0
points = base_points
if display and big_message:
try:
assert self._activity is not None
activity = self._activity()
if isinstance(activity, GameActivity):
name_full = player.getname(full=True, icon=False)
activity.show_zoom_message(Lstr(resource=
'nameScoresText', subs=[('${NAME}', name_full)]),
color=_math.normalized_color(player.team.color))
except Exception:
print_exception('error showing big_message')
if display and showpoints:
our_pos = player.node.position if player.node else None
if our_pos is not None:
if target is None:
target = our_pos
display_pos = target[0], max(target[1], our_pos[1] - 2.0), min(
target[2], our_pos[2] + 2.0)
activity = self.getactivity()
if activity is not None:
if title is not None:
sval = Lstr(value='+${A} ${B}', subs=[('${A}', str(
points)), ('${B}', title)])
else:
sval = Lstr(value='+${A}', subs=[('${A}', str(points))]
)
PopupText(sval, color=display_color, scale=1.2 * scale,
position=display_pos).autoretain()
if kill:
s_player.accum_kill_count += 1
s_player.kill_count += 1
try:
if screenmessage and not kill:
_ba.screenmessage(Lstr(resource='nameScoresText', subs=[(
'${NAME}', name)]), top=True, color=player.color, image
=player.get_icon())
except Exception:
print_exception('error announcing score')
s_player.score += points
s_player.accumscore += points
if points != 0:
activity = self._activity() if self._activity is not None else None
if activity is not None:
activity.handlemessage(PlayerScoredMessage(score=points))
return points
def player_was_killed(self, player: ba.Player, killed: bool=False,
killer: ba.Player=None) ->None:
"""Should be called when a player is killed."""
from ba._lang import Lstr
name = player.getname()
prec = self._player_records[name]
prec.streak = 0
if killed:
prec.accum_killed_count += 1
prec.killed_count += 1
try:
if killed and _ba.getactivity().announce_player_deaths:
if killer is player:
_ba.screenmessage(Lstr(resource='nameSuicideText', subs
=[('${NAME}', name)]), top=True, color=player.color,
image=player.get_icon())
elif killer is not None:
if killer.team is player.team:
_ba.screenmessage(Lstr(resource='nameBetrayedText',
subs=[('${NAME}', killer.getname()), (
'${VICTIM}', name)]), top=True, color=killer.
color, image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameKilledText',
subs=[('${NAME}', killer.getname()), (
'${VICTIM}', name)]), top=True, color=killer.
color, image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameDiedText', subs=[(
'${NAME}', name)]), top=True, color=player.color,
image=player.get_icon())
except Exception:
print_exception('error announcing kill')
| <mask token>
if TYPE_CHECKING:
import ba
from weakref import ReferenceType
from typing import Any, Dict, Optional, Sequence, Union, Tuple
@dataclass
class PlayerScoredMessage:
"""Informs something that a ba.Player scored.
Category: Message Classes
Attrs:
score
The score value.
"""
score: int
class PlayerRecord:
"""Stats for an individual player in a ba.Stats object.
Category: Gameplay Classes
This does not necessarily correspond to a ba.Player that is
still present (stats may be retained for players that leave
mid-game)
"""
character: str
def __init__(self, name: str, name_full: str, sessionplayer: ba.
SessionPlayer, stats: ba.Stats):
self.name = name
self.name_full = name_full
self.score = 0
self.accumscore = 0
self.kill_count = 0
self.accum_kill_count = 0
self.killed_count = 0
self.accum_killed_count = 0
self._multi_kill_timer: Optional[ba.Timer] = None
self._multi_kill_count = 0
self._stats = weakref.ref(stats)
self._last_sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionteam: Optional[ReferenceType[ba.SessionTeam]] = None
self.streak = 0
self.associate_with_sessionplayer(sessionplayer)
@property
def team(self) ->ba.SessionTeam:
"""The ba.SessionTeam the last associated player was last on.
This can still return a valid result even if the player is gone.
Raises a ba.SessionTeamNotFoundError if the team no longer exists.
"""
assert self._sessionteam is not None
team = self._sessionteam()
if team is None:
raise SessionTeamNotFoundError()
return team
@property
def player(self) ->ba.SessionPlayer:
"""Return the instance's associated ba.SessionPlayer.
Raises a ba.SessionPlayerNotFoundError if the player
no longer exists.
"""
if not self._sessionplayer:
raise SessionPlayerNotFoundError()
return self._sessionplayer
def getname(self, full: bool=False) ->str:
"""Return the player entry's name."""
return self.name_full if full else self.name
def get_icon(self) ->Dict[str, Any]:
"""Get the icon for this instance's player."""
player = self._last_sessionplayer
assert player is not None
return player.get_icon()
def cancel_multi_kill_timer(self) ->None:
"""Cancel any multi-kill timer for this player entry."""
self._multi_kill_timer = None
def getactivity(self) ->Optional[ba.Activity]:
"""Return the ba.Activity this instance is currently associated with.
Returns None if the activity no longer exists."""
stats = self._stats()
if stats is not None:
return stats.getactivity()
return None
def associate_with_sessionplayer(self, sessionplayer: ba.SessionPlayer
) ->None:
"""Associate this entry with a ba.SessionPlayer."""
self._sessionteam = weakref.ref(sessionplayer.sessionteam)
self.character = sessionplayer.character
self._last_sessionplayer = sessionplayer
self._sessionplayer = sessionplayer
self.streak = 0
def _end_multi_kill(self) ->None:
self._multi_kill_timer = None
self._multi_kill_count = 0
def get_last_sessionplayer(self) ->ba.SessionPlayer:
"""Return the last ba.Player we were associated with."""
assert self._last_sessionplayer is not None
return self._last_sessionplayer
def submit_kill(self, showpoints: bool=True) ->None:
"""Submit a kill for this player entry."""
from ba._lang import Lstr
from ba._general import Call
self._multi_kill_count += 1
stats = self._stats()
assert stats
if self._multi_kill_count == 1:
score = 0
name = None
delay = 0.0
color = 0.0, 0.0, 0.0, 1.0
scale = 1.0
sound = None
elif self._multi_kill_count == 2:
score = 20
name = Lstr(resource='twoKillText')
color = 0.1, 1.0, 0.0, 1
scale = 1.0
delay = 0.0
sound = stats.orchestrahitsound1
elif self._multi_kill_count == 3:
score = 40
name = Lstr(resource='threeKillText')
color = 1.0, 0.7, 0.0, 1
scale = 1.1
delay = 0.3
sound = stats.orchestrahitsound2
elif self._multi_kill_count == 4:
score = 60
name = Lstr(resource='fourKillText')
color = 1.0, 1.0, 0.0, 1
scale = 1.2
delay = 0.6
sound = stats.orchestrahitsound3
elif self._multi_kill_count == 5:
score = 80
name = Lstr(resource='fiveKillText')
color = 1.0, 0.5, 0.0, 1
scale = 1.3
delay = 0.9
sound = stats.orchestrahitsound4
else:
score = 100
name = Lstr(resource='multiKillText', subs=[('${COUNT}', str(
self._multi_kill_count))])
color = 1.0, 0.5, 0.0, 1
scale = 1.3
delay = 1.0
sound = stats.orchestrahitsound4
def _apply(name2: Lstr, score2: int, showpoints2: bool, color2:
Tuple[float, float, float, float], scale2: float, sound2:
Optional[ba.Sound]) ->None:
from bastd.actor.popuptext import PopupText
our_pos: Optional[ba.Vec3] = None
if self._sessionplayer:
if self._sessionplayer.activityplayer is not None:
try:
our_pos = self._sessionplayer.activityplayer.position
except NotFoundError:
pass
if our_pos is None:
return
our_pos = _ba.Vec3(our_pos[0] + (random.random() - 0.5) * 2.0,
our_pos[1] + (random.random() - 0.5) * 2.0, our_pos[2] + (
random.random() - 0.5) * 2.0)
activity = self.getactivity()
if activity is not None:
PopupText(Lstr(value=('+' + str(score2) + ' ' if
showpoints2 else '') + '${N}', subs=[('${N}', name2)]),
color=color2, scale=scale2, position=our_pos).autoretain()
if sound2:
_ba.playsound(sound2)
self.score += score2
self.accumscore += score2
if score2 != 0 and activity is not None:
activity.handlemessage(PlayerScoredMessage(score=score2))
if name is not None:
_ba.timer(0.3 + delay, Call(_apply, name, score, showpoints,
color, scale, sound))
self._multi_kill_timer = _ba.Timer(1.0, self._end_multi_kill)
class Stats:
"""Manages scores and statistics for a ba.Session.
category: Gameplay Classes
"""
def __init__(self) ->None:
self._activity: Optional[ReferenceType[ba.Activity]] = None
self._player_records: Dict[str, PlayerRecord] = {}
self.orchestrahitsound1: Optional[ba.Sound] = None
self.orchestrahitsound2: Optional[ba.Sound] = None
self.orchestrahitsound3: Optional[ba.Sound] = None
self.orchestrahitsound4: Optional[ba.Sound] = None
def setactivity(self, activity: Optional[ba.Activity]) ->None:
"""Set the current activity for this instance."""
self._activity = None if activity is None else weakref.ref(activity)
if activity is not None:
if activity.expired:
print_error('unexpected finalized activity')
else:
with _ba.Context(activity):
self._load_activity_media()
def getactivity(self) ->Optional[ba.Activity]:
"""Get the activity associated with this instance.
May return None.
"""
if self._activity is None:
return None
return self._activity()
def _load_activity_media(self) ->None:
self.orchestrahitsound1 = _ba.getsound('orchestraHit')
self.orchestrahitsound2 = _ba.getsound('orchestraHit2')
self.orchestrahitsound3 = _ba.getsound('orchestraHit3')
self.orchestrahitsound4 = _ba.getsound('orchestraHit4')
def reset(self) ->None:
"""Reset the stats instance completely."""
for p_entry in list(self._player_records.values()):
p_entry.cancel_multi_kill_timer()
self._player_records = {}
def reset_accum(self) ->None:
"""Reset per-sound sub-scores."""
for s_player in list(self._player_records.values()):
s_player.cancel_multi_kill_timer()
s_player.accumscore = 0
s_player.accum_kill_count = 0
s_player.accum_killed_count = 0
s_player.streak = 0
def register_sessionplayer(self, player: ba.SessionPlayer) ->None:
"""Register a ba.SessionPlayer with this score-set."""
assert player.exists()
name = player.getname()
if name in self._player_records:
self._player_records[name].associate_with_sessionplayer(player)
else:
name_full = player.getname(full=True)
self._player_records[name] = PlayerRecord(name, name_full,
player, self)
def get_records(self) ->Dict[str, ba.PlayerRecord]:
"""Get PlayerRecord corresponding to still-existing players."""
records = {}
for record_id, record in self._player_records.items():
lastplayer = record.get_last_sessionplayer()
if lastplayer and lastplayer.getname() == record_id:
records[record_id] = record
return records
def player_scored(self, player: ba.Player, base_points: int=1, target:
Sequence[float]=None, kill: bool=False, victim_player: ba.Player=
None, scale: float=1.0, color: Sequence[float]=None, title: Union[
str, ba.Lstr]=None, screenmessage: bool=True, display: bool=True,
importance: int=1, showpoints: bool=True, big_message: bool=False
) ->int:
"""Register a score for the player.
Return value is actual score with multipliers and such factored in.
"""
from bastd.actor.popuptext import PopupText
from ba import _math
from ba._gameactivity import GameActivity
from ba._lang import Lstr
del victim_player
name = player.getname()
s_player = self._player_records[name]
if kill:
s_player.submit_kill(showpoints=showpoints)
display_color: Sequence[float] = (1.0, 1.0, 1.0, 1.0)
if color is not None:
display_color = color
elif importance != 1:
display_color = 1.0, 1.0, 0.4, 1.0
points = base_points
if display and big_message:
try:
assert self._activity is not None
activity = self._activity()
if isinstance(activity, GameActivity):
name_full = player.getname(full=True, icon=False)
activity.show_zoom_message(Lstr(resource=
'nameScoresText', subs=[('${NAME}', name_full)]),
color=_math.normalized_color(player.team.color))
except Exception:
print_exception('error showing big_message')
if display and showpoints:
our_pos = player.node.position if player.node else None
if our_pos is not None:
if target is None:
target = our_pos
display_pos = target[0], max(target[1], our_pos[1] - 2.0), min(
target[2], our_pos[2] + 2.0)
activity = self.getactivity()
if activity is not None:
if title is not None:
sval = Lstr(value='+${A} ${B}', subs=[('${A}', str(
points)), ('${B}', title)])
else:
sval = Lstr(value='+${A}', subs=[('${A}', str(points))]
)
PopupText(sval, color=display_color, scale=1.2 * scale,
position=display_pos).autoretain()
if kill:
s_player.accum_kill_count += 1
s_player.kill_count += 1
try:
if screenmessage and not kill:
_ba.screenmessage(Lstr(resource='nameScoresText', subs=[(
'${NAME}', name)]), top=True, color=player.color, image
=player.get_icon())
except Exception:
print_exception('error announcing score')
s_player.score += points
s_player.accumscore += points
if points != 0:
activity = self._activity() if self._activity is not None else None
if activity is not None:
activity.handlemessage(PlayerScoredMessage(score=points))
return points
def player_was_killed(self, player: ba.Player, killed: bool=False,
killer: ba.Player=None) ->None:
"""Should be called when a player is killed."""
from ba._lang import Lstr
name = player.getname()
prec = self._player_records[name]
prec.streak = 0
if killed:
prec.accum_killed_count += 1
prec.killed_count += 1
try:
if killed and _ba.getactivity().announce_player_deaths:
if killer is player:
_ba.screenmessage(Lstr(resource='nameSuicideText', subs
=[('${NAME}', name)]), top=True, color=player.color,
image=player.get_icon())
elif killer is not None:
if killer.team is player.team:
_ba.screenmessage(Lstr(resource='nameBetrayedText',
subs=[('${NAME}', killer.getname()), (
'${VICTIM}', name)]), top=True, color=killer.
color, image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameKilledText',
subs=[('${NAME}', killer.getname()), (
'${VICTIM}', name)]), top=True, color=killer.
color, image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameDiedText', subs=[(
'${NAME}', name)]), top=True, color=player.color,
image=player.get_icon())
except Exception:
print_exception('error announcing kill')
| <mask token>
from __future__ import annotations
import random
import weakref
from typing import TYPE_CHECKING
from dataclasses import dataclass
import _ba
from ba._error import print_exception, print_error, SessionTeamNotFoundError, SessionPlayerNotFoundError, NotFoundError
if TYPE_CHECKING:
import ba
from weakref import ReferenceType
from typing import Any, Dict, Optional, Sequence, Union, Tuple
@dataclass
class PlayerScoredMessage:
"""Informs something that a ba.Player scored.
Category: Message Classes
Attrs:
score
The score value.
"""
score: int
class PlayerRecord:
"""Stats for an individual player in a ba.Stats object.
Category: Gameplay Classes
This does not necessarily correspond to a ba.Player that is
still present (stats may be retained for players that leave
mid-game)
"""
character: str
def __init__(self, name: str, name_full: str, sessionplayer: ba.
SessionPlayer, stats: ba.Stats):
self.name = name
self.name_full = name_full
self.score = 0
self.accumscore = 0
self.kill_count = 0
self.accum_kill_count = 0
self.killed_count = 0
self.accum_killed_count = 0
self._multi_kill_timer: Optional[ba.Timer] = None
self._multi_kill_count = 0
self._stats = weakref.ref(stats)
self._last_sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionteam: Optional[ReferenceType[ba.SessionTeam]] = None
self.streak = 0
self.associate_with_sessionplayer(sessionplayer)
@property
def team(self) ->ba.SessionTeam:
"""The ba.SessionTeam the last associated player was last on.
This can still return a valid result even if the player is gone.
Raises a ba.SessionTeamNotFoundError if the team no longer exists.
"""
assert self._sessionteam is not None
team = self._sessionteam()
if team is None:
raise SessionTeamNotFoundError()
return team
@property
def player(self) ->ba.SessionPlayer:
"""Return the instance's associated ba.SessionPlayer.
Raises a ba.SessionPlayerNotFoundError if the player
no longer exists.
"""
if not self._sessionplayer:
raise SessionPlayerNotFoundError()
return self._sessionplayer
def getname(self, full: bool=False) ->str:
"""Return the player entry's name."""
return self.name_full if full else self.name
def get_icon(self) ->Dict[str, Any]:
"""Get the icon for this instance's player."""
player = self._last_sessionplayer
assert player is not None
return player.get_icon()
def cancel_multi_kill_timer(self) ->None:
"""Cancel any multi-kill timer for this player entry."""
self._multi_kill_timer = None
def getactivity(self) ->Optional[ba.Activity]:
"""Return the ba.Activity this instance is currently associated with.
Returns None if the activity no longer exists."""
stats = self._stats()
if stats is not None:
return stats.getactivity()
return None
def associate_with_sessionplayer(self, sessionplayer: ba.SessionPlayer
) ->None:
"""Associate this entry with a ba.SessionPlayer."""
self._sessionteam = weakref.ref(sessionplayer.sessionteam)
self.character = sessionplayer.character
self._last_sessionplayer = sessionplayer
self._sessionplayer = sessionplayer
self.streak = 0
def _end_multi_kill(self) ->None:
self._multi_kill_timer = None
self._multi_kill_count = 0
def get_last_sessionplayer(self) ->ba.SessionPlayer:
"""Return the last ba.Player we were associated with."""
assert self._last_sessionplayer is not None
return self._last_sessionplayer
def submit_kill(self, showpoints: bool=True) ->None:
"""Submit a kill for this player entry."""
from ba._lang import Lstr
from ba._general import Call
self._multi_kill_count += 1
stats = self._stats()
assert stats
if self._multi_kill_count == 1:
score = 0
name = None
delay = 0.0
color = 0.0, 0.0, 0.0, 1.0
scale = 1.0
sound = None
elif self._multi_kill_count == 2:
score = 20
name = Lstr(resource='twoKillText')
color = 0.1, 1.0, 0.0, 1
scale = 1.0
delay = 0.0
sound = stats.orchestrahitsound1
elif self._multi_kill_count == 3:
score = 40
name = Lstr(resource='threeKillText')
color = 1.0, 0.7, 0.0, 1
scale = 1.1
delay = 0.3
sound = stats.orchestrahitsound2
elif self._multi_kill_count == 4:
score = 60
name = Lstr(resource='fourKillText')
color = 1.0, 1.0, 0.0, 1
scale = 1.2
delay = 0.6
sound = stats.orchestrahitsound3
elif self._multi_kill_count == 5:
score = 80
name = Lstr(resource='fiveKillText')
color = 1.0, 0.5, 0.0, 1
scale = 1.3
delay = 0.9
sound = stats.orchestrahitsound4
else:
score = 100
name = Lstr(resource='multiKillText', subs=[('${COUNT}', str(
self._multi_kill_count))])
color = 1.0, 0.5, 0.0, 1
scale = 1.3
delay = 1.0
sound = stats.orchestrahitsound4
def _apply(name2: Lstr, score2: int, showpoints2: bool, color2:
Tuple[float, float, float, float], scale2: float, sound2:
Optional[ba.Sound]) ->None:
from bastd.actor.popuptext import PopupText
our_pos: Optional[ba.Vec3] = None
if self._sessionplayer:
if self._sessionplayer.activityplayer is not None:
try:
our_pos = self._sessionplayer.activityplayer.position
except NotFoundError:
pass
if our_pos is None:
return
our_pos = _ba.Vec3(our_pos[0] + (random.random() - 0.5) * 2.0,
our_pos[1] + (random.random() - 0.5) * 2.0, our_pos[2] + (
random.random() - 0.5) * 2.0)
activity = self.getactivity()
if activity is not None:
PopupText(Lstr(value=('+' + str(score2) + ' ' if
showpoints2 else '') + '${N}', subs=[('${N}', name2)]),
color=color2, scale=scale2, position=our_pos).autoretain()
if sound2:
_ba.playsound(sound2)
self.score += score2
self.accumscore += score2
if score2 != 0 and activity is not None:
activity.handlemessage(PlayerScoredMessage(score=score2))
if name is not None:
_ba.timer(0.3 + delay, Call(_apply, name, score, showpoints,
color, scale, sound))
self._multi_kill_timer = _ba.Timer(1.0, self._end_multi_kill)
class Stats:
"""Manages scores and statistics for a ba.Session.
category: Gameplay Classes
"""
def __init__(self) ->None:
self._activity: Optional[ReferenceType[ba.Activity]] = None
self._player_records: Dict[str, PlayerRecord] = {}
self.orchestrahitsound1: Optional[ba.Sound] = None
self.orchestrahitsound2: Optional[ba.Sound] = None
self.orchestrahitsound3: Optional[ba.Sound] = None
self.orchestrahitsound4: Optional[ba.Sound] = None
def setactivity(self, activity: Optional[ba.Activity]) ->None:
"""Set the current activity for this instance."""
self._activity = None if activity is None else weakref.ref(activity)
if activity is not None:
if activity.expired:
print_error('unexpected finalized activity')
else:
with _ba.Context(activity):
self._load_activity_media()
def getactivity(self) ->Optional[ba.Activity]:
"""Get the activity associated with this instance.
May return None.
"""
if self._activity is None:
return None
return self._activity()
def _load_activity_media(self) ->None:
self.orchestrahitsound1 = _ba.getsound('orchestraHit')
self.orchestrahitsound2 = _ba.getsound('orchestraHit2')
self.orchestrahitsound3 = _ba.getsound('orchestraHit3')
self.orchestrahitsound4 = _ba.getsound('orchestraHit4')
def reset(self) ->None:
"""Reset the stats instance completely."""
for p_entry in list(self._player_records.values()):
p_entry.cancel_multi_kill_timer()
self._player_records = {}
def reset_accum(self) ->None:
"""Reset per-sound sub-scores."""
for s_player in list(self._player_records.values()):
s_player.cancel_multi_kill_timer()
s_player.accumscore = 0
s_player.accum_kill_count = 0
s_player.accum_killed_count = 0
s_player.streak = 0
def register_sessionplayer(self, player: ba.SessionPlayer) ->None:
"""Register a ba.SessionPlayer with this score-set."""
assert player.exists()
name = player.getname()
if name in self._player_records:
self._player_records[name].associate_with_sessionplayer(player)
else:
name_full = player.getname(full=True)
self._player_records[name] = PlayerRecord(name, name_full,
player, self)
def get_records(self) ->Dict[str, ba.PlayerRecord]:
"""Get PlayerRecord corresponding to still-existing players."""
records = {}
for record_id, record in self._player_records.items():
lastplayer = record.get_last_sessionplayer()
if lastplayer and lastplayer.getname() == record_id:
records[record_id] = record
return records
def player_scored(self, player: ba.Player, base_points: int=1, target:
Sequence[float]=None, kill: bool=False, victim_player: ba.Player=
None, scale: float=1.0, color: Sequence[float]=None, title: Union[
str, ba.Lstr]=None, screenmessage: bool=True, display: bool=True,
importance: int=1, showpoints: bool=True, big_message: bool=False
) ->int:
"""Register a score for the player.
Return value is actual score with multipliers and such factored in.
"""
from bastd.actor.popuptext import PopupText
from ba import _math
from ba._gameactivity import GameActivity
from ba._lang import Lstr
del victim_player
name = player.getname()
s_player = self._player_records[name]
if kill:
s_player.submit_kill(showpoints=showpoints)
display_color: Sequence[float] = (1.0, 1.0, 1.0, 1.0)
if color is not None:
display_color = color
elif importance != 1:
display_color = 1.0, 1.0, 0.4, 1.0
points = base_points
if display and big_message:
try:
assert self._activity is not None
activity = self._activity()
if isinstance(activity, GameActivity):
name_full = player.getname(full=True, icon=False)
activity.show_zoom_message(Lstr(resource=
'nameScoresText', subs=[('${NAME}', name_full)]),
color=_math.normalized_color(player.team.color))
except Exception:
print_exception('error showing big_message')
if display and showpoints:
our_pos = player.node.position if player.node else None
if our_pos is not None:
if target is None:
target = our_pos
display_pos = target[0], max(target[1], our_pos[1] - 2.0), min(
target[2], our_pos[2] + 2.0)
activity = self.getactivity()
if activity is not None:
if title is not None:
sval = Lstr(value='+${A} ${B}', subs=[('${A}', str(
points)), ('${B}', title)])
else:
sval = Lstr(value='+${A}', subs=[('${A}', str(points))]
)
PopupText(sval, color=display_color, scale=1.2 * scale,
position=display_pos).autoretain()
if kill:
s_player.accum_kill_count += 1
s_player.kill_count += 1
try:
if screenmessage and not kill:
_ba.screenmessage(Lstr(resource='nameScoresText', subs=[(
'${NAME}', name)]), top=True, color=player.color, image
=player.get_icon())
except Exception:
print_exception('error announcing score')
s_player.score += points
s_player.accumscore += points
if points != 0:
activity = self._activity() if self._activity is not None else None
if activity is not None:
activity.handlemessage(PlayerScoredMessage(score=points))
return points
def player_was_killed(self, player: ba.Player, killed: bool=False,
killer: ba.Player=None) ->None:
"""Should be called when a player is killed."""
from ba._lang import Lstr
name = player.getname()
prec = self._player_records[name]
prec.streak = 0
if killed:
prec.accum_killed_count += 1
prec.killed_count += 1
try:
if killed and _ba.getactivity().announce_player_deaths:
if killer is player:
_ba.screenmessage(Lstr(resource='nameSuicideText', subs
=[('${NAME}', name)]), top=True, color=player.color,
image=player.get_icon())
elif killer is not None:
if killer.team is player.team:
_ba.screenmessage(Lstr(resource='nameBetrayedText',
subs=[('${NAME}', killer.getname()), (
'${VICTIM}', name)]), top=True, color=killer.
color, image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameKilledText',
subs=[('${NAME}', killer.getname()), (
'${VICTIM}', name)]), top=True, color=killer.
color, image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameDiedText', subs=[(
'${NAME}', name)]), top=True, color=player.color,
image=player.get_icon())
except Exception:
print_exception('error announcing kill')
| # Copyright (c) 2011-2020 Eric Froemling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Functionality related to scores and statistics."""
from __future__ import annotations
import random
import weakref
from typing import TYPE_CHECKING
from dataclasses import dataclass
import _ba
from ba._error import (print_exception, print_error, SessionTeamNotFoundError,
SessionPlayerNotFoundError, NotFoundError)
if TYPE_CHECKING:
import ba
from weakref import ReferenceType
from typing import Any, Dict, Optional, Sequence, Union, Tuple
@dataclass
class PlayerScoredMessage:
"""Informs something that a ba.Player scored.
Category: Message Classes
Attrs:
score
The score value.
"""
score: int
class PlayerRecord:
"""Stats for an individual player in a ba.Stats object.
Category: Gameplay Classes
This does not necessarily correspond to a ba.Player that is
still present (stats may be retained for players that leave
mid-game)
"""
character: str
def __init__(self, name: str, name_full: str,
sessionplayer: ba.SessionPlayer, stats: ba.Stats):
self.name = name
self.name_full = name_full
self.score = 0
self.accumscore = 0
self.kill_count = 0
self.accum_kill_count = 0
self.killed_count = 0
self.accum_killed_count = 0
self._multi_kill_timer: Optional[ba.Timer] = None
self._multi_kill_count = 0
self._stats = weakref.ref(stats)
self._last_sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionplayer: Optional[ba.SessionPlayer] = None
self._sessionteam: Optional[ReferenceType[ba.SessionTeam]] = None
self.streak = 0
self.associate_with_sessionplayer(sessionplayer)
@property
def team(self) -> ba.SessionTeam:
"""The ba.SessionTeam the last associated player was last on.
This can still return a valid result even if the player is gone.
Raises a ba.SessionTeamNotFoundError if the team no longer exists.
"""
assert self._sessionteam is not None
team = self._sessionteam()
if team is None:
raise SessionTeamNotFoundError()
return team
@property
def player(self) -> ba.SessionPlayer:
"""Return the instance's associated ba.SessionPlayer.
Raises a ba.SessionPlayerNotFoundError if the player
no longer exists.
"""
if not self._sessionplayer:
raise SessionPlayerNotFoundError()
return self._sessionplayer
def getname(self, full: bool = False) -> str:
"""Return the player entry's name."""
return self.name_full if full else self.name
def get_icon(self) -> Dict[str, Any]:
"""Get the icon for this instance's player."""
player = self._last_sessionplayer
assert player is not None
return player.get_icon()
def cancel_multi_kill_timer(self) -> None:
"""Cancel any multi-kill timer for this player entry."""
self._multi_kill_timer = None
def getactivity(self) -> Optional[ba.Activity]:
"""Return the ba.Activity this instance is currently associated with.
Returns None if the activity no longer exists."""
stats = self._stats()
if stats is not None:
return stats.getactivity()
return None
def associate_with_sessionplayer(self,
sessionplayer: ba.SessionPlayer) -> None:
"""Associate this entry with a ba.SessionPlayer."""
self._sessionteam = weakref.ref(sessionplayer.sessionteam)
self.character = sessionplayer.character
self._last_sessionplayer = sessionplayer
self._sessionplayer = sessionplayer
self.streak = 0
def _end_multi_kill(self) -> None:
self._multi_kill_timer = None
self._multi_kill_count = 0
def get_last_sessionplayer(self) -> ba.SessionPlayer:
"""Return the last ba.Player we were associated with."""
assert self._last_sessionplayer is not None
return self._last_sessionplayer
def submit_kill(self, showpoints: bool = True) -> None:
"""Submit a kill for this player entry."""
# FIXME Clean this up.
# pylint: disable=too-many-statements
from ba._lang import Lstr
from ba._general import Call
self._multi_kill_count += 1
stats = self._stats()
assert stats
if self._multi_kill_count == 1:
score = 0
name = None
delay = 0.0
color = (0.0, 0.0, 0.0, 1.0)
scale = 1.0
sound = None
elif self._multi_kill_count == 2:
score = 20
name = Lstr(resource='twoKillText')
color = (0.1, 1.0, 0.0, 1)
scale = 1.0
delay = 0.0
sound = stats.orchestrahitsound1
elif self._multi_kill_count == 3:
score = 40
name = Lstr(resource='threeKillText')
color = (1.0, 0.7, 0.0, 1)
scale = 1.1
delay = 0.3
sound = stats.orchestrahitsound2
elif self._multi_kill_count == 4:
score = 60
name = Lstr(resource='fourKillText')
color = (1.0, 1.0, 0.0, 1)
scale = 1.2
delay = 0.6
sound = stats.orchestrahitsound3
elif self._multi_kill_count == 5:
score = 80
name = Lstr(resource='fiveKillText')
color = (1.0, 0.5, 0.0, 1)
scale = 1.3
delay = 0.9
sound = stats.orchestrahitsound4
else:
score = 100
name = Lstr(resource='multiKillText',
subs=[('${COUNT}', str(self._multi_kill_count))])
color = (1.0, 0.5, 0.0, 1)
scale = 1.3
delay = 1.0
sound = stats.orchestrahitsound4
def _apply(name2: Lstr, score2: int, showpoints2: bool,
color2: Tuple[float, float, float, float], scale2: float,
sound2: Optional[ba.Sound]) -> None:
from bastd.actor.popuptext import PopupText
# Only award this if they're still alive and we can get
# a current position for them.
our_pos: Optional[ba.Vec3] = None
if self._sessionplayer:
if self._sessionplayer.activityplayer is not None:
try:
our_pos = self._sessionplayer.activityplayer.position
except NotFoundError:
pass
if our_pos is None:
return
# Jitter position a bit since these often come in clusters.
our_pos = _ba.Vec3(our_pos[0] + (random.random() - 0.5) * 2.0,
our_pos[1] + (random.random() - 0.5) * 2.0,
our_pos[2] + (random.random() - 0.5) * 2.0)
activity = self.getactivity()
if activity is not None:
PopupText(Lstr(
value=(('+' + str(score2) + ' ') if showpoints2 else '') +
'${N}',
subs=[('${N}', name2)]),
color=color2,
scale=scale2,
position=our_pos).autoretain()
if sound2:
_ba.playsound(sound2)
self.score += score2
self.accumscore += score2
# Inform a running game of the score.
if score2 != 0 and activity is not None:
activity.handlemessage(PlayerScoredMessage(score=score2))
if name is not None:
_ba.timer(
0.3 + delay,
Call(_apply, name, score, showpoints, color, scale, sound))
# Keep the tally rollin'...
# set a timer for a bit in the future.
self._multi_kill_timer = _ba.Timer(1.0, self._end_multi_kill)
class Stats:
"""Manages scores and statistics for a ba.Session.
category: Gameplay Classes
"""
def __init__(self) -> None:
self._activity: Optional[ReferenceType[ba.Activity]] = None
self._player_records: Dict[str, PlayerRecord] = {}
self.orchestrahitsound1: Optional[ba.Sound] = None
self.orchestrahitsound2: Optional[ba.Sound] = None
self.orchestrahitsound3: Optional[ba.Sound] = None
self.orchestrahitsound4: Optional[ba.Sound] = None
def setactivity(self, activity: Optional[ba.Activity]) -> None:
"""Set the current activity for this instance."""
self._activity = None if activity is None else weakref.ref(activity)
# Load our media into this activity's context.
if activity is not None:
if activity.expired:
print_error('unexpected finalized activity')
else:
with _ba.Context(activity):
self._load_activity_media()
def getactivity(self) -> Optional[ba.Activity]:
"""Get the activity associated with this instance.
May return None.
"""
if self._activity is None:
return None
return self._activity()
def _load_activity_media(self) -> None:
self.orchestrahitsound1 = _ba.getsound('orchestraHit')
self.orchestrahitsound2 = _ba.getsound('orchestraHit2')
self.orchestrahitsound3 = _ba.getsound('orchestraHit3')
self.orchestrahitsound4 = _ba.getsound('orchestraHit4')
def reset(self) -> None:
"""Reset the stats instance completely."""
        # Just to be safe, let's make sure no multi-kill timers are gonna go off
# for no-longer-on-the-list players.
for p_entry in list(self._player_records.values()):
p_entry.cancel_multi_kill_timer()
self._player_records = {}
def reset_accum(self) -> None:
"""Reset per-sound sub-scores."""
for s_player in list(self._player_records.values()):
s_player.cancel_multi_kill_timer()
s_player.accumscore = 0
s_player.accum_kill_count = 0
s_player.accum_killed_count = 0
s_player.streak = 0
def register_sessionplayer(self, player: ba.SessionPlayer) -> None:
"""Register a ba.SessionPlayer with this score-set."""
assert player.exists() # Invalid refs should never be passed to funcs.
name = player.getname()
if name in self._player_records:
# If the player already exists, update his character and such as
# it may have changed.
self._player_records[name].associate_with_sessionplayer(player)
else:
name_full = player.getname(full=True)
self._player_records[name] = PlayerRecord(name, name_full, player,
self)
def get_records(self) -> Dict[str, ba.PlayerRecord]:
"""Get PlayerRecord corresponding to still-existing players."""
records = {}
# Go through our player records and return ones whose player id still
# corresponds to a player with that name.
for record_id, record in self._player_records.items():
lastplayer = record.get_last_sessionplayer()
if lastplayer and lastplayer.getname() == record_id:
records[record_id] = record
return records
def player_scored(self,
player: ba.Player,
base_points: int = 1,
target: Sequence[float] = None,
kill: bool = False,
victim_player: ba.Player = None,
scale: float = 1.0,
color: Sequence[float] = None,
title: Union[str, ba.Lstr] = None,
screenmessage: bool = True,
display: bool = True,
importance: int = 1,
showpoints: bool = True,
big_message: bool = False) -> int:
"""Register a score for the player.
Return value is actual score with multipliers and such factored in.
"""
# FIXME: Tidy this up.
# pylint: disable=cyclic-import
# pylint: disable=too-many-branches
# pylint: disable=too-many-locals
# pylint: disable=too-many-statements
from bastd.actor.popuptext import PopupText
from ba import _math
from ba._gameactivity import GameActivity
from ba._lang import Lstr
del victim_player # Currently unused.
name = player.getname()
s_player = self._player_records[name]
if kill:
s_player.submit_kill(showpoints=showpoints)
display_color: Sequence[float] = (1.0, 1.0, 1.0, 1.0)
if color is not None:
display_color = color
elif importance != 1:
display_color = (1.0, 1.0, 0.4, 1.0)
points = base_points
# If they want a big announcement, throw a zoom-text up there.
if display and big_message:
try:
assert self._activity is not None
activity = self._activity()
if isinstance(activity, GameActivity):
name_full = player.getname(full=True, icon=False)
activity.show_zoom_message(
Lstr(resource='nameScoresText',
subs=[('${NAME}', name_full)]),
color=_math.normalized_color(player.team.color))
except Exception:
print_exception('error showing big_message')
        # If we currently have an actor, pop up a score over it.
if display and showpoints:
our_pos = player.node.position if player.node else None
if our_pos is not None:
if target is None:
target = our_pos
# If display-pos is *way* lower than us, raise it up
# (so we can still see scores from dudes that fell off cliffs).
display_pos = (target[0], max(target[1], our_pos[1] - 2.0),
min(target[2], our_pos[2] + 2.0))
activity = self.getactivity()
if activity is not None:
if title is not None:
sval = Lstr(value='+${A} ${B}',
subs=[('${A}', str(points)),
('${B}', title)])
else:
sval = Lstr(value='+${A}',
subs=[('${A}', str(points))])
PopupText(sval,
color=display_color,
scale=1.2 * scale,
position=display_pos).autoretain()
# Tally kills.
if kill:
s_player.accum_kill_count += 1
s_player.kill_count += 1
# Report non-kill scorings.
try:
if screenmessage and not kill:
_ba.screenmessage(Lstr(resource='nameScoresText',
subs=[('${NAME}', name)]),
top=True,
color=player.color,
image=player.get_icon())
except Exception:
print_exception('error announcing score')
s_player.score += points
s_player.accumscore += points
# Inform a running game of the score.
if points != 0:
activity = self._activity() if self._activity is not None else None
if activity is not None:
activity.handlemessage(PlayerScoredMessage(score=points))
return points
def player_was_killed(self,
player: ba.Player,
killed: bool = False,
killer: ba.Player = None) -> None:
"""Should be called when a player is killed."""
from ba._lang import Lstr
name = player.getname()
prec = self._player_records[name]
prec.streak = 0
if killed:
prec.accum_killed_count += 1
prec.killed_count += 1
try:
if killed and _ba.getactivity().announce_player_deaths:
if killer is player:
_ba.screenmessage(Lstr(resource='nameSuicideText',
subs=[('${NAME}', name)]),
top=True,
color=player.color,
image=player.get_icon())
elif killer is not None:
if killer.team is player.team:
_ba.screenmessage(Lstr(resource='nameBetrayedText',
subs=[('${NAME}',
killer.getname()),
('${VICTIM}', name)]),
top=True,
color=killer.color,
image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameKilledText',
subs=[('${NAME}',
killer.getname()),
('${VICTIM}', name)]),
top=True,
color=killer.color,
image=killer.get_icon())
else:
_ba.screenmessage(Lstr(resource='nameDiedText',
subs=[('${NAME}', name)]),
top=True,
color=player.color,
image=player.get_icon())
except Exception:
print_exception('error announcing kill')
| [
17,
23,
28,
29,
30
] |
1,158 | 243016b14f503a09147f434e7bec31dc204fafdf | # This script is for character creation.
print ("Welcome to the character wizard creation!")
# Here you will select your race from the list.
race = ["human", "ork", "elf"]
print race
race = raw_input("Please choose your race: ")
print "You have choosen %r" %race
# Here you will select your gender.
gender = ["male", "female"]
print gender
gender = raw_input("Please choose your gender: ")
print "You have choosen %r" %gender
character = {'race': race, 'gender': gender}
| null | null | null | null | [
0
] |
1,159 | 2187f38dc9b14ecc355e98fe15d36fdefd548f04 | <mask token>
| <mask token>
def get_token_from_request(request):
token_tuple = request.COOKIES.get('money_api_token')
matches = re.search('(<Token: (\\S*)>)', token_tuple)
token = matches.groups(0)[1]
return token
<mask token>
| <mask token>
def get_token_from_request(request):
token_tuple = request.COOKIES.get('money_api_token')
matches = re.search('(<Token: (\\S*)>)', token_tuple)
token = matches.groups(0)[1]
return token
def get_student_from_request(request):
current_token = get_token_from_request(request)
current_user = Token.objects.filter(key=current_token).last().user
current_email = User.objects.filter(username=current_user).last().email
return ValidatedStudent.objects.filter(email=current_email).last()
| import re
from .models import ValidatedStudent
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User
def get_token_from_request(request):
token_tuple = request.COOKIES.get('money_api_token')
matches = re.search('(<Token: (\\S*)>)', token_tuple)
token = matches.groups(0)[1]
return token
def get_student_from_request(request):
current_token = get_token_from_request(request)
current_user = Token.objects.filter(key=current_token).last().user
current_email = User.objects.filter(username=current_user).last().email
return ValidatedStudent.objects.filter(email=current_email).last()
| import re
from .models import ValidatedStudent
from rest_framework.authtoken.models import Token
from django.contrib.auth.models import User
def get_token_from_request(request):
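    # The cookie stores a stringified token object such as "<Token: abc123>";
    # pull the raw key out of the second regex capture group.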
token_tuple = request.COOKIES.get('money_api_token')
matches = re.search(r'(<Token: (\S*)>)', token_tuple)
token = matches.groups(0)[1]
return token
def get_student_from_request(request):
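    # Resolve the token to its auth user, then to that user's email, and finally
    # to the matching ValidatedStudent record.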
current_token = get_token_from_request(request)
current_user = Token.objects.filter(key=current_token).last().user
current_email = User.objects.filter(username=current_user).last().email
return ValidatedStudent.objects.filter(email=current_email).last()
| [
0,
1,
2,
3,
4
] |
1,160 | 04487dce97231a7be2bf3b164e93f0ea4d01ba05 | <mask token>
| def palinPerm(str):
charSet = set()
for c in str:
if c not in charSet:
charSet.add(c)
else:
charSet.remove(c)
return len(charSet) == 1 or len(charSet) == 0
<mask token>
| def palinPerm(str):
charSet = set()
for c in str:
if c not in charSet:
charSet.add(c)
else:
charSet.remove(c)
return len(charSet) == 1 or len(charSet) == 0
<mask token>
print(response)
| def palinPerm(str):
charSet = set()
for c in str:
if c not in charSet:
charSet.add(c)
else:
charSet.remove(c)
return len(charSet) == 1 or len(charSet) == 0
response = 'It is a palinPerm' if palinPerm('dadadad'
) else 'No, not a palinPerm'
print(response)
| # Write function that determines if a string a palindrome off of any permutation
def palinPerm(str):
# Create empty set
charSet = set()
# Loop through string, if character does not exist in set, add it. If it does, remove it.
for c in str:
if c not in charSet:
charSet.add(c)
else:
charSet.remove(c)
# The final set should either have 1 element or none
return len(charSet) == 1 or len(charSet) == 0
response = "It is a palinPerm" if palinPerm("dadadad") else "No, not a palinPerm"
print(response)
# Time Complexity: O(N) | [
0,
1,
2,
3,
4
] |
1,161 | 5e355732f07029aa644617ac9b5e9ad50ee9397f | <mask token>
| <mask token>
urlpatterns = [url('^porta/list$', porta_list, name='porta_list'), url(
'^porta/detail/(?P<pk>\\d+)$', porta_detail, name='porta_detail'), url(
'^porta/new/$', porta_new, name='porta_new'), url(
'^porta/update/(?P<pk>\\d+)$', porta_update, name='porta_update'), url(
'^porta/delete/(?P<pk>\\d+)$', porta_delete, name='porta_delete'), url(
'^porta/usuarios/(?P<pk>\\d+)$', porta_delete, name='porta_delete'),
url('^grupo/list$', grupo_list, name='grupo_list'), url(
'^grupo/detail/(?P<pk>\\d+)$', grupo_detail, name='grupo_detail'), url(
'^grupo/new/$', grupo_new, name='grupo_new'), url(
'^grupo/update/(?P<pk>\\d+)$', grupo_update, name='grupo_update'), url(
'^grupo/delete/(?P<pk>\\d+)$', grupo_delete, name='grupo_delete'), url(
'^edit/grupo/$', edit_grupo, name='edit_grupo'), url(
'^usuario/acesso/grupo/(?P<pk>\\d+)$', usuario_acesso_grupo, name=
'usuario_acesso_grupo'), url('^usuario/sem_acesso/grupo/(?P<pk>\\d+)$',
usuario_sem_acesso_grupo, name='usuario_sem_acesso_grupo'), url(
'^porta/no_grupo/(?P<pk>\\d+)$', porta_no_grupo, name='porta_no_grupo'),
url('^porta/nao_grupo/(?P<pk>\\d+)$', porta_nao_grupo, name=
'porta_nao_grupo'), url('^portas/$', portas, name='portas'), url(
'^porta/busca/(?P<pk>\\d+)$', busca_porta, name='busca_porta'), url(
'^busca/porta_frequencia/$', busca_porta_frequencia, name=
'busca_frequencia_porta'), url('^frequencia_porta_acesso/$',
frequencia_porta_acesso, name='frequencia_porta_acesso'), url(
'^porta/frequencia_acesso/(?P<pk>\\d+)$', porta_frequencias, name=
'porta_frequencias')]
| from django.conf.urls import url
from django.contrib.auth.views import login, logout
from appPortas.views import *
urlpatterns = [url('^porta/list$', porta_list, name='porta_list'), url(
'^porta/detail/(?P<pk>\\d+)$', porta_detail, name='porta_detail'), url(
'^porta/new/$', porta_new, name='porta_new'), url(
'^porta/update/(?P<pk>\\d+)$', porta_update, name='porta_update'), url(
'^porta/delete/(?P<pk>\\d+)$', porta_delete, name='porta_delete'), url(
'^porta/usuarios/(?P<pk>\\d+)$', porta_delete, name='porta_delete'),
url('^grupo/list$', grupo_list, name='grupo_list'), url(
'^grupo/detail/(?P<pk>\\d+)$', grupo_detail, name='grupo_detail'), url(
'^grupo/new/$', grupo_new, name='grupo_new'), url(
'^grupo/update/(?P<pk>\\d+)$', grupo_update, name='grupo_update'), url(
'^grupo/delete/(?P<pk>\\d+)$', grupo_delete, name='grupo_delete'), url(
'^edit/grupo/$', edit_grupo, name='edit_grupo'), url(
'^usuario/acesso/grupo/(?P<pk>\\d+)$', usuario_acesso_grupo, name=
'usuario_acesso_grupo'), url('^usuario/sem_acesso/grupo/(?P<pk>\\d+)$',
usuario_sem_acesso_grupo, name='usuario_sem_acesso_grupo'), url(
'^porta/no_grupo/(?P<pk>\\d+)$', porta_no_grupo, name='porta_no_grupo'),
url('^porta/nao_grupo/(?P<pk>\\d+)$', porta_nao_grupo, name=
'porta_nao_grupo'), url('^portas/$', portas, name='portas'), url(
'^porta/busca/(?P<pk>\\d+)$', busca_porta, name='busca_porta'), url(
'^busca/porta_frequencia/$', busca_porta_frequencia, name=
'busca_frequencia_porta'), url('^frequencia_porta_acesso/$',
frequencia_porta_acesso, name='frequencia_porta_acesso'), url(
'^porta/frequencia_acesso/(?P<pk>\\d+)$', porta_frequencias, name=
'porta_frequencias')]
| from django.conf.urls import url
from django.contrib.auth.views import login,logout
from appPortas.views import *
urlpatterns = [
url(r'^porta/list$', porta_list, name='porta_list'),
url(r'^porta/detail/(?P<pk>\d+)$',porta_detail, name='porta_detail'),
url(r'^porta/new/$', porta_new, name='porta_new'),
url(r'^porta/update/(?P<pk>\d+)$',porta_update, name='porta_update'),
url(r'^porta/delete/(?P<pk>\d+)$',porta_delete, name='porta_delete'),
url(r'^porta/usuarios/(?P<pk>\d+)$', porta_delete, name='porta_delete'),
url(r'^grupo/list$', grupo_list, name='grupo_list'),
url(r'^grupo/detail/(?P<pk>\d+)$',grupo_detail, name='grupo_detail'),
url(r'^grupo/new/$', grupo_new, name='grupo_new'),
url(r'^grupo/update/(?P<pk>\d+)$',grupo_update, name='grupo_update'),
url(r'^grupo/delete/(?P<pk>\d+)$',grupo_delete, name='grupo_delete'),
url(r'^edit/grupo/$', edit_grupo, name='edit_grupo'),
url(r'^usuario/acesso/grupo/(?P<pk>\d+)$', usuario_acesso_grupo, name='usuario_acesso_grupo'),
url(r'^usuario/sem_acesso/grupo/(?P<pk>\d+)$', usuario_sem_acesso_grupo, name='usuario_sem_acesso_grupo'),
url(r'^porta/no_grupo/(?P<pk>\d+)$', porta_no_grupo, name='porta_no_grupo'),
url(r'^porta/nao_grupo/(?P<pk>\d+)$', porta_nao_grupo, name='porta_nao_grupo'),
url(r'^portas/$', portas, name='portas'),
url(r'^porta/busca/(?P<pk>\d+)$', busca_porta, name='busca_porta'),
url(r'^busca/porta_frequencia/$', busca_porta_frequencia, name='busca_frequencia_porta'),
url(r'^frequencia_porta_acesso/$', frequencia_porta_acesso, name='frequencia_porta_acesso'),
url(r'^porta/frequencia_acesso/(?P<pk>\d+)$', porta_frequencias, name='porta_frequencias'),
]
| null | [
0,
1,
2,
3
] |
1,162 | 6bcddd1b2ec8653400f710e5cab552d4bec75b6b | #!/usr/bin/env python
"""
This code is fot testing the region growing.
"""
import os
import sys
import time
import nibabel as nib
import region_growing as rg
import matplotlib.pyplot as plt
import numpy as np
img = nib.load("zstat1.nii.gz")
data = img.get_data()
#test coor [36,60,28] [21,39,30] [23,38,30]
coor = [23,38,30]
num = 10000
size_list = []
st = time.time()
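# Sweep the region-growing threshold from 0.1 to 4.9 in steps of 0.1 and record the region size.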
for t in range(1,50):
t = t/10.0
print t
region_img,size = rg.region_growing(data,coor,float(t),num,6)
print "Totoal time is :%s"%(time.time()-st)
size_list.append([t,size])
print size_list
size_list = np.array(size_list)
plt.plot(size_list[:,0],size_list[:,1],'ro')
plt.show()
result = img
result._data = region_img
nib.save(result,"region.nii.gz")
| null | null | null | null | [
0
] |
1,163 | e464b465c4bc90c250c0ea02c17b7398d975964b | <mask token>
def main():
args = parser.parse_args()
quiet = False
if args.quiet:
quiet = True
tempo2 = True
ptoa = False
if args.print_toas:
ptoa = True
if not quiet:
print('Loading the archive files for DM estimation')
archives = []
for filename in args.files:
archives.append(psrchive.Archive_load(filename))
narch = len(archives)
if narch >= 1:
if not quiet:
print('Appending the archives ...'),
ar = freq_appendData(narch, archives, args.offset, args.b3fscrunch,
args.b5fscrunch)
if not quiet:
print(' done!')
elif not quiet:
print('Only one archive was given, so nothing to frequency-append.')
ar = archives[0]
del archives
ar_psr = ar.get_source()
ar_nbins = ar.get_nbin()
ar_tel = ar.get_telescope()
mjd_start = ar.get_Integration(0).get_start_time().in_days()
mjd_end = ar.get_Integration(0).get_end_time().in_days()
ar_mjd = mjd_start + (mjd_end - mjd_start) / 2.0
length = ar.integration_length()
ar.update_centre_frequency()
ar_centfr = ar.get_centre_frequency()
ar_nchan = ar.get_nchan()
ar_bw = ar.get_bandwidth()
ar_chnwdth = ar_bw / ar_nchan
ffrac = args.fscrunch
if not quiet:
print('\nNow preparing for DM estimation\n')
pwd = os.getcwd()
if args.ephem != None:
ephemeris = args.ephem
else:
ephemeris = 'ephemerides/' + ar_psr + '.par'
if not os.path.exists(ephemeris):
sys.exit(1)
if not quiet:
        print('\nEphemeris file is: ' + ephemeris + '\n')
model = []
for filename in args.model:
model.append(psrchive.Archive_load(filename))
if args.model != None:
if len(args.model) == 1:
model = freq_appendModel(1, model, args.offset, args.b3fscrunch,
args.b5fscrunch)
if len(args.model) > 1:
model = freq_appendModel(1, model, args.offset, args.b3fscrunch,
args.b5fscrunch)
if args.model == None:
if not quiet:
print('Looking for matching template in templates directory...'),
import subprocess
tempdir = 'templates/*.sm'
tempfile = ar_psr + '_tmp.txt'
a = subprocess.call(
"psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'" % (tempdir,
tempfile), shell=True)
        modeltempl = ''
        tempnchan = ''
t1 = str(ar_nbins)
if ar_tel == 'gmrt':
t2 = str(int(ar_bw))
else:
t2 = str(ar_bw)
t3 = '%.2f' % ar_centfr
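        # A template matches when the source name, nbin, bandwidth and centre
        # frequency from the psredit listing all agree with the observation.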
f = open(tempfile, 'r')
for line in f:
line = line.strip()
columns = line.split()
t4 = float(columns[5])
t4 = '%.2f' % t4
if ar_tel == 'gmrt':
if columns[1] == ar_psr and columns[2] == t1 and str(int(
columns[3])) == t2 and t4 == t3:
modeltempl = columns[0]
tempnchan = columns[4]
if not quiet:
print(' done\n')
elif columns[1] == ar_psr and columns[2] == t1 and str(columns[3]
) == t2 and t4 == t3:
modeltempl = columns[0]
tempnchan = columns[4]
if not quiet:
print(' done\n')
if modeltempl == '' and tempnchan == '':
print(
'\n** No matching template found for DM fitting. Exiting. **\n'
)
sys.exit(1)
f.close()
os.remove(tempfile)
if not quiet:
print('Found matching template: ' + modeltempl)
model.append(psrchive.Archive_load(modeltempl))
if not quiet:
print('\nEstimating the DM from the observation')
model.update_centre_frequency()
arch = ar.clone()
dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch,
ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,
ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch)
if args.writeout:
infile = open(ephemeris, 'r')
tmpeph = ar_psr + '.eph'
output = open(tmpeph, 'w+')
for i, line in enumerate(infile):
if not line.lstrip().startswith('DM'):
if not line.lstrip().startswith('DMEPOCH'):
output.write(line)
infile.close()
output.close()
dmline = 'DM\t\t\t ' + str(dmval) + '\t\t' + str(dmverr)
dmepochline = 'DMEPOCH\t\t ' + str(round(ar_mjd, 2))
if not args.quiet:
print('Updating the ephemeris with new DM... '),
f = open(tmpeph, 'a')
f.write('%s\n %s\n' % (dmline, dmepochline))
if not args.quiet:
print(' done!')
f.close()
if not quiet:
print(
'Correcting the DM of the observed file and writing it out... '
),
os.remove(tmpeph)
dirfinal = os.path.join(pwd, ar_psr + '_' + ar_tel + '_final')
if not os.path.exists(dirfinal):
os.makedirs(dirfinal)
outfile = dirfinal + '/' + ar_psr + '_' + str(ar_mjd
) + '_' + ar_tel + '.ar'
ar.set_dispersion_measure(dmval)
ar.dedisperse()
if not args.Tscrunch:
ar.tscrunch(args.tscrunch)
else:
ar.tscrunch()
if not args.Fscrunch:
ar.fscrunch(ffrac)
else:
ar.fscrunch()
ar.unload(outfile)
if not args.quiet:
print(' done!')
del ar
if not quiet:
print('The file is corrected for DM and is written out to\n' +
outfile)
f = open(ar_psr + '_DM_timeseries.txt', 'a')
f.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\n' % (
filename, ar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms,
ToA_Err, ar_centfr, ar_bw, ar_tel))
f.close()
import time
end = time.time()
total = end - start
print(
'-----------------------------------------------------------------------------'
)
print('MJD\t\tDM\t\tDMerr\t\tChisq\tC_Fr\tBW\tTel')
print('%.6f\t%.6f\t%.6f\t%.2f\t%.1f\t%.1f\t%s' % (ar_mjd, dmval, dmverr,
fitchisq, ar_centfr, ar_bw, ar_tel))
print(
'-----------------------------------------------------------------------------'
)
print('\nThe program took %.1f seconds to finish' % total)
<mask token>
def DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,
ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch):
if model == None:
sys.exit(1)
init_dm = ar.get_dispersion_measure()
if not quiet:
print('Using the ArrivalTime (pat) with PGS in Tempo2 format')
arrtim = psrchive.ArrivalTime()
arrtim.set_shift_estimator('PGS')
arrtim.set_format('Tempo2')
arrtim.set_format_flags('IPTA')
if not quiet:
print('Loading the template file for processing... '),
std = model.clone()
std.pscrunch()
std.tscrunch()
std_nchan = std.get_nchan()
std.dedisperse()
std.fscrunch(ffrac)
arrtim.set_standard(std)
if not quiet:
print(' done!')
ar.fscrunch(ffrac)
ar.pscrunch()
ar.tscrunch()
arrtim.set_observation(ar)
if not quiet:
print('Finding the ToAs... '),
toas = arrtim.get_toas()
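    # Each Tempo2-format ToA line is "filename freq MJD toaErr site ..."; keep only the first five fields.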
toas_filtered = [x.split()[:5] for x in toas]
str_filename, str_freq, str_mjd, str_toaErr, str_site = zip(*toas_filtered)
freq = np.asarray(str_freq, dtype=np.float64)
amjd = np.asarray(str_mjd, dtype=np.float64)
terr = np.asarray(str_toaErr, dtype=np.float64)
if not quiet:
print(' done!')
print('Removing the bad ToAs using Huber Regression... '),
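    # First cut: drop ToAs whose uncertainties exceed three times the median ToA error.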
condition1 = terr < 3 * np.median(terr)
freqnew = np.extract(condition1, freq)
amjdnew = np.extract(condition1, amjd)
terrnew = np.extract(condition1, terr)
tempfile = ar_psr + '_tmp.txt'
f = open(tempfile, 'w+')
head = 'FORMAT 1\n'
f.write('%s' % head)
for i in range(0, np.size(freqnew)):
f.write('%s %.12f %.20f %.8f %s\n' % (str_filename[0], freqnew[i],
amjdnew[i], terrnew[i], str_site[0]))
f.close()
tmpstr = 'tempo2 -output general2 -f'
tmp = os.popen(tmpstr +
' %s %s -s "1111111 {freq} {pre} {err}\n" | grep \'1111111\'' % (
ephemeris, tempfile)).read()
os.remove(tempfile)
tmp1 = tmp.split('\n')
freqtmp = np.zeros(np.size(amjdnew))
toastmp = np.zeros(np.size(amjdnew))
TErrtmp = np.zeros(np.size(amjdnew))
for i in range(np.size(amjdnew)):
_, freqtmp[i], toastmp[i], TErrtmp[i] = tmp1[i].split()
TErrtmp /= 1000000.0
from sklearn import linear_model
from sklearn.linear_model import HuberRegressor
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
freqarr = freqtmp.reshape(-1, 1)
toastmp *= 1000000.0
toashift = np.min(toastmp) * -1.5
toastmp += toashift
Terrtmp = TErrtmp * 1000000.0
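    # Fit a robust (Huber) quadratic in frequency, weighted by 1/error, and later
    # reject ToAs whose residuals fall more than 3*MAD from the median.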
model = make_pipeline(PolynomialFeatures(2), HuberRegressor())
model.fit(freqarr, toastmp, huberregressor__sample_weight=np.ravel(1.0 /
Terrtmp))
y_pred = model.predict(freqarr)
residuals = toastmp - y_pred
median = np.median(residuals)
MAD = np.median(np.abs(residuals - np.median(residuals))
) / 0.6744897501960817
condition2 = (residuals > median - 3 * MAD) & (residuals < median + 3 * MAD
)
freqf = np.around(np.extract(condition2, freqarr), 3)
amjdf = np.extract(condition2, amjdnew)
toasf = np.extract(condition2, toastmp)
terrf = np.extract(condition2, TErrtmp)
prefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))
terrf *= 1000000.0
if not quiet:
print(' done!')
if ptoa:
if not quiet:
print('Writing out ToAs into a file in tempo2 format'),
dirtoas = os.path.join(pwd, ar_psr + '_' + ar_tel + '_ToAs')
if not os.path.exists(dirtoas):
os.makedirs(dirtoas)
outfile = dirtoas + '/' + ar_psr + '_' + str(ar_mjd
) + '_' + ar_tel + '_ToAs.txt'
f = open(outfile, 'w+')
head = 'FORMAT 1'
f.write('%s\n' % head)
for i in range(0, np.size(freqf)):
f.write('%s %.8f %.18f %.6f %s\n' % (str_filename[0], freqf[i],
amjdf[i], terrf[i], str_site[0]))
f.close()
if not quiet:
print('done!')
if not quiet:
print('\nWriting the ToAs to a temporary file for tempo2 fitting...'),
outfiletmp = ar_psr + 'tmp_ToAs.txt'
f = open(outfiletmp, 'w+')
head = 'FORMAT 1'
f.write('%s\n' % head)
for i in range(0, np.size(freqf)):
f.write('%s %.8f %.18f %.6f %s\n' % (str_filename[0], freqf[i],
amjdf[i], terrf[i], str_site[0]))
f.close()
if not quiet:
print(' done!\n')
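    # Fit only the DM with tempo2 and parse out its value, uncertainty and the fit chi-square.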
dmstr = os.popen(
"tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk '{print $5,$6}'"
% (ephemeris, outfiletmp)).read()
dm, dmerr = dmstr.split()
dmval = float(dm)
dmverr = float(dmerr)
chisqstr = os.popen(
"tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk '{print $9}'" %
(ephemeris, outfiletmp)).read()
fitchisq = float(chisqstr)
os.remove(outfiletmp)
infile = open(ephemeris, 'r')
tmpeph1 = ar_psr + '_tmpeph.eph'
output = open(tmpeph1, 'w+')
for i, line in enumerate(infile):
if not line.lstrip().startswith('DM'):
if not line.lstrip().startswith('DMEPOCH'):
output.write(line)
infile.close()
output.close()
dmline = 'DM ' + str(dmval) + '\t1\t' + str(dmverr)
dmepochline = 'DMEPOCH\t ' + str(round(ar_mjd, 2))
f = open(tmpeph1, 'a')
f.write('%s\n%s\n' % (dmline, dmepochline))
f.close()
newarch = ar.clone()
newarch.tscrunch()
newarch.set_dispersion_measure(dmval)
arrtim.set_observation(newarch)
arrtim.set_standard(std)
toas1 = arrtim.get_toas()
toas1_filtered = [x.split()[:5] for x in toas1]
str_filename1, str_freq1, str_mjd1, str_toaErr1, str_site1 = zip(*
toas1_filtered)
freq1 = np.asarray(str_freq1, dtype=np.float64)
amjd1 = np.asarray(str_mjd1, dtype=np.float64)
terr1 = np.asarray(str_toaErr1, dtype=np.float64)
freqnew1 = np.extract(condition1, freq1)
amjdnew1 = np.extract(condition1, amjd1)
terrnew1 = np.extract(condition1, terr1)
tempfile1 = ar_psr + '_tmp1.txt'
f = open(tempfile1, 'w+')
head = 'FORMAT 1\n'
f.write('%s' % head)
for i in range(0, np.size(freqnew1)):
f.write('%s %.12f %.20f %.8f %s\n' % (str_filename1[0], freqnew1[i],
amjdnew1[i], terrnew1[i], str_site1[0]))
f.close()
    tmp2 = os.popen(
        'tempo2 -output general2 -f %s %s -s "1111111 {freq} {pre} {err}\n" | grep \'1111111\'' % (
        tmpeph1, tempfile1)).read()
os.remove(tempfile1)
os.remove(tmpeph1)
tmp3 = tmp2.split('\n')
freqtmp2 = np.zeros(np.size(amjdnew1))
toastmp2 = np.zeros(np.size(amjdnew1))
TErrtmp2 = np.zeros(np.size(amjdnew1))
for i in range(np.size(amjdnew1)):
_, freqtmp2[i], toastmp2[i], TErrtmp2[i] = tmp3[i].split()
freqf1 = np.around(np.extract(condition2, freqtmp2), 3)
amjdf1 = np.extract(condition2, amjdnew1)
toasf1 = np.extract(condition2, toastmp2)
terrf1 = np.extract(condition2, TErrtmp2)
toasf1 *= 1000000.0
postfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))
ar_nbin = newarch.get_nbin()
ar_nchn = newarch.get_nchan()
if narch == 1:
freq_bot = ar.get_centre_frequency() - ar_bw / 2.0
freq_top = ar.get_centre_frequency() + ar_bw / 2.0
if narch > 1:
if ar_bw == 200.0:
freq_bot = 400.0
freq_top = 1460.0
if ar_bw == 400.0:
freq_bot = 300.0
freq_top = 1460.0
newarch.dedisperse()
newarch.remove_baseline()
profdata2D = newarch.get_data()[:, 0, :, :].flatten().reshape(ar_nchn,
ar_nbin)
prof = newarch.clone()
prof.fscrunch()
profdata1D = prof.get_data().flatten()
profdata1D /= np.max(profdata1D)
residDM = init_dm - dmval
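    # Expected dispersion-delay curve (in microseconds) for the residual DM,
    # using the cold-plasma delay ~4.15 ms * DM * [(f_lo/GHz)^-2 - (f/GHz)^-2];
    # the frequencies here are in MHz, hence the /1000.0 conversions to GHz.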
dmcurve = 4.15 * 1000.0 * residDM * (1.0 / (np.min(freqf) / 1000.0) **
2 - 1.0 / (freqf / 1000.0) ** 2)
dmoff = np.median(toasf) - np.median(dmcurve)
dmcurve += dmoff
fig = plt.figure(3, figsize=(8, 6))
fig.subplots_adjust(hspace=0.05)
ax0 = plt.subplot2grid((3, 8), (0, 0), rowspan=2, colspan=3)
ax1 = plt.subplot2grid((3, 8), (2, 0), rowspan=1, colspan=3)
ax2 = plt.subplot2grid((3, 8), (0, 4), colspan=4)
ax3 = plt.subplot2grid((3, 8), (1, 4), colspan=4)
ax4 = plt.subplot2grid((3, 8), (2, 4), colspan=4)
ax0.imshow(np.sqrt(profdata2D ** 2) ** 0.5, origin='lower', extent=(0,
ar_nbin - 1, freq_bot, freq_top), aspect='auto', cmap='hot')
ax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)
ax0.tick_params(axis='x', which='both', bottom=True, top=True,
labelbottom=False)
ax1.plot(np.arange(ar_nbin, dtype=float), profdata1D, color='black',
linewidth=0.5)
ax1.set_xlim(0, ar_nbin - 1)
ax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)
ax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)
ax2.errorbar(freqtmp, toastmp, yerr=Terrtmp, fmt='.', color='gray',
label='Prefit: Unfiltered', capsize=2)
ax2.plot(freqtmp, y_pred, '--r', label='Polynomial Fit')
ax2.set_xlim(freq_bot, freq_top)
ax2.grid()
ax2.legend(loc='upper right')
ax2.axes.xaxis.set_ticklabels([])
ax3.yaxis.set_label_position('right')
ax3.errorbar(freqf, toasf - np.median(toasf), terrf, fmt='.k', label=
'Prefit: Filtered', capsize=2)
ax3.set_xlim(freq_bot, freq_top)
ax3.grid()
ax3.legend(loc='upper right')
ax3.axes.xaxis.set_ticklabels([])
ax3.set_ylabel('ToA Residuals ($\\mu$s)', fontweight='bold', fontsize=12)
ax4.errorbar(freqf1, toasf1 - np.median(toasf1), terrf1, fmt='.r',
label='Postfit', capsize=2)
ax4.set_xlim(freq_bot, freq_top)
ax4.grid()
ax4.legend(loc='upper right')
ax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)
fig.suptitle(
"""Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\\mu$s; Postfit Wrms: %.2f $\\mu$s
Median ToA Err: %.2f $\\mu$s; DM: %.6f $\\pm$ %.6f pc cm$^{-3}$; Reduced $\\chi^2$: %.2f"""
% (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(
terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')
dirplot = os.path.join(pwd, ar_psr + '_' + ar_tel + '_plots')
if not os.path.exists(dirplot):
os.makedirs(dirplot)
plotfile = dirplot + '/' + ar_psr + '_' + str(ar_mjd) + '_' + str(ar_centfr
) + '_' + ar_tel + '_DMfitResid.pdf'
plt.savefig(plotfile, format='pdf')
plt.close()
if not quiet:
print('done!')
del ar
return dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1)
<mask token>
def freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):
for i in range(narch):
archives[i].tscrunch()
if archives[0].get_telescope() == 'GMRT':
for i in range(narch):
ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()
ar_frq = archives[i].get_centre_frequency()
ar_bw = archives[i].get_bandwidth()
period = archives[i].get_Integration(0).get_folding_period()
offset = 0.670520675
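            # Note: the offset argument is overridden by this hard-coded value; it
            # is converted to a fractional pulse-phase jump and removed from GMRT
            # band-5 (1260-1460 MHz) data taken between MJD 58810 and 58991 by
            # rotating the profile.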
jump = offset / period - int(offset / period)
if ar_frq >= 1260.0 and ar_frq < 1460.0:
if ar_mjd >= 58810.0 and ar_mjd < 58991.0:
archives[i].rotate_phase(-jump)
freq_append = psrchive.FrequencyAppend()
ttfreq = archives[0].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[0].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[0].fscrunch(b5scrunch)
freq_append.init(archives[0])
while len(archives) > 1:
ttfreq = archives[1].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[1].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[1].fscrunch(b5scrunch)
freq_append.append(archives[0], archives[1])
del archives[1]
return archives[0]
<mask token>
def freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):
for i in range(narch):
archives[i].tscrunch()
if archives[0].get_telescope() == 'GMRT':
for i in range(narch):
ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()
ar_frq = archives[i].get_centre_frequency()
ar_bw = archives[i].get_bandwidth()
period = archives[i].get_Integration(0).get_folding_period()
offset = 0.670520675
jump = offset / period - int(offset / period)
if ar_frq >= 1260.0 and ar_frq < 1460.0:
if ar_mjd >= 58810.0 and ar_mjd < 58991.0:
archives[i].rotate_phase(-jump)
freq_append = psrchive.FrequencyAppend()
ttfreq = archives[0].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[0].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[0].fscrunch(b5scrunch)
freq_append.init(archives[0])
while len(archives) > 1:
ttfreq = archives[1].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[1].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[1].fscrunch(b5scrunch)
freq_append.append(archives[0], archives[1])
del archives[1]
return archives[0]
<mask token>
| <mask token>
matplotlib.use('Agg')
<mask token>
parser.add_argument('files', nargs='+', type=str, help=
'The list of fits file(s) for processing')
parser.add_argument('-E', '--ephem', type=str, help=
'Ephemeris file to update the model. Exits if not ' +
'given or is not available in "PWD/ephemerides" ' + 'directory')
parser.add_argument('-M', '--model', nargs='+', type=str, help=
'Model template for ToA generation. Exits if not ' +
'given or is not available in "PWD/templates" ' + 'directory')
parser.add_argument('-f', '--fscrunch', type=int, default=1, help=
'Factor to scrunch the number of channels for ' +
'doing DM estimation (Def: 1)')
parser.add_argument('-b3f', '--b3fscrunch', type=int, default=1, help=
'Factor to scrunch the number of channels for ' +
'band3 GMRT data (Def: 1)')
parser.add_argument('-b5f', '--b5fscrunch', type=int, default=1, help=
'Factor to scrunch the number of channels for ' +
'band5 GMRT data (Def: 1)')
parser.add_argument('-w', '--writeout', action='store_true', help=
'Writes out the DM corrected file. Def: False')
parser.add_argument('-ptoa', '--print_toas', action='store_true', help=
'Print the prefit ToAs to file in tempo2 format. ' + 'Def: False')
parser.add_argument('-F', '--Fscrunch', action='store_true', help=
'Fully scrunch the number of channels for the ' +
'final output archive (Def: False)')
parser.add_argument('-T', '--Tscrunch', action='store_true', help=
'Completely time scrunch all the integrations')
parser.add_argument('-t', '--tscrunch', type=int, default=1, help=
'Factor to scrunch the number of integrations for ' +
'the final output archive (Def: None)')
parser.add_argument('-o', '--offset', type=float, default=0.670520675, help
='Offset to shift band 5 ToAs (in secs)')
parser.add_argument('-q', '--quiet', action='store_true', help=
'Only print warnings')
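# Example invocation (illustrative only; the script and file names below are
# assumed, not taken from this code):
#   python dmcalc.py -E ephemerides/J1909-3744.par -M templates/J1909-3744.sm \
#       -f 4 -w -ptoa observation_band3.fits observation_band5.fits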
def main():
args = parser.parse_args()
quiet = False
if args.quiet:
quiet = True
tempo2 = True
ptoa = False
if args.print_toas:
ptoa = True
if not quiet:
print('Loading the archive files for DM estimation')
archives = []
for filename in args.files:
archives.append(psrchive.Archive_load(filename))
narch = len(archives)
if narch >= 1:
if not quiet:
print('Appending the archives ...'),
ar = freq_appendData(narch, archives, args.offset, args.b3fscrunch,
args.b5fscrunch)
if not quiet:
print(' done!')
elif not quiet:
print('Only one archive was given, so nothing to frequency-append.')
ar = archives[0]
del archives
ar_psr = ar.get_source()
ar_nbins = ar.get_nbin()
ar_tel = ar.get_telescope()
mjd_start = ar.get_Integration(0).get_start_time().in_days()
mjd_end = ar.get_Integration(0).get_end_time().in_days()
ar_mjd = mjd_start + (mjd_end - mjd_start) / 2.0
length = ar.integration_length()
ar.update_centre_frequency()
ar_centfr = ar.get_centre_frequency()
ar_nchan = ar.get_nchan()
ar_bw = ar.get_bandwidth()
ar_chnwdth = ar_bw / ar_nchan
ffrac = args.fscrunch
if not quiet:
print('\nNow preparing for DM estimation\n')
pwd = os.getcwd()
if args.ephem != None:
ephemeris = args.ephem
else:
ephemeris = 'ephemerides/' + ar_psr + '.par'
if not os.path.exists(ephemeris):
sys.exit(1)
if not quiet:
print('\nEphemeris file is:' + ephemeris + '\n')
    model = []
    if args.model != None:
        for filename in args.model:
            model.append(psrchive.Archive_load(filename))
        model = freq_appendModel(1, model, args.offset, args.b3fscrunch,
            args.b5fscrunch)
if args.model == None:
if not quiet:
print('Looking for matching template in templates directory...'),
import subprocess
tempdir = 'templates/*.sm'
tempfile = ar_psr + '_tmp.txt'
a = subprocess.call(
"psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'" % (tempdir,
tempfile), shell=True)
tempnchan = ''
t1 = str(ar_nbins)
if ar_tel == 'gmrt':
t2 = str(int(ar_bw))
else:
t2 = str(ar_bw)
t3 = '%.2f' % ar_centfr
f = open(tempfile, 'r')
for line in f:
line = line.strip()
columns = line.split()
t4 = float(columns[5])
t4 = '%.2f' % t4
if ar_tel == 'gmrt':
if columns[1] == ar_psr and columns[2] == t1 and str(int(
columns[3])) == t2 and t4 == t3:
modeltempl = columns[0]
tempnchan = columns[4]
if not quiet:
print(' done\n')
elif columns[1] == ar_psr and columns[2] == t1 and str(columns[3]
) == t2 and t4 == t3:
modeltempl = columns[0]
tempnchan = columns[4]
if not quiet:
print(' done\n')
if modeltempl == '' and tempnchan == '':
print(
'\n** No matching template found for DM fitting. Exiting. **\n'
)
sys.exit(1)
f.close()
os.remove(tempfile)
if not quiet:
print('Found matching template: ' + modeltempl)
model.append(psrchive.Archive_load(modeltempl))
if not quiet:
print('\nEstimating the DM from the observation')
model.update_centre_frequency()
arch = ar.clone()
dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch,
ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,
ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch)
if args.writeout:
infile = open(ephemeris, 'r')
tmpeph = ar_psr + '.eph'
output = open(tmpeph, 'w+')
for i, line in enumerate(infile):
if not line.lstrip().startswith('DM'):
if not line.lstrip().startswith('DMEPOCH'):
output.write(line)
infile.close()
output.close()
dmline = 'DM\t\t\t ' + str(dmval) + '\t\t' + str(dmverr)
dmepochline = 'DMEPOCH\t\t ' + str(round(ar_mjd, 2))
if not args.quiet:
print('Updating the ephemeris with new DM... '),
f = open(tmpeph, 'a')
f.write('%s\n %s\n' % (dmline, dmepochline))
if not args.quiet:
print(' done!')
f.close()
if not quiet:
print(
'Correcting the DM of the observed file and writing it out... '
),
os.remove(tmpeph)
dirfinal = os.path.join(pwd, ar_psr + '_' + ar_tel + '_final')
if not os.path.exists(dirfinal):
os.makedirs(dirfinal)
outfile = dirfinal + '/' + ar_psr + '_' + str(ar_mjd
) + '_' + ar_tel + '.ar'
ar.set_dispersion_measure(dmval)
ar.dedisperse()
if not args.Tscrunch:
ar.tscrunch(args.tscrunch)
else:
ar.tscrunch()
if not args.Fscrunch:
ar.fscrunch(ffrac)
else:
ar.fscrunch()
ar.unload(outfile)
if not args.quiet:
print(' done!')
del ar
if not quiet:
print('The file is corrected for DM and is written out to\n' +
outfile)
f = open(ar_psr + '_DM_timeseries.txt', 'a')
f.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\n' % (
filename, ar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms,
ToA_Err, ar_centfr, ar_bw, ar_tel))
f.close()
import time
end = time.time()
total = end - start
print(
'-----------------------------------------------------------------------------'
)
print('MJD\t\tDM\t\tDMerr\t\tChisq\tC_Fr\tBW\tTel')
print('%.6f\t%.6f\t%.6f\t%.2f\t%.1f\t%.1f\t%s' % (ar_mjd, dmval, dmverr,
fitchisq, ar_centfr, ar_bw, ar_tel))
print(
'-----------------------------------------------------------------------------'
)
print('\nThe program took %.1f seconds to finish' % total)
<mask token>
def DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,
ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch):
if model == None:
sys.exit(1)
init_dm = ar.get_dispersion_measure()
if not quiet:
print('Using the ArrivalTime (pat) with PGS in Tempo2 format')
arrtim = psrchive.ArrivalTime()
arrtim.set_shift_estimator('PGS')
arrtim.set_format('Tempo2')
arrtim.set_format_flags('IPTA')
if not quiet:
print('Loading the template file for processing... '),
std = model.clone()
std.pscrunch()
std.tscrunch()
std_nchan = std.get_nchan()
std.dedisperse()
std.fscrunch(ffrac)
arrtim.set_standard(std)
if not quiet:
print(' done!')
ar.fscrunch(ffrac)
ar.pscrunch()
ar.tscrunch()
arrtim.set_observation(ar)
if not quiet:
print('Finding the ToAs... '),
toas = arrtim.get_toas()
toas_filtered = [x.split()[:5] for x in toas]
str_filename, str_freq, str_mjd, str_toaErr, str_site = zip(*toas_filtered)
freq = np.asarray(str_freq, dtype=np.float64)
amjd = np.asarray(str_mjd, dtype=np.float64)
terr = np.asarray(str_toaErr, dtype=np.float64)
if not quiet:
print(' done!')
print('Removing the bad ToAs using Huber Regression... '),
condition1 = terr < 3 * np.median(terr)
freqnew = np.extract(condition1, freq)
amjdnew = np.extract(condition1, amjd)
terrnew = np.extract(condition1, terr)
tempfile = ar_psr + '_tmp.txt'
f = open(tempfile, 'w+')
head = 'FORMAT 1\n'
f.write('%s' % head)
for i in range(0, np.size(freqnew)):
f.write('%s %.12f %.20f %.8f %s\n' % (str_filename[0], freqnew[i],
amjdnew[i], terrnew[i], str_site[0]))
f.close()
tmpstr = 'tempo2 -output general2 -f'
tmp = os.popen(tmpstr +
' %s %s -s "1111111 {freq} {pre} {err}\n" | grep \'1111111\'' % (
ephemeris, tempfile)).read()
os.remove(tempfile)
tmp1 = tmp.split('\n')
freqtmp = np.zeros(np.size(amjdnew))
toastmp = np.zeros(np.size(amjdnew))
TErrtmp = np.zeros(np.size(amjdnew))
for i in range(np.size(amjdnew)):
_, freqtmp[i], toastmp[i], TErrtmp[i] = tmp1[i].split()
TErrtmp /= 1000000.0
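    # Robust trend removal: a 2nd-order polynomial in frequency is fitted to the
    # (shifted) prefit residuals with a Huber regressor, weighting each ToA by
    # 1/error so that outliers have limited influence on the fitted trend.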
from sklearn import linear_model
from sklearn.linear_model import HuberRegressor
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
freqarr = freqtmp.reshape(-1, 1)
toastmp *= 1000000.0
toashift = np.min(toastmp) * -1.5
toastmp += toashift
Terrtmp = TErrtmp * 1000000.0
model = make_pipeline(PolynomialFeatures(2), HuberRegressor())
model.fit(freqarr, toastmp, huberregressor__sample_weight=np.ravel(1.0 /
Terrtmp))
y_pred = model.predict(freqarr)
residuals = toastmp - y_pred
median = np.median(residuals)
MAD = np.median(np.abs(residuals - np.median(residuals))
) / 0.6744897501960817
condition2 = (residuals > median - 3 * MAD) & (residuals < median + 3 * MAD
)
freqf = np.around(np.extract(condition2, freqarr), 3)
amjdf = np.extract(condition2, amjdnew)
toasf = np.extract(condition2, toastmp)
terrf = np.extract(condition2, TErrtmp)
prefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))
terrf *= 1000000.0
if not quiet:
print(' done!')
if ptoa:
if not quiet:
print('Writing out ToAs into a file in tempo2 format'),
dirtoas = os.path.join(pwd, ar_psr + '_' + ar_tel + '_ToAs')
if not os.path.exists(dirtoas):
os.makedirs(dirtoas)
outfile = dirtoas + '/' + ar_psr + '_' + str(ar_mjd
) + '_' + ar_tel + '_ToAs.txt'
f = open(outfile, 'w+')
head = 'FORMAT 1'
f.write('%s\n' % head)
for i in range(0, np.size(freqf)):
f.write('%s %.8f %.18f %.6f %s\n' % (str_filename[0], freqf[i],
amjdf[i], terrf[i], str_site[0]))
f.close()
if not quiet:
print('done!')
if not quiet:
print('\nWriting the ToAs to a temporary file for tempo2 fitting...'),
outfiletmp = ar_psr + 'tmp_ToAs.txt'
f = open(outfiletmp, 'w+')
head = 'FORMAT 1'
f.write('%s\n' % head)
for i in range(0, np.size(freqf)):
f.write('%s %.8f %.18f %.6f %s\n' % (str_filename[0], freqf[i],
amjdf[i], terrf[i], str_site[0]))
f.close()
if not quiet:
print(' done!\n')
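    # DM fit with tempo2: only DM is fitted to the filtered ToAs, and the value,
    # its uncertainty and the fit chi-square are scraped from the
    # 'DM (cm^-3 pc)' and 'Fit Chisq' lines of the tempo2 output.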
dmstr = os.popen(
"tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk '{print $5,$6}'"
% (ephemeris, outfiletmp)).read()
dm, dmerr = dmstr.split()
dmval = float(dm)
dmverr = float(dmerr)
chisqstr = os.popen(
"tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk '{print $9}'" %
(ephemeris, outfiletmp)).read()
fitchisq = float(chisqstr)
os.remove(outfiletmp)
infile = open(ephemeris, 'r')
tmpeph1 = ar_psr + '_tmpeph.eph'
output = open(tmpeph1, 'w+')
for i, line in enumerate(infile):
if not line.lstrip().startswith('DM'):
if not line.lstrip().startswith('DMEPOCH'):
output.write(line)
infile.close()
output.close()
dmline = 'DM ' + str(dmval) + '\t1\t' + str(dmverr)
dmepochline = 'DMEPOCH\t ' + str(round(ar_mjd, 2))
f = open(tmpeph1, 'a')
f.write('%s\n%s\n' % (dmline, dmepochline))
f.close()
newarch = ar.clone()
newarch.tscrunch()
newarch.set_dispersion_measure(dmval)
arrtim.set_observation(newarch)
arrtim.set_standard(std)
toas1 = arrtim.get_toas()
toas1_filtered = [x.split()[:5] for x in toas1]
str_filename1, str_freq1, str_mjd1, str_toaErr1, str_site1 = zip(*
toas1_filtered)
freq1 = np.asarray(str_freq1, dtype=np.float64)
amjd1 = np.asarray(str_mjd1, dtype=np.float64)
terr1 = np.asarray(str_toaErr1, dtype=np.float64)
freqnew1 = np.extract(condition1, freq1)
amjdnew1 = np.extract(condition1, amjd1)
terrnew1 = np.extract(condition1, terr1)
tempfile1 = ar_psr + '_tmp1.txt'
f = open(tempfile1, 'w+')
head = 'FORMAT 1\n'
f.write('%s' % head)
for i in range(0, np.size(freqnew1)):
f.write('%s %.12f %.20f %.8f %s\n' % (str_filename1[0], freqnew1[i],
amjdnew1[i], terrnew1[i], str_site1[0]))
f.close()
tmp2 = os.popen(
"""tempo2 -output general2 -f %s %s -s "1111111 {freq} {pre} {err}
" | grep '1111111'"""
% (tmpeph1, tempfile1)).read()
os.remove(tempfile1)
os.remove(tmpeph1)
tmp3 = tmp2.split('\n')
freqtmp2 = np.zeros(np.size(amjdnew1))
toastmp2 = np.zeros(np.size(amjdnew1))
TErrtmp2 = np.zeros(np.size(amjdnew1))
for i in range(np.size(amjdnew1)):
_, freqtmp2[i], toastmp2[i], TErrtmp2[i] = tmp3[i].split()
freqf1 = np.around(np.extract(condition2, freqtmp2), 3)
amjdf1 = np.extract(condition2, amjdnew1)
toasf1 = np.extract(condition2, toastmp2)
terrf1 = np.extract(condition2, TErrtmp2)
toasf1 *= 1000000.0
postfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))
ar_nbin = newarch.get_nbin()
ar_nchn = newarch.get_nchan()
if narch == 1:
freq_bot = ar.get_centre_frequency() - ar_bw / 2.0
freq_top = ar.get_centre_frequency() + ar_bw / 2.0
if narch > 1:
if ar_bw == 200.0:
freq_bot = 400.0
freq_top = 1460.0
if ar_bw == 400.0:
freq_bot = 300.0
freq_top = 1460.0
newarch.dedisperse()
newarch.remove_baseline()
profdata2D = newarch.get_data()[:, 0, :, :].flatten().reshape(ar_nchn,
ar_nbin)
prof = newarch.clone()
prof.fscrunch()
profdata1D = prof.get_data().flatten()
profdata1D /= np.max(profdata1D)
residDM = init_dm - dmval
dmcurve = 4.15 * 1000.0 * residDM * (1.0 / (np.min(freqf) / 1000.0) **
2 - 1.0 / (freqf / 1000.0) ** 2)
dmoff = np.median(toasf) - np.median(dmcurve)
dmcurve += dmoff
fig = plt.figure(3, figsize=(8, 6))
fig.subplots_adjust(hspace=0.05)
ax0 = plt.subplot2grid((3, 8), (0, 0), rowspan=2, colspan=3)
ax1 = plt.subplot2grid((3, 8), (2, 0), rowspan=1, colspan=3)
ax2 = plt.subplot2grid((3, 8), (0, 4), colspan=4)
ax3 = plt.subplot2grid((3, 8), (1, 4), colspan=4)
ax4 = plt.subplot2grid((3, 8), (2, 4), colspan=4)
ax0.imshow(np.sqrt(profdata2D ** 2) ** 0.5, origin='lower', extent=(0,
ar_nbin - 1, freq_bot, freq_top), aspect='auto', cmap='hot')
ax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)
ax0.tick_params(axis='x', which='both', bottom=True, top=True,
labelbottom=False)
ax1.plot(np.arange(ar_nbin, dtype=float), profdata1D, color='black',
linewidth=0.5)
ax1.set_xlim(0, ar_nbin - 1)
ax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)
ax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)
ax2.errorbar(freqtmp, toastmp, yerr=Terrtmp, fmt='.', color='gray',
label='Prefit: Unfiltered', capsize=2)
ax2.plot(freqtmp, y_pred, '--r', label='Polynomial Fit')
ax2.set_xlim(freq_bot, freq_top)
ax2.grid()
ax2.legend(loc='upper right')
ax2.axes.xaxis.set_ticklabels([])
ax3.yaxis.set_label_position('right')
ax3.errorbar(freqf, toasf - np.median(toasf), terrf, fmt='.k', label=
'Prefit: Filtered', capsize=2)
ax3.set_xlim(freq_bot, freq_top)
ax3.grid()
ax3.legend(loc='upper right')
ax3.axes.xaxis.set_ticklabels([])
ax3.set_ylabel('ToA Residuals ($\\mu$s)', fontweight='bold', fontsize=12)
ax4.errorbar(freqf1, toasf1 - np.median(toasf1), terrf1, fmt='.r',
label='Postfit', capsize=2)
ax4.set_xlim(freq_bot, freq_top)
ax4.grid()
ax4.legend(loc='upper right')
ax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)
fig.suptitle(
"""Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\\mu$s; Postfit Wrms: %.2f $\\mu$s
Median ToA Err: %.2f $\\mu$s; DM: %.6f $\\pm$ %.6f pc cm$^{-3}$; Reduced $\\chi^2$: %.2f"""
% (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(
terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')
dirplot = os.path.join(pwd, ar_psr + '_' + ar_tel + '_plots')
if not os.path.exists(dirplot):
os.makedirs(dirplot)
plotfile = dirplot + '/' + ar_psr + '_' + str(ar_mjd) + '_' + str(ar_centfr
) + '_' + ar_tel + '_DMfitResid.pdf'
plt.savefig(plotfile, format='pdf')
plt.close()
if not quiet:
print('done!')
del ar
return dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1)
<mask token>
def freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):
for i in range(narch):
archives[i].tscrunch()
if archives[0].get_telescope() == 'GMRT':
for i in range(narch):
ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()
ar_frq = archives[i].get_centre_frequency()
ar_bw = archives[i].get_bandwidth()
period = archives[i].get_Integration(0).get_folding_period()
offset = 0.670520675
jump = offset / period - int(offset / period)
if ar_frq >= 1260.0 and ar_frq < 1460.0:
if ar_mjd >= 58810.0 and ar_mjd < 58991.0:
archives[i].rotate_phase(-jump)
freq_append = psrchive.FrequencyAppend()
ttfreq = archives[0].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[0].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[0].fscrunch(b5scrunch)
freq_append.init(archives[0])
while len(archives) > 1:
ttfreq = archives[1].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[1].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[1].fscrunch(b5scrunch)
freq_append.append(archives[0], archives[1])
del archives[1]
return archives[0]
<mask token>
def freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):
for i in range(narch):
archives[i].tscrunch()
if archives[0].get_telescope() == 'GMRT':
for i in range(narch):
ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()
ar_frq = archives[i].get_centre_frequency()
ar_bw = archives[i].get_bandwidth()
period = archives[i].get_Integration(0).get_folding_period()
offset = 0.670520675
jump = offset / period - int(offset / period)
if ar_frq >= 1260.0 and ar_frq < 1460.0:
if ar_mjd >= 58810.0 and ar_mjd < 58991.0:
archives[i].rotate_phase(-jump)
freq_append = psrchive.FrequencyAppend()
ttfreq = archives[0].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[0].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[0].fscrunch(b5scrunch)
freq_append.init(archives[0])
while len(archives) > 1:
ttfreq = archives[1].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[1].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[1].fscrunch(b5scrunch)
freq_append.append(archives[0], archives[1])
del archives[1]
return archives[0]
main()
| <mask token>
matplotlib.use('Agg')
<mask token>
start = time.time()
parser = argparse.ArgumentParser(description='Code for measuring in-band ' +
'DM for pulsar data in psrfits format.')
parser.add_argument('files', nargs='+', type=str, help=
'The list of fits file(s) for processing')
parser.add_argument('-E', '--ephem', type=str, help=
'Ephemeris file to update the model. Exits if not ' +
'given or is not available in "PWD/ephemerides" ' + 'directory')
parser.add_argument('-M', '--model', nargs='+', type=str, help=
'Model template for ToA generation. Exits if not ' +
'given or is not available in "PWD/templates" ' + 'directory')
parser.add_argument('-f', '--fscrunch', type=int, default=1, help=
'Factor to scrunch the number of channels for ' +
'doing DM estimation (Def: 1)')
parser.add_argument('-b3f', '--b3fscrunch', type=int, default=1, help=
'Factor to scrunch the number of channels for ' +
'band3 GMRT data (Def: 1)')
parser.add_argument('-b5f', '--b5fscrunch', type=int, default=1, help=
'Factor to scrunch the number of channels for ' +
'band5 GMRT data (Def: 1)')
parser.add_argument('-w', '--writeout', action='store_true', help=
'Writes out the DM corrected file. Def: False')
parser.add_argument('-ptoa', '--print_toas', action='store_true', help=
'Print the prefit ToAs to file in tempo2 format. ' + 'Def: False')
parser.add_argument('-F', '--Fscrunch', action='store_true', help=
'Fully scrunch the number of channels for the ' +
'final output archive (Def: False)')
parser.add_argument('-T', '--Tscrunch', action='store_true', help=
'Completely time scrunch all the integrations')
parser.add_argument('-t', '--tscrunch', type=int, default=1, help=
'Factor to scrunch the number of integrations for ' +
'the final output archive (Def: None)')
parser.add_argument('-o', '--offset', type=float, default=0.670520675, help
='Offset to shift band 5 ToAs (in secs)')
parser.add_argument('-q', '--quiet', action='store_true', help=
'Only print warnings')
def main():
args = parser.parse_args()
quiet = False
if args.quiet:
quiet = True
tempo2 = True
ptoa = False
if args.print_toas:
ptoa = True
if not quiet:
print('Loading the archive files for DM estimation')
archives = []
for filename in args.files:
archives.append(psrchive.Archive_load(filename))
narch = len(archives)
if narch >= 1:
if not quiet:
print('Appending the archives ...'),
ar = freq_appendData(narch, archives, args.offset, args.b3fscrunch,
args.b5fscrunch)
if not quiet:
print(' done!')
elif not quiet:
print('Only one archive was given, so nothing to frequency-append.')
ar = archives[0]
del archives
ar_psr = ar.get_source()
ar_nbins = ar.get_nbin()
ar_tel = ar.get_telescope()
mjd_start = ar.get_Integration(0).get_start_time().in_days()
mjd_end = ar.get_Integration(0).get_end_time().in_days()
ar_mjd = mjd_start + (mjd_end - mjd_start) / 2.0
length = ar.integration_length()
ar.update_centre_frequency()
ar_centfr = ar.get_centre_frequency()
ar_nchan = ar.get_nchan()
ar_bw = ar.get_bandwidth()
ar_chnwdth = ar_bw / ar_nchan
ffrac = args.fscrunch
if not quiet:
print('\nNow preparing for DM estimation\n')
pwd = os.getcwd()
if args.ephem != None:
ephemeris = args.ephem
else:
ephemeris = 'ephemerides/' + ar_psr + '.par'
if not os.path.exists(ephemeris):
sys.exit(1)
if not quiet:
print('\nEphemeris file is:' + ephemeris + '\n')
    model = []
    if args.model != None:
        for filename in args.model:
            model.append(psrchive.Archive_load(filename))
        model = freq_appendModel(1, model, args.offset, args.b3fscrunch,
            args.b5fscrunch)
if args.model == None:
if not quiet:
print('Looking for matching template in templates directory...'),
import subprocess
tempdir = 'templates/*.sm'
tempfile = ar_psr + '_tmp.txt'
a = subprocess.call(
"psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'" % (tempdir,
tempfile), shell=True)
tempnchan = ''
t1 = str(ar_nbins)
if ar_tel == 'gmrt':
t2 = str(int(ar_bw))
else:
t2 = str(ar_bw)
t3 = '%.2f' % ar_centfr
f = open(tempfile, 'r')
for line in f:
line = line.strip()
columns = line.split()
t4 = float(columns[5])
t4 = '%.2f' % t4
if ar_tel == 'gmrt':
if columns[1] == ar_psr and columns[2] == t1 and str(int(
columns[3])) == t2 and t4 == t3:
modeltempl = columns[0]
tempnchan = columns[4]
if not quiet:
print(' done\n')
elif columns[1] == ar_psr and columns[2] == t1 and str(columns[3]
) == t2 and t4 == t3:
modeltempl = columns[0]
tempnchan = columns[4]
if not quiet:
print(' done\n')
if modeltempl == '' and tempnchan == '':
print(
'\n** No matching template found for DM fitting. Exiting. **\n'
)
sys.exit(1)
f.close()
os.remove(tempfile)
if not quiet:
print('Found matching template: ' + modeltempl)
model.append(psrchive.Archive_load(modeltempl))
if not quiet:
print('\nEstimating the DM from the observation')
model.update_centre_frequency()
arch = ar.clone()
dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch,
ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,
ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch)
if args.writeout:
infile = open(ephemeris, 'r')
tmpeph = ar_psr + '.eph'
output = open(tmpeph, 'w+')
for i, line in enumerate(infile):
if not line.lstrip().startswith('DM'):
if not line.lstrip().startswith('DMEPOCH'):
output.write(line)
infile.close()
output.close()
dmline = 'DM\t\t\t ' + str(dmval) + '\t\t' + str(dmverr)
dmepochline = 'DMEPOCH\t\t ' + str(round(ar_mjd, 2))
if not args.quiet:
print('Updating the ephemeris with new DM... '),
f = open(tmpeph, 'a')
f.write('%s\n %s\n' % (dmline, dmepochline))
if not args.quiet:
print(' done!')
f.close()
if not quiet:
print(
'Correcting the DM of the observed file and writing it out... '
),
os.remove(tmpeph)
dirfinal = os.path.join(pwd, ar_psr + '_' + ar_tel + '_final')
if not os.path.exists(dirfinal):
os.makedirs(dirfinal)
outfile = dirfinal + '/' + ar_psr + '_' + str(ar_mjd
) + '_' + ar_tel + '.ar'
ar.set_dispersion_measure(dmval)
ar.dedisperse()
if not args.Tscrunch:
ar.tscrunch(args.tscrunch)
else:
ar.tscrunch()
if not args.Fscrunch:
ar.fscrunch(ffrac)
else:
ar.fscrunch()
ar.unload(outfile)
if not args.quiet:
print(' done!')
del ar
if not quiet:
print('The file is corrected for DM and is written out to\n' +
outfile)
f = open(ar_psr + '_DM_timeseries.txt', 'a')
f.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\n' % (
filename, ar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms,
ToA_Err, ar_centfr, ar_bw, ar_tel))
f.close()
import time
end = time.time()
total = end - start
print(
'-----------------------------------------------------------------------------'
)
print('MJD\t\tDM\t\tDMerr\t\tChisq\tC_Fr\tBW\tTel')
print('%.6f\t%.6f\t%.6f\t%.2f\t%.1f\t%.1f\t%s' % (ar_mjd, dmval, dmverr,
fitchisq, ar_centfr, ar_bw, ar_tel))
print(
'-----------------------------------------------------------------------------'
)
print('\nThe program took %.1f seconds to finish' % total)
<mask token>
def DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,
ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch):
if model == None:
sys.exit(1)
init_dm = ar.get_dispersion_measure()
if not quiet:
print('Using the ArrivalTime (pat) with PGS in Tempo2 format')
arrtim = psrchive.ArrivalTime()
arrtim.set_shift_estimator('PGS')
arrtim.set_format('Tempo2')
arrtim.set_format_flags('IPTA')
if not quiet:
print('Loading the template file for processing... '),
std = model.clone()
std.pscrunch()
std.tscrunch()
std_nchan = std.get_nchan()
std.dedisperse()
std.fscrunch(ffrac)
arrtim.set_standard(std)
if not quiet:
print(' done!')
ar.fscrunch(ffrac)
ar.pscrunch()
ar.tscrunch()
arrtim.set_observation(ar)
if not quiet:
print('Finding the ToAs... '),
toas = arrtim.get_toas()
toas_filtered = [x.split()[:5] for x in toas]
str_filename, str_freq, str_mjd, str_toaErr, str_site = zip(*toas_filtered)
freq = np.asarray(str_freq, dtype=np.float64)
amjd = np.asarray(str_mjd, dtype=np.float64)
terr = np.asarray(str_toaErr, dtype=np.float64)
if not quiet:
print(' done!')
print('Removing the bad ToAs using Huber Regression... '),
condition1 = terr < 3 * np.median(terr)
freqnew = np.extract(condition1, freq)
amjdnew = np.extract(condition1, amjd)
terrnew = np.extract(condition1, terr)
tempfile = ar_psr + '_tmp.txt'
f = open(tempfile, 'w+')
head = 'FORMAT 1\n'
f.write('%s' % head)
for i in range(0, np.size(freqnew)):
f.write('%s %.12f %.20f %.8f %s\n' % (str_filename[0], freqnew[i],
amjdnew[i], terrnew[i], str_site[0]))
f.close()
tmpstr = 'tempo2 -output general2 -f'
tmp = os.popen(tmpstr +
' %s %s -s "1111111 {freq} {pre} {err}\n" | grep \'1111111\'' % (
ephemeris, tempfile)).read()
os.remove(tempfile)
tmp1 = tmp.split('\n')
freqtmp = np.zeros(np.size(amjdnew))
toastmp = np.zeros(np.size(amjdnew))
TErrtmp = np.zeros(np.size(amjdnew))
for i in range(np.size(amjdnew)):
_, freqtmp[i], toastmp[i], TErrtmp[i] = tmp1[i].split()
TErrtmp /= 1000000.0
from sklearn import linear_model
from sklearn.linear_model import HuberRegressor
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
freqarr = freqtmp.reshape(-1, 1)
toastmp *= 1000000.0
toashift = np.min(toastmp) * -1.5
toastmp += toashift
Terrtmp = TErrtmp * 1000000.0
model = make_pipeline(PolynomialFeatures(2), HuberRegressor())
model.fit(freqarr, toastmp, huberregressor__sample_weight=np.ravel(1.0 /
Terrtmp))
y_pred = model.predict(freqarr)
residuals = toastmp - y_pred
median = np.median(residuals)
MAD = np.median(np.abs(residuals - np.median(residuals))
) / 0.6744897501960817
condition2 = (residuals > median - 3 * MAD) & (residuals < median + 3 * MAD
)
freqf = np.around(np.extract(condition2, freqarr), 3)
amjdf = np.extract(condition2, amjdnew)
toasf = np.extract(condition2, toastmp)
terrf = np.extract(condition2, TErrtmp)
prefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))
terrf *= 1000000.0
if not quiet:
print(' done!')
if ptoa:
if not quiet:
print('Writing out ToAs into a file in tempo2 format'),
dirtoas = os.path.join(pwd, ar_psr + '_' + ar_tel + '_ToAs')
if not os.path.exists(dirtoas):
os.makedirs(dirtoas)
outfile = dirtoas + '/' + ar_psr + '_' + str(ar_mjd
) + '_' + ar_tel + '_ToAs.txt'
f = open(outfile, 'w+')
head = 'FORMAT 1'
f.write('%s\n' % head)
for i in range(0, np.size(freqf)):
f.write('%s %.8f %.18f %.6f %s\n' % (str_filename[0], freqf[i],
amjdf[i], terrf[i], str_site[0]))
f.close()
if not quiet:
print('done!')
if not quiet:
print('\nWriting the ToAs to a temporary file for tempo2 fitting...'),
outfiletmp = ar_psr + 'tmp_ToAs.txt'
f = open(outfiletmp, 'w+')
head = 'FORMAT 1'
f.write('%s\n' % head)
for i in range(0, np.size(freqf)):
f.write('%s %.8f %.18f %.6f %s\n' % (str_filename[0], freqf[i],
amjdf[i], terrf[i], str_site[0]))
f.close()
if not quiet:
print(' done!\n')
dmstr = os.popen(
"tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk '{print $5,$6}'"
% (ephemeris, outfiletmp)).read()
dm, dmerr = dmstr.split()
dmval = float(dm)
dmverr = float(dmerr)
chisqstr = os.popen(
"tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk '{print $9}'" %
(ephemeris, outfiletmp)).read()
fitchisq = float(chisqstr)
os.remove(outfiletmp)
infile = open(ephemeris, 'r')
tmpeph1 = ar_psr + '_tmpeph.eph'
output = open(tmpeph1, 'w+')
for i, line in enumerate(infile):
if not line.lstrip().startswith('DM'):
if not line.lstrip().startswith('DMEPOCH'):
output.write(line)
infile.close()
output.close()
dmline = 'DM ' + str(dmval) + '\t1\t' + str(dmverr)
dmepochline = 'DMEPOCH\t ' + str(round(ar_mjd, 2))
f = open(tmpeph1, 'a')
f.write('%s\n%s\n' % (dmline, dmepochline))
f.close()
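    # Post-fit pass: the archive is cloned, its DM is set to the fitted value,
    # and ToAs are regenerated against the same standard so that post-fit
    # residuals can be formed with the temporary ephemeris written above.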
newarch = ar.clone()
newarch.tscrunch()
newarch.set_dispersion_measure(dmval)
arrtim.set_observation(newarch)
arrtim.set_standard(std)
toas1 = arrtim.get_toas()
toas1_filtered = [x.split()[:5] for x in toas1]
str_filename1, str_freq1, str_mjd1, str_toaErr1, str_site1 = zip(*
toas1_filtered)
freq1 = np.asarray(str_freq1, dtype=np.float64)
amjd1 = np.asarray(str_mjd1, dtype=np.float64)
terr1 = np.asarray(str_toaErr1, dtype=np.float64)
freqnew1 = np.extract(condition1, freq1)
amjdnew1 = np.extract(condition1, amjd1)
terrnew1 = np.extract(condition1, terr1)
tempfile1 = ar_psr + '_tmp1.txt'
f = open(tempfile1, 'w+')
head = 'FORMAT 1\n'
f.write('%s' % head)
for i in range(0, np.size(freqnew1)):
f.write('%s %.12f %.20f %.8f %s\n' % (str_filename1[0], freqnew1[i],
amjdnew1[i], terrnew1[i], str_site1[0]))
f.close()
tmp2 = os.popen(
"""tempo2 -output general2 -f %s %s -s "1111111 {freq} {pre} {err}
" | grep '1111111'"""
% (tmpeph1, tempfile1)).read()
os.remove(tempfile1)
os.remove(tmpeph1)
tmp3 = tmp2.split('\n')
freqtmp2 = np.zeros(np.size(amjdnew1))
toastmp2 = np.zeros(np.size(amjdnew1))
TErrtmp2 = np.zeros(np.size(amjdnew1))
for i in range(np.size(amjdnew1)):
_, freqtmp2[i], toastmp2[i], TErrtmp2[i] = tmp3[i].split()
freqf1 = np.around(np.extract(condition2, freqtmp2), 3)
amjdf1 = np.extract(condition2, amjdnew1)
toasf1 = np.extract(condition2, toastmp2)
terrf1 = np.extract(condition2, TErrtmp2)
toasf1 *= 1000000.0
postfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))
ar_nbin = newarch.get_nbin()
ar_nchn = newarch.get_nchan()
if narch == 1:
freq_bot = ar.get_centre_frequency() - ar_bw / 2.0
freq_top = ar.get_centre_frequency() + ar_bw / 2.0
if narch > 1:
if ar_bw == 200.0:
freq_bot = 400.0
freq_top = 1460.0
if ar_bw == 400.0:
freq_bot = 300.0
freq_top = 1460.0
newarch.dedisperse()
newarch.remove_baseline()
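    # The dedispersed, baseline-subtracted data cube is collapsed to a
    # (nchan x nbin) array for the frequency-vs-phase waterfall plot, and a
    # fully frequency-scrunched copy gives the peak-normalised integrated
    # profile.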
profdata2D = newarch.get_data()[:, 0, :, :].flatten().reshape(ar_nchn,
ar_nbin)
prof = newarch.clone()
prof.fscrunch()
profdata1D = prof.get_data().flatten()
profdata1D /= np.max(profdata1D)
residDM = init_dm - dmval
dmcurve = 4.15 * 1000.0 * residDM * (1.0 / (np.min(freqf) / 1000.0) **
2 - 1.0 / (freqf / 1000.0) ** 2)
dmoff = np.median(toasf) - np.median(dmcurve)
dmcurve += dmoff
fig = plt.figure(3, figsize=(8, 6))
fig.subplots_adjust(hspace=0.05)
ax0 = plt.subplot2grid((3, 8), (0, 0), rowspan=2, colspan=3)
ax1 = plt.subplot2grid((3, 8), (2, 0), rowspan=1, colspan=3)
ax2 = plt.subplot2grid((3, 8), (0, 4), colspan=4)
ax3 = plt.subplot2grid((3, 8), (1, 4), colspan=4)
ax4 = plt.subplot2grid((3, 8), (2, 4), colspan=4)
ax0.imshow(np.sqrt(profdata2D ** 2) ** 0.5, origin='lower', extent=(0,
ar_nbin - 1, freq_bot, freq_top), aspect='auto', cmap='hot')
ax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)
ax0.tick_params(axis='x', which='both', bottom=True, top=True,
labelbottom=False)
ax1.plot(np.arange(ar_nbin, dtype=float), profdata1D, color='black',
linewidth=0.5)
ax1.set_xlim(0, ar_nbin - 1)
ax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)
ax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)
ax2.errorbar(freqtmp, toastmp, yerr=Terrtmp, fmt='.', color='gray',
label='Prefit: Unfiltered', capsize=2)
ax2.plot(freqtmp, y_pred, '--r', label='Polynomial Fit')
ax2.set_xlim(freq_bot, freq_top)
ax2.grid()
ax2.legend(loc='upper right')
ax2.axes.xaxis.set_ticklabels([])
ax3.yaxis.set_label_position('right')
ax3.errorbar(freqf, toasf - np.median(toasf), terrf, fmt='.k', label=
'Prefit: Filtered', capsize=2)
ax3.set_xlim(freq_bot, freq_top)
ax3.grid()
ax3.legend(loc='upper right')
ax3.axes.xaxis.set_ticklabels([])
ax3.set_ylabel('ToA Residuals ($\\mu$s)', fontweight='bold', fontsize=12)
ax4.errorbar(freqf1, toasf1 - np.median(toasf1), terrf1, fmt='.r',
label='Postfit', capsize=2)
ax4.set_xlim(freq_bot, freq_top)
ax4.grid()
ax4.legend(loc='upper right')
ax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)
fig.suptitle(
"""Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\\mu$s; Postfit Wrms: %.2f $\\mu$s
Median ToA Err: %.2f $\\mu$s; DM: %.6f $\\pm$ %.6f pc cm$^{-3}$; Reduced $\\chi^2$: %.2f"""
% (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(
terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')
dirplot = os.path.join(pwd, ar_psr + '_' + ar_tel + '_plots')
if not os.path.exists(dirplot):
os.makedirs(dirplot)
plotfile = dirplot + '/' + ar_psr + '_' + str(ar_mjd) + '_' + str(ar_centfr
) + '_' + ar_tel + '_DMfitResid.pdf'
plt.savefig(plotfile, format='pdf')
plt.close()
if not quiet:
print('done!')
del ar
return dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1)
<mask token>
def freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):
for i in range(narch):
archives[i].tscrunch()
if archives[0].get_telescope() == 'GMRT':
for i in range(narch):
ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()
ar_frq = archives[i].get_centre_frequency()
ar_bw = archives[i].get_bandwidth()
period = archives[i].get_Integration(0).get_folding_period()
offset = 0.670520675
jump = offset / period - int(offset / period)
if ar_frq >= 1260.0 and ar_frq < 1460.0:
if ar_mjd >= 58810.0 and ar_mjd < 58991.0:
archives[i].rotate_phase(-jump)
freq_append = psrchive.FrequencyAppend()
ttfreq = archives[0].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[0].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[0].fscrunch(b5scrunch)
freq_append.init(archives[0])
while len(archives) > 1:
ttfreq = archives[1].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[1].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[1].fscrunch(b5scrunch)
freq_append.append(archives[0], archives[1])
del archives[1]
return archives[0]
<mask token>
def freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):
for i in range(narch):
archives[i].tscrunch()
if archives[0].get_telescope() == 'GMRT':
for i in range(narch):
ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()
ar_frq = archives[i].get_centre_frequency()
ar_bw = archives[i].get_bandwidth()
period = archives[i].get_Integration(0).get_folding_period()
offset = 0.670520675
jump = offset / period - int(offset / period)
if ar_frq >= 1260.0 and ar_frq < 1460.0:
if ar_mjd >= 58810.0 and ar_mjd < 58991.0:
archives[i].rotate_phase(-jump)
freq_append = psrchive.FrequencyAppend()
ttfreq = archives[0].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[0].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[0].fscrunch(b5scrunch)
freq_append.init(archives[0])
while len(archives) > 1:
ttfreq = archives[1].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[1].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[1].fscrunch(b5scrunch)
freq_append.append(archives[0], archives[1])
del archives[1]
return archives[0]
main()
| <mask token>
import os
import sys
import numpy as np
import psrchive
import argparse
import time
import warnings
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import gridspec
start = time.time()
parser = argparse.ArgumentParser(description='Code for measuring in-band ' +
'DM for pulsar data in psrfits format.')
parser.add_argument('files', nargs='+', type=str, help=
'The list of fits file(s) for processing')
parser.add_argument('-E', '--ephem', type=str, help=
'Ephemeris file to update the model. Exits if not ' +
'given or is not available in "PWD/ephemerides" ' + 'directory')
parser.add_argument('-M', '--model', nargs='+', type=str, help=
'Model template for ToA generation. Exits if not ' +
'given or is not available in "PWD/templates" ' + 'directory')
parser.add_argument('-f', '--fscrunch', type=int, default=1, help=
'Factor to scrunch the number of channels for ' +
'doing DM estimation (Def: 1)')
parser.add_argument('-b3f', '--b3fscrunch', type=int, default=1, help=
'Factor to scrunch the number of channels for ' +
'band3 GMRT data (Def: 1)')
parser.add_argument('-b5f', '--b5fscrunch', type=int, default=1, help=
'Factor to scrunch the number of channels for ' +
'band5 GMRT data (Def: 1)')
parser.add_argument('-w', '--writeout', action='store_true', help=
'Writes out the DM corrected file. Def: False')
parser.add_argument('-ptoa', '--print_toas', action='store_true', help=
'Print the prefit ToAs to file in tempo2 format. ' + 'Def: False')
parser.add_argument('-F', '--Fscrunch', action='store_true', help=
'Fully scrunch the number of channels for the ' +
'final output archive (Def: False)')
parser.add_argument('-T', '--Tscrunch', action='store_true', help=
'Completely time scrunch all the integrations')
parser.add_argument('-t', '--tscrunch', type=int, default=1, help=
'Factor to scrunch the number of integrations for ' +
'the final output archive (Def: None)')
parser.add_argument('-o', '--offset', type=float, default=0.670520675, help
='Offset to shift band 5 ToAs (in secs)')
parser.add_argument('-q', '--quiet', action='store_true', help=
'Only print warnings')
def main():
args = parser.parse_args()
quiet = False
if args.quiet:
quiet = True
tempo2 = True
ptoa = False
if args.print_toas:
ptoa = True
if not quiet:
print('Loading the archive files for DM estimation')
archives = []
for filename in args.files:
archives.append(psrchive.Archive_load(filename))
narch = len(archives)
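    # All loaded archives (e.g. simultaneous band-3 and band-5 GMRT files) are
    # time-scrunched and frequency-appended into a single wide-band archive
    # before the DM estimation.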
    if narch >= 1:
        if not quiet:
            print('Appending the archives ...')
        ar = freq_appendData(narch, archives, args.offset, args.b3fscrunch,
            args.b5fscrunch)
        if not quiet:
            print(' done!')
    else:
        if not quiet:
            print('Only one archive was given, so nothing to frequency-append.')
        ar = archives[0]
del archives
ar_psr = ar.get_source()
ar_nbins = ar.get_nbin()
ar_tel = ar.get_telescope()
mjd_start = ar.get_Integration(0).get_start_time().in_days()
mjd_end = ar.get_Integration(0).get_end_time().in_days()
ar_mjd = mjd_start + (mjd_end - mjd_start) / 2.0
length = ar.integration_length()
ar.update_centre_frequency()
ar_centfr = ar.get_centre_frequency()
ar_nchan = ar.get_nchan()
ar_bw = ar.get_bandwidth()
ar_chnwdth = ar_bw / ar_nchan
ffrac = args.fscrunch
if not quiet:
print('\nNow preparing for DM estimation\n')
pwd = os.getcwd()
if args.ephem != None:
ephemeris = args.ephem
else:
ephemeris = 'ephemerides/' + ar_psr + '.par'
if not os.path.exists(ephemeris):
sys.exit(1)
if not quiet:
        print('\nEphemeris file is: ' + ephemeris + '\n')
    model = []
    if args.model != None:
        for filename in args.model:
            model.append(psrchive.Archive_load(filename))
        model = freq_appendModel(1, model, args.offset, args.b3fscrunch,
            args.b5fscrunch)
if args.model == None:
if not quiet:
print('Looking for matching template in templates directory...'),
import subprocess
tempdir = 'templates/*.sm'
tempfile = ar_psr + '_tmp.txt'
a = subprocess.call(
"psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'" % (tempdir,
tempfile), shell=True)
        modeltempl = ''
        tempnchan = ''
t1 = str(ar_nbins)
if ar_tel == 'gmrt':
t2 = str(int(ar_bw))
else:
t2 = str(ar_bw)
t3 = '%.2f' % ar_centfr
f = open(tempfile, 'r')
for line in f:
line = line.strip()
columns = line.split()
t4 = float(columns[5])
t4 = '%.2f' % t4
if ar_tel == 'gmrt':
if columns[1] == ar_psr and columns[2] == t1 and str(int(
columns[3])) == t2 and t4 == t3:
modeltempl = columns[0]
tempnchan = columns[4]
if not quiet:
print(' done\n')
elif columns[1] == ar_psr and columns[2] == t1 and str(columns[3]
) == t2 and t4 == t3:
modeltempl = columns[0]
tempnchan = columns[4]
if not quiet:
print(' done\n')
if modeltempl == '' and tempnchan == '':
print(
'\n** No matching template found for DM fitting. Exiting. **\n'
)
sys.exit(1)
f.close()
os.remove(tempfile)
if not quiet:
print('Found matching template: ' + modeltempl)
        model = psrchive.Archive_load(modeltempl)
if not quiet:
print('\nEstimating the DM from the observation')
model.update_centre_frequency()
arch = ar.clone()
dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch,
ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,
ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch)
if args.writeout:
infile = open(ephemeris, 'r')
tmpeph = ar_psr + '.eph'
output = open(tmpeph, 'w+')
for i, line in enumerate(infile):
if not line.lstrip().startswith('DM'):
if not line.lstrip().startswith('DMEPOCH'):
output.write(line)
infile.close()
output.close()
dmline = 'DM\t\t\t ' + str(dmval) + '\t\t' + str(dmverr)
dmepochline = 'DMEPOCH\t\t ' + str(round(ar_mjd, 2))
if not args.quiet:
print('Updating the ephemeris with new DM... '),
f = open(tmpeph, 'a')
f.write('%s\n %s\n' % (dmline, dmepochline))
if not args.quiet:
print(' done!')
f.close()
if not quiet:
print(
'Correcting the DM of the observed file and writing it out... '
),
os.remove(tmpeph)
dirfinal = os.path.join(pwd, ar_psr + '_' + ar_tel + '_final')
if not os.path.exists(dirfinal):
os.makedirs(dirfinal)
outfile = dirfinal + '/' + ar_psr + '_' + str(ar_mjd
) + '_' + ar_tel + '.ar'
ar.set_dispersion_measure(dmval)
ar.dedisperse()
if not args.Tscrunch:
ar.tscrunch(args.tscrunch)
else:
ar.tscrunch()
if not args.Fscrunch:
ar.fscrunch(ffrac)
else:
ar.fscrunch()
ar.unload(outfile)
if not args.quiet:
print(' done!')
del ar
if not quiet:
print('The file is corrected for DM and is written out to\n' +
outfile)
f = open(ar_psr + '_DM_timeseries.txt', 'a')
f.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\n' % (
filename, ar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms,
ToA_Err, ar_centfr, ar_bw, ar_tel))
f.close()
import time
end = time.time()
total = end - start
print(
'-----------------------------------------------------------------------------'
)
print('MJD\t\tDM\t\tDMerr\t\tChisq\tC_Fr\tBW\tTel')
print('%.6f\t%.6f\t%.6f\t%.2f\t%.1f\t%.1f\t%s' % (ar_mjd, dmval, dmverr,
fitchisq, ar_centfr, ar_bw, ar_tel))
print(
'-----------------------------------------------------------------------------'
)
print('\nThe program took %.1f seconds to finish' % total)
<mask token>
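# Summary of DMCalc as coded below: template-matched ToAs are generated with
# psrchive's ArrivalTime (PGS shift estimator, Tempo2/IPTA format), cleaned with
# a median-error cut followed by Huber-regression outlier rejection, the DM is
# fitted with tempo2, post-fit ToAs are re-derived at the new DM, and a
# diagnostic plot is saved. Returns dmval, dmverr, fitchisq, prefit_rms,
# postfit_rms and the median ToA error.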
def DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model,
ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch):
if model == None:
sys.exit(1)
init_dm = ar.get_dispersion_measure()
if not quiet:
print('Using the ArrivalTime (pat) with PGS in Tempo2 format')
arrtim = psrchive.ArrivalTime()
arrtim.set_shift_estimator('PGS')
arrtim.set_format('Tempo2')
arrtim.set_format_flags('IPTA')
if not quiet:
print('Loading the template file for processing... '),
std = model.clone()
std.pscrunch()
std.tscrunch()
std_nchan = std.get_nchan()
std.dedisperse()
std.fscrunch(ffrac)
arrtim.set_standard(std)
if not quiet:
print(' done!')
ar.fscrunch(ffrac)
ar.pscrunch()
ar.tscrunch()
arrtim.set_observation(ar)
if not quiet:
print('Finding the ToAs... '),
toas = arrtim.get_toas()
toas_filtered = [x.split()[:5] for x in toas]
str_filename, str_freq, str_mjd, str_toaErr, str_site = zip(*toas_filtered)
freq = np.asarray(str_freq, dtype=np.float64)
amjd = np.asarray(str_mjd, dtype=np.float64)
terr = np.asarray(str_toaErr, dtype=np.float64)
if not quiet:
print(' done!')
print('Removing the bad ToAs using Huber Regression... '),
condition1 = terr < 3 * np.median(terr)
freqnew = np.extract(condition1, freq)
amjdnew = np.extract(condition1, amjd)
terrnew = np.extract(condition1, terr)
tempfile = ar_psr + '_tmp.txt'
f = open(tempfile, 'w+')
head = 'FORMAT 1\n'
f.write('%s' % head)
for i in range(0, np.size(freqnew)):
f.write('%s %.12f %.20f %.8f %s\n' % (str_filename[0], freqnew[i],
amjdnew[i], terrnew[i], str_site[0]))
f.close()
tmpstr = 'tempo2 -output general2 -f'
tmp = os.popen(tmpstr +
' %s %s -s "1111111 {freq} {pre} {err}\n" | grep \'1111111\'' % (
ephemeris, tempfile)).read()
os.remove(tempfile)
tmp1 = tmp.split('\n')
freqtmp = np.zeros(np.size(amjdnew))
toastmp = np.zeros(np.size(amjdnew))
TErrtmp = np.zeros(np.size(amjdnew))
for i in range(np.size(amjdnew)):
_, freqtmp[i], toastmp[i], TErrtmp[i] = tmp1[i].split()
TErrtmp /= 1000000.0
from sklearn import linear_model
from sklearn.linear_model import HuberRegressor
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
freqarr = freqtmp.reshape(-1, 1)
toastmp *= 1000000.0
toashift = np.min(toastmp) * -1.5
toastmp += toashift
Terrtmp = TErrtmp * 1000000.0
model = make_pipeline(PolynomialFeatures(2), HuberRegressor())
model.fit(freqarr, toastmp, huberregressor__sample_weight=np.ravel(1.0 /
Terrtmp))
y_pred = model.predict(freqarr)
residuals = toastmp - y_pred
median = np.median(residuals)
MAD = np.median(np.abs(residuals - np.median(residuals))
) / 0.6744897501960817
condition2 = (residuals > median - 3 * MAD) & (residuals < median + 3 * MAD
)
freqf = np.around(np.extract(condition2, freqarr), 3)
amjdf = np.extract(condition2, amjdnew)
toasf = np.extract(condition2, toastmp)
terrf = np.extract(condition2, TErrtmp)
prefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))
terrf *= 1000000.0
if not quiet:
print(' done!')
if ptoa:
if not quiet:
print('Writing out ToAs into a file in tempo2 format'),
dirtoas = os.path.join(pwd, ar_psr + '_' + ar_tel + '_ToAs')
if not os.path.exists(dirtoas):
os.makedirs(dirtoas)
outfile = dirtoas + '/' + ar_psr + '_' + str(ar_mjd
) + '_' + ar_tel + '_ToAs.txt'
f = open(outfile, 'w+')
head = 'FORMAT 1'
f.write('%s\n' % head)
for i in range(0, np.size(freqf)):
f.write('%s %.8f %.18f %.6f %s\n' % (str_filename[0], freqf[i],
amjdf[i], terrf[i], str_site[0]))
f.close()
if not quiet:
print('done!')
if not quiet:
print('\nWriting the ToAs to a temporary file for tempo2 fitting...'),
outfiletmp = ar_psr + 'tmp_ToAs.txt'
f = open(outfiletmp, 'w+')
head = 'FORMAT 1'
f.write('%s\n' % head)
for i in range(0, np.size(freqf)):
f.write('%s %.8f %.18f %.6f %s\n' % (str_filename[0], freqf[i],
amjdf[i], terrf[i], str_site[0]))
f.close()
if not quiet:
print(' done!\n')
dmstr = os.popen(
"tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk '{print $5,$6}'"
% (ephemeris, outfiletmp)).read()
dm, dmerr = dmstr.split()
dmval = float(dm)
dmverr = float(dmerr)
chisqstr = os.popen(
"tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk '{print $9}'" %
(ephemeris, outfiletmp)).read()
fitchisq = float(chisqstr)
os.remove(outfiletmp)
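    # A temporary ephemeris carrying the fitted DM (with fit flag and
    # uncertainty) and a DMEPOCH at the observation MJD is written next; it is
    # used once for the post-fit tempo2/general2 run below and then deleted.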
infile = open(ephemeris, 'r')
tmpeph1 = ar_psr + '_tmpeph.eph'
output = open(tmpeph1, 'w+')
for i, line in enumerate(infile):
if not line.lstrip().startswith('DM'):
if not line.lstrip().startswith('DMEPOCH'):
output.write(line)
infile.close()
output.close()
dmline = 'DM ' + str(dmval) + '\t1\t' + str(dmverr)
dmepochline = 'DMEPOCH\t ' + str(round(ar_mjd, 2))
f = open(tmpeph1, 'a')
f.write('%s\n%s\n' % (dmline, dmepochline))
f.close()
newarch = ar.clone()
newarch.tscrunch()
newarch.set_dispersion_measure(dmval)
arrtim.set_observation(newarch)
arrtim.set_standard(std)
toas1 = arrtim.get_toas()
toas1_filtered = [x.split()[:5] for x in toas1]
str_filename1, str_freq1, str_mjd1, str_toaErr1, str_site1 = zip(*
toas1_filtered)
freq1 = np.asarray(str_freq1, dtype=np.float64)
amjd1 = np.asarray(str_mjd1, dtype=np.float64)
terr1 = np.asarray(str_toaErr1, dtype=np.float64)
freqnew1 = np.extract(condition1, freq1)
amjdnew1 = np.extract(condition1, amjd1)
terrnew1 = np.extract(condition1, terr1)
tempfile1 = ar_psr + '_tmp1.txt'
f = open(tempfile1, 'w+')
head = 'FORMAT 1\n'
f.write('%s' % head)
for i in range(0, np.size(freqnew1)):
f.write('%s %.12f %.20f %.8f %s\n' % (str_filename1[0], freqnew1[i],
amjdnew1[i], terrnew1[i], str_site1[0]))
f.close()
tmp2 = os.popen(
"""tempo2 -output general2 -f %s %s -s "1111111 {freq} {pre} {err}
" | grep '1111111'"""
% (tmpeph1, tempfile1)).read()
os.remove(tempfile1)
os.remove(tmpeph1)
tmp3 = tmp2.split('\n')
freqtmp2 = np.zeros(np.size(amjdnew1))
toastmp2 = np.zeros(np.size(amjdnew1))
TErrtmp2 = np.zeros(np.size(amjdnew1))
for i in range(np.size(amjdnew1)):
_, freqtmp2[i], toastmp2[i], TErrtmp2[i] = tmp3[i].split()
freqf1 = np.around(np.extract(condition2, freqtmp2), 3)
amjdf1 = np.extract(condition2, amjdnew1)
toasf1 = np.extract(condition2, toastmp2)
terrf1 = np.extract(condition2, TErrtmp2)
toasf1 *= 1000000.0
postfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))
ar_nbin = newarch.get_nbin()
ar_nchn = newarch.get_nchan()
if narch == 1:
freq_bot = ar.get_centre_frequency() - ar_bw / 2.0
freq_top = ar.get_centre_frequency() + ar_bw / 2.0
if narch > 1:
if ar_bw == 200.0:
freq_bot = 400.0
freq_top = 1460.0
if ar_bw == 400.0:
freq_bot = 300.0
freq_top = 1460.0
newarch.dedisperse()
newarch.remove_baseline()
profdata2D = newarch.get_data()[:, 0, :, :].flatten().reshape(ar_nchn,
ar_nbin)
prof = newarch.clone()
prof.fscrunch()
profdata1D = prof.get_data().flatten()
profdata1D /= np.max(profdata1D)
residDM = init_dm - dmval
dmcurve = 4.15 * 1000.0 * residDM * (1.0 / (np.min(freqf) / 1000.0) **
2 - 1.0 / (freqf / 1000.0) ** 2)
dmoff = np.median(toasf) - np.median(dmcurve)
dmcurve += dmoff
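    # Diagnostic figure: the left column shows the frequency-vs-phase waterfall
    # and the integrated profile; the right column shows, top to bottom, the
    # unfiltered prefit residuals with the robust polynomial fit, the filtered
    # prefit residuals, and the post-fit residuals, all versus frequency.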
fig = plt.figure(3, figsize=(8, 6))
fig.subplots_adjust(hspace=0.05)
ax0 = plt.subplot2grid((3, 8), (0, 0), rowspan=2, colspan=3)
ax1 = plt.subplot2grid((3, 8), (2, 0), rowspan=1, colspan=3)
ax2 = plt.subplot2grid((3, 8), (0, 4), colspan=4)
ax3 = plt.subplot2grid((3, 8), (1, 4), colspan=4)
ax4 = plt.subplot2grid((3, 8), (2, 4), colspan=4)
ax0.imshow(np.sqrt(profdata2D ** 2) ** 0.5, origin='lower', extent=(0,
ar_nbin - 1, freq_bot, freq_top), aspect='auto', cmap='hot')
ax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)
ax0.tick_params(axis='x', which='both', bottom=True, top=True,
labelbottom=False)
ax1.plot(np.arange(ar_nbin, dtype=float), profdata1D, color='black',
linewidth=0.5)
ax1.set_xlim(0, ar_nbin - 1)
ax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)
ax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)
ax2.errorbar(freqtmp, toastmp, yerr=Terrtmp, fmt='.', color='gray',
label='Prefit: Unfiltered', capsize=2)
ax2.plot(freqtmp, y_pred, '--r', label='Polynomial Fit')
ax2.set_xlim(freq_bot, freq_top)
ax2.grid()
ax2.legend(loc='upper right')
ax2.axes.xaxis.set_ticklabels([])
ax3.yaxis.set_label_position('right')
ax3.errorbar(freqf, toasf - np.median(toasf), terrf, fmt='.k', label=
'Prefit: Filtered', capsize=2)
ax3.set_xlim(freq_bot, freq_top)
ax3.grid()
ax3.legend(loc='upper right')
ax3.axes.xaxis.set_ticklabels([])
ax3.set_ylabel('ToA Residuals ($\\mu$s)', fontweight='bold', fontsize=12)
ax4.errorbar(freqf1, toasf1 - np.median(toasf1), terrf1, fmt='.r',
label='Postfit', capsize=2)
ax4.set_xlim(freq_bot, freq_top)
ax4.grid()
ax4.legend(loc='upper right')
ax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)
fig.suptitle(
"""Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\\mu$s; Postfit Wrms: %.2f $\\mu$s
Median ToA Err: %.2f $\\mu$s; DM: %.6f $\\pm$ %.6f pc cm$^{-3}$; Reduced $\\chi^2$: %.2f"""
% (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(
terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')
dirplot = os.path.join(pwd, ar_psr + '_' + ar_tel + '_plots')
if not os.path.exists(dirplot):
os.makedirs(dirplot)
plotfile = dirplot + '/' + ar_psr + '_' + str(ar_mjd) + '_' + str(ar_centfr
) + '_' + ar_tel + '_DMfitResid.pdf'
plt.savefig(plotfile, format='pdf')
plt.close()
if not quiet:
print('done!')
del ar
return dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1)
<mask token>
def freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):
for i in range(narch):
archives[i].tscrunch()
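    # GMRT-specific jump: rotate BAND5 (1260-1460 MHz) archives observed in the
    # cycle 37 MJD range by a fixed offset derived from PSR J1643-1224 timing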
if archives[0].get_telescope() == 'GMRT':
for i in range(narch):
ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()
ar_frq = archives[i].get_centre_frequency()
ar_bw = archives[i].get_bandwidth()
period = archives[i].get_Integration(0).get_folding_period()
offset = 0.670520675
jump = offset / period - int(offset / period)
if ar_frq >= 1260.0 and ar_frq < 1460.0:
if ar_mjd >= 58810.0 and ar_mjd < 58991.0:
archives[i].rotate_phase(-jump)
freq_append = psrchive.FrequencyAppend()
ttfreq = archives[0].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[0].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[0].fscrunch(b5scrunch)
freq_append.init(archives[0])
while len(archives) > 1:
ttfreq = archives[1].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[1].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[1].fscrunch(b5scrunch)
freq_append.append(archives[0], archives[1])
del archives[1]
return archives[0]
<mask token>
def freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):
for i in range(narch):
archives[i].tscrunch()
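    # GMRT-specific jump: rotate BAND5 (1260-1460 MHz) templates observed in the
    # cycle 37 MJD range by a fixed offset derived from PSR J1643-1224 timing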
if archives[0].get_telescope() == 'GMRT':
for i in range(narch):
ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()
ar_frq = archives[i].get_centre_frequency()
ar_bw = archives[i].get_bandwidth()
period = archives[i].get_Integration(0).get_folding_period()
offset = 0.670520675
jump = offset / period - int(offset / period)
if ar_frq >= 1260.0 and ar_frq < 1460.0:
if ar_mjd >= 58810.0 and ar_mjd < 58991.0:
archives[i].rotate_phase(-jump)
freq_append = psrchive.FrequencyAppend()
ttfreq = archives[0].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[0].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[0].fscrunch(b5scrunch)
freq_append.init(archives[0])
while len(archives) > 1:
ttfreq = archives[1].get_centre_frequency()
if 300.0 < ttfreq < 500.0:
archives[1].fscrunch(b3scrunch)
if 1160.0 < ttfreq < 1460.0:
archives[1].fscrunch(b5scrunch)
freq_append.append(archives[0], archives[1])
del archives[1]
return archives[0]
main()
| #!/usr/bin/python
'''
** dmcalc **
Estimates the Dispersion Measure (DM) from the data in psrfits file format.
Returns the DM value with its uncertainty and reduced chi-square from tempo2
DM fit.
Dependencies
-------------
PSRCHIVE with python interface: http://psrchive.sourceforge.net/
TEMPO2: https://bitbucket.org/psrsoft/tempo2
SKLEARN: https://scikit-learn.org/stable/install.html
Parameters
----------
file(s) : Input file(s) in psrfits format
ephem : Ephemeris (or parameter) file of the pulsar. This is required
to update the model. It can be given as a command line argument.
If it is available in "PWD/ephemerides" folder, one can use that.
Giving the file with this option overrides the default one.
model : Template profile for cross-correlating with the observation to
obtain DM. It can be given as a command line argument, otherwise
            it will look for a matching one in "PWD/templates" directory
and if found, will use that instead. One can use this option to
override the default selection.
fscrunch : int, optional, default: None. Factor for scrunching the frequency
channels before passing it to DM estimation.
b3fscrunch : int, optional, default: None. Factor for scrunching the BAND3
data of uGMRT before passing it to DM estimation.
b5fscrunch : int, optional, default: None. Factor for scrunching the BAND5
data of uGMRT before passing it to DM estimation.
offset : float, optional, default: None. Fix for jump between BAND3 and
BAND5 of uGMRT bands.
writeout : bool, optional, default: False. Writes out the file corrected
for DM in a default directory (PWD/PSRJ_{site}_final), using the
following options to reduce the file.
plot : bool, optional, default: True. Prints the data analysis plot in
a PDF file. ToA rejection steps and DM corrected ToAs are shown
in addition to DM corrected frequency evolution of the profile.
ptoa : bool, optional, default: False. Prints the outliers cleaned ToAs
to a file in the TEMPO2 readable format, so that, if required,
it can be used for other purposes.
Fscrunch : bool, optional, default: False. Collapse all frequency channels
to produce one profile.
Tscrunch : bool, optional, default: False. Collapse all sub-integrations
to produce one profile.
tscrunch : int, optional, default: None. Factor to scrunch sub-integrations
for writing out the DM corrected file.
quiet : bool, optional, default: False. Suppresses all print statements
except warnings and errors.
Returns
-------
Dispersion Measure with uncertainty.
Examples
--------
# (a) for DM estimation with files in default directories:
#
dmcalc.py inputfile.fits
#
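# (b) to scrunch the frequency channels before the DM fit (the factor of 4
#     below is only illustrative):
#
dmcalc.py -f 4 inputfile.fits
#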
# (c) to use different ephemeris and template files:
#
dmcalc.py -E ephemeris.par -M model.fits data_file.fits
#
# (d) to write the DM corrected fits file and ToAs:
#
./dmcalc2.py -w -ptoa inputfile.fits
'''
# import modules...
import os
import sys
import numpy as np
import psrchive
import argparse
import time
import warnings
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import gridspec
start = time.time()
parser = argparse.ArgumentParser(description='Code for measuring in-band '+
'DM for pulsar data in psrfits format.')
parser.add_argument('files', nargs='+', type=str,
help='The list of fits file(s) for processing')
parser.add_argument('-E', '--ephem', type=str,
help='Ephemeris file to update the model. Exits if not '+
'given or is not available in "PWD/ephemerides" '+
'directory')
parser.add_argument('-M', '--model', nargs='+', type=str,
help='Model template for ToA generation. Exits if not '+
'given or is not available in "PWD/templates" '+
'directory')
parser.add_argument('-f','--fscrunch', type=int, default=1,
help='Factor to scrunch the number of channels for '+
'doing DM estimation (Def: 1)')
parser.add_argument('-b3f','--b3fscrunch', type=int, default=1,
help='Factor to scrunch the number of channels for '+
'band3 GMRT data (Def: 1)')
parser.add_argument('-b5f','--b5fscrunch', type=int, default=1,
help='Factor to scrunch the number of channels for '+
'band5 GMRT data (Def: 1)')
parser.add_argument('-w','--writeout', action='store_true',
help='Writes out the DM corrected file. Def: False')
parser.add_argument('-ptoa','--print_toas', action='store_true',
help='Print the prefit ToAs to file in tempo2 format. '+
'Def: False')
parser.add_argument('-F','--Fscrunch', action='store_true',
help='Fully scrunch the number of channels for the '+
'final output archive (Def: False)')
parser.add_argument('-T','--Tscrunch', action='store_true',
help='Completely time scrunch all the integrations')
parser.add_argument('-t','--tscrunch', type=int, default=1,
help='Factor to scrunch the number of integrations for '+
'the final output archive (Def: None)')
parser.add_argument('-o','--offset', type=float, default=0.670520675,
help='Offset to shift band 5 ToAs (in secs)')
parser.add_argument('-q', '--quiet', action='store_true',
help='Only print warnings')
def main():
# parses the input arguments
args = parser.parse_args()
# checks status of quiet and ptoa
quiet=False
if args.quiet:
quiet=True
tempo2=True
ptoa=False
if args.print_toas:
ptoa=True
if not quiet:
print("Loading the archive files for DM estimation")
# loads the psrfits file
archives = []
for filename in args.files:
archives.append(psrchive.Archive_load(filename))
narch = len(archives)
if narch >= 1:
if not quiet:
print("Appending the archives ..."),
# append data
ar = freq_appendData(narch, archives, args.offset,
args.b3fscrunch, args.b5fscrunch)
if not quiet:
print(" done!")
else:
if not quiet:
print("Only one archive was given, so nothing to frequency-append.")
# ar is the final archive after performing frequency append
ar = archives[0]
del archives
# extracts relevant information from the archive
ar_psr = ar.get_source()
ar_nbins = ar.get_nbin()
ar_tel = ar.get_telescope()
mjd_start=ar.get_Integration(0).get_start_time().in_days()
mjd_end=ar.get_Integration(0).get_end_time().in_days()
ar_mjd = mjd_start + (mjd_end-mjd_start)/2.
length = ar.integration_length()
ar.update_centre_frequency()
ar_centfr = ar.get_centre_frequency()
ar_nchan = ar.get_nchan()
ar_bw = ar.get_bandwidth()
ar_chnwdth = ar_bw / ar_nchan
ffrac = args.fscrunch
if not quiet:
print("\nNow preparing for DM estimation\n")
pwd=os.getcwd()
# checks for ephemeris file and exit if not given or is not available
# in the default directory "PWD/ephemerides".
if args.ephem != None:
ephemeris = args.ephem
else:
ephemeris = "ephemerides/"+ar_psr+".par"
if not (os.path.exists(ephemeris)):
sys.exit(1)
if not quiet:
print ("\nEphemeris file is:"+ephemeris+'\n')
# if template is given as input argument load and process them
model = []
for filename in args.model:
model.append(psrchive.Archive_load(filename))
if args.model != None:
if len(args.model) == 1:
model = freq_appendModel(1,model,args.offset, args.b3fscrunch, args.b5fscrunch)
if len(args.model) > 1:
model = freq_appendModel(1,model,args.offset, args.b3fscrunch, args.b5fscrunch)
# If the template is not given, looking for a matching template in the templates directory
if args.model == None:
if not quiet:
print("Looking for matching template in templates directory..."),
import subprocess
tempdir="templates/*.sm"
tempfile=ar_psr+'_tmp.txt'
a=subprocess.call("psredit -c name,nbin,bw,nchan,freq -Q '%s' > '%s'"
% (tempdir,tempfile), shell=True)
tempnchan=""
t1=str(ar_nbins)
if ar_tel=='gmrt':
t2=str(int(ar_bw))
else:
t2=str((ar_bw))
t3=('%.2f'%ar_centfr)
f = open(tempfile,'r')
for line in f:
line = line.strip()
columns=line.split()
t4 = float(columns[5])
t4 = ('%.2f'%t4)
if ar_tel=='gmrt':
if (columns[1]==ar_psr and columns[2]==t1 and str(int(columns[3]))==t2 and t4==t3):
modeltempl=columns[0]
tempnchan=columns[4]
if not quiet:
print (' done\n')
else:
if (columns[1]==ar_psr and columns[2]==t1 and str((columns[3]))==t2 and t4==t3):
modeltempl=columns[0]
tempnchan=columns[4]
if not quiet:
print (' done\n')
if modeltempl=='' and tempnchan=='':
print("\n** No matching template found for DM fitting. Exiting. **\n")
sys.exit(1)
f.close()
os.remove(tempfile)
if not quiet:
print("Found matching template: "+modeltempl)
model.append(psrchive.Archive_load(modeltempl))
if not quiet:
print("\nEstimating the DM from the observation")
model.update_centre_frequency()
# cloning the original file for passing to DMCalc() routine
arch = ar.clone()
# Calling the DM estimation routine
dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err = DMCalc(arch, ar_nchan, ar_centfr,
ar_bw, ar_psr, ar_tel, ar_mjd, model,
ephemeris, pwd, ffrac, quiet, tempo2,
ptoa, narch)
# writing out the final DM corrected file, if requested
if args.writeout:
		# removing the DM and DMEPOCH from the ephemeris file before updating it
infile = open(ephemeris,"r")
tmpeph = ar_psr+'.eph'
output = open(tmpeph,"w+")
for i, line in enumerate(infile):
if not line.lstrip().startswith('DM'):
if not line.lstrip().startswith('DMEPOCH'):
output.write(line)
infile.close()
output.close()
# updating the ephemeris file with measured DM
dmline = "DM "+str(dmval)+"\t\t"+str(dmverr)
dmepochline = "DMEPOCH "+str(round(ar_mjd,2))
if not args.quiet:
print("Updating the ephemeris with new DM... "),
f = open(tmpeph,'a')
f.write("%s\n %s\n" % (dmline, dmepochline))
if not args.quiet:
print(" done!")
f.close()
# updating the ephemeris in the archive with the measured DM
if not quiet:
print("Correcting the DM of the observed file and writing it out... "),
os.remove(tmpeph)
# creating the directory for writing the file
dirfinal=os.path.join(pwd,ar_psr+"_"+ar_tel+"_final")
if not os.path.exists(dirfinal):
os.makedirs(dirfinal)
# filename with path of the DM corrected file
outfile = dirfinal+"/"+ar_psr + "_" + str(ar_mjd) + "_" + ar_tel + ".ar"
# Setting the DMC flag to 1. In other words, doing the DM correction.
ar.set_dispersion_measure(dmval)
ar.dedisperse()
# Performing different scrunching in the archive for writing out
if not args.Tscrunch:
ar.tscrunch(args.tscrunch)
else:
ar.tscrunch()
if not args.Fscrunch:
ar.fscrunch(ffrac)
else:
ar.fscrunch()
# Writing out the DM corrected, time/frequency scrunched file.
ar.unload(outfile)
if not args.quiet:
print(" done!")
del ar
if not quiet:
print("The file is corrected for DM and is written out to\n"+outfile)
# Printing the results to the file and also in the terminal
f= open(ar_psr+"_DM_timeseries.txt",'a')
f.write('%s %.4f %.6f %.6f %.2f %.4f %.4f %.4f %.2f %.2f %s\n' %( filename, \
ar_mjd, dmval, dmverr, fitchisq, pre_rms, post_rms, ToA_Err, ar_centfr, \
ar_bw, ar_tel))
f.close()
import time
end = time.time()
total = end - start
print ('-----------------------------------------------------------------------------')
print ('MJD\t\tDM\t\tDMerr\t\tChisq\tC_Fr\tBW\tTel')
print ('%.6f\t%.6f\t%.6f\t%.2f\t%.1f\t%.1f\t%s' % (ar_mjd, dmval, dmverr,
fitchisq, ar_centfr, ar_bw, ar_tel) )
print ('-----------------------------------------------------------------------------')
print("\nThe program took %.1f seconds to finish"%total)
#-------------------------------------------------------------------------------------------#
''' Main function that performs the DM estimation '''
def DMCalc(ar, ar_nchan, ar_centfr, ar_bw, ar_psr, ar_tel, ar_mjd, model, ephemeris, pwd, ffrac, quiet, tempo2, ptoa, narch):
# Checks if model file is available.
if model == None:
sys.exit(1)
init_dm = ar.get_dispersion_measure()
# setting up the ToA estimation routine using the psrchive ArrivalTime()
if not quiet:
print("Using the ArrivalTime (pat) with PGS in Tempo2 format")
arrtim = psrchive.ArrivalTime()
arrtim.set_shift_estimator('PGS')
arrtim.set_format('Tempo2')
arrtim.set_format_flags('IPTA')
if not quiet:
print("Loading the template file for processing... "),
std = model.clone()
std.pscrunch()
std.tscrunch()
std_nchan = std.get_nchan()
std.dedisperse()
std.fscrunch(ffrac)
arrtim.set_standard(std)
if not quiet:
print(" done!")
ar.fscrunch(ffrac)
ar.pscrunch()
ar.tscrunch()
arrtim.set_observation(ar)
if not quiet:
print("Finding the ToAs... "),
# Finding the ToAs and reading it into numpy arrays
toas = arrtim.get_toas()
toas_filtered = [x.split()[:5] for x in toas]
str_filename,str_freq,str_mjd,str_toaErr,str_site = zip(*toas_filtered)
freq = np.asarray(str_freq, dtype=np.float64)
amjd = np.asarray(str_mjd, dtype=np.float64)
terr = np.asarray(str_toaErr, dtype=np.float64)
if not quiet:
print(" done!")
print("Removing the bad ToAs using Huber Regression... "),
	# removing ToAs with large uncertainties (keep those below 3x the median error)
condition1 = terr < 3*np.median(terr)
freqnew = np.extract(condition1,freq)
amjdnew = np.extract(condition1,amjd)
terrnew = np.extract(condition1,terr)
# writing the ToAs to a temporary file for getting the non-phase resolved ToAs using general2
tempfile = ar_psr+"_tmp.txt"
f = open(tempfile,"w+")
head="FORMAT 1\n"
f.write('%s' % head)
for i in range(0,np.size(freqnew)):
f.write('%s %.12f %.20f %.8f %s\n' %
(str_filename[0], freqnew[i], amjdnew[i], terrnew[i], str_site[0]))
f.close()
tmpstr="tempo2 -output general2 -f"
tmp = os.popen(tmpstr+" %s %s -s \"1111111 {freq} {pre} {err}\n\" | grep '1111111'" %
(ephemeris,tempfile)).read()
os.remove(tempfile)
# extracting the data from general2 output
tmp1 = tmp.split('\n')
freqtmp = np.zeros(np.size(amjdnew))
toastmp = np.zeros(np.size(amjdnew))
TErrtmp = np.zeros(np.size(amjdnew))
for i in range(np.size(amjdnew)):
_,freqtmp[i],toastmp[i],TErrtmp[i] = (tmp1[i].split())
TErrtmp /= 1e+6
# importing libraries for outlier removal
from sklearn import linear_model
from sklearn.linear_model import HuberRegressor
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
# changing the shape of frequency array
freqarr = freqtmp.reshape(-1,1)
# making a nu^2 model and fitting using Huber Regression
toastmp *= 1e+6
toashift = (np.min(toastmp)*-1.5)
toastmp += toashift
Terrtmp = TErrtmp*1e+6
model = make_pipeline(PolynomialFeatures(2), HuberRegressor())
model.fit(freqarr,toastmp,
huberregressor__sample_weight=np.ravel(1./Terrtmp))
y_pred = model.predict(freqarr)
residuals = toastmp - y_pred
median = np.median(residuals)
MAD = np.median(np.abs(residuals-np.median(residuals)))/0.6744897501960817
# filtering the good ToAs
condition2 = (residuals > median - 3*MAD) & (residuals < median + 3*MAD)
freqf = np.around(np.extract(condition2,freqarr),3)
amjdf = np.extract(condition2,amjdnew)
toasf = np.extract(condition2,toastmp)
terrf = np.extract(condition2,TErrtmp)
prefit_rms = np.sqrt(np.cov(toasf, aweights=terrf))
terrf *= 1e+6
if not quiet:
print(" done!")
# writing out the ToAs in proper format
if ptoa:
if not quiet:
print ('Writing out ToAs into a file in tempo2 format'),
dirtoas=os.path.join(pwd,ar_psr+"_"+ar_tel+"_ToAs")
if not os.path.exists(dirtoas):
os.makedirs(dirtoas)
outfile=dirtoas+"/"+ar_psr+"_"+str(ar_mjd)+"_"+ar_tel+"_ToAs.txt"
f = open(outfile,"w+")
head="FORMAT 1"
f.write('%s\n' % head)
for i in range(0,np.size(freqf)):
f.write('%s %.8f %.18f %.6f %s\n' % (str_filename[0], freqf[i], amjdf[i], terrf[i], str_site[0]))
f.close()
if not quiet:
print("done!")
# Fitting the ToAs with tempo2 for DM
if not quiet:
print("\nWriting the ToAs to a temporary file for tempo2 fitting..."),
outfiletmp=ar_psr+"tmp_ToAs.txt"
f = open(outfiletmp,"w+")
head="FORMAT 1"
f.write('%s\n' % head)
for i in range(0,np.size(freqf)):
f.write('%s %.8f %.18f %.6f %s\n' % (str_filename[0], freqf[i], amjdf[i], terrf[i], str_site[0]))
f.close()
if not quiet:
print(" done!\n")
# performing the fit
dmstr=os.popen("tempo2 -f %s %s -nofit -fit dm | grep 'DM (cm^-3 pc)'| awk \'{print $5,$6}\'"
% (ephemeris, outfiletmp)).read()
(dm, dmerr) = dmstr.split()
dmval = float(dm)
dmverr = float(dmerr)
# doing the fit again to read the chisquare
chisqstr=os.popen("tempo2 -f %s %s -nofit -fit dm | grep 'Fit Chisq'| awk \'{print $9}\'"
% (ephemeris, outfiletmp)).read()
fitchisq = float(chisqstr)
os.remove(outfiletmp)
# Preparing the data for plotting the residuals, prefit and postfit
infile = open(ephemeris,"r")
tmpeph1 = ar_psr+'_tmpeph.eph'
output = open(tmpeph1,"w+")
for i, line in enumerate(infile):
if not line.lstrip().startswith('DM'):
if not line.lstrip().startswith('DMEPOCH'):
output.write(line)
infile.close()
output.close()
# updating the ephemeris file with measured DM
dmline = "DM "+str(dmval)+"\t1\t"+str(dmverr)
dmepochline = "DMEPOCH "+str(round(ar_mjd,2))
f = open(tmpeph1,'a')
f.write('%s\n%s\n' % (dmline, dmepochline))
f.close()
newarch = ar.clone()
newarch.tscrunch()
newarch.set_dispersion_measure(dmval)
arrtim.set_observation(newarch)
arrtim.set_standard(std)
toas1 = arrtim.get_toas()
toas1_filtered = [x.split()[:5] for x in toas1]
str_filename1,str_freq1,str_mjd1,str_toaErr1,str_site1 = zip(*toas1_filtered)
freq1 = np.asarray(str_freq1, dtype=np.float64)
amjd1 = np.asarray(str_mjd1, dtype=np.float64)
terr1 = np.asarray(str_toaErr1, dtype=np.float64)
freqnew1 = np.extract(condition1,freq1)
amjdnew1 = np.extract(condition1,amjd1)
terrnew1 = np.extract(condition1,terr1)
tempfile1 = ar_psr+"_tmp1.txt"
f = open(tempfile1,"w+")
head="FORMAT 1\n"
f.write('%s' % head)
for i in range(0,np.size(freqnew1)):
f.write('%s %.12f %.20f %.8f %s\n' % (str_filename1[0], freqnew1[i], amjdnew1[i], terrnew1[i], str_site1[0]))
f.close()
tmp2 = os.popen("tempo2 -output general2 -f %s %s -s \"1111111 {freq} {pre} {err}\n\" | grep '1111111'"
% (tmpeph1,tempfile1)).read()
os.remove(tempfile1)
os.remove(tmpeph1)
# extracting the data from general2 output
tmp3 = tmp2.split('\n')
freqtmp2 = np.zeros(np.size(amjdnew1))
toastmp2 = np.zeros(np.size(amjdnew1))
TErrtmp2 = np.zeros(np.size(amjdnew1))
for i in range(np.size(amjdnew1)):
_,freqtmp2[i],toastmp2[i],TErrtmp2[i] = (tmp3[i].split())
freqf1 = np.around(np.extract(condition2,freqtmp2),3)
amjdf1 = np.extract(condition2,amjdnew1)
toasf1 = np.extract(condition2,toastmp2)
terrf1 = np.extract(condition2,TErrtmp2)
toasf1 *= 1e+6
postfit_rms = np.sqrt(np.cov(toasf1, aweights=terrf1))
ar_nbin = newarch.get_nbin()
ar_nchn = newarch.get_nchan()
if (narch == 1):
freq_bot = (ar.get_centre_frequency() - ar_bw/2.0)
freq_top = (ar.get_centre_frequency() + ar_bw/2.0)
if (narch > 1):
if (ar_bw == 200.):
freq_bot = 400.0
freq_top = 1460.0
if (ar_bw == 400.):
freq_bot = 300.0
freq_top = 1460.0
# Getting the profile data for plotting
newarch.dedisperse()
newarch.remove_baseline()
profdata2D = newarch.get_data()[:,0,:,:].flatten().reshape(ar_nchn,ar_nbin)
prof = newarch.clone()
prof.fscrunch()
profdata1D = prof.get_data().flatten()
profdata1D /= np.max(profdata1D)
residDM = init_dm - dmval
dmcurve = 4.15 * 1000. * residDM * ( (1./(np.min(freqf)/1000.)**2) - (1./(freqf/1000.)**2) )
dmoff = np.median(toasf) - np.median(dmcurve)
dmcurve += dmoff
# Now does the actual plotting
fig = plt.figure(3, figsize=(8, 6))
fig.subplots_adjust(hspace=0.05)
ax0 = plt.subplot2grid((3, 8), (0,0), rowspan=2, colspan=3)
ax1 = plt.subplot2grid((3, 8), (2,0), rowspan=1, colspan=3)
ax2 = plt.subplot2grid((3, 8), (0,4), colspan=4)
ax3 = plt.subplot2grid((3, 8), (1,4), colspan=4)
ax4 = plt.subplot2grid((3, 8), (2,4), colspan=4)
ax0.imshow((np.sqrt(profdata2D**2))**0.5, origin='lower', extent=(0,ar_nbin-1,freq_bot,freq_top), aspect='auto', cmap='hot')
ax0.set_ylabel('Frequency (MHz)', fontweight='bold', fontsize=12)
ax0.tick_params(axis='x', which='both', bottom=True, top=True,
labelbottom=False)
ax1.plot(np.arange(ar_nbin, dtype=float),profdata1D, color='black', linewidth=0.5)
ax1.set_xlim(0,ar_nbin-1)
ax1.set_xlabel('Pulse Phase (bins)', fontweight='bold', fontsize=12)
ax1.set_ylabel('Intensity', fontweight='bold', fontsize=12)
ax2.errorbar(freqtmp, toastmp, yerr=Terrtmp,fmt='.', color='gray', label='Prefit: Unfiltered', capsize=2)
ax2.plot(freqtmp, y_pred,'--r', label='Polynomial Fit')
ax2.set_xlim(freq_bot, freq_top)
ax2.grid()
ax2.legend(loc='upper right')
ax2.axes.xaxis.set_ticklabels([])
ax3.yaxis.set_label_position("right")
ax3.errorbar(freqf, toasf-np.median(toasf), terrf,fmt='.k', label='Prefit: Filtered', capsize=2)
ax3.set_xlim(freq_bot, freq_top)
ax3.grid()
ax3.legend(loc='upper right')
ax3.axes.xaxis.set_ticklabels([])
ax3.set_ylabel(r'ToA Residuals ($\mu$s)', fontweight='bold', fontsize=12)
ax4.errorbar(freqf1, toasf1-np.median(toasf1), terrf1, fmt='.r', label='Postfit', capsize=2)
ax4.set_xlim(freq_bot, freq_top)
ax4.grid()
ax4.legend(loc='upper right')
ax4.set_xlabel('Frequency (MHz)', fontweight='bold', fontsize=12)
fig.suptitle('Source: PSR %s; MJD: %.4f; Prefit Wrms: %.2f $\mu$s; Postfit Wrms: %.2f $\mu$s\nMedian ToA Err: %.2f $\mu$s; DM: %.6f $\pm$ %.6f pc cm$^{-3}$; Reduced $\chi^2$: %.2f' % (ar.get_source(), ar_mjd, prefit_rms, postfit_rms, np.median(terrf1), dmval, dmverr, fitchisq), fontsize=11, fontweight='bold')
dirplot=os.path.join(pwd,ar_psr+"_"+ar_tel+"_plots")
if not os.path.exists(dirplot):
os.makedirs(dirplot)
plotfile=dirplot+"/"+ar_psr+"_"+str(ar_mjd)+"_"+str(ar_centfr)+"_"+ar_tel+"_DMfitResid.pdf"
plt.savefig(plotfile, format='pdf')
plt.close()
if not quiet:
print ('done!')
del ar
return(dmval, dmverr, fitchisq, prefit_rms, postfit_rms, np.median(terrf1))
''' Frequency appending the data archives '''
def freq_appendData(narch, archives, offset, b3scrunch, b5scrunch):
for i in range(narch):
archives[i].tscrunch()
# GMRT specific Jump. This is not ideal, as these jumps calculated by tempo2
	# will be dependent on the pulsar period. The default value of this jump is
	# taken from the timing of PSR J1643-1224.
# PS: this jump is valid for only cycle 37 dataset (or the given MJD limits).
if (archives[0].get_telescope() == 'GMRT'):
for i in range(narch):
ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()
ar_frq = archives[i].get_centre_frequency()
ar_bw = archives[i].get_bandwidth()
period = (archives[i].get_Integration(0).get_folding_period())
offset = 0.670520675
jump = (offset/period) - int(offset/period)
if (ar_frq >= 1260. and ar_frq < 1460.):
if (ar_mjd >=58810. and ar_mjd < 58991.):
archives[i].rotate_phase(-jump)
freq_append = psrchive.FrequencyAppend()
ttfreq = archives[0].get_centre_frequency()
if (300. < ttfreq < 500.):
archives[0].fscrunch(b3scrunch)
if (1160. < ttfreq < 1460.):
archives[0].fscrunch(b5scrunch)
freq_append.init(archives[0])
while len(archives) > 1:
ttfreq = archives[1].get_centre_frequency()
if (300. < ttfreq < 500.):
archives[1].fscrunch(b3scrunch)
if (1160. < ttfreq < 1460.):
archives[1].fscrunch(b5scrunch)
freq_append.append(archives[0],archives[1])
del archives[1]
return(archives[0])
''' Frequency Appending the Templates '''
def freq_appendModel(narch, archives, offset, b3scrunch, b5scrunch):
for i in range(narch):
archives[i].tscrunch()
# GMRT specific Jump. This is not ideal, as these jumps calculated by tempo2
	# will be dependent on the pulsar period. The default value of this jump is
	# taken from the timing of PSR J1643-1224.
# PS: this jump is valid for only cycle 37 dataset (or the given MJD limits).
if (archives[0].get_telescope() == 'GMRT'):
for i in range(narch):
ar_mjd = archives[i].get_Integration(0).get_start_time().in_days()
ar_frq = archives[i].get_centre_frequency()
ar_bw = archives[i].get_bandwidth()
period = (archives[i].get_Integration(0).get_folding_period())
offset = 0.670520675
jump = (offset/period) - int(offset/period)
if (ar_frq >= 1260. and ar_frq < 1460.):
if (ar_mjd >=58810. and ar_mjd < 58991.):
archives[i].rotate_phase(-jump)
freq_append = psrchive.FrequencyAppend()
ttfreq = archives[0].get_centre_frequency()
if (300. < ttfreq < 500.):
archives[0].fscrunch(b3scrunch)
if (1160. < ttfreq < 1460.):
archives[0].fscrunch(b5scrunch)
freq_append.init(archives[0])
while len(archives) > 1:
ttfreq = archives[1].get_centre_frequency()
if (300. < ttfreq < 500.):
archives[1].fscrunch(b3scrunch)
if (1160. < ttfreq < 1460.):
archives[1].fscrunch(b5scrunch)
freq_append.append(archives[0],archives[1])
del archives[1]
return(archives[0])
#----------------------------------------------------------------------------------#
main()
| [
4,
5,
6,
7,
8
] |
1,164 | 307bb7461a729ba979f6a862fe7c292c42f96ce6 | <mask token>
| <mask token>
for _ in range(times):
removed = elements.pop()
elements.insert(0, removed)
print(elements)
elements = str(input('Type the elements of the list: ')).split()
elements = list(map(float, elements))
times = int(input('How many times do you wish to shift to the right: '))
for _ in range(times):
removed = elements.pop()
elements.insert(0, removed)
print(elements)
| # -*- coding: utf-8 -*-
elements = str(input("Type the elements of list: ")).split()
elements = list(map(float,elements))
times = int(input("How many times you wish shift to right: "))
for _ in range(times):
removed = elements.pop()
elements.insert(0,removed)
print(elements) | null | [
0,
1,
2,
3
] |
1,165 | 83e2f9c56c45a288aabd777fb244089367649258 | <mask token>
| <mask token>
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
IS_TESTING = False
FOLDER_TO_ORGANIZE = ''
FOLDER_FOR_OTHERS = ''
FOLDER_TO_ORGANIZE_TEST = ''
LOG_FILE = ''
IGNORE_HIDDEN_FILES = True
FILES_DESTINATION = {'images': ['.jpg', '.jpeg', '.png'], 'documents': [
'.pdf', '.xlsx', '.docx', '.txt'], 'apps': ['.pkg', '.dmg', '.exe'],
'videos': ['.mp4', '.flv'], 'audios': ['.mp3'], 'compressions': ['.rar',
'.zip'], 'scripts': ['.py', '.rb', '.js', '.html']}
| import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
IS_TESTING = False
FOLDER_TO_ORGANIZE = ''
FOLDER_FOR_OTHERS = ''
FOLDER_TO_ORGANIZE_TEST = ''
LOG_FILE = ''
IGNORE_HIDDEN_FILES = True
FILES_DESTINATION = {'images': ['.jpg', '.jpeg', '.png'], 'documents': [
'.pdf', '.xlsx', '.docx', '.txt'], 'apps': ['.pkg', '.dmg', '.exe'],
'videos': ['.mp4', '.flv'], 'audios': ['.mp3'], 'compressions': ['.rar',
'.zip'], 'scripts': ['.py', '.rb', '.js', '.html']}
| import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
IS_TESTING = False
FOLDER_TO_ORGANIZE = ''
FOLDER_FOR_OTHERS = ''
FOLDER_TO_ORGANIZE_TEST = ''
LOG_FILE = ''
IGNORE_HIDDEN_FILES = True
FILES_DESTINATION = {
'images': ['.jpg', '.jpeg', '.png'],
'documents': ['.pdf', '.xlsx', '.docx', '.txt'],
'apps': ['.pkg', '.dmg', '.exe'],
'videos': ['.mp4', '.flv'],
'audios': ['.mp3'],
'compressions': ['.rar', '.zip'],
'scripts': ['.py', '.rb', '.js', '.html'],
}
| null | [
0,
1,
2,
3
] |
1,166 | 8035f195cd01dc50691cd93ea91a6377b1d83f24 | <mask token>
class GpuThread(threading.Thread):
<mask token>
def run(self):
i = 0
while True:
result_dict = self.que_det.get(block=True)
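            # run detection, is-nodule classification and lobe location on the
            # preprocessed study, then queue the result for the next stage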
try:
print(time.ctime(), ' ', result_dict['json_id'],
' Using GPU Device ', self.index)
t_s = time.time()
nodule_df = self.lung_dete.prediction(result_dict[
'prep_data'], result_dict['prep_spac'], result_dict[
'prep_ebox'], result_dict['prep_mask'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung dete prediction):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[
'prep_case'], result_dict['prep_spac'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung isnc nodule cls):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_lobe(preb, result_dict['prep_mask'])
result_dict['nodule_preb'] = preb
self.que_ret.put(result_dict, timeout=2)
print(time.ctime(), ' ', result_dict['json_id'],
                    'GPU DOING USE TIME(lung lobe):', time.time() - t_s)
i += 1
del result_dict, nodule_df, preb
except FunctionTimedOut:
print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')
except Exception as e:
if result_dict and 'json_id' in result_dict.keys():
print(time.ctime() + 'GPU ERROR : {} {}'.format(e,
result_dict['json_id']))
error_info(200, result_dict)
else:
print(time.ctime() + 'GPU ERROR : {}'.format(e))
<mask token>
| <mask token>
class GpuThread(threading.Thread):
def __init__(self, que_det, que_ret, index):
threading.Thread.__init__(self)
self.que_det = que_det
self.que_ret = que_ret
self.index = index
self.lung_dete = LungDetection('./model/det.ckpt', self.index)
self.lung_isnc = LungIsncls('./model/isn.ckpt', self.index)
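        # load the left/right lung GMM models used for lobe localisation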
l_u = pickle._Unpickler(open('./model/left_gmm.pkl', 'rb'))
l_u.encoding = 'latin1'
self.left_gmm = l_u.load()
r_u = pickle._Unpickler(open('./model/right_gmm.pkl', 'rb'))
r_u.encoding = 'latin1'
self.right_gmm = r_u.load()
def run(self):
i = 0
while True:
result_dict = self.que_det.get(block=True)
try:
print(time.ctime(), ' ', result_dict['json_id'],
' Using GPU Device ', self.index)
t_s = time.time()
nodule_df = self.lung_dete.prediction(result_dict[
'prep_data'], result_dict['prep_spac'], result_dict[
'prep_ebox'], result_dict['prep_mask'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung dete prediction):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[
'prep_case'], result_dict['prep_spac'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung isnc nodule cls):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_lobe(preb, result_dict['prep_mask'])
result_dict['nodule_preb'] = preb
self.que_ret.put(result_dict, timeout=2)
print(time.ctime(), ' ', result_dict['json_id'],
                    'GPU DOING USE TIME(lung lobe):', time.time() - t_s)
i += 1
del result_dict, nodule_df, preb
except FunctionTimedOut:
print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')
except Exception as e:
if result_dict and 'json_id' in result_dict.keys():
print(time.ctime() + 'GPU ERROR : {} {}'.format(e,
result_dict['json_id']))
error_info(200, result_dict)
else:
print(time.ctime() + 'GPU ERROR : {}'.format(e))
<mask token>
| <mask token>
class GpuThread(threading.Thread):
def __init__(self, que_det, que_ret, index):
threading.Thread.__init__(self)
self.que_det = que_det
self.que_ret = que_ret
self.index = index
self.lung_dete = LungDetection('./model/det.ckpt', self.index)
self.lung_isnc = LungIsncls('./model/isn.ckpt', self.index)
l_u = pickle._Unpickler(open('./model/left_gmm.pkl', 'rb'))
l_u.encoding = 'latin1'
self.left_gmm = l_u.load()
r_u = pickle._Unpickler(open('./model/right_gmm.pkl', 'rb'))
r_u.encoding = 'latin1'
self.right_gmm = r_u.load()
def run(self):
i = 0
while True:
result_dict = self.que_det.get(block=True)
try:
print(time.ctime(), ' ', result_dict['json_id'],
' Using GPU Device ', self.index)
t_s = time.time()
nodule_df = self.lung_dete.prediction(result_dict[
'prep_data'], result_dict['prep_spac'], result_dict[
'prep_ebox'], result_dict['prep_mask'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung dete prediction):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[
'prep_case'], result_dict['prep_spac'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung isnc nodule cls):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_lobe(preb, result_dict['prep_mask'])
result_dict['nodule_preb'] = preb
self.que_ret.put(result_dict, timeout=2)
print(time.ctime(), ' ', result_dict['json_id'],
                    'GPU DOING USE TIME(lung lobe):', time.time() - t_s)
i += 1
del result_dict, nodule_df, preb
except FunctionTimedOut:
print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')
except Exception as e:
if result_dict and 'json_id' in result_dict.keys():
print(time.ctime() + 'GPU ERROR : {} {}'.format(e,
result_dict['json_id']))
error_info(200, result_dict)
else:
print(time.ctime() + 'GPU ERROR : {}'.format(e))
@func_set_timeout(5)
def lung_lobe(self, nodule_df, mask):
nodule_df_values = nodule_df[['coordX', 'coordY', 'coordZ']].values
lungs = []
lobes = []
lobel_info = []
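        # assign each detected nodule to a lung and lobe using the pre-trained GMMs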
for nodule in nodule_df_values:
lung, lobe = lobe_locate_gmm(nodule, mask, self.left_gmm, self.
right_gmm)
lungs.append(lung)
lobes.append(lobe)
lobel_info.append(lung + '肺' + (lobe + '叶' if not lobe == '' else
''))
nodule_df['lung'] = lungs
nodule_df['lobe'] = lobes
nodule_df['lobel_info'] = lobel_info
return nodule_df
| import threading
import time
import pickle
from utils.ret_utils import error_info
from nodule_class.isnodule import LungIsncls
from preprocessing.location import lobe_locate_gmm
from detection.lung_detection import LungDetection
from func_timeout import FunctionTimedOut
from func_timeout import func_set_timeout
import weakref
class GpuThread(threading.Thread):
def __init__(self, que_det, que_ret, index):
threading.Thread.__init__(self)
self.que_det = que_det
self.que_ret = que_ret
self.index = index
self.lung_dete = LungDetection('./model/det.ckpt', self.index)
self.lung_isnc = LungIsncls('./model/isn.ckpt', self.index)
l_u = pickle._Unpickler(open('./model/left_gmm.pkl', 'rb'))
l_u.encoding = 'latin1'
self.left_gmm = l_u.load()
r_u = pickle._Unpickler(open('./model/right_gmm.pkl', 'rb'))
r_u.encoding = 'latin1'
self.right_gmm = r_u.load()
def run(self):
i = 0
while True:
result_dict = self.que_det.get(block=True)
try:
print(time.ctime(), ' ', result_dict['json_id'],
' Using GPU Device ', self.index)
t_s = time.time()
nodule_df = self.lung_dete.prediction(result_dict[
'prep_data'], result_dict['prep_spac'], result_dict[
'prep_ebox'], result_dict['prep_mask'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung dete prediction):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_isnc.nodule_cls(nodule_df, result_dict[
'prep_case'], result_dict['prep_spac'])
print(time.ctime(), ' ', result_dict['json_id'],
'GPU DOING USE TIME(lung isnc nodule cls):', time.time(
) - t_s)
t_s = time.time()
preb = self.lung_lobe(preb, result_dict['prep_mask'])
result_dict['nodule_preb'] = preb
self.que_ret.put(result_dict, timeout=2)
print(time.ctime(), ' ', result_dict['json_id'],
                    'GPU DOING USE TIME(lung lobe):', time.time() - t_s)
i += 1
del result_dict, nodule_df, preb
except FunctionTimedOut:
print(time.ctime(), result_dict['json_id'], 'GPU FUN TIMEOUT ')
except Exception as e:
if result_dict and 'json_id' in result_dict.keys():
print(time.ctime() + 'GPU ERROR : {} {}'.format(e,
result_dict['json_id']))
error_info(200, result_dict)
else:
print(time.ctime() + 'GPU ERROR : {}'.format(e))
@func_set_timeout(5)
def lung_lobe(self, nodule_df, mask):
nodule_df_values = nodule_df[['coordX', 'coordY', 'coordZ']].values
lungs = []
lobes = []
lobel_info = []
for nodule in nodule_df_values:
lung, lobe = lobe_locate_gmm(nodule, mask, self.left_gmm, self.
right_gmm)
lungs.append(lung)
lobes.append(lobe)
lobel_info.append(lung + '肺' + (lobe + '叶' if not lobe == '' else
''))
nodule_df['lung'] = lungs
nodule_df['lobe'] = lobes
nodule_df['lobel_info'] = lobel_info
return nodule_df
| import threading
import time
# import numpy as np
import pickle
from utils.ret_utils import error_info
from nodule_class.isnodule import LungIsncls
from preprocessing.location import lobe_locate_gmm
from detection.lung_detection import LungDetection
from func_timeout import FunctionTimedOut
from func_timeout import func_set_timeout
import weakref
class GpuThread(threading.Thread):
def __init__(self, que_det, que_ret, index):
threading.Thread.__init__(self)
self.que_det = que_det
self.que_ret = que_ret
self.index = index
self.lung_dete = LungDetection("./model/det.ckpt", self.index)
# is nodule cls
self.lung_isnc = LungIsncls("./model/isn.ckpt", self.index)
l_u = pickle._Unpickler(open("./model/left_gmm.pkl", "rb"))
l_u.encoding = "latin1"
self.left_gmm = l_u.load()
r_u = pickle._Unpickler(open("./model/right_gmm.pkl", "rb"))
r_u.encoding = "latin1"
self.right_gmm = r_u.load()
# cudnn.benchmark = True
def run(self):
i = 0
while True:
result_dict = self.que_det.get(block=True)
try:
print(
time.ctime(),
" ",
result_dict["json_id"],
" Using GPU Device ",
self.index,
)
t_s = time.time()
nodule_df = self.lung_dete.prediction(
result_dict["prep_data"],
result_dict["prep_spac"],
result_dict["prep_ebox"],
result_dict["prep_mask"],
)
print(
time.ctime(),
" ",
result_dict["json_id"],
"GPU DOING USE TIME(lung dete prediction):",
time.time() - t_s,
)
t_s = time.time()
preb = self.lung_isnc.nodule_cls(
nodule_df, result_dict["prep_case"], result_dict["prep_spac"]
)
print(
time.ctime(),
" ",
result_dict["json_id"],
"GPU DOING USE TIME(lung isnc nodule cls):",
time.time() - t_s,
)
# preb = lung_isnc.nodule_cls(nodule_df, result_dict['prep_case'], result_dict['prep_spac'])
# del lung_isnc
t_s = time.time()
preb = self.lung_lobe(preb, result_dict["prep_mask"])
result_dict["nodule_preb"] = preb
self.que_ret.put(result_dict, timeout=2)
print(
time.ctime(),
" ",
result_dict["json_id"],
"GPU DOING US TIME(lung lobe):",
time.time() - t_s,
)
i += 1
del result_dict, nodule_df, preb
except FunctionTimedOut:
print(time.ctime(), result_dict["json_id"], "GPU FUN TIMEOUT ")
except Exception as e:
if result_dict and "json_id" in result_dict.keys():
print(
time.ctime()
+ "GPU ERROR : {} {}".format(e, result_dict["json_id"])
)
error_info(200, result_dict)
else:
print(time.ctime() + "GPU ERROR : {}".format(e))
@func_set_timeout(5)
def lung_lobe(self, nodule_df, mask):
nodule_df_values = nodule_df[["coordX", "coordY", "coordZ"]].values
lungs = []
lobes = []
lobel_info = []
for nodule in nodule_df_values:
lung, lobe = lobe_locate_gmm(nodule, mask, self.left_gmm, self.right_gmm)
lungs.append(lung)
lobes.append(lobe)
lobel_info.append(lung + "肺" + (lobe + "叶" if not lobe == "" else ""))
nodule_df["lung"] = lungs
nodule_df["lobe"] = lobes
nodule_df["lobel_info"] = lobel_info
return nodule_df
| [
2,
3,
4,
5,
6
] |
1,167 | def089c2749444797ac3079809c082dacab08554 | <mask token>
| class ModelInfo:
<mask token>
| class ModelInfo:
def __init__(self, name: str, path: str, filter: str):
self.name: str = name
self.path: str = path
self.filter: str = filter
| null | null | [
0,
1,
2
] |
1,168 | 17505f5c14190df3311c04c19f687937481b920b | <mask token>
@app.route('/visualisation/confirmed/<string:country>')
@cross_origin()
def confirmedCases(country):
array = dataEx.getData('Confirmed', country).tolist()
return jsonify({'confirmed': array})
@app.route('/visualisation/recovered/<string:country>')
@cross_origin()
def recoveredCases(country):
array = dataEx.getData('Recovered', country).tolist()
return jsonify({'recovered': array})
<mask token>
@app.route('/visualisation/maxofall/<string:country>')
@cross_origin()
def maxofall(country):
array = dataEx.getMaxOfAll(country).tolist()
return jsonify({'confirmed': array[0], 'recovered': array[1], 'death':
array[2]})
@app.route('/visualisation/newdata/<string:country>')
@cross_origin()
def NewData(country):
array = dataEx.getNewData(country)[0]
lastUpdate = dataEx.getNewData(country)[1]
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2], 'lastUpdate': lastUpdate})
@app.route('/visualisation/regionsData')
@cross_origin()
def dataByregion():
array = dataEx.getRegionsData()
return jsonify({'regions': array[0], 'affectedNum': array[1], 'update':
array[2], 'somme': array[3]})
@app.route('/visualisation/StatistiqueMonde')
@cross_origin()
def getStatistiqueMonde():
array = dataEx.getStatistiqueMonde()
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2]})
@app.route('/visualisation/clusterAge')
@cross_origin()
def getClusterAge():
array = dataEx.getDataClusterAge()
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/clusterTest')
@cross_origin()
def getClusterTest():
array = dataEx.getDataClusterTest()
print(array)
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/ageClusterMean')
@cross_origin()
def getMeanClusterAge():
array = dataEx.getDataClusterAge()[4]
print(array)
return jsonify({'meanClusters': array.tolist()})
@app.route('/visualisation/testClusterMean')
@cross_origin()
def getMeanClusterTest():
array = dataEx.getDataClusterTest()[4]
return jsonify({'meanClusters': array.tolist()})
@app.route('/analysesentiment/covid19/', defaults={'tags': '#covid19',
'tags2': ''})
@app.route('/analysesentiment/covid19/<string:tags>/<string:tags2>')
@cross_origin()
def analyseSentiment(tags, tags2):
array = twitterDataExtaraction(tags, tags2)
return jsonify({'neutral': array[0], 'negative': array[1], 'positive':
array[2]})
@app.route('/mongodb/nature')
@cross_origin()
def getNature():
cursor = db.nature.find().skip(db.nature.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/economy')
@cross_origin()
def getEconomy():
cursor = db.economy.find().skip(db.economy.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/mentalhealth')
@cross_origin()
def getMentalhealth():
cursor = db.mentalhealth.find().skip(db.mentalhealth.count_documents({}
) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/politics')
@cross_origin()
def getPolitics():
cursor = db.politics.find().skip(db.politics.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/visualisation/clusteringAge')
@cross_origin()
def getClusteringAge():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringAge.find().skip(db.clusteringAge.count_documents({
}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
@app.route('/visualisation/clusteringTest')
@cross_origin()
def getClusteringTest():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringTest.find().skip(db.clusteringTest.count_documents
({}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
<mask token>
| <mask token>
@app.route('/visualisation/confirmed/<string:country>')
@cross_origin()
def confirmedCases(country):
array = dataEx.getData('Confirmed', country).tolist()
return jsonify({'confirmed': array})
@app.route('/visualisation/recovered/<string:country>')
@cross_origin()
def recoveredCases(country):
array = dataEx.getData('Recovered', country).tolist()
return jsonify({'recovered': array})
@app.route('/visualisation/death/<string:country>')
@cross_origin()
def deathCases(country):
array = dataEx.getData('Deaths', country).tolist()
return jsonify({'deaths': array})
@app.route('/visualisation/maxofall/<string:country>')
@cross_origin()
def maxofall(country):
array = dataEx.getMaxOfAll(country).tolist()
return jsonify({'confirmed': array[0], 'recovered': array[1], 'death':
array[2]})
@app.route('/visualisation/newdata/<string:country>')
@cross_origin()
def NewData(country):
array = dataEx.getNewData(country)[0]
lastUpdate = dataEx.getNewData(country)[1]
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2], 'lastUpdate': lastUpdate})
@app.route('/visualisation/regionsData')
@cross_origin()
def dataByregion():
array = dataEx.getRegionsData()
return jsonify({'regions': array[0], 'affectedNum': array[1], 'update':
array[2], 'somme': array[3]})
@app.route('/visualisation/StatistiqueMonde')
@cross_origin()
def getStatistiqueMonde():
array = dataEx.getStatistiqueMonde()
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2]})
@app.route('/visualisation/clusterAge')
@cross_origin()
def getClusterAge():
array = dataEx.getDataClusterAge()
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/clusterTest')
@cross_origin()
def getClusterTest():
array = dataEx.getDataClusterTest()
print(array)
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/ageClusterMean')
@cross_origin()
def getMeanClusterAge():
array = dataEx.getDataClusterAge()[4]
print(array)
return jsonify({'meanClusters': array.tolist()})
@app.route('/visualisation/testClusterMean')
@cross_origin()
def getMeanClusterTest():
array = dataEx.getDataClusterTest()[4]
return jsonify({'meanClusters': array.tolist()})
@app.route('/analysesentiment/covid19/', defaults={'tags': '#covid19',
'tags2': ''})
@app.route('/analysesentiment/covid19/<string:tags>/<string:tags2>')
@cross_origin()
def analyseSentiment(tags, tags2):
array = twitterDataExtaraction(tags, tags2)
return jsonify({'neutral': array[0], 'negative': array[1], 'positive':
array[2]})
@app.route('/mongodb/nature')
@cross_origin()
def getNature():
cursor = db.nature.find().skip(db.nature.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/economy')
@cross_origin()
def getEconomy():
cursor = db.economy.find().skip(db.economy.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/mentalhealth')
@cross_origin()
def getMentalhealth():
cursor = db.mentalhealth.find().skip(db.mentalhealth.count_documents({}
) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/politics')
@cross_origin()
def getPolitics():
cursor = db.politics.find().skip(db.politics.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/visualisation/clusteringAge')
@cross_origin()
def getClusteringAge():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringAge.find().skip(db.clusteringAge.count_documents({
}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
@app.route('/visualisation/clusteringTest')
@cross_origin()
def getClusteringTest():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringTest.find().skip(db.clusteringTest.count_documents
({}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
<mask token>
| <mask token>
@app.route('/visualisation/confirmed/<string:country>')
@cross_origin()
def confirmedCases(country):
array = dataEx.getData('Confirmed', country).tolist()
return jsonify({'confirmed': array})
@app.route('/visualisation/recovered/<string:country>')
@cross_origin()
def recoveredCases(country):
array = dataEx.getData('Recovered', country).tolist()
return jsonify({'recovered': array})
@app.route('/visualisation/death/<string:country>')
@cross_origin()
def deathCases(country):
array = dataEx.getData('Deaths', country).tolist()
return jsonify({'deaths': array})
@app.route('/visualisation/maxofall/<string:country>')
@cross_origin()
def maxofall(country):
array = dataEx.getMaxOfAll(country).tolist()
return jsonify({'confirmed': array[0], 'recovered': array[1], 'death':
array[2]})
@app.route('/visualisation/newdata/<string:country>')
@cross_origin()
def NewData(country):
array = dataEx.getNewData(country)[0]
lastUpdate = dataEx.getNewData(country)[1]
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2], 'lastUpdate': lastUpdate})
@app.route('/visualisation/regionsData')
@cross_origin()
def dataByregion():
array = dataEx.getRegionsData()
return jsonify({'regions': array[0], 'affectedNum': array[1], 'update':
array[2], 'somme': array[3]})
@app.route('/visualisation/StatistiqueMonde')
@cross_origin()
def getStatistiqueMonde():
array = dataEx.getStatistiqueMonde()
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2]})
@app.route('/visualisation/clusterAge')
@cross_origin()
def getClusterAge():
array = dataEx.getDataClusterAge()
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/clusterTest')
@cross_origin()
def getClusterTest():
array = dataEx.getDataClusterTest()
print(array)
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/ageClusterMean')
@cross_origin()
def getMeanClusterAge():
array = dataEx.getDataClusterAge()[4]
print(array)
return jsonify({'meanClusters': array.tolist()})
@app.route('/visualisation/testClusterMean')
@cross_origin()
def getMeanClusterTest():
array = dataEx.getDataClusterTest()[4]
return jsonify({'meanClusters': array.tolist()})
@app.route('/analysesentiment/covid19/', defaults={'tags': '#covid19',
'tags2': ''})
@app.route('/analysesentiment/covid19/<string:tags>/<string:tags2>')
@cross_origin()
def analyseSentiment(tags, tags2):
array = twitterDataExtaraction(tags, tags2)
return jsonify({'neutral': array[0], 'negative': array[1], 'positive':
array[2]})
@app.route('/mongodb/nature')
@cross_origin()
def getNature():
cursor = db.nature.find().skip(db.nature.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/economy')
@cross_origin()
def getEconomy():
cursor = db.economy.find().skip(db.economy.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/mentalhealth')
@cross_origin()
def getMentalhealth():
cursor = db.mentalhealth.find().skip(db.mentalhealth.count_documents({}
) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/politics')
@cross_origin()
def getPolitics():
cursor = db.politics.find().skip(db.politics.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/visualisation/clusteringAge')
@cross_origin()
def getClusteringAge():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringAge.find().skip(db.clusteringAge.count_documents({
}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
@app.route('/visualisation/clusteringTest')
@cross_origin()
def getClusteringTest():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringTest.find().skip(db.clusteringTest.count_documents
({}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
if __name__ == '__main__':
app.run(debug=True)
| from flask import Flask, jsonify
import dataExtraction as dataEx
from flask_cors import CORS, cross_origin
from analyseSentiment import twitterDataExtaraction
from flask_pymongo import PyMongo
app = Flask(__name__)
app.config['MONGO_URI'] = 'mongodb://localhost:27017/scrapingDB'
mongo = PyMongo(app)
db = mongo.db
cors = CORS(app, resources={'/api/*': {'origins': '*'}})
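# visualisation endpoints: per-country time series, regional data, clustering and sentiment results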
@app.route('/visualisation/confirmed/<string:country>')
@cross_origin()
def confirmedCases(country):
array = dataEx.getData('Confirmed', country).tolist()
return jsonify({'confirmed': array})
@app.route('/visualisation/recovered/<string:country>')
@cross_origin()
def recoveredCases(country):
array = dataEx.getData('Recovered', country).tolist()
return jsonify({'recovered': array})
@app.route('/visualisation/death/<string:country>')
@cross_origin()
def deathCases(country):
array = dataEx.getData('Deaths', country).tolist()
return jsonify({'deaths': array})
@app.route('/visualisation/maxofall/<string:country>')
@cross_origin()
def maxofall(country):
array = dataEx.getMaxOfAll(country).tolist()
return jsonify({'confirmed': array[0], 'recovered': array[1], 'death':
array[2]})
@app.route('/visualisation/newdata/<string:country>')
@cross_origin()
def NewData(country):
array = dataEx.getNewData(country)[0]
lastUpdate = dataEx.getNewData(country)[1]
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2], 'lastUpdate': lastUpdate})
@app.route('/visualisation/regionsData')
@cross_origin()
def dataByregion():
array = dataEx.getRegionsData()
return jsonify({'regions': array[0], 'affectedNum': array[1], 'update':
array[2], 'somme': array[3]})
@app.route('/visualisation/StatistiqueMonde')
@cross_origin()
def getStatistiqueMonde():
array = dataEx.getStatistiqueMonde()
return jsonify({'totalCases': array[0], 'death': array[1], 'recovered':
array[2]})
@app.route('/visualisation/clusterAge')
@cross_origin()
def getClusterAge():
array = dataEx.getDataClusterAge()
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/clusterTest')
@cross_origin()
def getClusterTest():
array = dataEx.getDataClusterTest()
print(array)
return jsonify({'countries': array[0].tolist(), 'x': array[1].tolist(),
'y': array[2].tolist(), 'cluster': array[3].tolist()})
@app.route('/visualisation/ageClusterMean')
@cross_origin()
def getMeanClusterAge():
array = dataEx.getDataClusterAge()[4]
print(array)
return jsonify({'meanClusters': array.tolist()})
@app.route('/visualisation/testClusterMean')
@cross_origin()
def getMeanClusterTest():
array = dataEx.getDataClusterTest()[4]
return jsonify({'meanClusters': array.tolist()})
@app.route('/analysesentiment/covid19/', defaults={'tags': '#covid19',
'tags2': ''})
@app.route('/analysesentiment/covid19/<string:tags>/<string:tags2>')
@cross_origin()
def analyseSentiment(tags, tags2):
array = twitterDataExtaraction(tags, tags2)
return jsonify({'neutral': array[0], 'negative': array[1], 'positive':
array[2]})
@app.route('/mongodb/nature')
@cross_origin()
def getNature():
cursor = db.nature.find().skip(db.nature.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/economy')
@cross_origin()
def getEconomy():
cursor = db.economy.find().skip(db.economy.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/mentalhealth')
@cross_origin()
def getMentalhealth():
cursor = db.mentalhealth.find().skip(db.mentalhealth.count_documents({}
) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/mongodb/politics')
@cross_origin()
def getPolitics():
cursor = db.politics.find().skip(db.politics.count_documents({}) - 1)
return jsonify({'neutral': cursor[0]['neutral'], 'negative': cursor[0][
'negative'], 'positive': cursor[0]['positive']})
@app.route('/visualisation/clusteringAge')
@cross_origin()
def getClusteringAge():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringAge.find().skip(db.clusteringAge.count_documents({
}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
@app.route('/visualisation/clusteringTest')
@cross_origin()
def getClusteringTest():
app.config['MONGO_URI'] = 'mongodb://localhost:27017/ClusteringDB'
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringTest.find().skip(db.clusteringTest.count_documents
({}) - 1)
return jsonify({'countries': array[0]['countries'], 'x': array[0]['x'],
'y': array[0]['y'], 'cluster': array[0]['cluster']})
if __name__ == '__main__':
app.run(debug=True)
| from flask import Flask, jsonify
import dataExtraction as dataEx
from flask_cors import CORS,cross_origin
from analyseSentiment import twitterDataExtaraction
from flask_pymongo import PyMongo
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/scrapingDB"
mongo = PyMongo(app)
db = mongo.db
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
# Visualisation service part
@app.route('/visualisation/confirmed/<string:country>')
@cross_origin()
def confirmedCases(country):
array = dataEx.getData("Confirmed",country).tolist()
return jsonify({"confirmed" : array})
@app.route('/visualisation/recovered/<string:country>')
@cross_origin()
def recoveredCases(country):
array = dataEx.getData("Recovered", country).tolist()
return jsonify({"recovered": array})
@app.route('/visualisation/death/<string:country>')
@cross_origin()
def deathCases(country):
array = dataEx.getData("Deaths", country).tolist()
return jsonify({"deaths": array})
@app.route('/visualisation/maxofall/<string:country>')
@cross_origin()
def maxofall(country):
array = dataEx.getMaxOfAll(country).tolist()
return jsonify({"confirmed" : array[0], "recovered" : array[1], "death" : array[2]})
@app.route('/visualisation/newdata/<string:country>')
@cross_origin()
def NewData(country):
array = dataEx.getNewData(country)[0]
lastUpdate = dataEx.getNewData(country)[1]
return jsonify({"totalCases" :array[0], "death" :array[1], "recovered" :array[2], "lastUpdate" :lastUpdate})
@app.route('/visualisation/regionsData')
@cross_origin()
def dataByregion():
array = dataEx.getRegionsData()
return jsonify({"regions":array[0], "affectedNum": array[1], "update": array[2], "somme":array[3]})
@app.route('/visualisation/StatistiqueMonde')
@cross_origin()
def getStatistiqueMonde():
array = dataEx.getStatistiqueMonde()
return jsonify({"totalCases": array[0], "death": array[1], "recovered": array[2]})
@app.route('/visualisation/clusterAge')
@cross_origin()
def getClusterAge():
array = dataEx.getDataClusterAge()
return jsonify({"countries": array[0].tolist(), "x": array[1].tolist(),"y":array[2].tolist(), "cluster": array[3].tolist()})
@app.route('/visualisation/clusterTest')
@cross_origin()
def getClusterTest():
array = dataEx.getDataClusterTest()
print(array)
return jsonify({"countries": array[0].tolist(), "x": array[1].tolist(),"y":array[2].tolist(), "cluster": array[3].tolist()})
@app.route('/visualisation/ageClusterMean')
@cross_origin()
def getMeanClusterAge():
array = dataEx.getDataClusterAge()[4]
print(array)
return jsonify({"meanClusters": array.tolist()})
@app.route('/visualisation/testClusterMean')
@cross_origin()
def getMeanClusterTest():
array = dataEx.getDataClusterTest()[4]
return jsonify({"meanClusters": array.tolist()})
@app.route("/analysesentiment/covid19/", defaults={'tags': '#covid19','tags2': ''})
@app.route('/analysesentiment/covid19/<string:tags>/<string:tags2>')
@cross_origin()
def analyseSentiment(tags,tags2):
array = twitterDataExtaraction(tags,tags2)
return jsonify({"neutral": array[0], "negative": array[1], "positive": array[2]})
@app.route('/mongodb/nature')
@cross_origin()
def getNature():
cursor = db.nature.find().skip(db.nature.count_documents({}) - 1)
return jsonify({"neutral": cursor[0]['neutral'], "negative": cursor[0]['negative'], "positive": cursor[0]['positive']})
@app.route('/mongodb/economy')
@cross_origin()
def getEconomy():
cursor = db.economy.find().skip(db.economy.count_documents({}) - 1)
return jsonify({"neutral": cursor[0]['neutral'], "negative": cursor[0]['negative'], "positive": cursor[0]['positive']})
@app.route('/mongodb/mentalhealth')
@cross_origin()
def getMentalhealth():
cursor = db.mentalhealth.find().skip(db.mentalhealth.count_documents({}) - 1)
return jsonify({"neutral": cursor[0]['neutral'], "negative": cursor[0]['negative'], "positive": cursor[0]['positive']})
@app.route('/mongodb/politics')
@cross_origin()
def getPolitics():
cursor = db.politics.find().skip(db.politics.count_documents({}) - 1)
return jsonify({"neutral": cursor[0]['neutral'], "negative": cursor[0]['negative'], "positive": cursor[0]['positive']})
@app.route('/visualisation/clusteringAge')
@cross_origin()
def getClusteringAge():
app.config["MONGO_URI"] = "mongodb://localhost:27017/ClusteringDB"
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringAge.find().skip(db.clusteringAge.count_documents({}) - 1)
return jsonify({"countries": array[0]['countries'], "x": array[0]['x'],"y":array[0]['y'], "cluster": array[0]['cluster']})
@app.route('/visualisation/clusteringTest')
@cross_origin()
def getClusteringTest():
app.config["MONGO_URI"] = "mongodb://localhost:27017/ClusteringDB"
mongo = PyMongo(app)
db = mongo.db
array = db.clusteringTest.find().skip(db.clusteringTest.count_documents({}) - 1)
return jsonify(
{"countries": array[0]['countries'], "x": array[0]['x'], "y": array[0]['y'], "cluster": array[0]['cluster']})
if __name__ == "__main__":
app.run(debug=True) | [
17,
18,
19,
21,
22
] |
1,169 | 5c1ce46f45da33acf75a7f47add811b14d58414d | <mask token>
| <mask token>
def extraLongFactorials(n):
print(math.factorial(n))
<mask token>
| <mask token>
def extraLongFactorials(n):
print(math.factorial(n))
if __name__ == '__main__':
n = int(input())
extraLongFactorials(n)
| <mask token>
import math
import os
import random
import re
import sys
def extraLongFactorials(n):
print(math.factorial(n))
if __name__ == '__main__':
n = int(input())
extraLongFactorials(n)
| '''
Function Description
Complete the extraLongFactorials function in the editor below. It should print the result and return.
extraLongFactorials has the following parameter(s):
n: an integer
Note: Factorials of n can't be stored even in a long long variable. Big integers must be
used for such calculations. Languages like Java, Python, Ruby etc. can handle big integers, but we need to write additional code in C/C++ to handle huge values.
We recommend solving this challenge using BigIntegers.
Input Format
Input consists of a single integer n.
Output Format
Print the factorial of n.
'''
#!/bin/python3
import math
import os
import random
import re
import sys
def extraLongFactorials(n):
print(math.factorial(n))
if __name__ == '__main__':
n = int(input())
extraLongFactorials(n)
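# Editor's note: a small self-contained check of why no extra big-integer code is
# needed in Python -- int is arbitrary precision, whereas a C/C++ long long overflows
# just past 20!. Illustrative sketch only, separate from the original stub above.
import math as _math_check
assert _math_check.factorial(20) <= 2**63 - 1   # 20! still fits in a signed 64-bit value
assert _math_check.factorial(21) > 2**63 - 1    # 21! already does not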
| [
0,
1,
2,
3,
4
] |
1,170 | c6c13ab24e4907eecf1db4fded28d4fc8126c834 | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('warhawks', '0012_auto_20180607_1815'), (
'notification', '0002_auto_20180607_1759')]
operations = [migrations.CreateModel(name='N_lostandfound', fields=[(
'id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('date', models.DateTimeField
(auto_now_add=True)), ('message', models.CharField(max_length=100)),
('read', models.BooleanField(default=False)), ('from_user', models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='from_user_lost', to=settings.AUTH_USER_MODEL)), ('lf',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'warhawks.LostAndFound')), ('post', models.ForeignKey(on_delete=
django.db.models.deletion.CASCADE, to='warhawks.LFComment')), (
'to_user', models.ForeignKey(on_delete=django.db.models.deletion.
CASCADE, related_name='to_user_lost', to=settings.AUTH_USER_MODEL))])]
| from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [migrations.swappable_dependency(settings.
AUTH_USER_MODEL), ('warhawks', '0012_auto_20180607_1815'), (
'notification', '0002_auto_20180607_1759')]
operations = [migrations.CreateModel(name='N_lostandfound', fields=[(
'id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')), ('date', models.DateTimeField
(auto_now_add=True)), ('message', models.CharField(max_length=100)),
('read', models.BooleanField(default=False)), ('from_user', models.
ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='from_user_lost', to=settings.AUTH_USER_MODEL)), ('lf',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=
'warhawks.LostAndFound')), ('post', models.ForeignKey(on_delete=
django.db.models.deletion.CASCADE, to='warhawks.LFComment')), (
'to_user', models.ForeignKey(on_delete=django.db.models.deletion.
CASCADE, related_name='to_user_lost', to=settings.AUTH_USER_MODEL))])]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-07 12:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('warhawks', '0012_auto_20180607_1815'),
('notification', '0002_auto_20180607_1759'),
]
operations = [
migrations.CreateModel(
name='N_lostandfound',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now_add=True)),
('message', models.CharField(max_length=100)),
('read', models.BooleanField(default=False)),
('from_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='from_user_lost', to=settings.AUTH_USER_MODEL)),
('lf', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='warhawks.LostAndFound')),
('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='warhawks.LFComment')),
('to_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to_user_lost', to=settings.AUTH_USER_MODEL)),
],
),
]
| [
0,
1,
2,
3,
4
] |
1,171 | ebe7c245e3e14116a37020971e67ada054e0b434 | <mask token>
| <mask token>
for reservoir in reservoirs:
storageURL = ('https://cdec.water.ca.gov/dynamicapp/QueryMonthly?s=' +
reservoir[0])
storagePage = requests.get(storageURL)
storageSoup = BeautifulSoup(storagePage.content, 'html.parser')
storageRow = storageSoup.find(text='08/2021').parent.parent
reservoir.append(storageRow.findAll('td')[1].text.strip())
avgURL = 'https://cdec.water.ca.gov/dynamicapp/profile?s=' + reservoir[0
] + '&type=res'
avgPage = requests.get(avgURL)
avgSoup = BeautifulSoup(avgPage.content, 'html.parser')
reservoir.append(avgSoup.find(text='August').parent.parent.parent.
findAll('td')[1].text.strip())
<mask token>
writer.writerow(['Reservoir', 'August storage', 'August average'])
writer.writerows(reservoirs)
| <mask token>
reservoirs = [['LVQ'], ['HTH'], ['APN'], ['KNT'], ['SHA']]
for reservoir in reservoirs:
storageURL = ('https://cdec.water.ca.gov/dynamicapp/QueryMonthly?s=' +
reservoir[0])
storagePage = requests.get(storageURL)
storageSoup = BeautifulSoup(storagePage.content, 'html.parser')
storageRow = storageSoup.find(text='08/2021').parent.parent
reservoir.append(storageRow.findAll('td')[1].text.strip())
avgURL = 'https://cdec.water.ca.gov/dynamicapp/profile?s=' + reservoir[0
] + '&type=res'
avgPage = requests.get(avgURL)
avgSoup = BeautifulSoup(avgPage.content, 'html.parser')
reservoir.append(avgSoup.find(text='August').parent.parent.parent.
findAll('td')[1].text.strip())
outfile = open('./water-data-all-august.csv', 'wb')
writer = csv.writer(outfile)
writer.writerow(['Reservoir', 'August storage', 'August average'])
writer.writerows(reservoirs)
| import requests
import csv
from bs4 import BeautifulSoup
reservoirs = [['LVQ'], ['HTH'], ['APN'], ['KNT'], ['SHA']]
for reservoir in reservoirs:
storageURL = ('https://cdec.water.ca.gov/dynamicapp/QueryMonthly?s=' +
reservoir[0])
storagePage = requests.get(storageURL)
storageSoup = BeautifulSoup(storagePage.content, 'html.parser')
storageRow = storageSoup.find(text='08/2021').parent.parent
reservoir.append(storageRow.findAll('td')[1].text.strip())
avgURL = 'https://cdec.water.ca.gov/dynamicapp/profile?s=' + reservoir[0
] + '&type=res'
avgPage = requests.get(avgURL)
avgSoup = BeautifulSoup(avgPage.content, 'html.parser')
reservoir.append(avgSoup.find(text='August').parent.parent.parent.
findAll('td')[1].text.strip())
outfile = open('./water-data-all-august.csv', 'wb')
writer = csv.writer(outfile)
writer.writerow(['Reservoir', 'August storage', 'August average'])
writer.writerows(reservoirs)
| import requests
import csv
from bs4 import BeautifulSoup
reservoirs = [["LVQ"], ["HTH"], ["APN"], ["KNT"], ["SHA"]]
for reservoir in reservoirs:
storageURL = "https://cdec.water.ca.gov/dynamicapp/QueryMonthly?s=" + reservoir[0]
storagePage = requests.get(storageURL)
storageSoup = BeautifulSoup(storagePage.content, "html.parser")
storageRow = storageSoup.find(text="08/2021").parent.parent
reservoir.append(storageRow.findAll('td')[1].text.strip())
avgURL = "https://cdec.water.ca.gov/dynamicapp/profile?s=" + reservoir[0] + "&type=res"
avgPage = requests.get(avgURL)
avgSoup = BeautifulSoup(avgPage.content, "html.parser")
reservoir.append(avgSoup.find(text="August").parent.parent.parent.findAll('td')[1].text.strip())
####################
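# Editor's note: csv.writer expects a text-mode file on Python 3, so the "wb" mode used
# below only works under Python 2. A minimal Python 3 sketch of the same output step,
# kept as an unused helper so the original flow is unchanged (same path and header):
def write_rows_py3(rows, path="./water-data-all-august.csv"):
    # newline="" is the mode the csv module documentation recommends for output files
    with open(path, "w", newline="") as handle:
        writer3 = csv.writer(handle)
        writer3.writerow(["Reservoir", "August storage", "August average"])
        writer3.writerows(rows)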
outfile = open("./water-data-all-august.csv", "wb")
writer = csv.writer(outfile)
writer.writerow(["Reservoir", "August storage", "August average"])
writer.writerows(reservoirs) | [
0,
1,
2,
3,
4
] |
1,172 | abdedad2c2b42b54cdba0e61e095ba3df0783b81 | <mask token>
| <mask token>
__author__ = 'vidma'
| """
Contain meta-data related functions:
* accessing integration schema: fields, values, constraints on inputs/queries
* tracking fields available
* tracking known (input field) values
"""
# coding=utf-8
__author__ = 'vidma'
| null | null | [
0,
1,
2
] |
1,173 | 0de735647cf87f64ab64af081da6e11b0ed8a7a7 | <mask token>
| <mask token>
urlpatterns = [url('^login/$', login_page, name='login'), url('^logout/$',
logout_page, name='logout'), url('^register/$', register_page, name=
'register'), url('^product/$', product_list_view, name='product'), url(
'^component/$', component, name='component'), url('^tracker/$', tracker,
name='tracker'), url('^cart/', include(('cart.urls', 'cart'), namespace
='cart')), url('^detail/(?P<parameter>[\\w-]+)/$', product_detail_view,
name='detail'), url('^$', home_page, name='home'), url('^admin/', admin
.site.urls)]
| <mask token>
from django.conf import settings
from django.conf.urls.static import static
from product.views import product_list_view, component, product_detail_view
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import TemplateView
from .views import home_page, login_page, register_page, logout_page
from tracker.views import tracker
urlpatterns = [url('^login/$', login_page, name='login'), url('^logout/$',
logout_page, name='logout'), url('^register/$', register_page, name=
'register'), url('^product/$', product_list_view, name='product'), url(
'^component/$', component, name='component'), url('^tracker/$', tracker,
name='tracker'), url('^cart/', include(('cart.urls', 'cart'), namespace
='cart')), url('^detail/(?P<parameter>[\\w-]+)/$', product_detail_view,
name='detail'), url('^$', home_page, name='home'), url('^admin/', admin
.site.urls)]
| """component URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from product.views import product_list_view, component, product_detail_view
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic import TemplateView
from .views import home_page, login_page, register_page, logout_page
from tracker.views import tracker
urlpatterns = [
url(r'^login/$', login_page, name='login'),
url(r'^logout/$', logout_page, name='logout'),
url(r'^register/$', register_page, name='register'),
url(r'^product/$', product_list_view, name='product'),
url(r'^component/$', component, name='component'),
url(r'^tracker/$', tracker, name='tracker'),
url(r'^cart/', include(('cart.urls', 'cart'), namespace='cart')),
#url(r'^detail/$', product_detail_view, name='detail'),
#url(r'^product/product-(?P<parameter>[\w-]+).html', 'views.product', name="product"),
#url(r'^stores/\w+/',.....)
url(r'^detail/(?P<parameter>[\w-]+)/$', product_detail_view, name='detail'),
url(r'^$', home_page, name='home'),
url(r'^admin/', admin.site.urls),
]
| null | [
0,
1,
2,
3
] |
1,174 | 96ac9088650490a7da00c7a20f634b76e673ca2d | <mask token>
class WINRM(object):
<mask token>
<mask token>
def connect(self):
"""
Method to connect to a Windows machine.
"""
try:
self.host_win_ip = 'http://' + self.host_ip + ':5985/wsman'
self.conn = Protocol(endpoint=self.host_win_ip, transport=
'ntlm', username=self.usr, password=self.pwd,
server_cert_validation='ignore')
logger.warn('Connecting Windows ...')
self.shell_id = self.conn.open_shell()
logger.warn(self.shell_id)
logger.warn('Connected to Windows.')
except Exception as error:
msg_exception_error = 'Exception raised: %s ' % error
            raise Exception(msg_exception_error)
def run_cmd(self, cmd):
"""
Generic Method for passing command and run it on windows machine and return output.
- **parameters**, **types**, **return** and **return types**::
:param cmd: Command to be executed on windows machine.
:return stdout,stderr,status_code : output,errormessage and statuscode of output.
:rtype stdout,stderr,status_code: tuple
"""
if 'shell_id' in dir(self):
command_id = self.conn.run_command(self.shell_id, cmd)
std_out, std_err, status_code = self.conn.get_command_output(self
.shell_id, command_id)
return std_out, std_err, status_code
| <mask token>
class WINRM(object):
<mask token>
def __init__(self, host_ip, usr, pwd):
"""
- **parameters**, **types**, **return** and **return types**::
:param os_type : windows/linux
:param host_ip: ip address of the Windows host
:param usr: username of the Windows Host
:param pwd: Password of the Windows Host
:type os_type: string
:type host_ip: string
:type u_name: string
:type pwd: string
"""
self.os_type = 'windows'
self.host_ip = host_ip
self.usr = usr
self.pwd = pwd
self.shell_id = None
self.host_win_ip = None
self.conn = None
def connect(self):
"""
Method to connect to a Windows machine.
"""
try:
self.host_win_ip = 'http://' + self.host_ip + ':5985/wsman'
self.conn = Protocol(endpoint=self.host_win_ip, transport=
'ntlm', username=self.usr, password=self.pwd,
server_cert_validation='ignore')
logger.warn('Connecting Windows ...')
self.shell_id = self.conn.open_shell()
logger.warn(self.shell_id)
logger.warn('Connected to Windows.')
except Exception as error:
msg_exception_error = 'Exception raised: %s ' % error
            raise Exception(msg_exception_error)
def run_cmd(self, cmd):
"""
Generic Method for passing command and run it on windows machine and return output.
- **parameters**, **types**, **return** and **return types**::
:param cmd: Command to be executed on windows machine.
:return stdout,stderr,status_code : output,errormessage and statuscode of output.
:rtype stdout,stderr,status_code: tuple
"""
if 'shell_id' in dir(self):
command_id = self.conn.run_command(self.shell_id, cmd)
std_out, std_err, status_code = self.conn.get_command_output(self
.shell_id, command_id)
return std_out, std_err, status_code
| <mask token>
class WINRM(object):
"""
WINRM Module to connect to windows host
"""
def __init__(self, host_ip, usr, pwd):
"""
- **parameters**, **types**, **return** and **return types**::
:param os_type : windows/linux
:param host_ip: ip address of the Windows host
:param usr: username of the Windows Host
:param pwd: Password of the Windows Host
:type os_type: string
:type host_ip: string
:type u_name: string
:type pwd: string
"""
self.os_type = 'windows'
self.host_ip = host_ip
self.usr = usr
self.pwd = pwd
self.shell_id = None
self.host_win_ip = None
self.conn = None
def connect(self):
"""
Method to connect to a Windows machine.
"""
try:
self.host_win_ip = 'http://' + self.host_ip + ':5985/wsman'
self.conn = Protocol(endpoint=self.host_win_ip, transport=
'ntlm', username=self.usr, password=self.pwd,
server_cert_validation='ignore')
logger.warn('Connecting Windows ...')
self.shell_id = self.conn.open_shell()
logger.warn(self.shell_id)
logger.warn('Connected to Windows.')
except Exception as error:
msg_exception_error = 'Exception raised: %s ' % error
            raise Exception(msg_exception_error)
def run_cmd(self, cmd):
"""
Generic Method for passing command and run it on windows machine and return output.
- **parameters**, **types**, **return** and **return types**::
:param cmd: Command to be executed on windows machine.
:return stdout,stderr,status_code : output,errormessage and statuscode of output.
:rtype stdout,stderr,status_code: tuple
"""
if 'shell_id' in dir(self):
command_id = self.conn.run_command(self.shell_id, cmd)
std_out, std_err, status_code = self.conn.get_command_output(self
.shell_id, command_id)
return std_out, std_err, status_code
| <mask token>
from winrm.protocol import Protocol
from lib import logger
class WINRM(object):
"""
WINRM Module to connect to windows host
"""
def __init__(self, host_ip, usr, pwd):
"""
- **parameters**, **types**, **return** and **return types**::
:param os_type : windows/linux
:param host_ip: ip address of the Windows host
:param usr: username of the Windows Host
:param pwd: Password of the Windows Host
:type os_type: string
:type host_ip: string
:type u_name: string
:type pwd: string
"""
self.os_type = 'windows'
self.host_ip = host_ip
self.usr = usr
self.pwd = pwd
self.shell_id = None
self.host_win_ip = None
self.conn = None
def connect(self):
"""
Method to connect to a Windows machine.
"""
try:
self.host_win_ip = 'http://' + self.host_ip + ':5985/wsman'
self.conn = Protocol(endpoint=self.host_win_ip, transport=
'ntlm', username=self.usr, password=self.pwd,
server_cert_validation='ignore')
logger.warn('Connecting Windows ...')
self.shell_id = self.conn.open_shell()
logger.warn(self.shell_id)
logger.warn('Connected to Windows.')
except Exception as error:
msg_exception_error = 'Exception raised: %s ' % error
            raise Exception(msg_exception_error)
def run_cmd(self, cmd):
"""
Generic Method for passing command and run it on windows machine and return output.
- **parameters**, **types**, **return** and **return types**::
:param cmd: Command to be executed on windows machine.
:return stdout,stderr,status_code : output,errormessage and statuscode of output.
:rtype stdout,stderr,status_code: tuple
"""
if 'shell_id' in dir(self):
command_id = self.conn.run_command(self.shell_id, cmd)
std_out, std_err, status_code = self.conn.get_command_output(self
.shell_id, command_id)
return std_out, std_err, status_code
| """
WINRM Module to connect to windows host
"""
from winrm.protocol import Protocol
from lib import logger
class WINRM(object):
"""
WINRM Module to connect to windows host
"""
def __init__(self, host_ip, usr, pwd):
"""
- **parameters**, **types**, **return** and **return types**::
:param os_type : windows/linux
:param host_ip: ip address of the Windows host
:param usr: username of the Windows Host
:param pwd: Password of the Windows Host
:type os_type: string
:type host_ip: string
:type u_name: string
:type pwd: string
"""
self.os_type = 'windows'
self.host_ip = host_ip
self.usr = usr
self.pwd = pwd
self.shell_id = None
self.host_win_ip = None
self.conn = None
def connect(self):
"""
Method to connect to a Windows machine.
"""
try:
self.host_win_ip = "http://" + self.host_ip + ":5985/wsman"
self.conn = Protocol(
endpoint=self.host_win_ip,
transport="ntlm",
username=self.usr,
password=self.pwd,
server_cert_validation="ignore")
logger.warn("Connecting Windows ...")
self.shell_id = self.conn.open_shell()
logger.warn(self.shell_id)
logger.warn('Connected to Windows.')
except Exception as error:
msg_exception_error = "Exception raised: %s " % error
            raise Exception(msg_exception_error)
def run_cmd(self, cmd):
"""
Generic Method for passing command and run it on windows machine and return output.
- **parameters**, **types**, **return** and **return types**::
:param cmd: Command to be executed on windows machine.
:return stdout,stderr,status_code : output,errormessage and statuscode of output.
:rtype stdout,stderr,status_code: tuple
"""
if 'shell_id' in dir(self):
#checking for the shell_id created in winrm object
command_id = self.conn.run_command(self.shell_id, cmd)
std_out, std_err, status_code = self.conn.get_command_output(
self.shell_id, command_id)
#runs the command and returns output,error,statuscode
return std_out, std_err, status_code
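# Editor's note: a minimal usage sketch of the class above. The host address,
# credentials and command are placeholders (assumptions), purely illustrative.
if __name__ == "__main__":
    session = WINRM("192.0.2.10", "Administrator", "secret")  # placeholder host/credentials
    session.connect()
    out, err, rc = session.run_cmd("ipconfig /all")
    print(out, err, rc)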
| [
3,
4,
5,
6,
7
] |
1,175 | ae6a6f7622bf98c094879efb1b9362a915a051b8 | <mask token>
class QuestionVectorTask(luigi.Task):
<mask token>
<mask token>
<mask token>
def output(self):
return luigi.LocalTarget('./cache/question_distance/%s.npy' % self.
dataset)
<mask token>
<mask token>
class QuestionVector(luigi.Task):
def requires(self):
yield QuestionVectorTask(dataset='train')
yield QuestionVectorTask(dataset='test')
yield QuestionVectorTask(dataset='merge')
yield QuestionVectorTask(dataset='valid')
def complete(self):
return QuestionVectorTask(dataset='train').complete(
) and QuestionVectorTask(dataset='test').complete(
) and QuestionVectorTask(dataset='merge').complete(
) and QuestionVectorTask(dataset='valid').complete()
def run(self):
pass
def load_named(self, name):
assert self.complete()
return np.load('cache/question_distance/%s.npy' % name, mmap_mode='r')
| <mask token>
class QuestionVectorTask(luigi.Task):
<mask token>
<mask token>
<mask token>
def output(self):
return luigi.LocalTarget('./cache/question_distance/%s.npy' % self.
dataset)
<mask token>
def run(self):
self.output().makedirs()
tqdm.tqdm.pandas(tqdm.tqdm)
vecs1, vecs2 = wordmat_distance.SentenceVecs().load(self.dataset)
dists = self.merge_vecs(vecs1, vecs2)
np.save('cache/question_distance/%s_tmp.npy' % self.dataset, dists)
os.rename('cache/question_distance/%s_tmp.npy' % self.dataset, self
.output().path)
class QuestionVector(luigi.Task):
def requires(self):
yield QuestionVectorTask(dataset='train')
yield QuestionVectorTask(dataset='test')
yield QuestionVectorTask(dataset='merge')
yield QuestionVectorTask(dataset='valid')
def complete(self):
return QuestionVectorTask(dataset='train').complete(
) and QuestionVectorTask(dataset='test').complete(
) and QuestionVectorTask(dataset='merge').complete(
) and QuestionVectorTask(dataset='valid').complete()
def run(self):
pass
def load_named(self, name):
assert self.complete()
return np.load('cache/question_distance/%s.npy' % name, mmap_mode='r')
| <mask token>
class QuestionVectorTask(luigi.Task):
resources = {'cpu': 1}
dataset = luigi.Parameter()
def requires(self):
yield wordmat_distance.SentenceVecs()
def output(self):
return luigi.LocalTarget('./cache/question_distance/%s.npy' % self.
dataset)
def merge_vecs(self, v1, v2):
distances = [spatial.distance.euclidean, spatial.distance.
sqeuclidean, spatial.distance.cityblock, spatial.distance.
cosine, spatial.distance.correlation, spatial.distance.
chebyshev, spatial.distance.canberra, spatial.distance.braycurtis]
total_work = v1.shape[0] * len(distances)
bar = tqdm.tqdm(desc='Question vector: %s' % self.dataset, total=
total_work)
distance_vecs = []
for d in distances:
dists = []
for a, b in zip(v1, v2):
dists.append(d(a, b))
bar.update()
stds = np.std(v1 - v2, 1)
distance_vecs.append(stds)
distance_mat = np.asarray(distance_vecs).T
return distance_mat
def run(self):
self.output().makedirs()
tqdm.tqdm.pandas(tqdm.tqdm)
vecs1, vecs2 = wordmat_distance.SentenceVecs().load(self.dataset)
dists = self.merge_vecs(vecs1, vecs2)
np.save('cache/question_distance/%s_tmp.npy' % self.dataset, dists)
os.rename('cache/question_distance/%s_tmp.npy' % self.dataset, self
.output().path)
class QuestionVector(luigi.Task):
def requires(self):
yield QuestionVectorTask(dataset='train')
yield QuestionVectorTask(dataset='test')
yield QuestionVectorTask(dataset='merge')
yield QuestionVectorTask(dataset='valid')
def complete(self):
return QuestionVectorTask(dataset='train').complete(
) and QuestionVectorTask(dataset='test').complete(
) and QuestionVectorTask(dataset='merge').complete(
) and QuestionVectorTask(dataset='valid').complete()
def run(self):
pass
def load_named(self, name):
assert self.complete()
return np.load('cache/question_distance/%s.npy' % name, mmap_mode='r')
| import luigi
import numpy as np
import tqdm
import os
from scipy import spatial
from kq import wordmat_distance
class QuestionVectorTask(luigi.Task):
resources = {'cpu': 1}
dataset = luigi.Parameter()
def requires(self):
yield wordmat_distance.SentenceVecs()
def output(self):
return luigi.LocalTarget('./cache/question_distance/%s.npy' % self.
dataset)
def merge_vecs(self, v1, v2):
distances = [spatial.distance.euclidean, spatial.distance.
sqeuclidean, spatial.distance.cityblock, spatial.distance.
cosine, spatial.distance.correlation, spatial.distance.
chebyshev, spatial.distance.canberra, spatial.distance.braycurtis]
total_work = v1.shape[0] * len(distances)
bar = tqdm.tqdm(desc='Question vector: %s' % self.dataset, total=
total_work)
distance_vecs = []
for d in distances:
dists = []
for a, b in zip(v1, v2):
dists.append(d(a, b))
bar.update()
stds = np.std(v1 - v2, 1)
distance_vecs.append(stds)
distance_mat = np.asarray(distance_vecs).T
return distance_mat
def run(self):
self.output().makedirs()
tqdm.tqdm.pandas(tqdm.tqdm)
vecs1, vecs2 = wordmat_distance.SentenceVecs().load(self.dataset)
dists = self.merge_vecs(vecs1, vecs2)
np.save('cache/question_distance/%s_tmp.npy' % self.dataset, dists)
os.rename('cache/question_distance/%s_tmp.npy' % self.dataset, self
.output().path)
class QuestionVector(luigi.Task):
def requires(self):
yield QuestionVectorTask(dataset='train')
yield QuestionVectorTask(dataset='test')
yield QuestionVectorTask(dataset='merge')
yield QuestionVectorTask(dataset='valid')
def complete(self):
return QuestionVectorTask(dataset='train').complete(
) and QuestionVectorTask(dataset='test').complete(
) and QuestionVectorTask(dataset='merge').complete(
) and QuestionVectorTask(dataset='valid').complete()
def run(self):
pass
def load_named(self, name):
assert self.complete()
return np.load('cache/question_distance/%s.npy' % name, mmap_mode='r')
| import luigi
import numpy as np
import tqdm
import os
from scipy import spatial
from kq import wordmat_distance
class QuestionVectorTask(luigi.Task):
resources = {'cpu': 1}
dataset = luigi.Parameter()
def requires(self):
#yield wordmat_distance.WeightedSentenceVecs()
yield wordmat_distance.SentenceVecs()
def output(self):
return luigi.LocalTarget('./cache/question_distance/%s.npy' % self.dataset)
def merge_vecs(self, v1, v2):
distances = [
spatial.distance.euclidean,
spatial.distance.sqeuclidean,
spatial.distance.cityblock,
spatial.distance.cosine,
spatial.distance.correlation,
spatial.distance.chebyshev,
spatial.distance.canberra,
spatial.distance.braycurtis]
total_work = v1.shape[0] * len(distances)
bar = tqdm.tqdm(desc='Question vector: %s' % self.dataset, total=total_work)
distance_vecs = []
for d in distances:
dists = []
for a, b in zip(v1, v2):
dists.append(d(a, b))
bar.update()
stds = np.std(v1 - v2, 1)
distance_vecs.append(stds)
distance_mat = np.asarray(distance_vecs).T
return distance_mat
#return np.concatenate([diffs, distance_mat], 1)
def run(self):
self.output().makedirs()
tqdm.tqdm.pandas(tqdm.tqdm)
#vecs1, vecs2 = wordmat_distance.WeightedSentenceVecs().load(dataset)
#dists_a = self.merge_vecs(vecs1, vecs2)
vecs1, vecs2 = wordmat_distance.SentenceVecs().load(self.dataset)
dists = self.merge_vecs(vecs1, vecs2)
#dists = np.concatenate([dists_a, dists_b], 0)
np.save('cache/question_distance/%s_tmp.npy' % self.dataset, dists)
os.rename('cache/question_distance/%s_tmp.npy' % self.dataset, self.output().path)
class QuestionVector(luigi.Task):
def requires(self):
yield QuestionVectorTask(dataset='train')
yield QuestionVectorTask(dataset='test')
yield QuestionVectorTask(dataset='merge')
yield QuestionVectorTask(dataset='valid')
def complete(self):
return (QuestionVectorTask(dataset='train').complete() and
QuestionVectorTask(dataset='test').complete() and
QuestionVectorTask(dataset='merge').complete() and
QuestionVectorTask(dataset='valid').complete())
def run(self):
pass
def load_named(self, name):
assert self.complete()
return np.load('cache/question_distance/%s.npy' % name, mmap_mode='r')
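# Editor's note: a small self-contained sketch of the per-pair distance features that
# merge_vecs computes above, applied to one toy pair of vectors; illustrative only.
def _distance_features_demo():
    a = np.array([1.0, 2.0, 3.0])
    b = np.array([1.5, 1.0, 3.5])
    funcs = [spatial.distance.euclidean, spatial.distance.sqeuclidean,
             spatial.distance.cityblock, spatial.distance.cosine,
             spatial.distance.correlation, spatial.distance.chebyshev,
             spatial.distance.canberra, spatial.distance.braycurtis]
    # one scalar per distance function, i.e. one row of the resulting feature matrix
    return [f(a, b) for f in funcs]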
| [
7,
8,
11,
12,
13
] |
1,176 | 37c03732ae52171fc24aec85c940848b02d76dc1 | <mask token>
class EntryCreateView(CreateView):
<mask token>
<mask token>
<mask token>
class EntryUpdateView(UpdateView):
model = Entry
fields = ['title', 'content']
def get_success_url(self):
return reverse_lazy('entry-detail', kwargs={'pk': self.entry.id})
class EntryDeleteView(DeleteView):
model = Entry
success_url = reverse_lazy('entry-list')
| <mask token>
class EntryCreateView(CreateView):
model = Entry
fields = ['title', 'content']
success_url = reverse_lazy('entry-list')
class EntryUpdateView(UpdateView):
model = Entry
fields = ['title', 'content']
def get_success_url(self):
return reverse_lazy('entry-detail', kwargs={'pk': self.entry.id})
class EntryDeleteView(DeleteView):
model = Entry
success_url = reverse_lazy('entry-list')
| <mask token>
class EntryListView(ListView):
<mask token>
<mask token>
class EntryDetailView(DetailView):
model = Entry
class EntryCreateView(CreateView):
model = Entry
fields = ['title', 'content']
success_url = reverse_lazy('entry-list')
class EntryUpdateView(UpdateView):
model = Entry
fields = ['title', 'content']
def get_success_url(self):
return reverse_lazy('entry-detail', kwargs={'pk': self.entry.id})
class EntryDeleteView(DeleteView):
model = Entry
success_url = reverse_lazy('entry-list')
| <mask token>
class EntryListView(ListView):
model = Entry
queryset = Entry.objects.all().order_by('-date_created')
class EntryDetailView(DetailView):
model = Entry
class EntryCreateView(CreateView):
model = Entry
fields = ['title', 'content']
success_url = reverse_lazy('entry-list')
class EntryUpdateView(UpdateView):
model = Entry
fields = ['title', 'content']
def get_success_url(self):
return reverse_lazy('entry-detail', kwargs={'pk': self.entry.id})
class EntryDeleteView(DeleteView):
model = Entry
success_url = reverse_lazy('entry-list')
| from django.urls import reverse_lazy
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView,
)
from .models import Entry
class EntryListView(ListView):
model = Entry
queryset = Entry.objects.all().order_by("-date_created")
class EntryDetailView(DetailView):
model = Entry
class EntryCreateView(CreateView):
model = Entry
fields = ["title", "content"]
success_url = reverse_lazy("entry-list")
class EntryUpdateView(UpdateView):
model = Entry
fields = ["title", "content"]
def get_success_url(self):
return reverse_lazy("entry-detail", kwargs={"pk": self.entry.id})
class EntryDeleteView(DeleteView):
model = Entry
success_url = reverse_lazy("entry-list")
| [
6,
7,
10,
11,
13
] |
1,177 | 84a516e924252d897be7444e11acfecd66474090 | <mask token>
| <mask token>
with open(forbidpath, 'rb') as f:
for line in f:
word = line.strip()
forbidkword[word] = 0
<mask token>
with open(inputpath, 'rb') as f:
for line in f:
splits = line.strip().split('\t')
tag = splits[0]
if tag.find(label) > -1:
print(tag)
train = []
seg = jieba_cut.cut(splits[-1], cut_all=False)
seglist = []
for w in seg:
w = w.strip().encode('utf-8')
if w not in forbidkword:
if not re.match('\\d+$', w):
seglist.append(w)
train.append(' '.join(seglist))
X_test = vectorizer.transform(train)
X_test = chi2.transform(X_test)
pred = clf.predict(X_test)
print(pred)
lb = str(pred[0])
if lb == '1':
outfile.writelines(line.strip() + '\t')
outfile.writelines(lb + '\n')
outfile.close()
| <mask token>
outputfile = 'dzsptfidf'
X_train, y_train = cPickle.load(open(os.path.join(outputfile, 'train.data'),
'rb'))
X_test, y_test = cPickle.load(open(os.path.join(outputfile, 'test.data'), 'rb')
)
vectorizer = cPickle.load(open(os.path.join(outputfile, 'vectorizer.data'),
'rb'))
chi2 = cPickle.load(open(os.path.join(outputfile, 'ch2.data'), 'rb'))
clf = cPickle.load(open(os.path.join(outputfile, 'SGD_l2.model'), 'rb'))
inputpath = u'E:\\项目需求\\JDPower\\分类\\5月份\\financeoutput1_final_05.txt'
outputpath = u'E:\\项目需求\\JDPower\\分类\\5月份\\大宗商品.txt'
label = '大宗商品'
forbidkword = {}
forbidpath = u'..//keyword.txt'
with open(forbidpath, 'rb') as f:
for line in f:
word = line.strip()
forbidkword[word] = 0
outfile = open(outputpath, 'wb')
with open(inputpath, 'rb') as f:
for line in f:
splits = line.strip().split('\t')
tag = splits[0]
if tag.find(label) > -1:
print(tag)
train = []
seg = jieba_cut.cut(splits[-1], cut_all=False)
seglist = []
for w in seg:
w = w.strip().encode('utf-8')
if w not in forbidkword:
if not re.match('\\d+$', w):
seglist.append(w)
train.append(' '.join(seglist))
X_test = vectorizer.transform(train)
X_test = chi2.transform(X_test)
pred = clf.predict(X_test)
print(pred)
lb = str(pred[0])
if lb == '1':
outfile.writelines(line.strip() + '\t')
outfile.writelines(lb + '\n')
outfile.close()
| from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
import os
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
import jieba_cut
import random
import cPickle
import re
outputfile = 'dzsptfidf'
X_train, y_train = cPickle.load(open(os.path.join(outputfile, 'train.data'),
'rb'))
X_test, y_test = cPickle.load(open(os.path.join(outputfile, 'test.data'), 'rb')
)
vectorizer = cPickle.load(open(os.path.join(outputfile, 'vectorizer.data'),
'rb'))
chi2 = cPickle.load(open(os.path.join(outputfile, 'ch2.data'), 'rb'))
clf = cPickle.load(open(os.path.join(outputfile, 'SGD_l2.model'), 'rb'))
inputpath = u'E:\\项目需求\\JDPower\\分类\\5月份\\financeoutput1_final_05.txt'
outputpath = u'E:\\项目需求\\JDPower\\分类\\5月份\\大宗商品.txt'
label = '大宗商品'
forbidkword = {}
forbidpath = u'..//keyword.txt'
with open(forbidpath, 'rb') as f:
for line in f:
word = line.strip()
forbidkword[word] = 0
outfile = open(outputpath, 'wb')
with open(inputpath, 'rb') as f:
for line in f:
splits = line.strip().split('\t')
tag = splits[0]
if tag.find(label) > -1:
print(tag)
train = []
seg = jieba_cut.cut(splits[-1], cut_all=False)
seglist = []
for w in seg:
w = w.strip().encode('utf-8')
if w not in forbidkword:
if not re.match('\\d+$', w):
seglist.append(w)
train.append(' '.join(seglist))
X_test = vectorizer.transform(train)
X_test = chi2.transform(X_test)
pred = clf.predict(X_test)
print(pred)
lb = str(pred[0])
if lb == '1':
outfile.writelines(line.strip() + '\t')
outfile.writelines(lb + '\n')
outfile.close()
| # -*- coding:UTF-8 -*-
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
import os
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
import jieba_cut
import random
import cPickle
import re
outputfile = "dzsptfidf"
X_train,y_train = cPickle.load(open(os.path.join(outputfile,"train.data"),"rb"))
X_test,y_test = cPickle.load(open(os.path.join(outputfile,"test.data"),"rb"))
vectorizer = cPickle.load(open(os.path.join(outputfile,"vectorizer.data"),"rb"))
chi2 = cPickle.load(open(os.path.join(outputfile,"ch2.data"),"rb"))
clf = cPickle.load(open(os.path.join(outputfile,"SGD_l2.model"),"rb"))
#inputpath =u"E:\\项目需求\\JDPower\\分类\\4月份\\financeoutput1_final.txt"
#outputpath =u"E:\\项目需求\\JDPower\\分类\\4月份\\大宗商品.txt"
inputpath =u"E:\\项目需求\\JDPower\\分类\\5月份\\financeoutput1_final_05.txt"
outputpath =u"E:\\项目需求\\JDPower\\分类\\5月份\\大宗商品.txt"
label = "大宗商品"
forbidkword = {}
# load
forbidpath = u"..//keyword.txt"
with open(forbidpath, "rb") as f:
for line in f:
word = line.strip()
forbidkword[word] = 0
outfile = open(outputpath,"wb")
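# Editor's note: a compact helper equivalent to the per-line steps inside the loop
# below (segment with jieba, drop forbidden keywords and pure numbers, vectorise,
# chi2-select, predict); it reuses only objects already defined above, illustrative only.
def predict_label(text):
    words = [w.strip().encode("utf-8") for w in jieba_cut.cut(text, cut_all=False)]
    words = [w for w in words if w not in forbidkword and not re.match(r"\d+$", w)]
    features = chi2.transform(vectorizer.transform([" ".join(words)]))
    return clf.predict(features)[0]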
with open(inputpath, "rb") as f:
for line in f:
splits = line.strip().split("\t")
tag = splits[0]
if tag.find(label) > -1 :
print(tag)
train = []
#print (splits[-1])
seg = jieba_cut.cut(splits[-1], cut_all=False)
#seglist = [i for i in seg]
seglist = []
for w in seg:
#print w
w = w.strip().encode("utf-8")
if w not in forbidkword:
if not re.match(r"\d+$", w):
seglist.append(w)
train.append(" ".join(seglist))
X_test = vectorizer.transform(train)
X_test = chi2.transform(X_test)
pred = clf.predict(X_test)
#print(" ".join(pred))
print (pred)
lb = str(pred[0])
#print(isinstance(lb, unicode))
#print( lb.decode("gbk").encode("utf-8"))
#outfile.writelines(lb+"\n")
if lb == '1' :
outfile.writelines(line.strip()+"\t")
outfile.writelines(lb+"\n")
#outfile.writelines(line.strip()+"\t"+lb.decode("utf-8").encode("utf-8")+"\n")
outfile.close() | [
0,
1,
2,
3,
4
] |
1,178 | be892250c31198e801836dba24fa8218dd50e811 | <mask token>
def func3(a, b):
return
<mask token>
| <mask token>
def func1(a):
print(f'这是有参数的打印:{a}')
<mask token>
def func2(a, b):
return a + b
<mask token>
def func3(a, b):
return
<mask token>
| def func():
print('这是无参数的打印')
<mask token>
def func1(a):
print(f'这是有参数的打印:{a}')
<mask token>
def func2(a, b):
return a + b
<mask token>
def func3(a, b):
return
<mask token>
| def func():
print('这是无参数的打印')
func()
def func1(a):
print(f'这是有参数的打印:{a}')
func1('有参数a')
def func2(a, b):
return a + b
print(f'有返回值打印:{func2(3, 2)}')
def func3(a, b):
return
print(f'无返回值打印:{func3(3, 2)}')
| def func():
print("这是无参数的打印")
func()
def func1(a):
print(f"这是有参数的打印:{a}")
func1("有参数a")
def func2(a, b):
return a + b
print(f"有返回值打印:{func2(3, 2)}")
def func3(a, b):
return
print(f"无返回值打印:{func3(3, 2)}")
| [
1,
3,
4,
5,
6
] |
1,179 | dfe0ee5bbb906e5a23adcf06d2d704700fa1567d | <mask token>
| <mask token>
print(file.read())
print(file.closed)
file.close()
print(file.closed)
| file = open('../_datasets/moby_dick.txt', mode='r')
print(file.read())
print(file.closed)
file.close()
print(file.closed)
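# Editor's note: an equivalent sketch using a context manager, which closes the file
# automatically instead of the explicit close() above; same path, illustrative only.
def read_with_context_manager(path='../_datasets/moby_dick.txt'):
    with open(path, mode='r') as handle:
        text = handle.read()
    # handle.closed is True here: leaving the with-block closed it
    return text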
| null | null | [
0,
1,
2
] |
1,180 | d61b04539295f6b25e7f6589d32f313e3c6df82f | <mask token>
class BackgroundCheck(object):
<mask token>
<mask token>
<mask token>
<mask token>
def predict_proba(self, x):
return self.prob_background(x)
class GaussianEstimation(object):
def __init__(self):
self.mu = None
self.cov = None
self.N = 0
def fit(self, x):
N = x.shape[1]
mu = np.mean(x, axis=0)
cov = np.cov(x, rowvar=False)
if self.N is 0:
self.N = N
self.mu = mu
self.k = len(mu)
self.cov = cov
else:
self.mu = np.true_divide(self.mu * self.N + mu * N, self.N + N)
self.cov = np.true_divide(self.cov * self.N + cov * N, self.N + N)
self.N += N
def likelihood(self, x):
return np.exp(self.log_likelihood(x))
def log_likelihood(self, x):
x_mu = x - self.mu
inverse = np.linalg.inv(self.cov)
exp = np.array([np.inner(np.inner(a, inverse.T), a) for a in x_mu])
return -0.5 * (np.log(np.linalg.det(self.cov)) + exp + self.k * np.
log(2 * np.pi))
@property
def max(self):
return self.likelihood(self.mu.reshape(1, -1))
<mask token>
| <mask token>
class BackgroundCheck(object):
<mask token>
<mask token>
def prob_foreground(self, x):
l = self.model.likelihood(x)
l_max = self.model.max
return np.true_divide(l, l_max)
def prob_background(self, x):
return 1 - self.prob_foreground(x)
def predict_proba(self, x):
return self.prob_background(x)
class GaussianEstimation(object):
def __init__(self):
self.mu = None
self.cov = None
self.N = 0
def fit(self, x):
N = x.shape[1]
mu = np.mean(x, axis=0)
cov = np.cov(x, rowvar=False)
if self.N is 0:
self.N = N
self.mu = mu
self.k = len(mu)
self.cov = cov
else:
self.mu = np.true_divide(self.mu * self.N + mu * N, self.N + N)
self.cov = np.true_divide(self.cov * self.N + cov * N, self.N + N)
self.N += N
def likelihood(self, x):
return np.exp(self.log_likelihood(x))
def log_likelihood(self, x):
x_mu = x - self.mu
inverse = np.linalg.inv(self.cov)
exp = np.array([np.inner(np.inner(a, inverse.T), a) for a in x_mu])
return -0.5 * (np.log(np.linalg.det(self.cov)) + exp + self.k * np.
log(2 * np.pi))
@property
def max(self):
return self.likelihood(self.mu.reshape(1, -1))
<mask token>
| <mask token>
class BackgroundCheck(object):
def __init__(self, model):
self.model = model
def fit(self, x):
self.model.fit(x)
def prob_foreground(self, x):
l = self.model.likelihood(x)
l_max = self.model.max
return np.true_divide(l, l_max)
def prob_background(self, x):
return 1 - self.prob_foreground(x)
def predict_proba(self, x):
return self.prob_background(x)
class GaussianEstimation(object):
def __init__(self):
self.mu = None
self.cov = None
self.N = 0
def fit(self, x):
N = x.shape[1]
mu = np.mean(x, axis=0)
cov = np.cov(x, rowvar=False)
if self.N is 0:
self.N = N
self.mu = mu
self.k = len(mu)
self.cov = cov
else:
self.mu = np.true_divide(self.mu * self.N + mu * N, self.N + N)
self.cov = np.true_divide(self.cov * self.N + cov * N, self.N + N)
self.N += N
def likelihood(self, x):
return np.exp(self.log_likelihood(x))
def log_likelihood(self, x):
x_mu = x - self.mu
inverse = np.linalg.inv(self.cov)
exp = np.array([np.inner(np.inner(a, inverse.T), a) for a in x_mu])
return -0.5 * (np.log(np.linalg.det(self.cov)) + exp + self.k * np.
log(2 * np.pi))
@property
def max(self):
return self.likelihood(self.mu.reshape(1, -1))
<mask token>
| <mask token>
def get_samples(n):
return np.random.multivariate_normal(mean=MU, cov=COV, size=n)
class BackgroundCheck(object):
def __init__(self, model):
self.model = model
def fit(self, x):
self.model.fit(x)
def prob_foreground(self, x):
l = self.model.likelihood(x)
l_max = self.model.max
return np.true_divide(l, l_max)
def prob_background(self, x):
return 1 - self.prob_foreground(x)
def predict_proba(self, x):
return self.prob_background(x)
class GaussianEstimation(object):
def __init__(self):
self.mu = None
self.cov = None
self.N = 0
def fit(self, x):
N = x.shape[1]
mu = np.mean(x, axis=0)
cov = np.cov(x, rowvar=False)
if self.N is 0:
self.N = N
self.mu = mu
self.k = len(mu)
self.cov = cov
else:
self.mu = np.true_divide(self.mu * self.N + mu * N, self.N + N)
self.cov = np.true_divide(self.cov * self.N + cov * N, self.N + N)
self.N += N
def likelihood(self, x):
return np.exp(self.log_likelihood(x))
def log_likelihood(self, x):
x_mu = x - self.mu
inverse = np.linalg.inv(self.cov)
exp = np.array([np.inner(np.inner(a, inverse.T), a) for a in x_mu])
return -0.5 * (np.log(np.linalg.det(self.cov)) + exp + self.k * np.
log(2 * np.pi))
@property
def max(self):
return self.likelihood(self.mu.reshape(1, -1))
<mask token>
| import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.lines import Line2D
np.random.seed(42)
n_samples = 5000
MU = np.array([0.5, 1.5])
COV = np.array([[1., 0.7], [0.7, 2.]])
def get_samples(n):
return np.random.multivariate_normal(mean=MU, cov=COV, size=n)
class BackgroundCheck(object):
def __init__(self, model):
self.model = model
def fit(self, x):
self.model.fit(x)
def prob_foreground(self, x):
l = self.model.likelihood(x)
l_max = self.model.max
return np.true_divide(l, l_max)
def prob_background(self, x):
return 1 - self.prob_foreground(x)
def predict_proba(self, x):
return self.prob_background(x)
class GaussianEstimation(object):
def __init__(self):
self.mu = None
self.cov = None
self.N = 0
def fit(self, x):
N = x.shape[1]
mu = np.mean(x, axis=0)
cov = np.cov(x, rowvar=False)
if self.N is 0:
self.N = N
self.mu = mu
self.k = len(mu)
self.cov = cov
else:
self.mu = np.true_divide((self.mu * self.N) + (mu * N), self.N + N)
self.cov = np.true_divide((self.cov * self.N) + (cov * N), self.N + N)
self.N += N
def likelihood(self, x):
return np.exp(self.log_likelihood(x))
def log_likelihood(self, x):
x_mu = x - self.mu
# a = np.array([[1, 2]])
# b = np.array([[1, 2],[3,4]])
# np.inner(np.inner(a, b.T), a)
inverse = np.linalg.inv(self.cov)
exp = np.array([np.inner(np.inner(a, inverse.T), a) for a in x_mu])
return - 0.5 * (
np.log(np.linalg.det(self.cov))
+ exp \
+ self.k * np.log(2*np.pi)
)
@property
def max(self):
return self.likelihood(self.mu.reshape(1,-1))
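# Editor's note: the hand-rolled log_likelihood above is the standard multivariate
# normal log-density. A small cross-check helper against scipy.stats (scipy is an
# assumption here -- the original script does not import it); illustrative only.
def _check_log_likelihood(est, x):
    from scipy.stats import multivariate_normal
    ref = multivariate_normal(mean=est.mu, cov=est.cov).logpdf(x)
    return np.allclose(ref, est.log_likelihood(x))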
model = BackgroundCheck(GaussianEstimation())
for i in range(n_samples/2):
x = get_samples(2)
model.fit(x)
x = get_samples(n_samples)
p_foreground = 1 - model.predict_proba(x)
fig = plt.figure('scatter')
fig.clf()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x[:,0], x[:,1], p_foreground)
ax.set_xlabel('$x_0$')
ax.set_ylabel('$x_1$')
ax.set_zlabel('p_foreground')
fig.savefig('p_foreground_x.svg')
X = np.linspace(min(x[:,0]), max(x[:,0]), 30)
Y = np.linspace(min(x[:,1]), max(x[:,1]), 30)
X, Y = np.meshgrid(X, Y)
grid = np.concatenate((X.reshape(-1,1), Y.reshape(-1,1)), axis=1)
p_foreground = 1 - model.predict_proba(grid).reshape(X.shape[0], X.shape[1])
fig = plt.figure('surface')
fig.clf()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(X, Y, p_foreground, cmap=cm.coolwarm)
ax.set_xlabel('$x_0$')
ax.set_ylabel('$x_1$')
ax.set_zlabel('p_foreground')
fig.savefig('p_foreground_grid.svg')
| [
8,
10,
12,
13,
17
] |
1,181 | 58385a7713a8f88925ced714d25f1522bc7e39d8 | <mask token>
class Scatter:
<mask token>
<mask token>
class Pie:
def __init__(self, values, labels, title):
self.style = 'fivethirtyeight'
self.values = values
self.labels = labels
self.explode = [(0) for i in range(len(values))]
self.title = title
def plot(self):
plt.style.use(self.style)
plt.pie(self.values, labels=self.labels, explode=self.explode,
shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={
'edgecolor': 'black'})
plt.title(self.title)
plt.tight_layout()
plt.show()
class Column:
pass
| <mask token>
class Scatter:
def __init__(self, values, ylabel, title):
self.values = values
self.range = list(range(len(values)))
self.ylabel = ylabel
self.title = title
<mask token>
class Pie:
def __init__(self, values, labels, title):
self.style = 'fivethirtyeight'
self.values = values
self.labels = labels
self.explode = [(0) for i in range(len(values))]
self.title = title
def plot(self):
plt.style.use(self.style)
plt.pie(self.values, labels=self.labels, explode=self.explode,
shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={
'edgecolor': 'black'})
plt.title(self.title)
plt.tight_layout()
plt.show()
class Column:
pass
| <mask token>
class Scatter:
def __init__(self, values, ylabel, title):
self.values = values
self.range = list(range(len(values)))
self.ylabel = ylabel
self.title = title
def plot(self):
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.scatter(self.range, self.values, color='r', s=1)
ax.set_xlabel('Days')
ax.set_ylabel(self.ylabel)
ax.set_title(self.title)
plt.ylim(0, self.values[-1])
plt.show()
class Pie:
def __init__(self, values, labels, title):
self.style = 'fivethirtyeight'
self.values = values
self.labels = labels
self.explode = [(0) for i in range(len(values))]
self.title = title
def plot(self):
plt.style.use(self.style)
plt.pie(self.values, labels=self.labels, explode=self.explode,
shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={
'edgecolor': 'black'})
plt.title(self.title)
plt.tight_layout()
plt.show()
class Column:
pass
| import matplotlib.pyplot as plt
class Scatter:
def __init__(self, values, ylabel, title):
self.values = values
self.range = list(range(len(values)))
self.ylabel = ylabel
self.title = title
def plot(self):
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.scatter(self.range, self.values, color='r', s=1)
ax.set_xlabel('Days')
ax.set_ylabel(self.ylabel)
ax.set_title(self.title)
plt.ylim(0, self.values[-1])
plt.show()
class Pie:
def __init__(self, values, labels, title):
self.style = 'fivethirtyeight'
self.values = values
self.labels = labels
self.explode = [(0) for i in range(len(values))]
self.title = title
def plot(self):
plt.style.use(self.style)
plt.pie(self.values, labels=self.labels, explode=self.explode,
shadow=True, startangle=90, autopct='%1.1f%%', wedgeprops={
'edgecolor': 'black'})
plt.title(self.title)
plt.tight_layout()
plt.show()
class Column:
pass
| import matplotlib.pyplot as plt
class Scatter:
def __init__(self, values, ylabel, title):
self.values = values
self.range = list(range(len(values)))
self.ylabel = ylabel
self.title = title
def plot(self):
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
ax.scatter(self.range, self.values, color='r', s=1)
ax.set_xlabel('Days')
ax.set_ylabel(self.ylabel)
ax.set_title(self.title)
plt.ylim(0, self.values[-1])
plt.show()
class Pie:
def __init__(self, values, labels, title):
self.style = "fivethirtyeight"
self.values = values
self.labels = labels
self.explode = [0 for i in range(len(values))]
self.title = title
def plot(self):
plt.style.use(self.style)
plt.pie(self.values, labels=self.labels, explode=self.explode, shadow=True,
startangle=90, autopct='%1.1f%%',
wedgeprops={'edgecolor': 'black'})
plt.title(self.title)
plt.tight_layout()
plt.show()
class Column:
pass | [
5,
6,
7,
8,
9
] |
1,182 | 70c78021a2544ea372545b037ed55298c26391d1 | <mask token>
def getIkbResult(search_str):
ans_list = get_search_res('ikb', 'kb', search_str)
for i in ans_list:
i['kb_id'] = i.pop('id')
return ans_list
def get_search_res(index, doc_type, query):
ans = {}
search_dsl = '{"query":{"regexp":{"text":".*%s.*"}}}' % query
es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (
index, doc_type)
child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(
'string-escape')], stdout=PIPE)
json_res = child.communicate(None)[0]
jres = json.loads(json_res)
ans_list = []
for item in jres['hits']['hits']:
cur = {}
cur['id'] = item['_id']
cur['summary'] = item['_source']['summary']
ans_list.append(cur)
return ans_list
@app.route('/regexSearch')
@crossdomain(origin='*')
def regexSearch():
res = dict()
para = request.args
data = para.get('data', '').strip()
data = json.loads(data)
results = list()
for regexItem in data:
bzResult = getBzResult(regexItem)
ikbResult = getIkbResult(regexItem)
results.append([regexItem, bzResult, ikbResult])
res['res'] = 'success'
res['data'] = render_template('search_result.html', results=results)
return render_template('search_result.html', results=results)
@app.route('/DefaultError')
@crossdomain(origin='*')
def defaultError():
return render_template('stop_sign.html')
<mask token>
| <mask token>
def getBzResult(search_str):
ans_list = get_search_res('bugzilla', 'text', search_str)
for i in ans_list:
i['bug_id'] = i.pop('id')
return ans_list
def getIkbResult(search_str):
ans_list = get_search_res('ikb', 'kb', search_str)
for i in ans_list:
i['kb_id'] = i.pop('id')
return ans_list
def get_search_res(index, doc_type, query):
ans = {}
search_dsl = '{"query":{"regexp":{"text":".*%s.*"}}}' % query
es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (
index, doc_type)
child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(
'string-escape')], stdout=PIPE)
json_res = child.communicate(None)[0]
jres = json.loads(json_res)
ans_list = []
for item in jres['hits']['hits']:
cur = {}
cur['id'] = item['_id']
cur['summary'] = item['_source']['summary']
ans_list.append(cur)
return ans_list
@app.route('/regexSearch')
@crossdomain(origin='*')
def regexSearch():
res = dict()
para = request.args
data = para.get('data', '').strip()
data = json.loads(data)
results = list()
for regexItem in data:
bzResult = getBzResult(regexItem)
ikbResult = getIkbResult(regexItem)
results.append([regexItem, bzResult, ikbResult])
res['res'] = 'success'
res['data'] = render_template('search_result.html', results=results)
return render_template('search_result.html', results=results)
@app.route('/DefaultError')
@crossdomain(origin='*')
def defaultError():
return render_template('stop_sign.html')
<mask token>
| <mask token>
def crossdomain(origin=None, methods=None, headers=None, max_age=21600,
attach_to_all=True, automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
h['Access-Control-Allow-Credentials'] = 'true'
h['Access-Control-Allow-Headers'
] = 'Origin, X-Requested-With, Content-Type, Accept, Authorization'
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
def getBzResult(search_str):
ans_list = get_search_res('bugzilla', 'text', search_str)
for i in ans_list:
i['bug_id'] = i.pop('id')
return ans_list
def getIkbResult(search_str):
ans_list = get_search_res('ikb', 'kb', search_str)
for i in ans_list:
i['kb_id'] = i.pop('id')
return ans_list
def get_search_res(index, doc_type, query):
ans = {}
search_dsl = '{"query":{"regexp":{"text":".*%s.*"}}}' % query
es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (
index, doc_type)
child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(
'string-escape')], stdout=PIPE)
json_res = child.communicate(None)[0]
jres = json.loads(json_res)
ans_list = []
for item in jres['hits']['hits']:
cur = {}
cur['id'] = item['_id']
cur['summary'] = item['_source']['summary']
ans_list.append(cur)
return ans_list
@app.route('/regexSearch')
@crossdomain(origin='*')
def regexSearch():
res = dict()
para = request.args
data = para.get('data', '').strip()
data = json.loads(data)
results = list()
for regexItem in data:
bzResult = getBzResult(regexItem)
ikbResult = getIkbResult(regexItem)
results.append([regexItem, bzResult, ikbResult])
res['res'] = 'success'
res['data'] = render_template('search_result.html', results=results)
return render_template('search_result.html', results=results)
@app.route('/DefaultError')
@crossdomain(origin='*')
def defaultError():
return render_template('stop_sign.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5555)
| <mask token>
app = Flask(__name__)
app.debug = True
<mask token>
def crossdomain(origin=None, methods=None, headers=None, max_age=21600,
attach_to_all=True, automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
h['Access-Control-Allow-Credentials'] = 'true'
h['Access-Control-Allow-Headers'
] = 'Origin, X-Requested-With, Content-Type, Accept, Authorization'
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
def getBzResult(search_str):
ans_list = get_search_res('bugzilla', 'text', search_str)
for i in ans_list:
i['bug_id'] = i.pop('id')
return ans_list
def getIkbResult(search_str):
ans_list = get_search_res('ikb', 'kb', search_str)
for i in ans_list:
i['kb_id'] = i.pop('id')
return ans_list
def get_search_res(index, doc_type, query):
ans = {}
search_dsl = '{"query":{"regexp":{"text":".*%s.*"}}}' % query
es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' % (
index, doc_type)
child = Popen(['curl', es_url, '-d', str(search_dsl).lower().encode(
'string-escape')], stdout=PIPE)
json_res = child.communicate(None)[0]
jres = json.loads(json_res)
ans_list = []
for item in jres['hits']['hits']:
cur = {}
cur['id'] = item['_id']
cur['summary'] = item['_source']['summary']
ans_list.append(cur)
return ans_list
@app.route('/regexSearch')
@crossdomain(origin='*')
def regexSearch():
res = dict()
para = request.args
data = para.get('data', '').strip()
data = json.loads(data)
results = list()
for regexItem in data:
bzResult = getBzResult(regexItem)
ikbResult = getIkbResult(regexItem)
results.append([regexItem, bzResult, ikbResult])
res['res'] = 'success'
res['data'] = render_template('search_result.html', results=results)
return render_template('search_result.html', results=results)
@app.route('/DefaultError')
@crossdomain(origin='*')
def defaultError():
return render_template('stop_sign.html')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5555)
| #-*- coding:utf-8 -*-
'''
'''
from flask import Flask, jsonify
app = Flask(__name__)
app.debug = True
from datetime import timedelta
from flask import make_response, request, current_app, render_template
from functools import update_wrapper
import json
from subprocess import *
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
h['Access-Control-Allow-Credentials'] = 'true'
h['Access-Control-Allow-Headers'] = \
"Origin, X-Requested-With, Content-Type, Accept, Authorization"
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
def getBzResult(search_str):
ans_list = get_search_res("bugzilla", "text", search_str)
for i in ans_list:
i['bug_id'] = i.pop('id')
#raise Exception('xyz')
return ans_list
def getIkbResult(search_str):
ans_list = get_search_res("ikb", "kb", search_str)
for i in ans_list:
i['kb_id'] = i.pop('id')
return ans_list
def get_search_res(index, doc_type, query):
ans = {}
search_dsl = '{"query":{"regexp":{"text":\".*%s.*\"}}}' %(query)
es_url = 'http://cybertron.eng.vmware.com:9200/%s/%s/_search?pretty=1' %(index, doc_type)
child = Popen(["curl", es_url, "-d", str(search_dsl).lower().encode('string-escape')], stdout=PIPE)
json_res = child.communicate(None)[0]
jres = json.loads(json_res)
ans_list = []
for item in jres['hits']['hits']:
cur = {}
cur['id'] = item['_id']
cur['summary'] = item['_source']['summary']
ans_list.append(cur)
#sorted to get the latest item
#newlist = list(reversed(sorted(ans_list, key=lambda k: k['id'])))
return ans_list
@app.route("/regexSearch")
@crossdomain(origin='*')
def regexSearch():
res = dict()
para = request.args
data = para.get('data', '').strip()
data = json.loads(data)
results = list()
for regexItem in data:
bzResult = getBzResult(regexItem)
ikbResult = getIkbResult(regexItem)
results.append([regexItem, bzResult, ikbResult])
#raise Exception('xyz')
res['res'] = 'success'
res['data'] = render_template('search_result.html', results = results)
return render_template('search_result.html', results = results)
@app.route("/DefaultError")
@crossdomain(origin='*')
def defaultError():
return render_template('stop_sign.html')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5555)
| [
4,
5,
7,
8,
10
] |
1,183 | d0dfea27128ca6966c85da6529ead5c95c86c4cf | <mask token>
| <mask token>
class Migration(migrations.Migration):
<mask token>
<mask token>
| <mask token>
class Migration(migrations.Migration):
dependencies = [('blog', '0033_auto_20171016_1334')]
operations = [migrations.AlterField(model_name='sponsor', name=
'email_text_markdown', field=models.CharField(default='',
max_length=1000))]
| from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('blog', '0033_auto_20171016_1334')]
operations = [migrations.AlterField(model_name='sponsor', name=
'email_text_markdown', field=models.CharField(default='',
max_length=1000))]
| # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-16 12:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0033_auto_20171016_1334'),
]
operations = [
migrations.AlterField(
model_name='sponsor',
name='email_text_markdown',
field=models.CharField(default='', max_length=1000),
),
]
| [
0,
1,
2,
3,
4
] |
1,184 | 41006ff35299aa72b69c6dc1c71a45b44dca7d6c | <mask token>
| <mask token>
data.head()
<mask token>
sb.catplot(x='Age', y='Sex', hue='Survived', col='Embarked', notch=False,
palette='Set2', data=data, kind='box', height=4, aspect=0.7)
sb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass', notch=True,
palette='Set2', data=data, kind='box', height=4, aspect=0.7)
| <mask token>
data = pd.read_csv('/Users/stevenbaez/Desktop/train.csv')
data.head()
subset = data[['Survived', 'Age', 'Sex']]
<mask token>
sb.catplot(x='Age', y='Sex', hue='Survived', col='Embarked', notch=False,
palette='Set2', data=data, kind='box', height=4, aspect=0.7)
sb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass', notch=True,
palette='Set2', data=data, kind='box', height=4, aspect=0.7)
| import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib as mp
data = pd.read_csv('/Users/stevenbaez/Desktop/train.csv')
data.head()
subset = data[['Survived', 'Age', 'Sex']]
import numpy as np
import matplotlib
sb.catplot(x='Age', y='Sex', hue='Survived', col='Embarked', notch=False,
palette='Set2', data=data, kind='box', height=4, aspect=0.7)
sb.catplot(x='Age', y='Sex', hue='Survived', col='Pclass', notch=True,
palette='Set2', data=data, kind='box', height=4, aspect=0.7)
| #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib as mp
data = pd.read_csv("/Users/stevenbaez/Desktop/train.csv")
# In[2]:
data.head()
# In[3]:
subset = data[['Survived','Age', 'Sex']]
# In[5]:
import numpy as np
import matplotlib
# In[20]:
sb.catplot(x="Age", y="Sex",
hue="Survived", col="Embarked",
notch = False,
palette = "Set2",
data=data, kind="box",
height=4, aspect=.7);
# In[17]:
sb.catplot(x="Age", y="Sex",
hue="Survived", col="Pclass",
notch = True,
palette = "Set2",
data=data, kind="box",
height=4, aspect=.7);
# In[ ]:
| [
0,
1,
2,
3,
4
] |
1,185 | ebebdb0e79e9d78b818dab3f93d130ccddd2914e | <mask token>
def saveDatadic(file_path, name, dataset):
np.save(file_path + name + '_x', dataset['x'])
np.save(file_path + name + '_t', dataset['t'])
np.save(file_path + name + '_e', dataset['e'])
<mask token>
def encoder_z(mu_logvar, epsilon=None):
mu, logvar = tf.split(mu_logvar, num_or_size_splits=2, axis=1)
stddev = tf.sqrt(tf.exp(logvar))
if epsilon is None:
epsilon = tf.random_normal(tf.shape(stddev))
z = mu + tf.multiply(stddev, epsilon)
return z
def decoder(z, is_training):
"""Network p(t|z)"""
with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.
leaky_relu, weights_initializer=tf.contrib.layers.xavier_initializer()
):
t_logits = slim.fully_connected(z, 64, scope='fc1')
t_logits = slim.fully_connected(t_logits, 64, scope='fc2')
t_logits = slim.fully_connected(t_logits, 64, scope='fc3')
t_logits = slim.fully_connected(t_logits, nbin, activation_fn=None,
scope='fc4')
return t_logits
def VAE_losses(t_logits, t_truncate, mu_logvar0, mu_logvar1, tiny=1e-08):
"""Define loss functions (reconstruction, KL divergence) and optimizer"""
t_dist = tf.nn.softmax(t_logits)
reconstruction = -tf.log(tf.reduce_sum(t_dist * t_truncate, axis=1))
mu0, logvar0 = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)
mu1, logvar1 = tf.split(mu_logvar1, num_or_size_splits=2, axis=1)
kl_d = 0.5 * tf.reduce_sum(tf.exp(logvar1 - logvar0) + tf.divide(tf.
square(mu0 - mu1), tf.exp(logvar0) + tiny) + logvar0 - logvar1 - 1.0, 1
)
loss = tf.reduce_mean(reconstruction + kl_d)
return reconstruction, kl_d, loss
def pt_x(t_truncate, mu_logvar0, mu_logvar, num_sample, is_training):
mu, logvar = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)
epsilon = tf.random_normal(tf.shape(logvar))
z1_sample = encoder_z(mu_logvar, epsilon)
t_logits_l = decoder(z1_sample, is_training)
t_dist_l = tf.nn.softmax(t_logits_l)
p_t_z = tf.reduce_sum(t_truncate * t_dist_l, 1)
pq_z = tf.exp(MVNloglikeli(z1_sample, mu_logvar0, noise=1e-08) -
MVNloglikeli(z1_sample, mu_logvar, noise=1e-08))
pt_x_l = p_t_z * pq_z
pt_x_sum = pt_x_l
for k in range(num_sample - 1):
epsilon = tf.random_normal(tf.shape(logvar))
z1_sample = encoder_z(mu_logvar, epsilon)
t_logits_l = decoder(z1_sample, is_training)
t_dist_l = tf.nn.softmax(t_logits_l)
p_t_z = tf.reduce_sum(t_truncate * t_dist_l, 1)
pq_z = tf.exp(MVNloglikeli(z1_sample, mu_logvar0, noise=1e-08) -
MVNloglikeli(z1_sample, mu_logvar, noise=1e-08))
pt_x_l = p_t_z * pq_z
pt_x_sum = pt_x_sum + pt_x_l
pt_x_avg = pt_x_sum / num_sample
return pt_x_avg
def loglikeli_cVAE(t_truncate, mu_logvar0, mu_logvar, num_sample, is_training):
pt_x_avg = pt_x(t_truncate, mu_logvar0, mu_logvar, num_sample, is_training)
return tf.log(pt_x_avg)
<mask token>
def t_dist_avg(mu_logvar0, t_logits_init, num_sample, is_training):
mu, logvar = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)
t_dist_new_sum = tf.nn.softmax(t_logits_init)
for k in range(num_sample - 1):
epsilon = tf.random_normal(tf.shape(logvar))
t_logits_new_k = decoder(encoder_z(mu_logvar0, epsilon), is_training)
t_dist_new_k = tf.nn.softmax(t_logits_new_k)
t_dist_new_sum = t_dist_new_sum + t_dist_new_k
t_dist_new_avg = np.divide(t_dist_new_sum, num_sample)
return t_dist_new_avg
<mask token>
| <mask token>
def saveDatadic(file_path, name, dataset):
np.save(file_path + name + '_x', dataset['x'])
np.save(file_path + name + '_t', dataset['t'])
np.save(file_path + name + '_e', dataset['e'])
<mask token>
def encoder0(x, is_training):
"""learned prior: Network p(z|x)"""
with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.
leaky_relu, weights_initializer=tf.contrib.layers.xavier_initializer()
):
mu_logvar = slim.fully_connected(x, 64, scope='fc1')
mu_logvar = slim.fully_connected(mu_logvar, 64, scope='fc2')
mu_logvar = slim.fully_connected(mu_logvar, 64, activation_fn=None,
scope='fc3')
return mu_logvar
def encoder(x, t_, is_training):
"""Network q(z|x,t_)"""
with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.
leaky_relu, weights_initializer=tf.contrib.layers.xavier_initializer()
):
inputs = tf.concat([t_, x], axis=1)
mu_logvar = slim.fully_connected(inputs, 64, scope='fc1')
mu_logvar = slim.fully_connected(mu_logvar, 64, scope='fc2')
mu_logvar = slim.fully_connected(mu_logvar, 64, activation_fn=None,
scope='fc3')
return mu_logvar
def encoder_z(mu_logvar, epsilon=None):
mu, logvar = tf.split(mu_logvar, num_or_size_splits=2, axis=1)
stddev = tf.sqrt(tf.exp(logvar))
if epsilon is None:
epsilon = tf.random_normal(tf.shape(stddev))
z = mu + tf.multiply(stddev, epsilon)
return z
def decoder(z, is_training):
"""Network p(t|z)"""
with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.
leaky_relu, weights_initializer=tf.contrib.layers.xavier_initializer()
):
t_logits = slim.fully_connected(z, 64, scope='fc1')
t_logits = slim.fully_connected(t_logits, 64, scope='fc2')
t_logits = slim.fully_connected(t_logits, 64, scope='fc3')
t_logits = slim.fully_connected(t_logits, nbin, activation_fn=None,
scope='fc4')
return t_logits
def VAE_losses(t_logits, t_truncate, mu_logvar0, mu_logvar1, tiny=1e-08):
"""Define loss functions (reconstruction, KL divergence) and optimizer"""
t_dist = tf.nn.softmax(t_logits)
reconstruction = -tf.log(tf.reduce_sum(t_dist * t_truncate, axis=1))
mu0, logvar0 = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)
mu1, logvar1 = tf.split(mu_logvar1, num_or_size_splits=2, axis=1)
kl_d = 0.5 * tf.reduce_sum(tf.exp(logvar1 - logvar0) + tf.divide(tf.
square(mu0 - mu1), tf.exp(logvar0) + tiny) + logvar0 - logvar1 - 1.0, 1
)
loss = tf.reduce_mean(reconstruction + kl_d)
return reconstruction, kl_d, loss
def pt_x(t_truncate, mu_logvar0, mu_logvar, num_sample, is_training):
mu, logvar = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)
epsilon = tf.random_normal(tf.shape(logvar))
z1_sample = encoder_z(mu_logvar, epsilon)
t_logits_l = decoder(z1_sample, is_training)
t_dist_l = tf.nn.softmax(t_logits_l)
p_t_z = tf.reduce_sum(t_truncate * t_dist_l, 1)
pq_z = tf.exp(MVNloglikeli(z1_sample, mu_logvar0, noise=1e-08) -
MVNloglikeli(z1_sample, mu_logvar, noise=1e-08))
pt_x_l = p_t_z * pq_z
pt_x_sum = pt_x_l
for k in range(num_sample - 1):
epsilon = tf.random_normal(tf.shape(logvar))
z1_sample = encoder_z(mu_logvar, epsilon)
t_logits_l = decoder(z1_sample, is_training)
t_dist_l = tf.nn.softmax(t_logits_l)
p_t_z = tf.reduce_sum(t_truncate * t_dist_l, 1)
pq_z = tf.exp(MVNloglikeli(z1_sample, mu_logvar0, noise=1e-08) -
MVNloglikeli(z1_sample, mu_logvar, noise=1e-08))
pt_x_l = p_t_z * pq_z
pt_x_sum = pt_x_sum + pt_x_l
pt_x_avg = pt_x_sum / num_sample
return pt_x_avg
def loglikeli_cVAE(t_truncate, mu_logvar0, mu_logvar, num_sample, is_training):
pt_x_avg = pt_x(t_truncate, mu_logvar0, mu_logvar, num_sample, is_training)
return tf.log(pt_x_avg)
<mask token>
def t_dist_avg(mu_logvar0, t_logits_init, num_sample, is_training):
mu, logvar = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)
t_dist_new_sum = tf.nn.softmax(t_logits_init)
for k in range(num_sample - 1):
epsilon = tf.random_normal(tf.shape(logvar))
t_logits_new_k = decoder(encoder_z(mu_logvar0, epsilon), is_training)
t_dist_new_k = tf.nn.softmax(t_logits_new_k)
t_dist_new_sum = t_dist_new_sum + t_dist_new_k
t_dist_new_avg = np.divide(t_dist_new_sum, num_sample)
return t_dist_new_avg
<mask token>
def saveResults(dataset, session_dir, session_name, out_dir, tt, event_tt_prob
):
sess = tf.Session()
session_path = session_dir + session_name + '.ckpt'
saver.restore(sess, session_path)
batch_x, batch_t, batch_e = dataset['x'], dataset['t'], dataset['e']
batch_t_cat = batch_t_categorize(batch_t, batch_e, tt, event_tt_prob)
batch_t_cat_likeli = batch_t_categorize(batch_t, batch_e, tt,
event_tt_prob, likelihood=True)
norm_batch_x = batch_x.copy()
norm_batch_x[:, cts_covariates] = normalize_batch(batch_x[:,
cts_covariates], norm_mean, norm_std)
test_pred_prob = sess.run(t_dist_new_avg, feed_dict={x: norm_batch_x,
is_training: False})
test_loglikeli = sess.run(total_loglikeli, feed_dict={t_truncate:
batch_t_cat_likeli, t_: batch_t_cat, x: norm_batch_x, event:
batch_e, is_training: False})
test_pred_avgt, test_avgt_mean, test_avgt_std = wAvg_t(sess,
norm_batch_x, test_pred_prob, tt, num_sample, return_wi=True)
test_pred_medt = [calculate_quantiles(post_prob, tt, 0.5) for post_prob in
test_pred_prob]
test_pred_medt = np.concatenate(test_pred_medt, axis=0)
test_pred_randomt = np.array([random_uniform_p(tt, post_prob, 1) for
post_prob in test_pred_prob])
np.save(out_dir + '/{}_test_pred_prob'.format(session_name), test_pred_prob
)
np.save(out_dir + '/{}_test_loglikeli'.format(session_name), test_loglikeli
)
np.save(out_dir + '/{}_test_pred_avgt'.format(session_name), test_pred_avgt
)
np.save(out_dir + '/{}_test_pred_medt'.format(session_name), test_pred_medt
)
np.save(out_dir + '/{}_test_pred_randomt'.format(session_name),
test_pred_randomt)
np.save(out_dir + '/{}_tt'.format(session_name), tt)
<mask token>
| <mask token>
def saveDatadic(file_path, name, dataset):
np.save(file_path + name + '_x', dataset['x'])
np.save(file_path + name + '_t', dataset['t'])
np.save(file_path + name + '_e', dataset['e'])
<mask token>
def encoder0(x, is_training):
"""learned prior: Network p(z|x)"""
with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.
leaky_relu, weights_initializer=tf.contrib.layers.xavier_initializer()
):
mu_logvar = slim.fully_connected(x, 64, scope='fc1')
mu_logvar = slim.fully_connected(mu_logvar, 64, scope='fc2')
mu_logvar = slim.fully_connected(mu_logvar, 64, activation_fn=None,
scope='fc3')
return mu_logvar
def encoder(x, t_, is_training):
"""Network q(z|x,t_)"""
with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.
leaky_relu, weights_initializer=tf.contrib.layers.xavier_initializer()
):
inputs = tf.concat([t_, x], axis=1)
mu_logvar = slim.fully_connected(inputs, 64, scope='fc1')
mu_logvar = slim.fully_connected(mu_logvar, 64, scope='fc2')
mu_logvar = slim.fully_connected(mu_logvar, 64, activation_fn=None,
scope='fc3')
return mu_logvar
def encoder_z(mu_logvar, epsilon=None):
mu, logvar = tf.split(mu_logvar, num_or_size_splits=2, axis=1)
stddev = tf.sqrt(tf.exp(logvar))
if epsilon is None:
epsilon = tf.random_normal(tf.shape(stddev))
z = mu + tf.multiply(stddev, epsilon)
return z
def decoder(z, is_training):
"""Network p(t|z)"""
with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.
leaky_relu, weights_initializer=tf.contrib.layers.xavier_initializer()
):
t_logits = slim.fully_connected(z, 64, scope='fc1')
t_logits = slim.fully_connected(t_logits, 64, scope='fc2')
t_logits = slim.fully_connected(t_logits, 64, scope='fc3')
t_logits = slim.fully_connected(t_logits, nbin, activation_fn=None,
scope='fc4')
return t_logits
def VAE_losses(t_logits, t_truncate, mu_logvar0, mu_logvar1, tiny=1e-08):
"""Define loss functions (reconstruction, KL divergence) and optimizer"""
t_dist = tf.nn.softmax(t_logits)
reconstruction = -tf.log(tf.reduce_sum(t_dist * t_truncate, axis=1))
mu0, logvar0 = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)
mu1, logvar1 = tf.split(mu_logvar1, num_or_size_splits=2, axis=1)
kl_d = 0.5 * tf.reduce_sum(tf.exp(logvar1 - logvar0) + tf.divide(tf.
square(mu0 - mu1), tf.exp(logvar0) + tiny) + logvar0 - logvar1 - 1.0, 1
)
loss = tf.reduce_mean(reconstruction + kl_d)
return reconstruction, kl_d, loss
def pt_x(t_truncate, mu_logvar0, mu_logvar, num_sample, is_training):
mu, logvar = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)
epsilon = tf.random_normal(tf.shape(logvar))
z1_sample = encoder_z(mu_logvar, epsilon)
t_logits_l = decoder(z1_sample, is_training)
t_dist_l = tf.nn.softmax(t_logits_l)
p_t_z = tf.reduce_sum(t_truncate * t_dist_l, 1)
pq_z = tf.exp(MVNloglikeli(z1_sample, mu_logvar0, noise=1e-08) -
MVNloglikeli(z1_sample, mu_logvar, noise=1e-08))
pt_x_l = p_t_z * pq_z
pt_x_sum = pt_x_l
for k in range(num_sample - 1):
epsilon = tf.random_normal(tf.shape(logvar))
z1_sample = encoder_z(mu_logvar, epsilon)
t_logits_l = decoder(z1_sample, is_training)
t_dist_l = tf.nn.softmax(t_logits_l)
p_t_z = tf.reduce_sum(t_truncate * t_dist_l, 1)
pq_z = tf.exp(MVNloglikeli(z1_sample, mu_logvar0, noise=1e-08) -
MVNloglikeli(z1_sample, mu_logvar, noise=1e-08))
pt_x_l = p_t_z * pq_z
pt_x_sum = pt_x_sum + pt_x_l
pt_x_avg = pt_x_sum / num_sample
return pt_x_avg
def loglikeli_cVAE(t_truncate, mu_logvar0, mu_logvar, num_sample, is_training):
pt_x_avg = pt_x(t_truncate, mu_logvar0, mu_logvar, num_sample, is_training)
return tf.log(pt_x_avg)
def MVNloglikeli(z, mu_logvar, noise=1e-08):
mu, logvar = tf.split(mu_logvar, num_or_size_splits=2, axis=1)
varmatrix = tf.exp(logvar)
loglikeli = -0.5 * (tf.log(varmatrix) + (z - mu) ** 2 / varmatrix + np.
log(2 * np.pi))
return tf.reduce_sum(loglikeli, axis=1)
def t_dist_avg(mu_logvar0, t_logits_init, num_sample, is_training):
mu, logvar = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)
t_dist_new_sum = tf.nn.softmax(t_logits_init)
for k in range(num_sample - 1):
epsilon = tf.random_normal(tf.shape(logvar))
t_logits_new_k = decoder(encoder_z(mu_logvar0, epsilon), is_training)
t_dist_new_k = tf.nn.softmax(t_logits_new_k)
t_dist_new_sum = t_dist_new_sum + t_dist_new_k
t_dist_new_avg = np.divide(t_dist_new_sum, num_sample)
return t_dist_new_avg
def zero_outputs():
return 0.0, 0.0, 0.0
<mask token>
def wAvg_t(sess, new_x, post_prob, tt, num_sample, return_wi=False):
for j in range(num_sample):
t_hat_l = np.array([random_uniform_p(tt, post_prob[subj], 1) for
subj in range(post_prob.shape[0])])
t_hat_binned = batch_t_categorize(t_hat_l, np.ones(t_hat_l.shape),
tt, event_tt_prob=1.0)
mu_logvar0l = sess.run(mu_logvar0, feed_dict={x: new_x, is_training:
False})
mu_logvar1l = sess.run(mu_logvar1, feed_dict={x: new_x, t_:
t_hat_binned, is_training: False})
mu1l, logvar1l = np.split(mu_logvar1l, 2, 1)
epsilon_l = np.random.normal(size=logvar1l.shape)
stddevl = np.sqrt(np.exp(logvar1l))
z1l = mu1l + np.multiply(stddevl, epsilon_l)
wil = np.divide(np.exp(MVNloglikeli_np(z1l, mu_logvar0l, noise=
1e-08)), np.exp(MVNloglikeli_np(z1l, mu_logvar1l, noise=1e-08)))
if j == 0:
t_hat_all = np.array(t_hat_l).reshape(post_prob.shape[0], 1)
wl_all = wil.reshape(post_prob.shape[0], 1)
else:
t_hat_all = np.concatenate([t_hat_all, np.array(t_hat_l).
reshape(post_prob.shape[0], 1)], axis=1)
wl_all = np.concatenate([wl_all, wil.reshape(post_prob.shape[0],
1)], axis=1)
t_hat_i = np.sum(np.multiply(t_hat_all, wl_all), axis=1) / np.sum(wl_all,
axis=1)
if return_wi == False:
return t_hat_i
else:
return t_hat_i, np.mean(wl_all, axis=1), np.std(wl_all, axis=1)
def saveResults(dataset, session_dir, session_name, out_dir, tt, event_tt_prob
):
sess = tf.Session()
session_path = session_dir + session_name + '.ckpt'
saver.restore(sess, session_path)
batch_x, batch_t, batch_e = dataset['x'], dataset['t'], dataset['e']
batch_t_cat = batch_t_categorize(batch_t, batch_e, tt, event_tt_prob)
batch_t_cat_likeli = batch_t_categorize(batch_t, batch_e, tt,
event_tt_prob, likelihood=True)
norm_batch_x = batch_x.copy()
norm_batch_x[:, cts_covariates] = normalize_batch(batch_x[:,
cts_covariates], norm_mean, norm_std)
test_pred_prob = sess.run(t_dist_new_avg, feed_dict={x: norm_batch_x,
is_training: False})
test_loglikeli = sess.run(total_loglikeli, feed_dict={t_truncate:
batch_t_cat_likeli, t_: batch_t_cat, x: norm_batch_x, event:
batch_e, is_training: False})
test_pred_avgt, test_avgt_mean, test_avgt_std = wAvg_t(sess,
norm_batch_x, test_pred_prob, tt, num_sample, return_wi=True)
test_pred_medt = [calculate_quantiles(post_prob, tt, 0.5) for post_prob in
test_pred_prob]
test_pred_medt = np.concatenate(test_pred_medt, axis=0)
test_pred_randomt = np.array([random_uniform_p(tt, post_prob, 1) for
post_prob in test_pred_prob])
np.save(out_dir + '/{}_test_pred_prob'.format(session_name), test_pred_prob
)
np.save(out_dir + '/{}_test_loglikeli'.format(session_name), test_loglikeli
)
np.save(out_dir + '/{}_test_pred_avgt'.format(session_name), test_pred_avgt
)
np.save(out_dir + '/{}_test_pred_medt'.format(session_name), test_pred_medt
)
np.save(out_dir + '/{}_test_pred_randomt'.format(session_name),
test_pred_randomt)
np.save(out_dir + '/{}_tt'.format(session_name), tt)
def saveResults_norun(session_name, out_dir, tt, test_pred_prob,
test_loglikeli, test_pred_avgt, test_pred_medt, test_pred_randomt):
np.save(out_dir + '/{}_test_pred_prob'.format(session_name), test_pred_prob
)
np.save(out_dir + '/{}_test_loglikeli'.format(session_name), test_loglikeli
)
np.save(out_dir + '/{}_test_pred_avgt'.format(session_name), test_pred_avgt
)
np.save(out_dir + '/{}_test_pred_medt'.format(session_name), test_pred_medt
)
np.save(out_dir + '/{}_test_pred_randomt'.format(session_name),
test_pred_randomt)
np.save(out_dir + '/{}_tt'.format(session_name), tt)
<mask token>
| import math
import os
import sys
import pandas
import numpy as np
import seaborn as sns
import tensorflow as tf
import logging
from utils.preprocessing import formatted_data, normalize_batch, event_t_bin_prob, risk_t_bin_prob, batch_t_categorize, next_batch, one_hot_encoder, one_hot_indices, flatten_nested
from utils.metrics import calculate_quantiles, random_multinomial, MVNloglikeli_np, random_uniform_p
name = 'cVAE_q_flchain'
output_dir = '/data/zidi/cVAE/results/flchain/saved_models' + '/'
log_file = output_dir + name + '.log'
logging.basicConfig(filename=log_file, filemode='w', level=logging.DEBUG)
out_dir = '/data/zidi/cVAE/results/flchain' + '/'
file_path = '/data/zidi/cVAE/datasets/'
training = True
path = os.path.abspath(os.path.join(file_path, '', 'flchain.csv'))
data_frame = pandas.read_csv(path, index_col=0)
data_frame = data_frame[data_frame.futime != 0]
data_frame['pat'] = np.arange(data_frame.shape[0])
to_drop = ['futime', 'death', 'chapter', 'pat']
dataset = data_frame.drop(labels=to_drop, axis=1)
one_hot_encoder_list = ['sex', 'flc.grp', 'sample.yr']
one_hot_encoder_list_idx = np.where(np.isin(dataset.columns.values, np.
array(one_hot_encoder_list)))
idx = np.arange(0, dataset.shape[0])
np.random.seed(123)
np.random.shuffle(idx)
num_examples = int(0.8 * dataset.shape[0])
print('num_examples:{}'.format(num_examples))
train_idx = idx[0:num_examples]
split = int((dataset.shape[0] - num_examples) / 2)
test_idx = idx[num_examples:num_examples + split]
valid_idx = idx[num_examples + split:dataset.shape[0]]
t_data = data_frame[['futime']]
e_data = data_frame[['death']]
pat_data = data_frame[['pat']]
cate_idx = np.where(np.isin(dataset.columns.values, np.array(
one_hot_encoder_list)))[0]
cts_idx = np.setdiff1d(np.arange(dataset.shape[1]), cate_idx)
continuous_median = dataset.iloc[train_idx, cts_idx].median(axis=0).values
categorical_mode = dataset.iloc[train_idx, cate_idx].mode(axis=0).values
impute_dict = dict(zip(dataset.columns.values[cate_idx], categorical_mode.
reshape(cate_idx.shape)))
impute_dict.update(dict(zip(dataset.columns.values[cts_idx],
continuous_median.reshape(cts_idx.shape))))
dataset.fillna(impute_dict, inplace=True)
dataset = one_hot_encoder(dataset, encode=one_hot_encoder_list)
encoded_indices = one_hot_indices(dataset, one_hot_encoder_list)
covariates = np.array(dataset.columns.values)
x = np.array(dataset).reshape(dataset.shape)
t = np.array(t_data).reshape(len(t_data))
e = np.array(e_data).reshape(len(e_data))
pat = np.array(pat_data).reshape(len(pat_data))
print('x_shape:{}'.format(x.shape))
x = x[idx]
t = t[idx]
e = e[idx]
pat = pat[idx]
end_time = max(t)
print('end_time:{}'.format(end_time))
print('observed percent:{}'.format(sum(e) / len(e)))
print('test:{}, valid:{}, train:{}, all: {}'.format(len(test_idx), len(
valid_idx), num_examples, len(test_idx) + len(valid_idx) + num_examples))
train = formatted_data(x=x, t=t, e=e, pat=pat, idx=train_idx)
test = formatted_data(x=x, t=t, e=e, pat=pat, idx=test_idx)
valid = formatted_data(x=x, t=t, e=e, pat=pat, idx=valid_idx)
cat_covariates = np.array(flatten_nested(encoded_indices))
cts_covariates = np.setdiff1d(np.arange(len(covariates)), cat_covariates)
norm_mean = np.nanmean(train['x'][:, cts_covariates], axis=0)
norm_std = np.nanstd(train['x'][:, cts_covariates], axis=0)
def saveDatadic(file_path, name, dataset):
np.save(file_path + name + '_x', dataset['x'])
np.save(file_path + name + '_t', dataset['t'])
np.save(file_path + name + '_e', dataset['e'])
saveDatadic(file_path, 'flchain_train', train)
saveDatadic(file_path, 'flchain_valid', valid)
saveDatadic(file_path, 'flchain_test', test)
np.save(file_path + 'flchain_encoded_indices', encoded_indices)
np.save(file_path + 'flchain_covariates', covariates)
m = 100
num_sample = 100
ncov = train['x'].shape[1]
w_e = 1.0
w_ne = 1.0
nbin = 100
tt = np.percentile(train['t'][train['e'] == 1], np.linspace(0.0, 100.0,
nbin, endpoint=True))
loss_of_info = np.mean(train['t'] > np.max(train['t'][train['e'] == 1]))
if loss_of_info > 0.0001:
nbin = nbin + 1
tt = np.append(tt, np.max(train['t']))
event_tt_prob = risk_t_bin_prob(train['t'], train['e'], tt)
else:
event_tt_bin, event_tt_prob = risk_t_bin_prob(train['t'], train['e'], tt)
slim = tf.contrib.slim
sample_size = 50
def encoder0(x, is_training):
"""learned prior: Network p(z|x)"""
with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.
leaky_relu, weights_initializer=tf.contrib.layers.xavier_initializer()
):
mu_logvar = slim.fully_connected(x, 64, scope='fc1')
mu_logvar = slim.fully_connected(mu_logvar, 64, scope='fc2')
mu_logvar = slim.fully_connected(mu_logvar, 64, activation_fn=None,
scope='fc3')
return mu_logvar
def encoder(x, t_, is_training):
"""Network q(z|x,t_)"""
with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.
leaky_relu, weights_initializer=tf.contrib.layers.xavier_initializer()
):
inputs = tf.concat([t_, x], axis=1)
mu_logvar = slim.fully_connected(inputs, 64, scope='fc1')
mu_logvar = slim.fully_connected(mu_logvar, 64, scope='fc2')
mu_logvar = slim.fully_connected(mu_logvar, 64, activation_fn=None,
scope='fc3')
return mu_logvar
def encoder_z(mu_logvar, epsilon=None):
mu, logvar = tf.split(mu_logvar, num_or_size_splits=2, axis=1)
stddev = tf.sqrt(tf.exp(logvar))
if epsilon is None:
epsilon = tf.random_normal(tf.shape(stddev))
z = mu + tf.multiply(stddev, epsilon)
return z
def decoder(z, is_training):
"""Network p(t|z)"""
with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.
leaky_relu, weights_initializer=tf.contrib.layers.xavier_initializer()
):
t_logits = slim.fully_connected(z, 64, scope='fc1')
t_logits = slim.fully_connected(t_logits, 64, scope='fc2')
t_logits = slim.fully_connected(t_logits, 64, scope='fc3')
t_logits = slim.fully_connected(t_logits, nbin, activation_fn=None,
scope='fc4')
return t_logits
def VAE_losses(t_logits, t_truncate, mu_logvar0, mu_logvar1, tiny=1e-08):
"""Define loss functions (reconstruction, KL divergence) and optimizer"""
t_dist = tf.nn.softmax(t_logits)
reconstruction = -tf.log(tf.reduce_sum(t_dist * t_truncate, axis=1))
mu0, logvar0 = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)
mu1, logvar1 = tf.split(mu_logvar1, num_or_size_splits=2, axis=1)
kl_d = 0.5 * tf.reduce_sum(tf.exp(logvar1 - logvar0) + tf.divide(tf.
square(mu0 - mu1), tf.exp(logvar0) + tiny) + logvar0 - logvar1 - 1.0, 1
)
loss = tf.reduce_mean(reconstruction + kl_d)
return reconstruction, kl_d, loss
def pt_x(t_truncate, mu_logvar0, mu_logvar, num_sample, is_training):
mu, logvar = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)
epsilon = tf.random_normal(tf.shape(logvar))
z1_sample = encoder_z(mu_logvar, epsilon)
t_logits_l = decoder(z1_sample, is_training)
t_dist_l = tf.nn.softmax(t_logits_l)
p_t_z = tf.reduce_sum(t_truncate * t_dist_l, 1)
pq_z = tf.exp(MVNloglikeli(z1_sample, mu_logvar0, noise=1e-08) -
MVNloglikeli(z1_sample, mu_logvar, noise=1e-08))
pt_x_l = p_t_z * pq_z
pt_x_sum = pt_x_l
for k in range(num_sample - 1):
epsilon = tf.random_normal(tf.shape(logvar))
z1_sample = encoder_z(mu_logvar, epsilon)
t_logits_l = decoder(z1_sample, is_training)
t_dist_l = tf.nn.softmax(t_logits_l)
p_t_z = tf.reduce_sum(t_truncate * t_dist_l, 1)
pq_z = tf.exp(MVNloglikeli(z1_sample, mu_logvar0, noise=1e-08) -
MVNloglikeli(z1_sample, mu_logvar, noise=1e-08))
pt_x_l = p_t_z * pq_z
pt_x_sum = pt_x_sum + pt_x_l
pt_x_avg = pt_x_sum / num_sample
return pt_x_avg
def loglikeli_cVAE(t_truncate, mu_logvar0, mu_logvar, num_sample, is_training):
pt_x_avg = pt_x(t_truncate, mu_logvar0, mu_logvar, num_sample, is_training)
return tf.log(pt_x_avg)
def MVNloglikeli(z, mu_logvar, noise=1e-08):
mu, logvar = tf.split(mu_logvar, num_or_size_splits=2, axis=1)
varmatrix = tf.exp(logvar)
loglikeli = -0.5 * (tf.log(varmatrix) + (z - mu) ** 2 / varmatrix + np.
log(2 * np.pi))
return tf.reduce_sum(loglikeli, axis=1)
def t_dist_avg(mu_logvar0, t_logits_init, num_sample, is_training):
mu, logvar = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)
t_dist_new_sum = tf.nn.softmax(t_logits_init)
for k in range(num_sample - 1):
epsilon = tf.random_normal(tf.shape(logvar))
t_logits_new_k = decoder(encoder_z(mu_logvar0, epsilon), is_training)
t_dist_new_k = tf.nn.softmax(t_logits_new_k)
t_dist_new_sum = t_dist_new_sum + t_dist_new_k
t_dist_new_avg = np.divide(t_dist_new_sum, num_sample)
return t_dist_new_avg
def zero_outputs():
return 0.0, 0.0, 0.0
is_training = tf.placeholder(tf.bool, [], name='is_training')
t_ = tf.placeholder(tf.float32, [None, nbin], name='t_')
t_truncate = tf.placeholder(tf.float32, [None, nbin], name='t_truncate')
event = tf.placeholder(tf.float32, [None], name='event')
x = tf.placeholder(tf.float32, [None, ncov], name='x')
e_idx = tf.where(tf.equal(event, 1.0))
e_idx = tf.reshape(e_idx, [tf.shape(e_idx)[0]])
ne_idx = tf.where(tf.equal(event, 0.0))
ne_idx = tf.reshape(ne_idx, [tf.shape(ne_idx)[0]])
e_is_empty = tf.equal(tf.size(e_idx), 0)
ne_is_empty = tf.equal(tf.size(ne_idx), 0)
with tf.variable_scope('encoder0'):
mu_logvar0 = encoder0(x, is_training)
z0 = encoder_z(mu_logvar0)
with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE):
mu_logvar1 = encoder(x, t_, is_training)
z1 = encoder_z(mu_logvar1)
with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE):
t_logits_1 = decoder(z1, is_training)
t_logits_0 = decoder(z0, is_training)
t_dist_new = tf.nn.softmax(t_logits_0)
t_dist_new_avg = t_dist_avg(mu_logvar0, t_dist_new, num_sample, is_training
)
event_loglikeli = loglikeli_cVAE(tf.gather(t_truncate, e_idx), tf.
gather(mu_logvar0, e_idx), tf.gather(mu_logvar1, e_idx), num_sample,
is_training)
censor_loglikeli = loglikeli_cVAE(tf.gather(t_truncate, ne_idx), tf.
gather(mu_logvar0, ne_idx), tf.gather(mu_logvar1, ne_idx),
num_sample, is_training)
total_loglikeli = loglikeli_cVAE(t_truncate, mu_logvar0, mu_logvar1,
num_sample, is_training)
with tf.variable_scope('training') as scope:
e_recon, e_kl_d, eloss = tf.cond(e_is_empty, lambda : zero_outputs(),
lambda : VAE_losses(tf.gather(t_logits_1, e_idx), tf.gather(
t_truncate, e_idx), tf.gather(mu_logvar0, e_idx), tf.gather(
mu_logvar1, e_idx)))
ne_recon, ne_kl_d, closs = tf.cond(ne_is_empty, lambda : zero_outputs(),
lambda : VAE_losses(tf.gather(t_logits_1, ne_idx), tf.gather(
t_truncate, ne_idx), tf.gather(mu_logvar0, ne_idx), tf.gather(
mu_logvar1, ne_idx)))
loss = w_e * eloss + w_ne * closs
rec_all, kl_d_all, loss_all = VAE_losses(t_logits_1, t_truncate,
mu_logvar0, mu_logvar1)
params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
gradients = tf.gradients(loss_all, params)
grads = zip(gradients, params)
optimizer = tf.train.AdamOptimizer(learning_rate=0.0005, beta1=0.9,
beta2=0.999)
train_step = optimizer.apply_gradients(grads)
def wAvg_t(sess, new_x, post_prob, tt, num_sample, return_wi=False):
for j in range(num_sample):
t_hat_l = np.array([random_uniform_p(tt, post_prob[subj], 1) for
subj in range(post_prob.shape[0])])
t_hat_binned = batch_t_categorize(t_hat_l, np.ones(t_hat_l.shape),
tt, event_tt_prob=1.0)
mu_logvar0l = sess.run(mu_logvar0, feed_dict={x: new_x, is_training:
False})
mu_logvar1l = sess.run(mu_logvar1, feed_dict={x: new_x, t_:
t_hat_binned, is_training: False})
mu1l, logvar1l = np.split(mu_logvar1l, 2, 1)
epsilon_l = np.random.normal(size=logvar1l.shape)
stddevl = np.sqrt(np.exp(logvar1l))
z1l = mu1l + np.multiply(stddevl, epsilon_l)
wil = np.divide(np.exp(MVNloglikeli_np(z1l, mu_logvar0l, noise=
1e-08)), np.exp(MVNloglikeli_np(z1l, mu_logvar1l, noise=1e-08)))
if j == 0:
t_hat_all = np.array(t_hat_l).reshape(post_prob.shape[0], 1)
wl_all = wil.reshape(post_prob.shape[0], 1)
else:
t_hat_all = np.concatenate([t_hat_all, np.array(t_hat_l).
reshape(post_prob.shape[0], 1)], axis=1)
wl_all = np.concatenate([wl_all, wil.reshape(post_prob.shape[0],
1)], axis=1)
t_hat_i = np.sum(np.multiply(t_hat_all, wl_all), axis=1) / np.sum(wl_all,
axis=1)
if return_wi == False:
return t_hat_i
else:
return t_hat_i, np.mean(wl_all, axis=1), np.std(wl_all, axis=1)
def saveResults(dataset, session_dir, session_name, out_dir, tt, event_tt_prob
):
sess = tf.Session()
session_path = session_dir + session_name + '.ckpt'
saver.restore(sess, session_path)
batch_x, batch_t, batch_e = dataset['x'], dataset['t'], dataset['e']
batch_t_cat = batch_t_categorize(batch_t, batch_e, tt, event_tt_prob)
batch_t_cat_likeli = batch_t_categorize(batch_t, batch_e, tt,
event_tt_prob, likelihood=True)
norm_batch_x = batch_x.copy()
norm_batch_x[:, cts_covariates] = normalize_batch(batch_x[:,
cts_covariates], norm_mean, norm_std)
test_pred_prob = sess.run(t_dist_new_avg, feed_dict={x: norm_batch_x,
is_training: False})
test_loglikeli = sess.run(total_loglikeli, feed_dict={t_truncate:
batch_t_cat_likeli, t_: batch_t_cat, x: norm_batch_x, event:
batch_e, is_training: False})
test_pred_avgt, test_avgt_mean, test_avgt_std = wAvg_t(sess,
norm_batch_x, test_pred_prob, tt, num_sample, return_wi=True)
test_pred_medt = [calculate_quantiles(post_prob, tt, 0.5) for post_prob in
test_pred_prob]
test_pred_medt = np.concatenate(test_pred_medt, axis=0)
test_pred_randomt = np.array([random_uniform_p(tt, post_prob, 1) for
post_prob in test_pred_prob])
np.save(out_dir + '/{}_test_pred_prob'.format(session_name), test_pred_prob
)
np.save(out_dir + '/{}_test_loglikeli'.format(session_name), test_loglikeli
)
np.save(out_dir + '/{}_test_pred_avgt'.format(session_name), test_pred_avgt
)
np.save(out_dir + '/{}_test_pred_medt'.format(session_name), test_pred_medt
)
np.save(out_dir + '/{}_test_pred_randomt'.format(session_name),
test_pred_randomt)
np.save(out_dir + '/{}_tt'.format(session_name), tt)
def saveResults_norun(session_name, out_dir, tt, test_pred_prob,
test_loglikeli, test_pred_avgt, test_pred_medt, test_pred_randomt):
np.save(out_dir + '/{}_test_pred_prob'.format(session_name), test_pred_prob
)
np.save(out_dir + '/{}_test_loglikeli'.format(session_name), test_loglikeli
)
np.save(out_dir + '/{}_test_pred_avgt'.format(session_name), test_pred_avgt
)
np.save(out_dir + '/{}_test_pred_medt'.format(session_name), test_pred_medt
)
np.save(out_dir + '/{}_test_pred_randomt'.format(session_name),
test_pred_randomt)
np.save(out_dir + '/{}_tt'.format(session_name), tt)
if training == True:
valid_recon_loss = []
valid_epoch_recon_loss = []
valid_epoch_loss = []
valid_epoch_event_recon_loss = []
valid_epoch_censor_recon_loss = []
best_likelihood = -np.inf
best_i = 0
best_epoch = 0
num_epoch = 200
num_sample = 100
num_batch = int(train['x'].shape[0] / m)
require_impr = 3000
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(num_epoch * num_batch):
batch_x, batch_t, batch_e = next_batch(train, m=m)
batch_t_cat_likeli = batch_t_categorize(batch_t, batch_e, tt,
event_tt_prob, likelihood=True)
norm_batch_x = batch_x.copy()
norm_batch_x[:, cts_covariates] = normalize_batch(batch_x[:,
cts_covariates], norm_mean, norm_std)
batch_t_cat = batch_t_categorize(batch_t, batch_e, tt,
event_tt_prob)
sess.run(train_step, feed_dict={t_: batch_t_cat, t_truncate:
batch_t_cat_likeli, x: norm_batch_x, event: batch_e,
is_training: True})
if i % num_batch == 0:
batch_x, batch_t, batch_e = next_batch(valid, m=valid['x'].
shape[0])
batch_t_cat = batch_t_categorize(batch_t, batch_e, tt,
event_tt_prob)
batch_t_cat_likeli = batch_t_categorize(batch_t, batch_e,
tt, event_tt_prob, likelihood=True)
norm_batch_x = batch_x.copy()
norm_batch_x[:, cts_covariates] = normalize_batch(batch_x[:,
cts_covariates], norm_mean, norm_std)
epoch_loglikeli = np.mean(sess.run(total_loglikeli,
feed_dict={t_: batch_t_cat, t_truncate:
batch_t_cat_likeli, x: norm_batch_x, event: batch_e,
is_training: False}))
epoch_loss = sess.run(loss_all, feed_dict={t_: batch_t_cat,
t_truncate: batch_t_cat_likeli, x: norm_batch_x, event:
batch_e, is_training: False})
valid_epoch_recon_loss.append(epoch_loglikeli)
valid_epoch_loss.append(epoch_loss)
epoch_recon_closs = np.mean(sess.run(ne_recon, feed_dict={
t_: batch_t_cat, t_truncate: batch_t_cat_likeli, x:
norm_batch_x, event: batch_e, is_training: False}))
valid_epoch_censor_recon_loss.append(epoch_recon_closs)
epoch_recon_eloss = np.mean(sess.run(e_recon, feed_dict={t_:
batch_t_cat, t_truncate: batch_t_cat_likeli, x:
norm_batch_x, event: batch_e, is_training: False}))
valid_epoch_event_recon_loss.append(epoch_recon_eloss)
if best_likelihood <= epoch_loglikeli:
best_likelihood = epoch_loglikeli
best_i = i
save_path = saver.save(sess, output_dir + name + '.ckpt')
op_print = 'Epoch ' + str(i / num_batch) + ': Loss ' + str(
epoch_loss) + ' log-likelihood: ' + str(epoch_loglikeli
) + ' event rec loss: ' + str(epoch_recon_eloss
) + ' censor rec loss: ' + str(epoch_recon_closs)
logging.debug(op_print)
if i - best_i > require_impr:
print('Model stops improving for a while')
break
saveResults(test, session_dir=output_dir, session_name=name, out_dir=
out_dir, tt=tt, event_tt_prob=event_tt_prob)
else:
sess = tf.Session()
saver.restore(sess, output_dir + name + '.ckpt')
batch_x, batch_t, batch_e = test['x'], test['t'], test['e']
batch_t_cat = batch_t_categorize(batch_t, batch_e, tt, event_tt_prob)
batch_t_cat_likeli = batch_t_categorize(batch_t, batch_e, tt,
event_tt_prob, likelihood=True)
norm_batch_x = batch_x.copy()
norm_batch_x[:, cts_covariates] = normalize_batch(batch_x[:,
cts_covariates], norm_mean, norm_std)
test_pred_prob = sess.run(t_dist_new_avg, feed_dict={x: norm_batch_x,
is_training: False})
test_loglikeli = sess.run(total_loglikeli, feed_dict={t_truncate:
batch_t_cat_likeli, t_: batch_t_cat, x: norm_batch_x, event:
batch_e, is_training: False})
test_pred_avgt, test_avgt_mean, test_avgt_std = wAvg_t(sess,
norm_batch_x, test_pred_prob, tt, num_sample, return_wi=True)
test_pred_medt = [calculate_quantiles(post_prob, tt, 0.5) for post_prob in
test_pred_prob]
test_pred_medt = np.concatenate(test_pred_medt, axis=0)
test_pred_randomt = np.array([random_uniform_p(tt, post_prob, 1) for
post_prob in test_pred_prob])
saveResults_norun(session_name=name, out_dir=out_dir, tt=tt,
test_pred_prob=test_pred_prob, test_loglikeli=test_loglikeli,
test_pred_avgt=test_pred_avgt, test_pred_medt=test_pred_medt,
test_pred_randomt=test_pred_randomt)
| import math
import os
import sys
import pandas
import numpy as np
import seaborn as sns
import tensorflow as tf
import logging
# from utils.simulation_functions import simulation_cox_gompertz
from utils.preprocessing import formatted_data, normalize_batch, event_t_bin_prob,risk_t_bin_prob,\
batch_t_categorize, next_batch, one_hot_encoder, one_hot_indices, flatten_nested
from utils.metrics import calculate_quantiles, random_multinomial, MVNloglikeli_np, random_uniform_p
# simulation settings
name = 'cVAE_q_flchain'
### on my mac
# directory of output model
# output_dir = '/Users/ZidiXiu/Dropbox/Research/VAE/datasets/flchain'+'/'
# directory of output test results
# out_dir = '/Users/ZidiXiu/Dropbox/Research/VAE/results/flchain'+'/'
# flchain dataset
# file_path = '/Users/ZidiXiu/Dropbox/Research/VAE/datasets'
### on GPU server
# directory of output model
output_dir = '/data/zidi/cVAE/results/flchain/saved_models'+'/'
log_file = output_dir+name+'.log'
logging.basicConfig(filename=log_file, filemode='w', level=logging.DEBUG)
# directory of output test results
out_dir = '/data/zidi/cVAE/results/flchain'+'/'
# flchain dataset
file_path = '/data/zidi/cVAE/datasets/'
training = True
path = os.path.abspath(os.path.join(file_path, '', 'flchain.csv'))
data_frame = pandas.read_csv(path, index_col=0)
# remove rows with 0 time-to-event
data_frame = data_frame[data_frame.futime != 0]
data_frame['pat'] = np.arange(data_frame.shape[0])
# x_data = data_frame[['age', 'sex', 'kappa', 'lambda', 'flc.grp', 'creatinine', 'mgus']]
# Preprocess
to_drop = ['futime', 'death', 'chapter', 'pat']
dataset = data_frame.drop(labels=to_drop, axis=1)
one_hot_encoder_list = ['sex', 'flc.grp', 'sample.yr']
one_hot_encoder_list_idx = np.where(np.isin(dataset.columns.values, np.array(one_hot_encoder_list)))
# split to train/valid/test before calculating imputation values
# first shuffling all indices
idx = np.arange(0, dataset.shape[0])
np.random.seed(123)
np.random.shuffle(idx)
num_examples = int(0.80 * dataset.shape[0])
print("num_examples:{}".format(num_examples))
train_idx = idx[0: num_examples]
split = int(( dataset.shape[0] - num_examples) / 2)
test_idx = idx[num_examples: num_examples + split]
valid_idx = idx[num_examples + split: dataset.shape[0]]
####
t_data = data_frame[['futime']]
e_data = data_frame[['death']]
pat_data = data_frame[['pat']]
# get imputation values from training dataset
cate_idx = np.where(np.isin(dataset.columns.values, np.array(one_hot_encoder_list)))[0]
cts_idx = np.setdiff1d(np.arange(dataset.shape[1]), cate_idx)
continuous_median= dataset.iloc[train_idx,cts_idx].median(axis=0).values
categorical_mode = dataset.iloc[train_idx,cate_idx].mode(axis=0).values
impute_dict = dict(zip(dataset.columns.values[cate_idx],categorical_mode.reshape(cate_idx.shape)))
impute_dict.update(dict(zip(dataset.columns.values[cts_idx],continuous_median.reshape(cts_idx.shape))))
# fill back the imputed values
dataset.fillna(impute_dict, inplace=True)
dataset = one_hot_encoder(dataset, encode=one_hot_encoder_list)
encoded_indices = one_hot_indices(dataset, one_hot_encoder_list)
# print("data description:{}".format(dataset.describe()))
covariates = np.array(dataset.columns.values)
# print("columns:{}".format(covariates))
x = np.array(dataset).reshape(dataset.shape)
t = np.array(t_data).reshape(len(t_data))
e = np.array(e_data).reshape(len(e_data))
pat = np.array(pat_data).reshape(len(pat_data))
# print("x:{}, t:{}, e:{}, len:{}".format(x[0], t[0], e[0], len(t)))
print("x_shape:{}".format(x.shape))
# here idx has been shuffled
x = x[idx]
t = t[idx]
e = e[idx]
pat = pat[idx]
end_time = max(t)
print("end_time:{}".format(end_time))
print("observed percent:{}".format(sum(e) / len(e)))
# print("shuffled x:{}, t:{}, e:{}, len:{}".format(x[0], t[0], e[0], len(t)))
print("test:{}, valid:{}, train:{}, all: {}".format(len(test_idx), len(valid_idx), num_examples,
len(test_idx) + len(valid_idx) + num_examples))
# print("test_idx:{}, valid_idx:{},train_idx:{} ".format(test_idx, valid_idx, train_idx))
train = formatted_data(x=x, t=t, e=e, pat = pat ,idx=train_idx)
test = formatted_data(x=x, t=t, e=e, pat = pat ,idx=test_idx)
valid = formatted_data(x=x, t=t, e=e, pat = pat ,idx=valid_idx)
cat_covariates = np.array(flatten_nested(encoded_indices))
cts_covariates = np.setdiff1d(np.arange(len(covariates)), cat_covariates)
# normalize inputs
norm_mean = np.nanmean(train['x'][:,cts_covariates],axis=0)
norm_std = np.nanstd(train['x'][:,cts_covariates],axis=0)
def saveDatadic(file_path, name, dataset):
np.save(file_path+name+'_x', dataset['x'])
np.save(file_path+name+'_t', dataset['t'])
np.save(file_path+name+'_e', dataset['e'])
saveDatadic(file_path, 'flchain_train', train)
saveDatadic(file_path, 'flchain_valid', valid)
saveDatadic(file_path, 'flchain_test', test)
np.save(file_path+'flchain_encoded_indices', encoded_indices)
np.save(file_path+'flchain_covariates', covariates)
## Model hyperparameters
m=100
num_sample = 100
ncov = train['x'].shape[1]
w_e = 1.0
w_ne = 1.0
# split training time based on bins
nbin=100
tt = np.percentile(train['t'][train['e']==1],np.linspace(0.,100.,nbin, endpoint=True))
# based on whether we have censoring after the largest observed t
loss_of_info = np.mean(train['t']>np.max(train['t'][train['e']==1]))
# need to convert t to different size of bins
if loss_of_info > 0.0001:
nbin = nbin + 1
# add the largest observed censoring time inside
tt = np.append(tt,np.max(train['t']))
event_tt_prob = risk_t_bin_prob(train['t'], train['e'], tt)
else:
# get empirical event rate for re-weighting censoring objects
event_tt_bin, event_tt_prob = risk_t_bin_prob(train['t'], train['e'], tt)
# define encoder and decoder
slim = tf.contrib.slim
sample_size = 50
# start with 3 layers each
def encoder0(x,is_training):
"""learned prior: Network p(z|x)"""
with slim.arg_scope([slim.fully_connected],
activation_fn=tf.nn.leaky_relu,
# normalizer_fn=slim.batch_norm,
# normalizer_params={'is_training': is_training},
weights_initializer=tf.contrib.layers.xavier_initializer()):
mu_logvar = slim.fully_connected(x, 64, scope='fc1')
mu_logvar = slim.fully_connected(mu_logvar, 64, scope='fc2')
mu_logvar = slim.fully_connected(mu_logvar, 64, activation_fn=None, scope='fc3')
return mu_logvar
def encoder(x,t_, is_training):
"""Network q(z|x,t_)"""
with slim.arg_scope([slim.fully_connected],
activation_fn=tf.nn.leaky_relu,
# normalizer_fn=slim.batch_norm,
# normalizer_params={'is_training': is_training},
weights_initializer=tf.contrib.layers.xavier_initializer()):
inputs = tf.concat([t_,x],axis=1)
mu_logvar = slim.fully_connected(inputs, 64, scope='fc1')
mu_logvar = slim.fully_connected(mu_logvar, 64, scope='fc2')
mu_logvar = slim.fully_connected(mu_logvar, 64, activation_fn=None, scope='fc3')
return mu_logvar
def encoder_z(mu_logvar, epsilon=None):
# Interpret z as concatenation of mean and log variance
mu, logvar = tf.split(mu_logvar, num_or_size_splits=2, axis=1)
# Standard deviation must be positive
stddev = tf.sqrt(tf.exp(logvar))
if epsilon is None:
# Draw a z from the distribution
epsilon = tf.random_normal(tf.shape(stddev))
z = mu + tf.multiply(stddev, epsilon)
return z
def decoder(z, is_training):
"""Network p(t|z)"""
# Decoding arm
with slim.arg_scope([slim.fully_connected],
activation_fn=tf.nn.leaky_relu,
# normalizer_fn=slim.batch_norm,
# normalizer_params={'is_training': is_training},
weights_initializer=tf.contrib.layers.xavier_initializer()):
t_logits = slim.fully_connected(z, 64, scope='fc1')
t_logits = slim.fully_connected(t_logits, 64, scope='fc2')
t_logits = slim.fully_connected(t_logits, 64, scope='fc3')
# returns multinomial distribution
t_logits = slim.fully_connected(t_logits, nbin, activation_fn=None, scope='fc4')
# t_logits = tf.nn.softmax(t_logits)
return (t_logits)
def VAE_losses(t_logits, t_truncate, mu_logvar0, mu_logvar1, tiny=1e-8):
    # Alternative censoring-loss strategy: use the total probability mass after the censoring time (\sum p_b), not a re-weighted \sum w_b*p_b
"""Define loss functions (reconstruction, KL divergence) and optimizer"""
# Reconstruction loss
t_dist = tf.nn.softmax(t_logits)
reconstruction = -tf.log(tf.reduce_sum(t_dist*t_truncate, axis=1))
# KL divergence
mu0, logvar0 = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)
mu1, logvar1 = tf.split(mu_logvar1, num_or_size_splits=2, axis=1)
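    # closed-form KL( N(mu1, exp(logvar1)) || N(mu0, exp(logvar0)) ) for diagonal Gaussians, summed over latent dimensions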
kl_d = 0.5 * tf.reduce_sum(tf.exp(logvar1-logvar0)\
+ tf.divide(tf.square(mu0-mu1),tf.exp(logvar0)+tiny) \
+ logvar0 - logvar1 -1.0, \
1)
# Total loss for event
loss = tf.reduce_mean(reconstruction + kl_d)
return reconstruction, kl_d, loss
def pt_x(t_truncate, mu_logvar0, mu_logvar, num_sample, is_training):
    # here t_ is known!
    # for the calculation, a censored subject's t_ needs to be in truncated form like [0,0,0,1,1,1]
    # so that the probability of all bins after the censoring time can be summed
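    # importance-sampling estimate of p(t|x): average p(t|z_l) * p(z_l|x) / q(z_l|x,t) over z_l drawn from q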
mu, logvar = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)
# sample z_l
# q_{\beta}(z_l|t_i,x_i)
epsilon = tf.random_normal(tf.shape(logvar))
z1_sample = encoder_z(mu_logvar, epsilon)
# only have one dimension here
t_logits_l = decoder(z1_sample, is_training)
t_dist_l = tf.nn.softmax(t_logits_l)
p_t_z = tf.reduce_sum(t_truncate*t_dist_l,1)
pq_z = tf.exp(MVNloglikeli(z1_sample, mu_logvar0, noise = 1e-8)\
-MVNloglikeli(z1_sample, mu_logvar, noise = 1e-8))
pt_x_l = p_t_z*pq_z
pt_x_sum = pt_x_l
for k in range(num_sample-1):
# q_{\beta}(z_l|t_i,x_i)
epsilon = tf.random_normal(tf.shape(logvar))
z1_sample = encoder_z(mu_logvar, epsilon)
# # p_{\alpha}(t_i|z_l)
# epsilon = tf.random_normal(tf.shape(logvar))
# z0_sample = encoder_z(mu_logvar0, epsilon)
# # p_{\alpha}(z_l|x)
# epsilon = tf.random_normal(tf.shape(logvar))
# # only have one dimension here
t_logits_l = decoder(z1_sample, is_training)
t_dist_l = tf.nn.softmax(t_logits_l)
p_t_z = tf.reduce_sum(t_truncate*t_dist_l,1)
pq_z = tf.exp(MVNloglikeli(z1_sample, mu_logvar0, noise = 1e-8)\
-MVNloglikeli(z1_sample, mu_logvar, noise = 1e-8))
pt_x_l = p_t_z*pq_z
# sum up
pt_x_sum = pt_x_sum+pt_x_l
pt_x_avg = pt_x_sum/num_sample
return(pt_x_avg)
def loglikeli_cVAE(t_truncate, mu_logvar0, mu_logvar, num_sample, is_training):
pt_x_avg = pt_x(t_truncate, mu_logvar0, mu_logvar, num_sample, is_training)
return(tf.log(pt_x_avg))
# MVN log-likelihood
def MVNloglikeli(z, mu_logvar, noise = 1e-8):
# Interpret z as concatenation of mean and log variance
mu, logvar = tf.split(mu_logvar, num_or_size_splits=2, axis=1)
# note that Sigma is a diagonal matrix and we only have the diagonal information here
varmatrix = tf.exp(logvar)
# calculate log-likelihood
# likeli = -0.5*(tf.log(tf.linalg.det(varmatrix)+noise)\
# +tf.matmul(tf.matmul((z-mu), tf.linalg.inv(varmatrix))\
# ,tf.transpose(z-mu))\
# +nbin*np.log(2*np.pi)
# )
# for diagonal matrix:
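    # per dimension: log N(z_d; mu_d, var_d) = -0.5 * ( log(var_d) + (z_d - mu_d)^2 / var_d + log(2*pi) )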
loglikeli = -0.5*(tf.log(varmatrix) + (z-mu)**2/varmatrix + np.log(2*np.pi))
# returns a log-likelihood for each z
return tf.reduce_sum(loglikeli, axis=1)
def t_dist_avg(mu_logvar0, t_logits_init, num_sample, is_training):
mu, logvar = tf.split(mu_logvar0, num_or_size_splits=2, axis=1)
t_dist_new_sum = tf.nn.softmax(t_logits_init)
for k in range(num_sample-1):
# graph resample basic implementation
epsilon = tf.random_normal(tf.shape(logvar))
t_logits_new_k = decoder(encoder_z(mu_logvar0, epsilon), is_training)
t_dist_new_k = tf.nn.softmax(t_logits_new_k)
t_dist_new_sum = t_dist_new_sum + t_dist_new_k
t_dist_new_avg = np.divide(t_dist_new_sum, num_sample)
return(t_dist_new_avg)
def zero_outputs():
# just to return 3 outputs to match previous function for events instead
return 0.0,0.0,0.0
####Main Structure
# training indicator
is_training = tf.placeholder(tf.bool, [], name="is_training");
# Define input placeholder
t_ = tf.placeholder(tf.float32,[None, nbin], name='t_')
# Define input placeholder only for calculating likelihood or survival function purpose
t_truncate = tf.placeholder(tf.float32,[None, nbin], name='t_truncate')
# each patient will only have 1 indicator of censoring or event
event = tf.placeholder(tf.float32,[None], name='event')
x = tf.placeholder(tf.float32,[None, ncov], name='x')
# separate the input as event and censoring
# we still keep observations in original order
e_idx = tf.where(tf.equal(event, 1.))
e_idx = tf.reshape(e_idx,[tf.shape(e_idx)[0]])
ne_idx = tf.where(tf.equal(event, 0.))
ne_idx = tf.reshape(ne_idx,[tf.shape(ne_idx)[0]])
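# e_idx / ne_idx hold the row indices of observed events and censored observations so their losses can be weighted separately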
e_is_empty = tf.equal(tf.size(e_idx), 0)
ne_is_empty = tf.equal(tf.size(ne_idx), 0)
# Define VAE graph
with tf.variable_scope('encoder0'):
# update parameters encoder0 for all observations
mu_logvar0 = encoder0(x, is_training)
z0 = encoder_z(mu_logvar0)
# update encoder q for both censoring and events
with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE):
# with events, true t is t_;
# for censoring, true time is t_r
mu_logvar1 = encoder(x,t_, is_training)
z1 = encoder_z(mu_logvar1)
with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE):
# update for all samples
t_logits_1 = decoder(z1, is_training)
# update for all samples
t_logits_0 = decoder(z0, is_training)
# predict posterior distribution based on multiple z
t_dist_new = tf.nn.softmax(t_logits_0)
# Calculating average distribution
    # pass the raw logits here: t_dist_avg applies the softmax to its first sample itself
    t_dist_new_avg = t_dist_avg(mu_logvar0, t_logits_0, num_sample, is_training)
# calculate likelihood based on randomly sample multiple z1
event_loglikeli = loglikeli_cVAE(tf.gather(t_truncate,e_idx), tf.gather(mu_logvar0,e_idx), tf.gather(mu_logvar1,e_idx), num_sample, is_training)
censor_loglikeli = loglikeli_cVAE(tf.gather(t_truncate,ne_idx), tf.gather(mu_logvar0,ne_idx), tf.gather(mu_logvar1,ne_idx), num_sample, is_training)
total_loglikeli = loglikeli_cVAE(t_truncate, mu_logvar0, mu_logvar1, num_sample, is_training)
# Optimization
with tf.variable_scope('training') as scope:
# calculate the losses separately, just for debugging purposes
# calculate losses for events
e_recon, e_kl_d, eloss = tf.cond(e_is_empty, lambda: zero_outputs(),\
lambda:VAE_losses(tf.gather(t_logits_1,e_idx), tf.gather(t_truncate,e_idx), \
tf.gather(mu_logvar0,e_idx), tf.gather(mu_logvar1,e_idx)))
# calculate losses for censor
ne_recon, ne_kl_d, closs = tf.cond(ne_is_empty, lambda: zero_outputs(),\
lambda: VAE_losses(tf.gather(t_logits_1,ne_idx), tf.gather(t_truncate,ne_idx), \
tf.gather(mu_logvar0,ne_idx), tf.gather(mu_logvar1,ne_idx)))
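    # total loss is a weighted sum of the event and censoring terms (weights w_e, w_ne defined above)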
loss = w_e*eloss+w_ne*closs
# compute together
rec_all, kl_d_all, loss_all = VAE_losses(t_logits_1,t_truncate, mu_logvar0, mu_logvar1)
# train_step_unlabeled = tf.train.AdamOptimizer().minimize(loss)
params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
gradients = tf.gradients(loss_all, params)
#gradients = tf.Print(gradients,[gradients], message ='gradients',summarize=2000)
grads = zip(gradients, params)
optimizer = tf.train.AdamOptimizer(learning_rate=5e-4, beta1=0.9, beta2=0.999)
train_step = optimizer.apply_gradients(grads)
def wAvg_t(sess, new_x, post_prob, tt, num_sample, return_wi=False):
# calculate weighted average
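    # self-normalized importance sampling: draw candidate event times according to the predicted bin probabilities
    # and weight each draw by p(z|x) / q(z|x,t)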
for j in range(num_sample):
t_hat_l = np.array([random_uniform_p(tt, post_prob[subj], 1) for subj in range(post_prob.shape[0])])
t_hat_binned = batch_t_categorize(t_hat_l, np.ones(t_hat_l.shape), tt, event_tt_prob=1.0)
mu_logvar0l = sess.run(mu_logvar0, feed_dict={x: new_x, is_training:False})
mu_logvar1l = sess.run(mu_logvar1, feed_dict={x: new_x, t_: t_hat_binned,is_training:False})
# sample z1l
mu1l,logvar1l = np.split(mu_logvar1l,2,1)
epsilon_l = np.random.normal(size=logvar1l.shape)
# Standard deviation must be positive
stddevl = np.sqrt(np.exp(logvar1l))
z1l = mu1l + np.multiply(stddevl, epsilon_l)
## calculate weight
wil = np.divide(np.exp(MVNloglikeli_np(z1l, mu_logvar0l, noise = 1e-8)),\
np.exp(MVNloglikeli_np(z1l, mu_logvar1l, noise = 1e-8)))
if j == 0:
t_hat_all = np.array(t_hat_l).reshape(post_prob.shape[0],1)
wl_all = wil.reshape(post_prob.shape[0],1)
else:
t_hat_all = np.concatenate([t_hat_all, np.array(t_hat_l).reshape(post_prob.shape[0],1)], axis=1)
wl_all = np.concatenate([wl_all, wil.reshape(post_prob.shape[0],1)], axis=1)
t_hat_i = np.sum(np.multiply(t_hat_all,wl_all),axis=1)/np.sum(wl_all,axis=1)
if return_wi==False:
return t_hat_i
else:
return (t_hat_i, np.mean(wl_all, axis=1), np.std(wl_all, axis=1))
def saveResults(dataset, session_dir, session_name, out_dir, tt, event_tt_prob):
sess = tf.Session()
session_path = session_dir+session_name+".ckpt"
saver.restore(sess, session_path)
# run over all samples in test
batch_x, batch_t, batch_e = dataset['x'], dataset['t'], dataset['e']
batch_t_cat = batch_t_categorize(batch_t, batch_e, tt, event_tt_prob)
batch_t_cat_likeli = batch_t_categorize(batch_t, batch_e, tt, event_tt_prob,likelihood=True)
norm_batch_x = batch_x.copy()
norm_batch_x[:,cts_covariates] = normalize_batch(batch_x[:,cts_covariates],norm_mean,norm_std)
test_pred_prob = sess.run(t_dist_new_avg, feed_dict={x: norm_batch_x, is_training:False})
test_loglikeli = sess.run(total_loglikeli, feed_dict={t_truncate:batch_t_cat_likeli, t_:batch_t_cat, x:norm_batch_x, event:batch_e, is_training:False})
# this provide likelihood
# test_pt_x_avg = sess.run(total_pt_x_avg, feed_dict={t_truncate:batch_t_cat_likeli, t_:batch_t_cat, x:batch_x, event:batch_e, is_training:False})
test_pred_avgt, test_avgt_mean, test_avgt_std = wAvg_t(sess, norm_batch_x, test_pred_prob, tt, num_sample, return_wi=True)
test_pred_medt = [calculate_quantiles(post_prob,tt,0.5) for post_prob in test_pred_prob]
test_pred_medt = np.concatenate(test_pred_medt,axis=0)
test_pred_randomt = np.array([random_uniform_p(tt, post_prob, 1) for post_prob in test_pred_prob])
np.save(out_dir+'/{}_test_pred_prob'.format(session_name), test_pred_prob)
np.save(out_dir+'/{}_test_loglikeli'.format(session_name), test_loglikeli)
np.save(out_dir+'/{}_test_pred_avgt'.format(session_name), test_pred_avgt)
np.save(out_dir+'/{}_test_pred_medt'.format(session_name), test_pred_medt)
np.save(out_dir+'/{}_test_pred_randomt'.format(session_name), test_pred_randomt)
np.save(out_dir+'/{}_tt'.format(session_name), tt)
def saveResults_norun(session_name, out_dir, tt, test_pred_prob, test_loglikeli, test_pred_avgt, test_pred_medt, test_pred_randomt):
np.save(out_dir+'/{}_test_pred_prob'.format(session_name), test_pred_prob)
np.save(out_dir+'/{}_test_loglikeli'.format(session_name), test_loglikeli)
np.save(out_dir+'/{}_test_pred_avgt'.format(session_name), test_pred_avgt)
np.save(out_dir+'/{}_test_pred_medt'.format(session_name), test_pred_medt)
np.save(out_dir+'/{}_test_pred_randomt'.format(session_name), test_pred_randomt)
np.save(out_dir+'/{}_tt'.format(session_name), tt)
##########################
#### Training ############
##########################
if training==True:
valid_recon_loss = []
valid_epoch_recon_loss = []
valid_epoch_loss = []
valid_epoch_event_recon_loss = []
valid_epoch_censor_recon_loss = []
best_likelihood = -np.inf
best_i = 0
best_epoch = 0
num_epoch = 200
num_sample = 100
num_batch = int(train['x'].shape[0]/m)
require_impr = 3000
saver = tf.train.Saver()
# event_tt_prob = event_t_bin_prob_unif(tt)
with tf.Session() as sess:
# Initialize all variables
sess.run(tf.global_variables_initializer())
# Train VAE model
for i in range(num_epoch*num_batch):
# Get a training minibatch
batch_x, batch_t, batch_e = next_batch(train, m=m)
batch_t_cat_likeli = batch_t_categorize(batch_t, batch_e, tt, event_tt_prob,likelihood=True)
# normalize input
norm_batch_x = batch_x.copy()
norm_batch_x[:,cts_covariates] = normalize_batch(batch_x[:,cts_covariates],norm_mean,norm_std)
# Binarize the data
batch_t_cat = batch_t_categorize(batch_t, batch_e, tt, event_tt_prob)
# Train on minibatch
sess.run(train_step, feed_dict={t_:batch_t_cat, t_truncate: batch_t_cat_likeli, x:norm_batch_x, event:batch_e, is_training:True})
# sess.run(train_step_SGD, feed_dict={t_:batch_t_cat, x:batch_x, event:batch_e, is_training:True})
if i % num_batch == 0:
batch_x, batch_t, batch_e = next_batch(valid, m=valid['x'].shape[0])
batch_t_cat = batch_t_categorize(batch_t, batch_e, tt, event_tt_prob)
batch_t_cat_likeli = batch_t_categorize(batch_t, batch_e, tt, event_tt_prob,likelihood=True)
norm_batch_x = batch_x.copy()
norm_batch_x[:,cts_covariates] = normalize_batch(batch_x[:,cts_covariates],norm_mean,norm_std)
epoch_loglikeli = np.mean(sess.run(total_loglikeli, feed_dict={t_:batch_t_cat, t_truncate: batch_t_cat_likeli,\
x: norm_batch_x, event:batch_e, is_training:False}))
epoch_loss = sess.run(loss_all, feed_dict={t_:batch_t_cat, t_truncate: batch_t_cat_likeli, x: norm_batch_x, event:batch_e, is_training:False})
valid_epoch_recon_loss.append(epoch_loglikeli)
valid_epoch_loss.append(epoch_loss)
epoch_recon_closs = np.mean(sess.run(ne_recon, feed_dict={t_:batch_t_cat, t_truncate: batch_t_cat_likeli, x: norm_batch_x, event:batch_e, is_training:False}))
valid_epoch_censor_recon_loss.append(epoch_recon_closs)
epoch_recon_eloss = np.mean(sess.run(e_recon, feed_dict={t_:batch_t_cat, t_truncate: batch_t_cat_likeli, x: norm_batch_x, event:batch_e, is_training:False}))
valid_epoch_event_recon_loss.append(epoch_recon_eloss)
if (best_likelihood <= epoch_loglikeli):
best_likelihood = epoch_loglikeli
best_i = i
# save the learned model
save_path = saver.save(sess, output_dir+name+".ckpt")
op_print = ('Epoch '+str(i/num_batch)+': Loss '+str(epoch_loss)\
+' log-likelihood: ' + str(epoch_loglikeli)\
+' event rec loss: ' + str(epoch_recon_eloss)\
+' censor rec loss: ' + str(epoch_recon_closs))
logging.debug(op_print)
# early stopping
if (i-best_i) > require_impr:
print("Model stops improving for a while")
break
##### return results on testing dataset #####
# run over all samples in test
saveResults(test, session_dir=output_dir, session_name=name, out_dir=out_dir, tt=tt, event_tt_prob=event_tt_prob)
#### only for testing #####
else:
sess = tf.Session()
    # Restore variables from disk.
    saver = tf.train.Saver()
    saver.restore(sess, output_dir+name+".ckpt")
# run over all samples in test
batch_x, batch_t, batch_e = test['x'], test['t'], test['e']
batch_t_cat = batch_t_categorize(batch_t, batch_e, tt, event_tt_prob)
batch_t_cat_likeli = batch_t_categorize(batch_t, batch_e, tt, event_tt_prob,likelihood=True)
norm_batch_x = batch_x.copy()
norm_batch_x[:,cts_covariates] = normalize_batch(batch_x[:,cts_covariates],norm_mean,norm_std)
test_pred_prob = sess.run(t_dist_new_avg, feed_dict={x: norm_batch_x, is_training:False})
test_loglikeli = sess.run(total_loglikeli, feed_dict={t_truncate:batch_t_cat_likeli, t_:batch_t_cat, x:norm_batch_x, event:batch_e, is_training:False})
test_pred_avgt, test_avgt_mean, test_avgt_std = wAvg_t(sess, norm_batch_x, test_pred_prob, tt, num_sample, return_wi=True)
test_pred_medt = [calculate_quantiles(post_prob,tt,0.5) for post_prob in test_pred_prob]
test_pred_medt = np.concatenate(test_pred_medt,axis=0)
test_pred_randomt = np.array([random_uniform_p(tt, post_prob, 1) for post_prob in test_pred_prob])
saveResults_norun(session_name=name, out_dir=out_dir, tt=tt, test_pred_prob=test_pred_prob, test_loglikeli=test_loglikeli, test_pred_avgt=test_pred_avgt, test_pred_medt=test_pred_medt, test_pred_randomt=test_pred_randomt)
| [
7,
10,
14,
17,
18
] |
1,186 | 0ac99816248e3306ca6340f7bee8a518877bc3e9 | <mask token>
def drawPieChart(central_angles, angle_of_rest, probability_of_rest):
turtle.reset()
window.colormode(255)
turtle.fillcolor('gray')
turtle.speed(10)
turtle.begin_fill()
turtle.circle(120)
turtle.end_fill()
turtle.up()
angle_counter = 0
prev_angle = 0
for index, (letter, angle, probability) in enumerate(central_angles):
if index == 0:
angle_counter += angle * (360 / math.pi)
turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255
), random.randrange(0, 255)))
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
angle_counter += angle * (360 / math.pi)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle * (360 / math.pi))
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(
'Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle * (360 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
prev_angle += angle_counter
if index == len(central_angles) - 1:
turtle.fillcolor('gray')
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle_of_rest * (180 / math.pi))
angle_counter += angle_of_rest * (180 / math.pi)
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('All other letters, {}'.format(round(
probability_of_rest, 3)), font=('Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
def calculateFrequencies(arg=None):
try:
result = int(entry.get())
if result >= 54:
return
entry.delete(0, END)
most_frequent_characters = frequency.getNthMostFrequentCharacters(
result)
probability_of_other_characters = frequency.sumOfAllOtherProbabilites(
most_frequent_characters)
angle_of_rest = probability_of_other_characters * 2 * math.pi
central_angles = frequency.getCentralAngles(most_frequent_characters)
drawPieChart(central_angles, angle_of_rest,
probability_of_other_characters)
except ValueError:
return
<mask token>
| <mask token>
def drawPieChart(central_angles, angle_of_rest, probability_of_rest):
turtle.reset()
window.colormode(255)
turtle.fillcolor('gray')
turtle.speed(10)
turtle.begin_fill()
turtle.circle(120)
turtle.end_fill()
turtle.up()
angle_counter = 0
prev_angle = 0
for index, (letter, angle, probability) in enumerate(central_angles):
if index == 0:
angle_counter += angle * (360 / math.pi)
turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255
), random.randrange(0, 255)))
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
angle_counter += angle * (360 / math.pi)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle * (360 / math.pi))
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(
'Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle * (360 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
prev_angle += angle_counter
if index == len(central_angles) - 1:
turtle.fillcolor('gray')
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle_of_rest * (180 / math.pi))
angle_counter += angle_of_rest * (180 / math.pi)
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('All other letters, {}'.format(round(
probability_of_rest, 3)), font=('Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
def calculateFrequencies(arg=None):
try:
result = int(entry.get())
if result >= 54:
return
entry.delete(0, END)
most_frequent_characters = frequency.getNthMostFrequentCharacters(
result)
probability_of_other_characters = frequency.sumOfAllOtherProbabilites(
most_frequent_characters)
angle_of_rest = probability_of_other_characters * 2 * math.pi
central_angles = frequency.getCentralAngles(most_frequent_characters)
drawPieChart(central_angles, angle_of_rest,
probability_of_other_characters)
except ValueError:
return
entry.bind('<Return>', calculateFrequencies)
label_1.grid(row=0)
entry.grid(row=0, column=1)
root.mainloop()
window.exitonclick()
| <mask token>
root = Tk()
window = turtle.Screen()
label_1 = Label(root, text=
'Enter a number less than 54 to get the Nth most frequent letters in Words.txt: '
)
entry = Entry(root)
def drawPieChart(central_angles, angle_of_rest, probability_of_rest):
turtle.reset()
window.colormode(255)
turtle.fillcolor('gray')
turtle.speed(10)
turtle.begin_fill()
turtle.circle(120)
turtle.end_fill()
turtle.up()
angle_counter = 0
prev_angle = 0
for index, (letter, angle, probability) in enumerate(central_angles):
if index == 0:
angle_counter += angle * (360 / math.pi)
turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255
), random.randrange(0, 255)))
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
angle_counter += angle * (360 / math.pi)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle * (360 / math.pi))
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(
'Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle * (360 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
prev_angle += angle_counter
if index == len(central_angles) - 1:
turtle.fillcolor('gray')
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle_of_rest * (180 / math.pi))
angle_counter += angle_of_rest * (180 / math.pi)
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('All other letters, {}'.format(round(
probability_of_rest, 3)), font=('Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
def calculateFrequencies(arg=None):
try:
result = int(entry.get())
if result >= 54:
return
entry.delete(0, END)
most_frequent_characters = frequency.getNthMostFrequentCharacters(
result)
probability_of_other_characters = frequency.sumOfAllOtherProbabilites(
most_frequent_characters)
angle_of_rest = probability_of_other_characters * 2 * math.pi
central_angles = frequency.getCentralAngles(most_frequent_characters)
drawPieChart(central_angles, angle_of_rest,
probability_of_other_characters)
except ValueError:
return
entry.bind('<Return>', calculateFrequencies)
label_1.grid(row=0)
entry.grid(row=0, column=1)
root.mainloop()
window.exitonclick()
| from tkinter import *
import frequency
import turtle
import math
import random
root = Tk()
window = turtle.Screen()
label_1 = Label(root, text=
'Enter a number less than 54 to get the Nth most frequent letters in Words.txt: '
)
entry = Entry(root)
def drawPieChart(central_angles, angle_of_rest, probability_of_rest):
turtle.reset()
window.colormode(255)
turtle.fillcolor('gray')
turtle.speed(10)
turtle.begin_fill()
turtle.circle(120)
turtle.end_fill()
turtle.up()
angle_counter = 0
prev_angle = 0
for index, (letter, angle, probability) in enumerate(central_angles):
if index == 0:
angle_counter += angle * (360 / math.pi)
turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255
), random.randrange(0, 255)))
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
angle_counter += angle * (360 / math.pi)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle * (360 / math.pi))
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('{}, {}'.format(letter, round(probability, 3)), font=(
'Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle * (360 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
prev_angle += angle_counter
if index == len(central_angles) - 1:
turtle.fillcolor('gray')
turtle.begin_fill()
turtle.goto(x=0, y=120)
turtle.setheading(angle_counter)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle_of_rest * (180 / math.pi))
angle_counter += angle_of_rest * (180 / math.pi)
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('All other letters, {}'.format(round(
probability_of_rest, 3)), font=('Arial', 10, 'normal'))
turtle.backward(50)
turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)
turtle.goto(x=0, y=120)
turtle.end_fill()
def calculateFrequencies(arg=None):
try:
result = int(entry.get())
if result >= 54:
return
entry.delete(0, END)
most_frequent_characters = frequency.getNthMostFrequentCharacters(
result)
probability_of_other_characters = frequency.sumOfAllOtherProbabilites(
most_frequent_characters)
angle_of_rest = probability_of_other_characters * 2 * math.pi
central_angles = frequency.getCentralAngles(most_frequent_characters)
drawPieChart(central_angles, angle_of_rest,
probability_of_other_characters)
except ValueError:
return
entry.bind('<Return>', calculateFrequencies)
label_1.grid(row=0)
entry.grid(row=0, column=1)
root.mainloop()
window.exitonclick()
| # Patrick Vanegas - Final project
from tkinter import *
import frequency
import turtle
import math
import random
# initialize a blank window
root = Tk()
# initialize turtle window
window = turtle.Screen()
# Create widgets to be viewed on the Tkinter window
label_1 = Label(root, text = "Enter a number less than 54 to get the Nth most frequent letters in Words.txt: ")
entry = Entry(root)
def drawPieChart(central_angles, angle_of_rest, probability_of_rest):
# reset turtle to redraw the piechart if the user enters a new value for N.
turtle.reset()
# set color mode to accept rgb values
window.colormode(255)
turtle.fillcolor('gray')
turtle.speed(10)
# draw base circle and fill it with color
turtle.begin_fill()
turtle.circle(120)
turtle.end_fill()
turtle.up()
angle_counter = 0
prev_angle = 0
# draw arc sectors for each probability in the circle
for index, (letter, angle, probability) in enumerate(central_angles):
if index == 0:
# turn radians to degrees
angle_counter += angle * (360 / math.pi)
turtle.fillcolor((random.randrange(0, 255), random.randrange(0, 255), random.randrange(0, 255)))
turtle.begin_fill()
turtle.goto(x = 0, y = 120)
turtle.setheading(angle_counter)
angle_counter += angle * (360 / math.pi)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle * (360 / math.pi))
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('{}, {}'.format(letter, round(probability, 3)), font = ("Arial", 10, "normal"))
turtle.backward(50)
turtle.setheading(angle * (360 / math.pi) + prev_angle)
turtle.goto(x = 0, y = 120)
turtle.end_fill()
prev_angle += angle_counter
        # draw the arc for the remaining probabilities.
if index == len(central_angles) - 1:
turtle.fillcolor('gray')
turtle.begin_fill()
turtle.goto(x = 0, y = 120)
turtle.setheading(angle_counter)
turtle.forward(120)
turtle.right(270)
turtle.circle(120, angle_of_rest * (180 / math.pi) )
angle_counter += angle_of_rest * (180 / math.pi)
turtle.setheading(angle_counter)
turtle.forward(50)
turtle.write('All other letters, {}'.format(round(probability_of_rest, 3)), font = ("Arial", 10, "normal"))
turtle.backward(50)
turtle.setheading(angle_of_rest * (180 / math.pi) + prev_angle)
turtle.goto(x = 0, y = 120)
turtle.end_fill()
def calculateFrequencies(arg = None):
# get the text value from the entry field
# if the value is not a valid integer, simply return and do nothing.
try:
result = int(entry.get())
        # return if the input is 54 or greater
if (result >= 54):
return
# delete the text in the entry field
entry.delete(0, END)
# calculate the most frequent characters
most_frequent_characters = frequency.getNthMostFrequentCharacters(result)
# calculate the probability of all other letters not included in the top N.
probability_of_other_characters = frequency.sumOfAllOtherProbabilites(most_frequent_characters)
# calculate the central angle of the rest of the letters.
angle_of_rest = probability_of_other_characters * 2 * math.pi
        # calculate central angles of the most frequent characters' probabilities
central_angles = frequency.getCentralAngles(most_frequent_characters)
# draw pie chart
drawPieChart(central_angles, angle_of_rest, probability_of_other_characters)
except ValueError:
return
# When the user presses enter on the entry field, calculate frequencies
entry.bind('<Return>', calculateFrequencies)
# Position widgets on a grid layout
label_1.grid(row=0)
entry.grid(row=0, column=1)
# keep both the turtle and tkinter windows open until user presses the close button on either
root.mainloop()
window.exitonclick()
| [
2,
3,
4,
5,
6
] |
1,187 | 004a02f7ff49cb1b63ebedfcfcb4937377859099 | <mask token>
| print('hello world123')
| null | null | null | [
0,
1
] |
1,188 | 44214492dd7283da4b9a77bd2a1fa9d9c0643ff2 | <mask token>
class MfccLocal(Mfcc):
<mask token>
abstract_class = False
@staticmethod
def sample_result_filename(out_sample_path):
return f'{out_sample_path[:-5]}_mfcc_result.json'
@staticmethod
def filenames_to_skip_sample(out_sample_path):
return [f'{out_sample_path[:-5]}_mfcc_result.csv']
@staticmethod
def serialize_to_json(mfcc_result):
"""
:param mfcc_result: list of mfcc measurements with
necessary metadata
:return: serialized object of proper schema
"""
mfcc_schema = MfccLocalSchema()
mfcc_dict = {'mfcc_info': mfcc_result}
return mfcc_schema.dumps(mfcc_dict)
def compute_mfcc(self, segments_path, phonemes_result_path):
"""
:param segments_path: path to the input wav
:param phonemes_result_path: path to phonemes results
that is required by the Local version of the Mfcc
:return: computed list of mfcc features with all required metadata
"""
wav = get_segment(segments_path, 'wav')
frequency = wav.frame_rate
phoneme_len = self.process_settings.get('phoneme_len', 2048)
ignore_shorter_phonemes = self.process_settings.get(
'ignore_shorter_phonemes', True)
mfcc_nfft = self.process_settings.get('mfcc_nfft', 2048)
mfcc_winstep = self.process_settings.get('mfcc_winstep', 0.1)
with open(phonemes_result_path, 'r') as f:
schema = DecoderOutputSchema()
json_file = json.load(f)
phonemes_result = schema.load(json_file)
phonemes_info = [info for info in phonemes_result[
'segment_info'] if info['word'] not in self.
blacklisted_phonemes]
mfcc_result = []
for info in phonemes_info:
start, stop = 1000 * info['start'], 1000 * info['end']
segment = np.array(wav[start:stop].get_array_of_samples())
if ignore_shorter_phonemes and segment.size < phoneme_len:
continue
mfcc_features = mfcc(segment, samplerate=frequency, nfft=
mfcc_nfft, winstep=mfcc_winstep)
for i in range(len(mfcc_features)):
ith_mfcc = np.array(mfcc_features[i, :])
ith_mfcc_result_row = {'i': i, 'length': len(mfcc_features),
'mfcc': ith_mfcc, **info}
mfcc_result.append(ith_mfcc_result_row)
return mfcc_result
| <mask token>
class MfccLocal(Mfcc):
"""
MfccLocal computes Mfcc features for each phoneme from the sample
that are not blacklisted based on phoneme label that is
received from Phoneme chain.
It subclasses Formants to not repeat the sample_layer logic
which is valid also in this context
"""
abstract_class = False
@staticmethod
def sample_result_filename(out_sample_path):
return f'{out_sample_path[:-5]}_mfcc_result.json'
@staticmethod
def filenames_to_skip_sample(out_sample_path):
return [f'{out_sample_path[:-5]}_mfcc_result.csv']
@staticmethod
def serialize_to_json(mfcc_result):
"""
:param mfcc_result: list of mfcc measurements with
necessary metadata
:return: serialized object of proper schema
"""
mfcc_schema = MfccLocalSchema()
mfcc_dict = {'mfcc_info': mfcc_result}
return mfcc_schema.dumps(mfcc_dict)
def compute_mfcc(self, segments_path, phonemes_result_path):
"""
:param segments_path: path to the input wav
:param phonemes_result_path: path to phonemes results
that is required by the Local version of the Mfcc
:return: computed list of mfcc features with all required metadata
"""
wav = get_segment(segments_path, 'wav')
frequency = wav.frame_rate
phoneme_len = self.process_settings.get('phoneme_len', 2048)
ignore_shorter_phonemes = self.process_settings.get(
'ignore_shorter_phonemes', True)
mfcc_nfft = self.process_settings.get('mfcc_nfft', 2048)
mfcc_winstep = self.process_settings.get('mfcc_winstep', 0.1)
with open(phonemes_result_path, 'r') as f:
schema = DecoderOutputSchema()
json_file = json.load(f)
phonemes_result = schema.load(json_file)
phonemes_info = [info for info in phonemes_result[
'segment_info'] if info['word'] not in self.
blacklisted_phonemes]
mfcc_result = []
for info in phonemes_info:
start, stop = 1000 * info['start'], 1000 * info['end']
segment = np.array(wav[start:stop].get_array_of_samples())
if ignore_shorter_phonemes and segment.size < phoneme_len:
continue
mfcc_features = mfcc(segment, samplerate=frequency, nfft=
mfcc_nfft, winstep=mfcc_winstep)
for i in range(len(mfcc_features)):
ith_mfcc = np.array(mfcc_features[i, :])
ith_mfcc_result_row = {'i': i, 'length': len(mfcc_features),
'mfcc': ith_mfcc, **info}
mfcc_result.append(ith_mfcc_result_row)
return mfcc_result
| <mask token>
logger = logging.getLogger()
class MfccLocal(Mfcc):
"""
MfccLocal computes Mfcc features for each phoneme from the sample
that are not blacklisted based on phoneme label that is
received from Phoneme chain.
It subclasses Formants to not repeat the sample_layer logic
which is valid also in this context
"""
abstract_class = False
@staticmethod
def sample_result_filename(out_sample_path):
return f'{out_sample_path[:-5]}_mfcc_result.json'
@staticmethod
def filenames_to_skip_sample(out_sample_path):
return [f'{out_sample_path[:-5]}_mfcc_result.csv']
@staticmethod
def serialize_to_json(mfcc_result):
"""
:param mfcc_result: list of mfcc measurements with
necessary metadata
:return: serialized object of proper schema
"""
mfcc_schema = MfccLocalSchema()
mfcc_dict = {'mfcc_info': mfcc_result}
return mfcc_schema.dumps(mfcc_dict)
def compute_mfcc(self, segments_path, phonemes_result_path):
"""
:param segments_path: path to the input wav
:param phonemes_result_path: path to phonemes results
that is required by the Local version of the Mfcc
:return: computed list of mfcc features with all required metadata
"""
wav = get_segment(segments_path, 'wav')
frequency = wav.frame_rate
phoneme_len = self.process_settings.get('phoneme_len', 2048)
ignore_shorter_phonemes = self.process_settings.get(
'ignore_shorter_phonemes', True)
mfcc_nfft = self.process_settings.get('mfcc_nfft', 2048)
mfcc_winstep = self.process_settings.get('mfcc_winstep', 0.1)
with open(phonemes_result_path, 'r') as f:
schema = DecoderOutputSchema()
json_file = json.load(f)
phonemes_result = schema.load(json_file)
phonemes_info = [info for info in phonemes_result[
'segment_info'] if info['word'] not in self.
blacklisted_phonemes]
mfcc_result = []
for info in phonemes_info:
start, stop = 1000 * info['start'], 1000 * info['end']
segment = np.array(wav[start:stop].get_array_of_samples())
if ignore_shorter_phonemes and segment.size < phoneme_len:
continue
mfcc_features = mfcc(segment, samplerate=frequency, nfft=
mfcc_nfft, winstep=mfcc_winstep)
for i in range(len(mfcc_features)):
ith_mfcc = np.array(mfcc_features[i, :])
ith_mfcc_result_row = {'i': i, 'length': len(mfcc_features),
'mfcc': ith_mfcc, **info}
mfcc_result.append(ith_mfcc_result_row)
return mfcc_result
| import json
import logging
import numpy as np
from python_speech_features import mfcc
from format_converters import get_segment
from schemas import *
from chains.mfcc import Mfcc
logger = logging.getLogger()
class MfccLocal(Mfcc):
"""
MfccLocal computes Mfcc features for each phoneme from the sample
that are not blacklisted based on phoneme label that is
received from Phoneme chain.
It subclasses Formants to not repeat the sample_layer logic
which is valid also in this context
"""
abstract_class = False
@staticmethod
def sample_result_filename(out_sample_path):
return f'{out_sample_path[:-5]}_mfcc_result.json'
@staticmethod
def filenames_to_skip_sample(out_sample_path):
return [f'{out_sample_path[:-5]}_mfcc_result.csv']
@staticmethod
def serialize_to_json(mfcc_result):
"""
:param mfcc_result: list of mfcc measurements with
necessary metadata
:return: serialized object of proper schema
"""
mfcc_schema = MfccLocalSchema()
mfcc_dict = {'mfcc_info': mfcc_result}
return mfcc_schema.dumps(mfcc_dict)
def compute_mfcc(self, segments_path, phonemes_result_path):
"""
:param segments_path: path to the input wav
:param phonemes_result_path: path to phonemes results
that is required by the Local version of the Mfcc
:return: computed list of mfcc features with all required metadata
"""
wav = get_segment(segments_path, 'wav')
frequency = wav.frame_rate
phoneme_len = self.process_settings.get('phoneme_len', 2048)
ignore_shorter_phonemes = self.process_settings.get(
'ignore_shorter_phonemes', True)
mfcc_nfft = self.process_settings.get('mfcc_nfft', 2048)
mfcc_winstep = self.process_settings.get('mfcc_winstep', 0.1)
with open(phonemes_result_path, 'r') as f:
schema = DecoderOutputSchema()
json_file = json.load(f)
phonemes_result = schema.load(json_file)
phonemes_info = [info for info in phonemes_result[
'segment_info'] if info['word'] not in self.
blacklisted_phonemes]
mfcc_result = []
for info in phonemes_info:
start, stop = 1000 * info['start'], 1000 * info['end']
segment = np.array(wav[start:stop].get_array_of_samples())
if ignore_shorter_phonemes and segment.size < phoneme_len:
continue
mfcc_features = mfcc(segment, samplerate=frequency, nfft=
mfcc_nfft, winstep=mfcc_winstep)
for i in range(len(mfcc_features)):
ith_mfcc = np.array(mfcc_features[i, :])
ith_mfcc_result_row = {'i': i, 'length': len(mfcc_features),
'mfcc': ith_mfcc, **info}
mfcc_result.append(ith_mfcc_result_row)
return mfcc_result
| import json
import logging
import numpy as np
from python_speech_features import mfcc
from format_converters import get_segment
from schemas import *
from chains.mfcc import Mfcc
logger = logging.getLogger()
class MfccLocal(Mfcc):
"""
MfccLocal computes Mfcc features for each phoneme from the sample
that are not blacklisted based on phoneme label that is
received from Phoneme chain.
It subclasses Formants to not repeat the sample_layer logic
which is valid also in this context
"""
abstract_class = False
@staticmethod
def sample_result_filename(out_sample_path):
return f'{out_sample_path[:-5]}_mfcc_result.json'
@staticmethod
def filenames_to_skip_sample(out_sample_path):
return [f'{out_sample_path[:-5]}_mfcc_result.csv']
@staticmethod
def serialize_to_json(mfcc_result):
"""
:param mfcc_result: list of mfcc measurements with
necessary metadata
:return: serialized object of proper schema
"""
mfcc_schema = MfccLocalSchema()
mfcc_dict = {'mfcc_info': mfcc_result}
return mfcc_schema.dumps(mfcc_dict)
def compute_mfcc(self, segments_path, phonemes_result_path):
"""
:param segments_path: path to the input wav
:param phonemes_result_path: path to phonemes results
that is required by the Local version of the Mfcc
:return: computed list of mfcc features with all required metadata
"""
wav = get_segment(segments_path, 'wav')
frequency = wav.frame_rate
phoneme_len = self.process_settings.get("phoneme_len", 2048)
ignore_shorter_phonemes = self.process_settings.get("ignore_shorter_phonemes", True)
mfcc_nfft = self.process_settings.get("mfcc_nfft", 2048)
mfcc_winstep = self.process_settings.get("mfcc_winstep", 0.1)
with open(phonemes_result_path, 'r') as f:
schema = DecoderOutputSchema()
json_file = json.load(f)
phonemes_result = schema.load(json_file)
phonemes_info = [info for info in phonemes_result['segment_info']
if info['word'] not in self.blacklisted_phonemes]
mfcc_result = []
for info in phonemes_info:
start, stop = (1000 * info['start'], 1000 * info['end'])
segment = np.array(wav[start:stop].get_array_of_samples())
if ignore_shorter_phonemes and segment.size < phoneme_len:
continue
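            # frame-level MFCCs for this phoneme segment: one row of coefficients per analysis window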
mfcc_features = mfcc(segment, samplerate=frequency,
nfft=mfcc_nfft, winstep=mfcc_winstep)
for i in range(len(mfcc_features)):
ith_mfcc = np.array(mfcc_features[i, :])
ith_mfcc_result_row = {'i': i, 'length': len(mfcc_features),
'mfcc': ith_mfcc, **info}
mfcc_result.append(ith_mfcc_result_row)
return mfcc_result | [
6,
7,
8,
9,
10
] |
1,189 | 4e9fd3ee2a78fae164d9f38704443ac5b2f4c11c | <mask token>
class colour:
purple = '\x1b[95m'
cyan = '\x1b[96m'
darkcyan = '\x1b[36m'
blue = '\x1b[94m'
green = '\x1b[92m'
yellow = '\x1b[93m'
red = '\x1b[91m'
bold = '\x1b[1m'
underline = '\x1b[4m'
end = '\x1b[0m'
| <mask token>
GPIO.setmode(GPIO.BCM)
GPIO.setup(solenoid1, GPIO.OUT)
GPIO.setup(solenoid2, GPIO.OUT)
GPIO.setup(solenoid3, GPIO.OUT)
GPIO.setup(solenoid4, GPIO.OUT)
GPIO.setup(led1, GPIO.OUT)
GPIO.setup(motor1, GPIO.OUT)
<mask token>
GPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
class colour:
purple = '\x1b[95m'
cyan = '\x1b[96m'
darkcyan = '\x1b[36m'
blue = '\x1b[94m'
green = '\x1b[92m'
yellow = '\x1b[93m'
red = '\x1b[91m'
bold = '\x1b[1m'
underline = '\x1b[4m'
end = '\x1b[0m'
| <mask token>
solenoid1 = 23
solenoid2 = 24
solenoid3 = 4
solenoid4 = 17
motor1 = 18
led1 = 25
switch1 = 6
switch2 = 13
GPIO.setmode(GPIO.BCM)
GPIO.setup(solenoid1, GPIO.OUT)
GPIO.setup(solenoid2, GPIO.OUT)
GPIO.setup(solenoid3, GPIO.OUT)
GPIO.setup(solenoid4, GPIO.OUT)
GPIO.setup(led1, GPIO.OUT)
GPIO.setup(motor1, GPIO.OUT)
motor1pwm = GPIO.PWM(motor1, 100)
GPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
class colour:
purple = '\x1b[95m'
cyan = '\x1b[96m'
darkcyan = '\x1b[36m'
blue = '\x1b[94m'
green = '\x1b[92m'
yellow = '\x1b[93m'
red = '\x1b[91m'
bold = '\x1b[1m'
underline = '\x1b[4m'
end = '\x1b[0m'
| import RPi.GPIO as GPIO
import time
import timeit
import sys
import os
import random
import datetime
import collections
import threading
from Queue import Queue
solenoid1 = 23
solenoid2 = 24
solenoid3 = 4
solenoid4 = 17
motor1 = 18
led1 = 25
switch1 = 6
switch2 = 13
GPIO.setmode(GPIO.BCM)
GPIO.setup(solenoid1, GPIO.OUT)
GPIO.setup(solenoid2, GPIO.OUT)
GPIO.setup(solenoid3, GPIO.OUT)
GPIO.setup(solenoid4, GPIO.OUT)
GPIO.setup(led1, GPIO.OUT)
GPIO.setup(motor1, GPIO.OUT)
motor1pwm = GPIO.PWM(motor1, 100)
GPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
class colour:
purple = '\x1b[95m'
cyan = '\x1b[96m'
darkcyan = '\x1b[36m'
blue = '\x1b[94m'
green = '\x1b[92m'
yellow = '\x1b[93m'
red = '\x1b[91m'
bold = '\x1b[1m'
underline = '\x1b[4m'
end = '\x1b[0m'
| #!/usr/bin/env python
# Standardised set up
import RPi.GPIO as GPIO # External module imports GPIO
import time # Library to slow or give a rest to the script
import timeit # Alternative timing library for platform specific timing
import sys # Library to access program arguments and call exits
import os # Library provides functionality to clear screen
import random
import datetime
import collections
import threading
from Queue import Queue
# Pin definition using Broadcom (BCM) numbering; comments give the physical header pin
solenoid1 = 23 # physical pin 16
solenoid2 = 24 # physical pin 18
solenoid3 = 4 # physical pin 7
solenoid4 = 17 # physical pin 11
motor1 = 18 # physical pin 12
led1 = 25 # physical pin 22
switch1 = 6 # physical pin 31
switch2 = 13 # physical pin 33
# Pin setup
GPIO.setmode(GPIO.BCM) # Broadcom pin-numbering scheme
GPIO.setup(solenoid1, GPIO.OUT) # set as I/O output
GPIO.setup(solenoid2, GPIO.OUT) # set as I/O output
GPIO.setup(solenoid3, GPIO.OUT) # set as I/O output
GPIO.setup(solenoid4, GPIO.OUT) # set as I/O output
GPIO.setup(led1, GPIO.OUT) # set as I/O output
GPIO.setup(motor1, GPIO.OUT) # set as I/O output
motor1pwm = GPIO.PWM(motor1,100) # set pwm on motor1 pin
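# 100 is the PWM frequency in Hz; call motor1pwm.start(duty_cycle) to begin output before using ChangeDutyCycle()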
GPIO.setup(switch1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(switch2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
class colour:
purple = '\033[95m'
cyan = '\033[96m'
darkcyan = '\033[36m'
blue = '\033[94m'
green = '\033[92m'
yellow = '\033[93m'
red = '\033[91m'
bold = '\033[1m'
underline = '\033[4m'
end = '\033[0m'
| [
2,
3,
4,
5,
6
] |
1,190 | ee1ce3ea4b31246703530478d6550b0c8866197e | <mask token>
| <mask token>
client.request(method='POST', url='/', body=post_data.encode('utf-8'),
headers=head_dict)
<mask token>
client.close()
print(content)
| <mask token>
client = http.client.HTTPConnection('127.0.0.1:9000')
post_data = {'usertag': 'test', 'password': '123456', 'code':
"print('Hello Web')"}
head_dict = {'Content-Type': 'application/x-www-form-urlencoded'}
post_data = urlencode(post_data)
client.request(method='POST', url='/', body=post_data.encode('utf-8'),
headers=head_dict)
resp = client.getresponse()
content = resp.read().decode('utf-8')
client.close()
print(content)
| import http.client
from urllib.parse import urlencode
client = http.client.HTTPConnection('127.0.0.1:9000')
post_data = {'usertag': 'test', 'password': '123456', 'code':
"print('Hello Web')"}
head_dict = {'Content-Type': 'application/x-www-form-urlencoded'}
post_data = urlencode(post_data)
client.request(method='POST', url='/', body=post_data.encode('utf-8'),
headers=head_dict)
resp = client.getresponse()
content = resp.read().decode('utf-8')
client.close()
print(content)
| import http.client
from urllib.parse import urlencode
client = http.client.HTTPConnection("127.0.0.1:9000")
post_data = {
"usertag": "test",
"password": '123456',
'code': "print('Hello Web')"
}
head_dict = {'Content-Type': 'application/x-www-form-urlencoded'}
post_data = urlencode(post_data)
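# urlencode() builds the key=value&... body that matches the x-www-form-urlencoded Content-Type above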
client.request(method="POST", url='/',
body=post_data.encode('utf-8'),
headers=head_dict)
resp = client.getresponse()
content = resp.read().decode("utf-8")
client.close()
print(content)
| [
0,
1,
2,
3,
4
] |
1,191 | 7badb7c9f1e00dfc379468b7bd73a3f09bffe6de | <mask token>
def downgrade():
op.alter_column('run', 'polarion_id', type_=ty.String(1024))
op.alter_column('auto_result', 'skip', type_=ty.String(65535))
op.alter_column('auto_result', 'failure', type_=ty.String(65535))
op.alter_column('auto_result', 'comment', type_=ty.String(65535))
op.alter_column('manual_result', 'comment', type_=ty.String(65535))
| <mask token>
def upgrade():
op.alter_column('run', 'polarion_id', type_=ty.String(65535))
op.alter_column('auto_result', 'skip', type_=ty.Text())
op.alter_column('auto_result', 'failure', type_=ty.Text())
op.alter_column('auto_result', 'comment', type_=ty.Text())
op.alter_column('manual_result', 'comment', type_=ty.Text())
def downgrade():
op.alter_column('run', 'polarion_id', type_=ty.String(1024))
op.alter_column('auto_result', 'skip', type_=ty.String(65535))
op.alter_column('auto_result', 'failure', type_=ty.String(65535))
op.alter_column('auto_result', 'comment', type_=ty.String(65535))
op.alter_column('manual_result', 'comment', type_=ty.String(65535))
| <mask token>
revision = '6374505f9e6e'
down_revision = '9dc91bb7d2ba'
<mask token>
def upgrade():
op.alter_column('run', 'polarion_id', type_=ty.String(65535))
op.alter_column('auto_result', 'skip', type_=ty.Text())
op.alter_column('auto_result', 'failure', type_=ty.Text())
op.alter_column('auto_result', 'comment', type_=ty.Text())
op.alter_column('manual_result', 'comment', type_=ty.Text())
def downgrade():
op.alter_column('run', 'polarion_id', type_=ty.String(1024))
op.alter_column('auto_result', 'skip', type_=ty.String(65535))
op.alter_column('auto_result', 'failure', type_=ty.String(65535))
op.alter_column('auto_result', 'comment', type_=ty.String(65535))
op.alter_column('manual_result', 'comment', type_=ty.String(65535))
| <mask token>
revision = '6374505f9e6e'
down_revision = '9dc91bb7d2ba'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.types as ty
def upgrade():
op.alter_column('run', 'polarion_id', type_=ty.String(65535))
op.alter_column('auto_result', 'skip', type_=ty.Text())
op.alter_column('auto_result', 'failure', type_=ty.Text())
op.alter_column('auto_result', 'comment', type_=ty.Text())
op.alter_column('manual_result', 'comment', type_=ty.Text())
def downgrade():
op.alter_column('run', 'polarion_id', type_=ty.String(1024))
op.alter_column('auto_result', 'skip', type_=ty.String(65535))
op.alter_column('auto_result', 'failure', type_=ty.String(65535))
op.alter_column('auto_result', 'comment', type_=ty.String(65535))
op.alter_column('manual_result', 'comment', type_=ty.String(65535))
| """empty message
Revision ID: 6374505f9e6e
Revises: 9dc91bb7d2ba
Create Date: 2016-11-14 10:55:08.418923
"""
# revision identifiers, used by Alembic.
revision = '6374505f9e6e'
down_revision = '9dc91bb7d2ba'
from alembic import op
import sqlalchemy as sa
import sqlalchemy.types as ty
def upgrade():
### commands auto generated by Alembic - please adjust! ###
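    # widen free-text columns: polarion_id gets a larger String, result/comment fields move to TEXT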
op.alter_column('run', 'polarion_id', type_=ty.String(65535))
op.alter_column('auto_result', 'skip', type_=ty.Text())
op.alter_column('auto_result', 'failure', type_=ty.Text())
op.alter_column('auto_result', 'comment', type_=ty.Text())
op.alter_column('manual_result', 'comment', type_=ty.Text())
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('run', 'polarion_id', type_=ty.String(1024))
op.alter_column('auto_result', 'skip', type_=ty.String(65535))
op.alter_column('auto_result', 'failure', type_=ty.String(65535))
op.alter_column('auto_result', 'comment', type_=ty.String(65535))
op.alter_column('manual_result', 'comment', type_=ty.String(65535))
### end Alembic commands ###
| [
1,
2,
3,
4,
5
] |
1,192 | be894830bb0dde6bacaea6be823391e0445603c3 | <mask token>
| <mask token>
urlpatterns = [path('', views.index, name='listings'), path(
'<int:listing_id>', views.listing, name='listing'), path('search',
views.search, name='search')]
| from django.urls import path
from . import views
urlpatterns = [path('', views.index, name='listings'), path(
'<int:listing_id>', views.listing, name='listing'), path('search',
views.search, name='search')]
| # This handle the url for routing
from django.urls import path
from . import views
# Defines views to pass dynamic data to listings page
urlpatterns = [
path('', views.index, name='listings'),
path('<int:listing_id>', views.listing, name='listing'),
path('search', views.search, name='search')
] | null | [
0,
1,
2,
3
] |
1,193 | 89605ff723d2f78e85cae458d576494718b5d456 | <mask token>
class InspectTest(unittest.TestCase):
def test_func(self):
self.assertTrue(find_top_pyfile())
self.assertTrue(caller_name())
<mask token>
<mask token>
| <mask token>
class LittleCatC(object):
pass
class LittleCatD(LittleCatB):
pass
class InspectTest(unittest.TestCase):
def test_func(self):
self.assertTrue(find_top_pyfile())
self.assertTrue(caller_name())
def test_all_subclasses(self):
self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])
<mask token>
| <mask token>
class LittleCatB(LittleCatA):
pass
class LittleCatC(object):
pass
class LittleCatD(LittleCatB):
pass
class InspectTest(unittest.TestCase):
def test_func(self):
self.assertTrue(find_top_pyfile())
self.assertTrue(caller_name())
def test_all_subclasses(self):
self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])
<mask token>
| from __future__ import division, unicode_literals
import unittest
from monty.inspect import *
class LittleCatA(object):
pass
class LittleCatB(LittleCatA):
pass
class LittleCatC(object):
pass
class LittleCatD(LittleCatB):
pass
class InspectTest(unittest.TestCase):
def test_func(self):
self.assertTrue(find_top_pyfile())
self.assertTrue(caller_name())
def test_all_subclasses(self):
self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])
if __name__ == '__main__':
unittest.main()
| # coding: utf-8
from __future__ import division, unicode_literals
import unittest
from monty.inspect import *
class LittleCatA(object):
pass
class LittleCatB(LittleCatA):
pass
class LittleCatC(object):
pass
class LittleCatD(LittleCatB):
pass
class InspectTest(unittest.TestCase):
def test_func(self):
# Not a real test. Need something better.
self.assertTrue(find_top_pyfile())
self.assertTrue(caller_name())
def test_all_subclasses(self):
self.assertEqual(all_subclasses(LittleCatA), [LittleCatB, LittleCatD])
if __name__ == "__main__":
unittest.main()
| [
2,
5,
6,
9,
10
] |
1,194 | 81573b4a57f540733ff2faaf82bab78381b9dd46 | <mask token>
| <mask token>
def parse_arguments() ->Namespace:
"""
Parse arguments
:return: Arguments
"""
parser = ArgumentParser(description=
'DLP project: Stock Prediction using Transformer')
parser.add_argument('-e', '--epochs', default=10, type=int, help=
'Number of epochs')
parser.add_argument('-w', '--warmup', default=2, type=int, help=
'Number of epochs for warmup')
parser.add_argument('-l', '--learning_rate', default=0.001, type=float,
help='Learning rate')
parser.add_argument('-b', '--batch_size', default=64, type=int, help=
'Batch size')
parser.add_argument('-s', '--seq_len', default=128, type=int, help=
'Sequence length (consecutive days)')
parser.add_argument('-ne', '--num_encoders', default=3, type=int, help=
'Number of transformer encoder in the network')
parser.add_argument('-a', '--attn_dim', default=96, type=int, help=
'Dimension of single attention output')
parser.add_argument('-nh', '--num_heads', default=12, type=int, help=
'Number of heads for multi-attention')
parser.add_argument('-d', '--dropout_rate', default=0.3, type=float,
help='Dropout rate')
parser.add_argument('-hs', '--hidden_size', default=256, type=int, help
='Hidden size between the linear layers in the encoder')
parser.add_argument('-loss', '--loss_function', default='l2', type=str,
choices=['l1', 'l2'], help='Loss function')
parser.add_argument('-i', '--inference_only', action='store_true', help
='Inference only or not')
parser.add_argument('-r', '--root_dir', default='archive', type=str,
help='Directory containing the downloaded data')
parser.add_argument('-v', '--verbosity', default=0, type=int, choices=[
0, 1, 2], help='Verbosity level')
return parser.parse_args()
| from argparse import ArgumentParser, Namespace
def parse_arguments() ->Namespace:
"""
Parse arguments
:return: Arguments
"""
parser = ArgumentParser(description=
'DLP project: Stock Prediction using Transformer')
parser.add_argument('-e', '--epochs', default=10, type=int, help=
'Number of epochs')
parser.add_argument('-w', '--warmup', default=2, type=int, help=
'Number of epochs for warmup')
parser.add_argument('-l', '--learning_rate', default=0.001, type=float,
help='Learning rate')
parser.add_argument('-b', '--batch_size', default=64, type=int, help=
'Batch size')
parser.add_argument('-s', '--seq_len', default=128, type=int, help=
'Sequence length (consecutive days)')
parser.add_argument('-ne', '--num_encoders', default=3, type=int, help=
'Number of transformer encoder in the network')
parser.add_argument('-a', '--attn_dim', default=96, type=int, help=
'Dimension of single attention output')
parser.add_argument('-nh', '--num_heads', default=12, type=int, help=
'Number of heads for multi-attention')
parser.add_argument('-d', '--dropout_rate', default=0.3, type=float,
help='Dropout rate')
parser.add_argument('-hs', '--hidden_size', default=256, type=int, help
='Hidden size between the linear layers in the encoder')
parser.add_argument('-loss', '--loss_function', default='l2', type=str,
choices=['l1', 'l2'], help='Loss function')
parser.add_argument('-i', '--inference_only', action='store_true', help
='Inference only or not')
parser.add_argument('-r', '--root_dir', default='archive', type=str,
help='Directory containing the downloaded data')
parser.add_argument('-v', '--verbosity', default=0, type=int, choices=[
0, 1, 2], help='Verbosity level')
return parser.parse_args()
| null | null | [
0,
1,
2
] |
1,195 | 6ebf6bdfc6a4a1fe49f4eed1a2c1802f8adeef08 | <mask token>
| <mask token>
def progresses_format(users):
json = dict()
json['users_progresses'] = list()
for user in users:
json['users_progresses'].append(progress_format(user))
return json
<mask token>
| def progress_format(user):
json = dict()
json['progres_id'] = user[0]
json['percentage'] = user[1]
json['user_id'] = user[2]
json['technology'] = user[3]
return json
def progresses_format(users):
json = dict()
json['users_progresses'] = list()
for user in users:
json['users_progresses'].append(progress_format(user))
return json
<mask token>
| def progress_format(user):
json = dict()
json['progres_id'] = user[0]
json['percentage'] = user[1]
json['user_id'] = user[2]
json['technology'] = user[3]
return json
def progresses_format(users):
json = dict()
json['users_progresses'] = list()
for user in users:
json['users_progresses'].append(progress_format(user))
return json
def progress_percentage_formating(progresses):
response = dict()
response['response'] = list()
for progress in progresses:
json = dict()
json['name'] = progress[1]
json['percentage'] = progress[0]
response['response'].append(json)
return response
| def progress_format(user):
json = dict()
json["progres_id"] = user[0]
json["percentage"] = user[1]
json["user_id"] = user[2]
json["technology"] = user[3]
return json
def progresses_format(users):
json = dict()
json["users_progresses"] = list()
for user in users:
json["users_progresses"].append(progress_format(user))
return json
def progress_percentage_formating(progresses):
response = dict()
response['response'] = list()
for progress in progresses:
json = dict()
json["name"] = progress[1]
json["percentage"] = progress[0]
response['response'].append(json)
return response | [
0,
1,
2,
3,
4
] |
1,196 | 8334478c8b7fc7688477cdb837467e00e857c07c | <mask token>
class DuckList(generics.ListCreateAPIView):
<mask token>
<mask token>
<mask token>
| <mask token>
class DuckList(generics.ListCreateAPIView):
<mask token>
<mask token>
def get_object(self):
queryset = self.get_queryset()
obj = get_object_or_404(queryset, pk=self.kwargs['pk'])
return obj
| <mask token>
class DuckList(generics.ListCreateAPIView):
queryset = Duck.objects.all()
serializer_class = Duck_Serializer
def get_object(self):
queryset = self.get_queryset()
obj = get_object_or_404(queryset, pk=self.kwargs['pk'])
return obj
| from django.shortcuts import get_object_or_404
from rest_framework import generics
from .models import Duck
from .serializers import Duck_Serializer
class DuckList(generics.ListCreateAPIView):
queryset = Duck.objects.all()
serializer_class = Duck_Serializer
def get_object(self):
queryset = self.get_queryset()
obj = get_object_or_404(queryset, pk=self.kwargs['pk'])
return obj
| from django.shortcuts import get_object_or_404
from rest_framework import generics
from .models import Duck
from .serializers import Duck_Serializer
class DuckList(generics.ListCreateAPIView):
queryset = Duck.objects.all()
serializer_class = Duck_Serializer
def get_object(self):
queryset = self.get_queryset()
obj = get_object_or_404(
queryset,
pk = self.kwargs['pk'],
)
return obj
| [
1,
2,
3,
4,
5
] |
1,197 | 18b73a06c80272aff5c0e4b10473e95bd58466f3 | <mask token>
def _get_stats(candidate_pairs, truth_pairs):
tp = len(candidate_pairs.intersection(truth_pairs))
prec = 1.0 * tp / len(candidate_pairs)
rec = 1.0 * tp / len(truth_pairs)
print(' returned: %d, tp=%.4f, prec=%.4f, rec=%.4f' % (len(
candidate_pairs), tp, prec, rec))
return prec, rec
<mask token>
| <mask token>
def _read_truthfile(filepath):
with open(filepath, 'r') as f:
truth_pairs = [tuple(sorted(line.strip().split())) for line in f]
return set(truth_pairs)
def _get_stats(candidate_pairs, truth_pairs):
tp = len(candidate_pairs.intersection(truth_pairs))
prec = 1.0 * tp / len(candidate_pairs)
rec = 1.0 * tp / len(truth_pairs)
print(' returned: %d, tp=%.4f, prec=%.4f, rec=%.4f' % (len(
candidate_pairs), tp, prec, rec))
return prec, rec
<mask token>
| <mask token>
def _read_truthfile(filepath):
with open(filepath, 'r') as f:
truth_pairs = [tuple(sorted(line.strip().split())) for line in f]
return set(truth_pairs)
def _get_stats(candidate_pairs, truth_pairs):
tp = len(candidate_pairs.intersection(truth_pairs))
prec = 1.0 * tp / len(candidate_pairs)
rec = 1.0 * tp / len(truth_pairs)
print(' returned: %d, tp=%.4f, prec=%.4f, rec=%.4f' % (len(
candidate_pairs), tp, prec, rec))
return prec, rec
def run(mh, truthfile, ts):
truth_pairs = _read_truthfile(truthfile)
prec_series = []
rec_series = []
for t in ts:
print('Doing LSH with t=', t)
lsh = LSH(t)
lsh.do_lsh(mh)
candidate_pairs = set(lsh.get_candidates())
prec, rec = _get_stats(candidate_pairs, truth_pairs)
prec_series.append(prec)
rec_series.append(rec)
exp_df = pd.DataFrame({'t': ts, 'prec': prec_series, 'rec': rec_series})
return exp_df
| <mask token>
from plagiarism_lib.article_db import ArticleDB
from plagiarism_lib.minhash import MinHash
from plagiarism_lib.lsh import LSH
import pandas as pd
import numpy as np
def _read_truthfile(filepath):
with open(filepath, 'r') as f:
truth_pairs = [tuple(sorted(line.strip().split())) for line in f]
return set(truth_pairs)
def _get_stats(candidate_pairs, truth_pairs):
tp = len(candidate_pairs.intersection(truth_pairs))
prec = 1.0 * tp / len(candidate_pairs)
rec = 1.0 * tp / len(truth_pairs)
print(' returned: %d, tp=%.4f, prec=%.4f, rec=%.4f' % (len(
candidate_pairs), tp, prec, rec))
return prec, rec
def run(mh, truthfile, ts):
truth_pairs = _read_truthfile(truthfile)
prec_series = []
rec_series = []
for t in ts:
print('Doing LSH with t=', t)
lsh = LSH(t)
lsh.do_lsh(mh)
candidate_pairs = set(lsh.get_candidates())
prec, rec = _get_stats(candidate_pairs, truth_pairs)
prec_series.append(prec)
rec_series.append(rec)
exp_df = pd.DataFrame({'t': ts, 'prec': prec_series, 'rec': rec_series})
return exp_df
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 7 07:51:26 2017
@author: hcorrada
"""
from plagiarism_lib.article_db import ArticleDB
from plagiarism_lib.minhash import MinHash
from plagiarism_lib.lsh import LSH
import pandas as pd
import numpy as np
def _read_truthfile(filepath):
with open(filepath, 'r') as f:
truth_pairs = [tuple(sorted(line.strip().split()))
for line in f]
return set(truth_pairs)
def _get_stats(candidate_pairs, truth_pairs):
tp = len(candidate_pairs.intersection(truth_pairs))
prec = 1.0 * tp / len(candidate_pairs)
rec = 1.0 * tp / len(truth_pairs)
print (" returned: %d, tp=%.4f, prec=%.4f, rec=%.4f" % (len(candidate_pairs), tp, prec, rec))
return prec, rec
def run(mh, truthfile, ts):
truth_pairs = _read_truthfile(truthfile)
prec_series = []
rec_series = []
for t in ts:
print("Doing LSH with t=", t)
lsh = LSH(t)
lsh.do_lsh(mh)
candidate_pairs = set(lsh.get_candidates())
prec, rec = _get_stats(candidate_pairs, truth_pairs)
prec_series.append(prec)
rec_series.append(rec)
exp_df = pd.DataFrame({'t': ts, 'prec': prec_series, 'rec': rec_series})
return exp_df | [
1,
2,
3,
4,
5
] |
1,198 | bdfd941be29a31d6c1bbedd270dadac844f49fc4 | <mask token>
class GameSequence:
<mask token>
<mask token>
def changeMode(self, number):
self.currentMode = self.modes[number]
def startGame(self):
self.currentTurn = 0
"""
does some intro animaton -> starts game
"""
return
<mask token>
def getCurrentPlayer(self):
return self.players[self.currentTurn]
def changeTurn(self):
self.players[self.currentTurn].changeTurn(False)
self.currentTurn += 1
self.currentTurn = self.currentTurn % len(self.players)
def endTurn(self):
self.players[self.currentTurn].changeTurn(False)
| <mask token>
class GameSequence:
<mask token>
def __init__(self, ArrayofPlayers):
if len(ArrayofPlayers) < 2:
return False
self.players = ArrayofPlayers
self.currentTurn = None
NOTHING = 2
ATTACK = 1
MOVE = 0
self.modes = [MOVE, ATTACK, NOTHING]
self.currentMode = NOTHING
def changeMode(self, number):
self.currentMode = self.modes[number]
def startGame(self):
self.currentTurn = 0
"""
does some intro animaton -> starts game
"""
return
<mask token>
def getCurrentPlayer(self):
return self.players[self.currentTurn]
def changeTurn(self):
self.players[self.currentTurn].changeTurn(False)
self.currentTurn += 1
self.currentTurn = self.currentTurn % len(self.players)
def endTurn(self):
self.players[self.currentTurn].changeTurn(False)
| <mask token>
class GameSequence:
<mask token>
def __init__(self, ArrayofPlayers):
if len(ArrayofPlayers) < 2:
return False
self.players = ArrayofPlayers
self.currentTurn = None
NOTHING = 2
ATTACK = 1
MOVE = 0
self.modes = [MOVE, ATTACK, NOTHING]
self.currentMode = NOTHING
def changeMode(self, number):
self.currentMode = self.modes[number]
def startGame(self):
self.currentTurn = 0
"""
does some intro animaton -> starts game
"""
return
def startTurn(self):
self.players[self.currentTurn].changeTurn(True)
"""
maybe some camera change animation to player location
"""
return
def getCurrentPlayer(self):
return self.players[self.currentTurn]
def changeTurn(self):
self.players[self.currentTurn].changeTurn(False)
self.currentTurn += 1
self.currentTurn = self.currentTurn % len(self.players)
def endTurn(self):
self.players[self.currentTurn].changeTurn(False)
| <mask token>
class GameSequence:
"""
GameSequence summary: Keeps track of player turn sequence and Game end
Functionalities
-start game
-must start turns
-change turns
-end turns
-end game
"""
def __init__(self, ArrayofPlayers):
if len(ArrayofPlayers) < 2:
return False
self.players = ArrayofPlayers
self.currentTurn = None
NOTHING = 2
ATTACK = 1
MOVE = 0
self.modes = [MOVE, ATTACK, NOTHING]
self.currentMode = NOTHING
def changeMode(self, number):
self.currentMode = self.modes[number]
def startGame(self):
self.currentTurn = 0
"""
does some intro animaton -> starts game
"""
return
def startTurn(self):
self.players[self.currentTurn].changeTurn(True)
"""
maybe some camera change animation to player location
"""
return
def getCurrentPlayer(self):
return self.players[self.currentTurn]
def changeTurn(self):
self.players[self.currentTurn].changeTurn(False)
self.currentTurn += 1
self.currentTurn = self.currentTurn % len(self.players)
def endTurn(self):
self.players[self.currentTurn].changeTurn(False)
| from Player import Player
class GameSequence:
'''
GameSequence summary: Keeps track of player turn sequence and Game end
Functionalities
-start game
-must start turns
-change turns
-end turns
-end game
'''
def __init__(self, ArrayofPlayers):
if (len(ArrayofPlayers) < 2):
return False
self.players = ArrayofPlayers
self.currentTurn = None
NOTHING = 2
ATTACK = 1
MOVE = 0
self.modes = [MOVE, ATTACK,NOTHING]
self.currentMode = NOTHING
def changeMode(self,number):
self.currentMode = self.modes[number]
def startGame(self):
self.currentTurn = 0
'''
does some intro animaton -> starts game
'''
return
def startTurn(self):
self.players[self.currentTurn].changeTurn(True)
'''
maybe some camera change animation to player location
'''
return
def getCurrentPlayer(self):
return self.players[self.currentTurn]
def changeTurn(self):
self.players[self.currentTurn].changeTurn(False)
self.currentTurn += 1
self.currentTurn = self.currentTurn % len(self.players)
def endTurn(self):
self.players[self.currentTurn].changeTurn(False)
| [
6,
7,
8,
9,
11
] |
1,199 | f9edbef46494cc2993c6a633fe35406524dbbf67 | <mask token>
| from mtots.parser import base
from mtots.parser import combinator
from mtots.parser.combinator import All
from mtots.parser.combinator import Any
from mtots.parser.combinator import AnyTokenBut
from mtots.parser.combinator import Forward
from mtots.parser.combinator import Peek
from mtots.parser.combinator import Required
from mtots.parser.combinator import Token
| null | null | null | [
0,
1
] |