max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
eval-ccs2019/benchmark.py | nibau/zkay | 0 | 12795051 | <reponame>nibau/zkay<gh_stars>0
#!/usr/bin/env python3
# usage ./benchmark.py [example_dir]
# (example_dir contains subdirectories with example sol/zkay and scenario files)
# requires installed memory-profiler and zkay packages
import os
import datetime
import sys
import shutil
clean=False
file_dir = os.path.realpath(os.path.dirname(__file__))
base_dir = os.path.join(file_dir, 'examples') if len(sys.argv) < 2 else os.path.realpath(sys.argv[1])
backends = ['dummy', 'ecdh-chaskey', 'ecdh-aes'] #, 'rsa-pkcs1.5', 'rsa-oaep'] # rsa consumes >100 GB hdd space
for backend in backends:
for dirname in os.listdir(base_dir):
p = os.path.join(base_dir, dirname)
if os.path.isdir(p):
file = None
for filename in os.listdir(p):
if filename.endswith(('.sol', '.zkay')):
file = os.path.join(p, filename)
break
if file is not None:
out_dir = os.path.join(p, f'out_{backend}')
if clean and os.path.exists(out_dir):
shutil.rmtree(out_dir)
os.makedirs(out_dir, exist_ok=True)
print(f'compiling {file}, at {datetime.datetime.utcnow()}')
os.system(f"mprof run --include-children --nopython -o '{out_dir}/mprof_compile.dat' zkay compile '{file}' --verbosity 0 --crypto-backend {backend} --opt-hash-threshold 0 -o '{out_dir}' --log --log-dir '{out_dir}'")
scenario_file = os.path.join(p, 'scenario.py')
if os.path.exists(scenario_file):
print(f'running {scenario_file}, at {datetime.datetime.utcnow()}')
os.system(f"mprof run --include-children --nopython -o '{out_dir}/mprof_run.dat' python '{scenario_file}' '{out_dir}'")
| 1.96875 | 2 |
src/models/ffhq_1024_haar/__init__.py | YorkUCVIL/Wavelet-Flow | 59 | 12795052 | from models.ffhq_1024_haar.Training_data import *
from models.ffhq_1024_haar.Validation_data import *
from models.ffhq_1024_haar.Network_body import *
from models.ffhq_1024_haar.Conditioning_network import *
import models.shared.routines as routines
from models.ffhq_1024_haar.build_training_graph import *
model_config_path = 'data/ffhq_1024_haar/config.hjson'
| 1.1875 | 1 |
mkAD/modify_init.py | arielthomas1/SHEMAT-Suite-Open | 0 | 12795053 | <filename>mkAD/modify_init.py<gh_stars>0
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
import sys
import os
import re
def contains(theString, theQueryValue):
return theString.find(theQueryValue) > -1
targetfile = sys.argv[1]
if os.path.exists(os.getcwd() + "/" + targetfile):
openFile = open(targetfile)
writeFile = open(targetfile+".new","w")
print "Begin parsing head file to include init call"
allLines = openFile.readlines()
lines = set(allLines)
search = 0
for line in allLines:
if(search == 2):
# already found
writeFile.write(line)
else:
if search == 0:
if contains(line,"IMPLICIT NONE"):
# we begin search for final INCLUDE
writeFile.write(line)
search = 1
else:
writeFile.write(line)
else:
if search == 1:
if line.startswith("!"):
# just a comment
writeFile.write(line)
else:
if contains(line,"INCLUDE") or contains(line,"INTEGER") or contains(line,"DOUBLE") or contains(line,"EXTERNAL"):
# just an include
writeFile.write(line)
else:
# HERE we introduce the call
writeFile.write(" CALL INIT()\n");
writeFile.write(line)
search = 2
openFile.close()
writeFile.close()
else:
print "Data " + targetfile +" does not exist!"
| 3.140625 | 3 |
Lib/xml/dom/minitraversal.py | M-Spencer-94/configNOW | 3 | 12795054 | """A DOM implementation that offers traversal and ranges on top of
minidom, using the 4DOM traversal implementation."""
import minidom, string
class DOMImplementation(minidom.DOMImplementation):
def hasFeature(self, feature, version):
if version not in ("1.0", "2.0"):
return 0
feature = string.lower(feature)
if feature in ['traversal','range']:
return 1
return minidom.DOMImplementation.hasFeature(self, feature, version)
def _createDocument(self):
return Document()
class Document(minidom.Document):
implementation = DOMImplementation()
def createNodeIterator(self, root, whatToShow, filter, entityReferenceExpansion):
from xml.dom import NodeIterator
nodi = NodeIterator.NodeIterator(root, whatToShow, filter, entityReferenceExpansion)
return nodi
def createTreeWalker(self, root, whatToShow, filter, entityReferenceExpansion):
from TreeWalker import TreeWalker
return TreeWalker(root, whatToShow, filter, entityReferenceExpansion)
def createRange(self):
import Range
return Range.Range(self)
def getDOMImplementation():
return Document.implementation
| 2.78125 | 3 |
module/reTestIP.py | yangliangguang/CrawlFreeProxy | 8 | 12795055 | import redis
from tools.common import test_http_proxy
import threading
def http_task():
# connect to the Redis database
POOL = redis.ConnectionPool(host='127.0.0.1', port=6379)
CONN_REDIS = redis.Redis(connection_pool=POOL)
# take one IP out for testing
# proxy = CONN_REDIS.("freeProxy:AfterVerifyOKhttp")
ip = CONN_REDIS.srandmember("freeProxy:AfterVerifyOKhttp",1)
# check whether there are any IPs left in Redis
if not ip:
return 0
else:
# print("INFO: Get proxy from Redis freeProxy:BeforeVerifyhttp list")
proxy = str(ip[0], encoding="utf-8")
flag = test_http_proxy(proxy)
if flag == True:
# CONN_REDIS.sadd("freeProxy:AfterVerifyOKhttp", proxy)
# print("INFO: Save this Proxy IP in freeProxy:AfterVerifyOKhttp")
with open("pass.txt", "a+") as f:
f.write(proxy + "/n")
print("Pass:", proxy)
else:
# CONN_REDIS.sadd("freeProxy_Bad:AfterVerifyFailhttp", proxy)
# print("INFO: Abandon this Proxy IP!")
with open("fail.txt", "a+") as f:
f.write(proxy + "+/n")
print("Fail:", proxy)
return 1
def loop_test(name):
print("*Start thread task %s" % name)
while True:
result = http_task()
print("\n")
if result == 0:
break
if __name__ == "__main__":
jobs = []
num = 8
for i in range(1, num+1):
name = "Thread-" + str(i)
jobs.append(threading.Thread(target=loop_test, args=(name,)))
# start the threads
for t in jobs:
t.start()
for t in jobs:
t.join()
| 2.875 | 3 |
draw_lines.py | ounessy/tabular-horizontal-line-drawing | 0 | 12795056 | import pytesseract
from pytesseract import Output
import cv2
import os
from shapely.geometry import Polygon
pytesseract.pytesseract.tesseract_cmd = "Tesseract path e.g c:\Tesseract-OCR\tesseract "
import sys
from os import chdir, listdir
from os.path import join
## Hyper Params
L = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
CHAR_THRESHOLD = 3
LINE_WIDTH = 2
LINE_COLOR = (0, 0, 0)
## Algo
def get_image_data(img_path):
img = cv2.imread(img_path)
image_to_data = pytesseract.image_to_data(img, output_type=Output.DICT)
Xmax = img.shape[1]
Ymax = img.shape[0]
return image_to_data, Xmax, Ymax
def draw_lines_v1(img_path, image_to_data):
img = cv2.imread(img_path)
Xmax = img.shape[1]
n_boxes = len(image_to_data['level'])
for i in range(n_boxes):
if filter_boxes(image_to_data, i) :
(x, y, w, h) = (image_to_data['left'][i], image_to_data['top'][i], image_to_data['width'][i], image_to_data['height'][i])
#cv2.line(img, (0 , y +h +5 ),(Xmax, y +h +5) ,(0, 0, 0), 3)
#cv2.line(img, (0 , y+h ), (Xmax + w, y + h), (0, 255, 0), 1)
cv2.rectangle(img, (x, y), ( x + w, y + h), LINE_COLOR, LINE_WIDTH)
"""
cv2.line(img, (0 , 0),(0, Ymax) ,(0, 0, 0), 5)
cv2.line(img, (0 , 0),(Xmax, 0) ,(0, 0, 0), 5)
cv2.line(img, (0, Ymax),(Xmax, Ymax) ,(0, 0, 0), 5)
cv2.line(img, (Xmax , 0),(Xmax, Ymax) ,(0, 0, 0), 5)
"""
cv2.namedWindow("output2", cv2.WINDOW_NORMAL)
cv2.imshow('output2', img)
def draw_lines(img_path, image_to_data, margin = 0):
"""
Draw extracted and filtered boxes
"""
img = cv2.imread(img_path)
Xmax = img.shape[1]
Ymax = img.shape[0]
n_boxes = len(image_to_data)
for i in range(n_boxes-1):
"""
For each line, we will draw a line between the bottom of the line and the next line top
"""
(x, y, w, h) = (image_to_data[i][0], image_to_data[i][1], image_to_data[i][2], image_to_data[i][3])
y_next = image_to_data[i+1][1]
y_middle = (y+h+y_next)//2
"""
To avoid the case of drawing a line over a word, we will set a threshold on y_middle, in case a whole section is not detected.
"""
y_new = min(y_middle, y+h+margin)
cv2.line(img, (x , y_new),(w, y_new) ,LINE_COLOR, LINE_WIDTH)
#cv2.line(img, (0 , y+h ), (Xmax + w, y + h), (0, 255, 0), 1)
#cv2.rectangle(img, (x, y), ( x + w, y + h), (0, 255, 0), 1)
cv2.line(img, (0 , 0),(0, Ymax) ,LINE_COLOR, 5)
cv2.line(img, (0 , 0),(Xmax, 0) ,LINE_COLOR, 5)
cv2.line(img, (0, Ymax),(Xmax, Ymax) ,LINE_COLOR, 5)
cv2.line(img, (Xmax , 0),(Xmax, Ymax) ,LINE_COLOR, 5)
#cv2.namedWindow("output", cv2.WINDOW_NORMAL)
#cv2.imshow('output', img)
return img
def check_intersection(elem1, elem2):
for l in elem1:
if l in elem2:
return True
return False
## Processing extracted boxes
def check_polygon_intersection(p1, p2):
if p1.distance(p2) == 0 :
return True
return False
def create_polygon(x, y, w, h):
p = Polygon([(x, y),(x+w, y),(x+w, y + h),(x, y + h)])
return p
def filter_boxes(image_to_data, ind):
text = image_to_data["text"][ind]
h = image_to_data["height"][ind]
w = image_to_data["width"][ind]
if len(text) > CHAR_THRESHOLD and w > h:
return True
return False
def process_image_to_data(image_to_data, Xmax, Ymax):
boxes_list = list()
boxes_list.append([0, 0, 0, 0])
all_zero_distance = list()
n_boxes = len(image_to_data['level'])
"""
A first loop to merge close boxes
"""
for i in range(n_boxes):
if filter_boxes(image_to_data, i) :
(y, h) = (image_to_data['top'][i], image_to_data['height'][i])
p1 = create_polygon(0, y, Xmax, h)
n_b = len(boxes_list)
flag = 0
zero_distance = list()
for j in range(n_b):
elem = boxes_list[j]
p2 = create_polygon(elem[0], elem[1], elem[2], elem[3])
if check_polygon_intersection(p1, p2):
zero_distance.append(j)
new_y = min(y, elem[1])
new_h = max(y+h, elem[1] + elem[3]) - min(y, elem[1])
new_elem = [0, new_y, Xmax, new_h]
boxes_list[j]=new_elem
flag = 1
if flag == 0 :
new_elem = [0, y, Xmax, h]
boxes_list.append(new_elem)
return boxes_list
def clean_loop(boxes_list):
Xmax = boxes_list[1][2]
n = len(boxes_list)
global_flag = 0
all_to_be_merged = list()
used_ind = list()
for i in range(n):
if i not in used_ind:
to_be_merged = list()
boxe1 = boxes_list[i]
p1 = create_polygon(boxe1[0],boxe1[1],boxe1[2],boxe1[3])
m = len(boxes_list)
for j in range(m):
if j not in used_ind:
boxe2=boxes_list[j]
p2 = create_polygon(boxe2[0],boxe2[1],boxe2[2],boxe2[3])
if check_polygon_intersection(p1, p2):
to_be_merged.append(boxe2)
used_ind.append(j)
all_to_be_merged.append(to_be_merged)
n_detected = len(all_to_be_merged)
new_boxes_list = list()
for i in range(n_detected):
small_list = all_to_be_merged[i]
p = len(small_list)
new_y = min([boxe[1] for boxe in small_list])
new_h = max([boxe[1] + boxe[3] - new_y for boxe in small_list])
new_elem = [0, new_y, Xmax, new_h]
new_boxes_list.append(new_elem)
return new_boxes_list
def process_table(img_path,draw_path):
#try:
image_to_data, Xmax, Ymax = get_image_data(img_path)
image_to_data = process_image_to_data(image_to_data, Xmax, Ymax)
image_to_data = clean_loop(image_to_data)
img = draw_lines(img_path, image_to_data, margin =2)
image_name = os.path.basename(img_path).split(os.extsep)[0].replace(" ", "_")
processed_im_path = draw_path+"\\"+image_name+'pro.png'
cv2.imwrite(processed_im_path, img)
def process_path(file_path,draw_path):
all_files = listdir(file_path)
n = len(all_files)
for i in range(n):
f = all_files[i]
img_path = join(file_path, f)
process_table(img_path,draw_path)
| 2.859375 | 3 |
danceschool/private_lessons/models.py | django-danceschool/django-danceschool | 32 | 12795057 | from django.db import models
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
from django.utils import timezone
from django.urls import reverse
from datetime import timedelta
from danceschool.core.models import (
Instructor, Location, Room, DanceRole, Event, PricingTier,
EventRegistration, Customer, StaffMember
)
from danceschool.core.constants import getConstant
from danceschool.core.mixins import EmailRecipientMixin
from danceschool.core.utils.timezone import ensure_localtime
class InstructorPrivateLessonDetails(models.Model):
instructor = models.OneToOneField(StaffMember, on_delete=models.CASCADE)
defaultPricingTier = models.ForeignKey(
PricingTier, verbose_name=_('Default Pricing Tier'), null=True,
blank=True, on_delete=models.SET_NULL
)
roles = models.ManyToManyField(DanceRole, blank=True)
couples = models.BooleanField(_('Private lessons for couples'), default=True)
smallGroups = models.BooleanField(_('Private lessons for small groups'), default=True)
def __str__(self):
return str(_('Instructor Private lesson details for %s' % self.instructor.fullName))
class Meta:
ordering = ('instructor__lastName', 'instructor__firstName')
verbose_name = _('Instructor private lesson details')
verbose_name_plural = _('Instructors\' private lesson details')
class PrivateLessonEvent(Event):
'''
This is the event object for which an individual registers. The event is created when the user books a lesson.
All of the registration logic is still handled by the core app, and this model inherits all of the fields
associated with other types of events (location, etc.)
'''
pricingTier = models.ForeignKey(
PricingTier, verbose_name=_('Pricing Tier'), null=True, blank=True,
on_delete=models.SET_NULL
)
participants = models.PositiveSmallIntegerField(_('Expected # of Participants'), null=True, blank=True, default=1)
comments = models.TextField(
_('Comments/Notes'), null=True, blank=True, help_text=_('For internal use and recordkeeping.')
)
def getBasePrice(self, **kwargs):
'''
This method overrides the method of the base Event class by
checking the pricingTier associated with this PrivateLessonEvent and getting
the appropriate price for it.
'''
if not self.pricingTier:
return None
return self.pricingTier.getBasePrice(**kwargs) * max(self.numSlots, 1)
def finalizeBooking(self, **kwargs):
notifyStudent = kwargs.get('notifyStudent', True)
notifyTeachers = kwargs.get('notifyTeachers', getConstant('privateLessons__notifyInstructor'))
eventRegistration = kwargs.get('eventRegistration', None)
affectedSlots = self.instructoravailabilityslot_set.all()
affectedSlots.update(
status=InstructorAvailabilitySlot.SlotStatus.booked,
eventRegistration=eventRegistration,
)
if notifyStudent:
# This is the email template used to notify students that their private lesson has been
# successfully scheduled
template = getConstant('privateLessons__lessonBookedEmailTemplate')
if template.defaultFromAddress and template.content:
for customer in self.customers:
customer.email_recipient(
template.subject,
template.content,
send_html=False,
from_address=template.defaultFromAddress,
from_name=template.defaultFromName,
cc=template.defaultCC,
to=customer.email,
lesson=self,
)
if notifyTeachers:
# This is the email template used to notify individuals who run registration
# that they have been compensated
template = getConstant('privateLessons__lessonBookedInstructorEmailTemplate')
if template.defaultFromAddress and template.content:
emailMixin = EmailRecipientMixin()
instructors = [
x.staffMember for x in
self.eventstaffmember_set.exclude(
Q(staffMember__privateEmail__isnull=True) & Q(staffMember__publicEmail__isnull=True)
)
]
for instructor in instructors:
if not instructor.privateEmail and not instructor.publicEmail:
# Without an email address, instructor cannot be notified
continue
emailMixin.email_recipient(
template.subject,
template.content,
send_html=False,
from_address=template.defaultFromAddress,
from_name=template.defaultFromName,
cc=template.defaultCC,
to=instructor.privateEmail or instructor.publicEmail,
lesson=self,
instructor=instructor,
customers=self.customers,
calendarUrl=reverse('privateCalendar'),
)
@property
def customers(self):
'''
List both any individuals signed up via the registration and payment system,
and any individuals signed up without payment.
'''
return Customer.objects.filter(
Q(privatelessoncustomer__lesson=self) |
Q(registration__eventregistration__event=self)
).distinct()
customers.fget.short_description = _('Customers')
@property
def numSlots(self):
''' Used for various pricing discounts related things '''
return self.instructoravailabilityslot_set.count()
@property
def discountPointsMultiplier(self):
'''
If installed, the discounts app looks for this property to determine
how many points this lesson is worth toward a discount. Since private
lesson points are based on the number of slots booked, this just returns
the number of slots associated with this event (or 1).
'''
return max(self.numSlots, 1)
def nameAndDate(self, withDate=True):
teacherNames = ' and '.join([x.staffMember.fullName for x in self.eventstaffmember_set.all()])
if self.customers:
customerNames = ' ' + ' and '.join([x.fullName for x in self.customers])
elif self.eventregistration_set.all():
names = ' and '.join([x.registration.fullName for x in self.eventregistration_set.all()])
customerNames = ' ' + names if names else ''
else:
customerNames = ''
if not teacherNames and not customerNames and not withDate:
return _('Private Lesson')
return _('Private Lesson: %s%s%s%s' % (
teacherNames,
_(' for ') if teacherNames and customerNames else '',
customerNames,
(
(', ' if (teacherNames or customerNames) else '') +
self.startTime.strftime('%Y-%m-%d')
) if withDate else ''
))
@property
def name(self):
return self.nameAndDate(withDate=True)
def save(self, *args, **kwargs):
''' Set registration status to hidden if it is not specified otherwise '''
if not self.status:
self.status == Event.RegStatus.hidden
super().save(*args, **kwargs)
def __str__(self):
return str(self.name)
class Meta:
permissions = (
('view_others_lessons', _('Can view scheduled private lessons for all instructors')),
)
verbose_name = _('Private lesson')
verbose_name_plural = _('Private lessons')
class PrivateLessonCustomer(models.Model):
'''
For private lessons that go through registration and payment, the customers
are the individuals who are registered. For private lessons that are booked
without payment, this just provides a record that they signed up for
the lesson.
'''
customer = models.ForeignKey(
Customer, verbose_name=_('Customer'), on_delete=models.CASCADE
)
lesson = models.ForeignKey(
PrivateLessonEvent, verbose_name=_('Lesson'), on_delete=models.CASCADE
)
def __str__(self):
return str(_('Private lesson customer: %s for lesson #%s' % (self.customer.fullName, self.lesson.id)))
class Meta:
unique_together = ('customer', 'lesson')
verbose_name = _('Private lesson customer')
verbose_name_plural = _('Private lesson customers')
class InstructorAvailabilitySlot(models.Model):
class SlotStatus(models.TextChoices):
available = ('A', _('Available'))
booked = ('B', _('Booked'))
tentative = ('T', _('Tentative Booking'))
unavailable = ('U', _('Unavailable'))
instructor = models.ForeignKey(Instructor, verbose_name=_('Instructor'), on_delete=models.CASCADE)
pricingTier = models.ForeignKey(
PricingTier, verbose_name=_('Pricing Tier'), null=True, blank=True, on_delete=models.SET_NULL
)
startTime = models.DateTimeField(_('Start time'))
duration = models.PositiveSmallIntegerField(_('Slot duration (minutes)'), default=30)
location = models.ForeignKey(
Location, verbose_name=_('Location'), null=True, blank=True, on_delete=models.SET_NULL,
)
room = models.ForeignKey(
Room, verbose_name=_('Room'), null=True, blank=True, on_delete=models.SET_NULL,
)
status = models.CharField(max_length=1, choices=SlotStatus.choices, default=SlotStatus.available)
# We need both a link to the registrations and a link to the event because
# in the event that an expired (temporary) Registration is deleted, we still want to
# be able to identify the Event that was created for this private lesson.
lessonEvent = models.ForeignKey(
PrivateLessonEvent, verbose_name=_('Scheduled lesson'), null=True, blank=True,
on_delete=models.SET_NULL,
)
eventRegistration = models.ForeignKey(
EventRegistration, verbose_name=_('event registration'),
null=True, blank=True, on_delete=models.SET_NULL, related_name='privateLessonSlots'
)
creationDate = models.DateTimeField(auto_now_add=True)
modifiedDate = models.DateTimeField(auto_now=True)
@property
def availableDurations(self):
'''
A lesson can always be booked for the length of a single slot, but this method
checks if multiple slots are available. This method requires that slots are
non-overlapping, which needs to be enforced on slot save.
'''
potential_slots = InstructorAvailabilitySlot.objects.filter(
instructor=self.instructor,
location=self.location,
room=self.room,
pricingTier=self.pricingTier,
startTime__gte=self.startTime,
startTime__lte=self.startTime + timedelta(minutes=getConstant('privateLessons__maximumLessonLength')),
).exclude(id=self.id).order_by('startTime')
duration_list = [self.duration, ]
last_start = self.startTime
last_duration = self.duration
max_duration = self.duration
for slot in potential_slots:
if max_duration + slot.duration > getConstant('privateLessons__maximumLessonLength'):
break
if (
slot.startTime == last_start + timedelta(minutes=last_duration) and
slot.isAvailable
):
duration_list.append(max_duration + slot.duration)
last_start = slot.startTime
last_duration = slot.duration
max_duration += slot.duration
return duration_list
@property
def availableRoles(self):
'''
Some instructors only offer private lessons for certain roles, so we should only allow booking
for the roles that have been selected for the instructor.
'''
if not hasattr(self.instructor, 'instructorprivatelessondetails'):
return []
return [
[x.id, x.name] for x in
self.instructor.instructorprivatelessondetails.roles.all()
]
def checkIfAvailable(self, dateTime=timezone.now()):
'''
Available slots are available, but also tentative slots that have been held as tentative
past their expiration date
'''
return (
self.startTime >= dateTime + timedelta(days=getConstant('privateLessons__closeBookingDays')) and
self.startTime <= dateTime + timedelta(days=getConstant('privateLessons__openBookingDays')) and not
self.eventRegistration and (
self.status == self.SlotStatus.available or (
self.status == self.SlotStatus.tentative and
getattr(
getattr(
getattr(self.eventRegistration, 'invoiceItem', None),
'invoice', None
),
'expirationDate',
timezone.now()
) <= timezone.now()
)
)
)
# isAvailable indicates if a slot is currently available
isAvailable = property(fget=checkIfAvailable)
isAvailable.fget.short_description = _('Available')
@property
def name(self):
return _('%s: %s at %s') % (
self.instructor.fullName,
ensure_localtime(self.startTime).strftime('%b %-d, %Y %-I:%M %p'),
self.location
)
def __str__(self):
return str(self.name)
class Meta:
ordering = ('-startTime', 'instructor__lastName', 'instructor__firstName')
verbose_name = _('Private lesson availability slot')
verbose_name_plural = _('Private lesson availability slots')
permissions = (
('edit_own_availability', _('Can edit one\'s own private lesson availability.')),
('edit_others_availability', _('Can edit other instructors\' private lesson availability.')),
)
| 2.03125 | 2 |
src/preprocess/modify_txts.py | Jeevesh8/relational-pt | 0 | 12795058 | for filename in ["test", "train", "valid"]:
with open("subtrees-text-4096/" + filename + ".txt") as f, open(
"subtrees-text-4096-64-comps/" + filename + ".txt", "w+") as g:
for line in f.readlines():
if line.count("<post") <= 64:
g.write(line)
| 2.4375 | 2 |
pyinsar/processing/deformation/__init__.py | MITeaps/pyinsar | 8 | 12795059 | <filename>pyinsar/processing/deformation/__init__.py
__all__ = ["elastic_halfspace", "inversion"] | 0.972656 | 1 |
Code/passiveRandomLearners.py | prajwalppv/Active-Learning-Uncertainity-sampling | 2 | 12795060 | <gh_stars>1-10
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
import matplotlib.patches as mpatches
import random
def compute_loss(model,testData,labels):
'''
This function computes the loss of the model at a given iteration. Loss here is defined as the fraction of points misclassified by the model.
Inputs:
1) model - Trained model
2) testData - test input features
3) labels - True labels for the given test data
Outputs:
1) loss - fraction of wrongly classified points
'''
predicted_labels = model.predict(testData)
count = 0
for predicted,true in zip(predicted_labels,labels):
if predicted != true:
count+=1
loss = count/len(labels)
if(loss>1):
loss = 1
return loss
def learner(type,mode,base="gaussianNB"):
'''
This function trains a passive or random learner based on the given input parameters.
Inputs:
1) type - "passive" learner or "random" learner
2) mode - Type of dataset "EASY", "MODERATE" or "DIFFICULT"
3) base - Base learner to use - "gaussianNB" or "randomForest" or "svm"
Outputs:
[losses,accuracies] where,
losses - holds the loss of the model at each iteration
accuracies - holds the accuracies of the model at each iteration
'''
sns.set()
losses = []
accuracies = []
svm_loss = []
rf_loss = []
nb_loss = []
svm_accuracy = []
rf_accuracy = []
nb_accuracy = []
x = []
train_file = mode+ "_TRAIN.csv"
test_file = mode + "_TEST.csv"
blinded_file = mode + "_BLINDED.csv"
train_data = pd.read_csv(train_file)
#If type is passive, Initialize a random starting point and choose the next 2500 consecutive points
if type == "passive":
start_index = random.randint(0,1500)
end_index = start_index + 2500
train_data = train_data.iloc[start_index:end_index,:]
#If type is random, uniformly sample 2500 points from the dataset
if type == "random":
train_data = train_data.sample(frac=1).reset_index(drop=True)
train_data = train_data.iloc[:2500,:]
test_data = pd.read_csv(test_file)
#Extract input features and labels based on type of dataset
if mode != "DIFFICULT":
test_features = test_data.iloc[:,:26]
test_labels = test_data.iloc[:,-1]
else:
test_features = test_data.iloc[:,:52]
test_labels = test_data.iloc[:,-1]
print("-----------------",type," ",base,": ", mode,"-----------------")
minb = 250
#batch_size defines how many input samples we process at a time
batch_size = 250
iter = int(2500/batch_size)
for i in range(iter):
if mode !="DIFFICULT":
train_features = train_data.iloc[:batch_size,:26]
train_labels = train_data.iloc[:batch_size,-1]
else:
train_features = train_data.iloc[:batch_size,:52]
train_labels = train_data.iloc[:batch_size,-1]
print(type , " learner ----> Iteration ",i+1 ," out of ", iter )
#Train appropriate model based on "base" parameter
if base == "svm":
#Using multi-class SVM
svm = LinearSVC(multi_class='ovr')
svm_model = svm.fit(train_features,train_labels)
svm_score = svm_model.score(test_features,test_labels)
svm_l = compute_loss(svm_model,test_features,test_labels)
svm_accuracy.append(svm_score)
svm_loss.append(svm_l)
elif base == "randomForest":
#Using decision trees
rf = RandomForestClassifier(n_estimators=100,criterion="gini")
rf_model = rf.fit(train_features,train_labels)
rf_l = compute_loss(rf_model,test_features,test_labels)
rf_score = 1 - rf_l
rf_accuracy.append(rf_score)
rf_loss.append(rf_l)
else:
#Using Gaussian Naive Bayes classifier
nb = GaussianNB()
nb_model = nb.fit(train_features,train_labels)
nb_l = compute_loss(nb_model,test_features,test_labels)
nb_score = 1 - nb_l
nb_accuracy.append(nb_score)
nb_loss.append(nb_l)
x.append(batch_size)
batch_size += minb
losses.append(x)
accuracies.append(x)
if(base == "svm"):
losses.append(svm_loss)
accuracies.append(svm_accuracy)
if base == "randomForest":
losses.append(rf_loss)
accuracies.append(rf_accuracy)
else:
losses.append(nb_loss)
accuracies.append(nb_accuracy)
return ([losses,accuracies])
| 2.96875 | 3 |
poppurri/currency/tests/test_templatetags.py | ariel17/poppurri | 0 | 12795061 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Description: Templatetags test units.
"""
__author__ = "<NAME> (<EMAIL>)"
from decimal import Decimal
from django.test import TestCase
from currency.models import Currency
from currency.templatetags.to_currency import to_currency
class ToCurrencyTestCase(TestCase):
"""
Test unit for ```to_currency``` template tag.
"""
def setUp(self):
super(ToCurrencyTestCase, self).setUp()
def tearDown(self):
super(ToCurrencyTestCase, self).tearDown()
def test_to_currency(self):
"""
Tests a successful currency conversion when the scale exists in the model.
"""
currency = Currency.objects.get(code='ARS')
self.assertEquals(Decimal('13'),
to_currency(Decimal('1.55'), currency))
currency = Currency.objects.get(code='USD')
self.assertEquals(Decimal('2.0'),
to_currency(Decimal('1.55'), currency))
| 2.78125 | 3 |
src/metrics/psnr_metric.py | ryanwongsa/image-inpainting | 0 | 12795062 | <reponame>ryanwongsa/image-inpainting
from ignite.metrics import Metric
from ignite.exceptions import NotComputableError
# These decorators helps with distributed settings
from ignite.metrics.metric import sync_all_reduce, reinit__is_reduced
import torch
import torch.nn as nn
from math import log10
class PSNR_Metric(Metric):
def __init__(self, output_transform=lambda x: x, device=None):
self._psnr_values = None
self._num_examples = None
self.criterion = nn.MSELoss()
super(PSNR_Metric, self).__init__(output_transform=output_transform, device=device)
@reinit__is_reduced
def reset(self):
self._psnr_values = 0
self._num_examples = 0
super(PSNR_Metric, self).reset()
@reinit__is_reduced
def update(self, output):
y_pred, y_true = output
mse = self.criterion(y_pred, y_true)
psnr = 10 * log10(1 / mse.item())
self._psnr_values += psnr
self._num_examples += y_true.shape[0]
@sync_all_reduce("_num_examples", "_psnr_values")
def compute(self):
if self._num_examples == 0:
raise NotComputableError('PSNR_Metric must have at least one example before it can be computed.')
return self._psnr_values / self._num_examples | 2.25 | 2 |
atoms_simulator/bin/__init__.py | Jorewin/atoms_simulator | 0 | 12795063 | import math
import click
import os.path
import shutil
import atoms_simulator
import numpy
import matplotlib.pyplot as plt
def get_project_path():
return os.path.dirname(atoms_simulator.__file__)
def get_path(path):
i = 1
while True:
if not os.path.lexists(f"{path}{i}"):
return f"{path}{i}"
i += 1
@click.group()
def ats():
"""Allows to perform detailed tests using atoms_simulator module."""
pass
@ats.command()
def init():
"""Creates a settings_ats.toml file in the current directory."""
if not os.path.isfile("settings_ats.toml"):
source = os.path.join(get_project_path(), "assets/settings_source.toml")
target = os.path.join(os.getcwd(), "settings_ats.toml")
shutil.copy(source, target)
click.echo("Settings file generated successfully.")
else:
click.echo("Settings file already exists. Please delete it in order to generate a new configuration file.")
@ats.command()
@click.option("-g", "--graphics", "graphics", help="Turn on pygame simulation", is_flag=True)
@click.option("--no-save", "no_save", help="Disable saving the results of the test.", is_flag=True)
def test(graphics, no_save):
"""Performs a series of tests based on the data in the settings_ats.toml file."""
settings_ats = atoms_simulator.Settings("settings_ats.toml")
if not settings_ats.load():
click.echo("No settings file detected. Generate the file first.")
return
if settings_ats["N_min"] is None:
click.echo("The settings file is corrupted, please generate a new settings file.")
return
if settings_ats["N_step"] is None:
click.echo("The settings file is corrupted, please generate a new settings file.")
return
if settings_ats["N_number"] is None:
click.echo("The settings file is corrupted, please generate a new settings file.")
return
if settings_ats["R"] is None:
click.echo("The settings file is corrupted, please generate a new settings file.")
return
click.echo("Starting simulation...")
n_stop = settings_ats["N_min"] + settings_ats["N_step"] * (settings_ats["N_number"] - 1)
# size = max([settings_ats['h'], settings_ats['w'], math.ceil((4 * (n_stop + 1)) ** 0.5)])
# settings_ats['h'] = size
# settings_ats['w'] = size
test_cases = [
[i for _ in range(settings_ats['R'])] for i in range(settings_ats["N_min"], n_stop + 1, settings_ats["N_step"])
]
bounce = numpy.empty((len(test_cases), settings_ats['R']), dtype=int)
bounce_results = numpy.empty(len(test_cases), dtype=int)
cop = numpy.empty((len(test_cases), settings_ats['R']), dtype=float)
cop_results = numpy.empty(len(test_cases), dtype=float)
settings_ats.new('N', settings_ats["N_min"])
with click.progressbar(
range(len(test_cases) * settings_ats['R'] - 1, -1, -1), label="Performing simulations:", show_eta=False
) as progress:
for i in progress:
settings_ats['N'] = test_cases[i // settings_ats['R']][i % settings_ats['R']]
try:
bounce[i // settings_ats['R']][i % settings_ats['R']], \
cop[i // settings_ats['R']][i % settings_ats['R']] = atoms_simulator.simulate(settings_ats, graphics)
except ValueError as error:
click.echo(f"\n{error} Please generate a new settings file.")
return
if i % settings_ats['R'] == 0:
bounce_results[i // settings_ats['R']] = int(bounce[i // settings_ats['R']].mean())
cop_results[i // settings_ats['R']] = cop[i // settings_ats['R']].mean()
if not no_save:
if not os.path.isdir(results_path := os.path.join(os.getcwd(), "ats_results")):
os.mkdir(results_path)
target_path = get_path(os.path.join(results_path, "data_batch"))
os.mkdir(target_path)
numpy.savetxt(os.path.join(target_path, "bounces.csv"), bounce_results)
numpy.savetxt(os.path.join(target_path, "change_of_position.csv"), cop_results)
settings_ats.save(target=os.path.join(target_path, "used.toml"))
@ats.command()
@click.option("-b", "--data_batch", "data_batch", prompt=True, help="Name of the previously generated data batch.")
def plot(data_batch):
"""Plots the previously generated data."""
if not os.path.isdir(results_path := os.path.join(os.getcwd(), "ats_results")):
click.echo(
"The ats_results catalog doesn't exist within the current working directory. Generate some data first."
)
return
if not os.path.isdir(path := os.path.join(os.getcwd(), "ats_results", data_batch)):
click.echo(
f"The ats_results/{data_batch} catalog doesn't exist within the current working directory."
)
return
target_path = get_path(os.path.join(results_path, "figures_batch"))
os.mkdir(target_path)
settings_ats = atoms_simulator.Settings(os.path.join(path, "used.toml"))
if not (settings_ats.load() and os.path.isfile(os.path.join(path, "bounces.csv"))
and os.path.isfile(os.path.join(path, "change_of_position.csv"))):
click.echo("This data batch is corrupted.")
return
n_stop = settings_ats["N_min"] + settings_ats["N_step"] * (settings_ats["N_number"] - 1)
x = numpy.arange(settings_ats["N_min"], n_stop + 1, settings_ats["N_step"])
bounce = numpy.loadtxt(os.path.join(path, "bounces.csv"))
plt.plot(x, bounce, marker='o')
plt.title(f"Zależność liczby zderzeń od ilości atomów, M = {settings_ats['M']}")
plt.xlabel("Liczba atomów w pojemniku")
plt.ylabel("Liczba odbić atomu czerownego")
plt.grid(True)
plt.savefig(os.path.join(target_path, "bounces.png"))
plt.clf()
cop = numpy.loadtxt(os.path.join(path, "change_of_position.csv"))
plt.plot(x, cop, marker='o')
plt.title(f"Zależność średniej drogi swobodnej od ilości atomów, M = {settings_ats['M']}")
plt.xlabel("Liczba atomów w pojemniku")
plt.ylabel("Średnia droga swobodna atomu czerwonego")
plt.grid(True)
plt.savefig(os.path.join(target_path, "change_of_position.png"))
plt.clf()
settings_ats.save(os.path.join(target_path, "used.toml"))
click.echo("Figures created successfullly.")
| 2.75 | 3 |
Workshop2/6.py | Camiloasc1/OptimizationUNAL | 0 | 12795064 | import numpy as np
from scipy.optimize import linprog
c = [10, 3.8, 1.5]
A_ub = [
[1, 1, 1],
[-1, -1, -1],
[-1, -1. / 3., -1. / 6.]]
b_ub = [18, -12, -9]
res = linprog(c, A_ub=A_ub, b_ub=b_ub)
print(res)
| 2.484375 | 2 |
src/data_management/cleaning.py | timmens/tsa | 2 | 12795065 | <gh_stars>1-10
from pathlib import Path
import numpy as np
import pandas as pd
import pyarrow.parquet as pq
from bld.project_paths import project_paths_join as ppj
from src.shared import to_parquet_in_date_chunks
UNNECESSARY_COLUMNS = ["formatted_date", "geo"]
def load_data():
paths = list(Path(ppj("IN_DATA", "corona_data")).glob("2020*/**/*.parquet"))
dfs = []
for path in paths:
table = pq.read_table(path)
df = table.to_pandas()
# Add state and city from path.
df["state"] = path.parents[3].name
df["city"] = path.parents[2].name
dfs.append(df)
df = pd.concat(dfs, sort=False)
return df
def minimal_preprocessing(df):
replace_to = {None: np.nan, "": np.nan}
df = df.replace(replace_to)
df = df.drop_duplicates(subset="id")
df = df.drop(columns=UNNECESSARY_COLUMNS)
df.id = df.id.astype(np.uint64)
df = df.set_index("id")
return df
def main():
df = load_data()
df = minimal_preprocessing(df)
to_parquet_in_date_chunks(df, ppj("OUT_DATA", "tweets-cleaned"))
if __name__ == "__main__":
main()
| 2.328125 | 2 |
depoco/plot_results.py | shengxihua/deep-point-map-compression | 59 | 12795066 | <filename>depoco/plot_results.py<gh_stars>10-100
import numpy as np
import matplotlib.pyplot as plt
import glob
import argparse
from ruamel import yaml
import depoco.utils.point_cloud_utils as pcu
def plotResults(files, x_key, y_key, ax, draw_line=False, label=None, set_lim=True):
x = []
y = []
for f in files:
eval_dict = pcu.load_obj(f)
if((x_key in eval_dict.keys()) & (y_key in eval_dict.keys())):
for v in eval_dict.values():
v = np.array(v)
if not draw_line:
ax.plot(np.mean(eval_dict[x_key]),
np.mean(eval_dict[y_key]), '.')
ax.text(np.mean(eval_dict[x_key]), np.mean(
eval_dict[y_key]), f.split('/')[-1][:-4])
x.append(np.mean(eval_dict[x_key]))
y.append(np.mean(eval_dict[y_key]))
if draw_line:
line, = ax.plot(x, y, '-x', label=label)
# line.set_label(label)
ax.set_xlabel(x_key)
ax.set_ylabel(y_key)
if set_lim:
ax.set_xlim(0,None)
ax.set_ylim(0,None)
# ax.grid()
def genPlots(files, f, ax, draw_line=False, label=None, x_key='memory'):
# print('shape',ax[0,0])
plotResults(files, x_key=x_key, y_key='chamfer_dist_abs',
ax=ax[0], draw_line=draw_line, label=label)
plotResults(files, x_key=x_key, y_key='chamfer_dist_plane',
ax=ax[1], draw_line=draw_line, label=label)
plotResults(files, x_key=x_key, y_key='iou',
ax=ax[2], draw_line=draw_line, label=label)
if __name__ == "__main__":
####### radius fct ##############
path = 'experiments/results/kitti/'
files = sorted(glob.glob(path+'*.pkl'))
f, ax = plt.subplots(1, 3)
f.suptitle('Radius FPS')
genPlots(files, f, ax, draw_line=True, label='our', x_key='bpp')
for a in ax:
a.grid()
# a.set_ylim([0,None])
a.legend()
plt.show()
| 2.328125 | 2 |
code_prefixes/warp2code-prefixes.py | ipsilon/eof | 0 | 12795067 | #!/usr/bin/env python
# Processes OpenEthereum warp snapshot and collects 4-byte code prefixes of all accounts.
#
# openethereum --chain=kovan snapshot --snapshot-threads=8 snapshot.warp
# warp2code-prefixes.py snapshot.warp
import sys
import rlp
import snappy
import collections
prefix_map = collections.defaultdict(int)
filename = sys.argv[1]
print(f"{filename=}")
with open(filename, 'rb') as f:
f.seek(0,2)
size = f.tell()
print(f"{size=}")
f.seek(-8,2)
manifest_end = f.tell()
manifest_off_bytes = f.read(8)
print(f"{manifest_off_bytes=}")
manifest_off = int.from_bytes(manifest_off_bytes, 'little')
print(f"{manifest_off=}")
f.seek(manifest_off,0)
manifest_bytes = f.read(manifest_end-manifest_off)
manifest = rlp.decode(manifest_bytes)
manifest_ver = int.from_bytes(manifest[0], 'big')
block_number = int.from_bytes(manifest[4], 'big')
block_hash = manifest[5]
print(f"{manifest_ver=}")
print(f"{block_number=}")
print(f"block_hash={block_hash.hex()}")
state_chunks = manifest[1]
num_chunks=len(state_chunks)
print(f"{num_chunks=}")
for i in range(num_chunks):
info = state_chunks[i]
chunk_len = int.from_bytes(info[1], 'big')
chunk_pos = int.from_bytes(info[2], 'big')
print(f"{i}/{num_chunks}: {chunk_pos=} {chunk_len=}", end='')
f.seek(chunk_pos)
chunk_compressed = f.read(chunk_len)
chunk_bytes = snappy.uncompress(chunk_compressed)
chunk = rlp.decode(chunk_bytes)
print(f" uncompressed_len={len(chunk_bytes)} num_accounts={len(chunk)}", flush=True)
for entry in chunk:
acc = entry[1]
has_code = acc[2] == b'\x01'
if has_code:
code_prefix = bytes(acc[3][:4])
prefix_map[code_prefix] += 1
for k,v in prefix_map.items():
print(f"{k.hex()} : {v}")
| 2.171875 | 2 |
worker.py | lordmauve/python-now | 1 | 12795068 | """Web Worker script."""
# In web workers, "window" is replaced by "self".
from browser import bind, self
import contextlib
import traceback
class OutputWriter:
def __init__(self, id, window):
self.id = id
self.window = window
self.buf = []
def write(self, text):
"""Write output to the screen."""
self.buf.append(text)
self.window.send([self.id, 'output', text])
def getvalue(self):
"""Get everything that was printed."""
return ''.join(self.buf)
@bind(self, "message")
def on_message(event):
"""Handle a message sent by the main script.
evt.data is the message body.
"""
msg = event.data
try:
id = msg['id']
except KeyError:
return
source = msg['source']
mode = msg['mode']
buff = OutputWriter(id, self)
with contextlib.redirect_stdout(buff), contextlib.redirect_stderr(buff):
self.send([id, 'ready', 0])
try:
code = compile(source, filename='python-now', mode=mode)
namespace = {
'__name__': '__main__',
'__filename__': '<python-now>'
}
result = exec(code, namespace)
except BaseException:
self.send([id, 'err', traceback.format_exc()])
else:
if result is not None:
print(repr(result))
# If we have exercises, run them as tests
if msg['exercises']:
if mode == 'exec':
test_ns = namespace.copy()
else:
test_ns = {}
test_ns.update(
source=source,
result=result,
output=buff.getvalue(),
)
exec(msg['exercises'], test_ns)
tests = []
for name, test in test_ns.items():
if name.startswith('test_') and callable(test):
tests.append(test)
for test_id, test in enumerate(tests):
try:
test()
except BaseException:
err = traceback.format_exc() + repr(test_ns)
else:
err = None
self.send([id, 'ex_result', (test_id, err)])
| 2.734375 | 3 |
NitroFE/time_based_features/indicator_features/_MovingAverageConvergenceDivergence.py | NITRO-AI/NitroFE | 81 | 12795069 | import numpy as np
import pandas as pd
from typing import Union, Callable
from pandas.core.frame import DataFrame
from NitroFE.time_based_features.indicator_features._AbsolutePriceOscillator import (
AbsolutePriceOscillator,
)
from NitroFE.time_based_features.moving_average_features.moving_average_features import (
ExponentialMovingFeature,
)
class MovingAverageConvergenceDivergence:
"""
Provided dataframe must be in ascending order.
"""
def __init__(
self,
fast_period: int = 26,
slow_period: int = 12,
smoothing_period: int = 9,
fast_operation: str = "mean",
slow_operation: str = "mean",
smoothing_operation: str = "mean",
initialize_using_operation: bool = False,
initialize_span: int = None,
min_periods: int = 0,
ignore_na: bool = False,
axis: int = 0,
times: str = None,
return_histogram=False,
):
"""
Parameters
----------
fast_period : int, optional
specify decay in terms of span, for the fast moving feature, by default 12
slow_period : int, optional
specify decay in terms of span, for the slow moving feature, by default 26
smoothing_period : int, optional
specify decay in terms of span, for the smoothing moving feature, by default 9
fast_operation : str, {'mean','var','std'}
operation to be performed for the fast moving feature, by default 'mean'
slow_operation : str, {'mean','var','std'}
operation to be performed for the slow moving feature, by default 'mean'
smoothing_operation : str, optional
operation to be performed for the smoothing moving feature, by default 'mean'
initialize_using_operation : bool, optional
If True, then specified 'operation' is performed on the first 'initialize_span' values, and then the exponential moving average is calculated, by default False
initialize_span : int, optional
the span over which 'operation' would be performed for initialization, by default None
min_periods : int, optional
Minimum number of observations in window required to have a value (otherwise result is NA), by default 0
ignore_na : bool, optional
Ignore missing values when calculating weights; specify True to reproduce pre-0.15.0 behavior, by default False
axis : int, optional
The axis to use. The value 0 identifies the rows, and 1 identifies the columns, by default 0
times : str, optional
Times corresponding to the observations. Must be monotonically increasing and datetime64[ns] dtype, by default None
"""
self.span_fast = fast_period
self.span_slow = slow_period
self.min_periods = min_periods
self.ignore_na = ignore_na
self.axis = axis
self.times = times
self.fast_operation = fast_operation
self.slow_operation = slow_operation
self.smoothing_operation = smoothing_operation
self.smoothing_period = smoothing_period
self.return_histogram = return_histogram
self.initialize_using_operation = initialize_using_operation
self.initialize_span = initialize_span
def fit(
self,
dataframe: Union[pd.DataFrame, pd.Series],
first_fit: bool = True,
):
"""
For your training/initial fit phase (very first fit) use fit_first=True, and for any production/test implementation pass fit_first=False
Returns --> Smoothed signal line , macd histogram
Parameters
----------
dataframe : Union[pd.DataFrame, pd.Series]
dataframe containing column values to create feature over
first_fit : bool, optional
Indicator features require past values for calculation.
Use True, when calculating for training data (very first fit)
Use False, when calculating for subsequent testing/production data { in which case the values, which
were saved during the last phase, will be utilized for calculation }, by default True
"""
if first_fit:
self._raw_macd_object = AbsolutePriceOscillator(
fast_period=self.span_fast,
slow_period=self.span_slow,
fast_operation=self.fast_operation,
slow_operation=self.slow_operation,
min_periods=self.min_periods,
initialize_using_operation=self.initialize_using_operation,
initialize_span=self.initialize_span,
ignore_na=self.ignore_na,
axis=self.axis,
times=self.times,
)
self._macd_object = ExponentialMovingFeature(
span=self.smoothing_period,
ignore_na=self.ignore_na,
axis=self.axis,
times=self.times,
operation=self.smoothing_operation,
initialize_using_operation=self.initialize_using_operation,
initialize_span=self.initialize_span,
)
raw_macd = self._raw_macd_object.fit(dataframe, first_fit=first_fit)
macd = self._macd_object.fit(dataframe=raw_macd, first_fit=first_fit)
return raw_macd - macd if self.return_histogram else macd
| 2.765625 | 3 |
src/datamodules/archery_bowling_datamodule.py | V1ct0reo/lightning-fast-hydra | 0 | 12795070 | from pathlib import Path
from typing import Union, List, Dict, Optional
import pandas as pd
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from torchvision.transforms import transforms
from src.utils.utils import get_logger
class ArcheryBowlingDataModule(pl.LightningDataModule):
def __init__(self,
data_root: str,
test: bool = False,
val_ratio: float = None,
batch_size: int = 1,
window_size: int = 10,
normalisation: str = 'WithoutNormalization',
szenario: str = 'Archery',
features: List[str] = ['CenterEyeAnchor_pos_X', 'LeftVirtualHand_pos_X', 'RightVirtualHand_pos_X'],
identifier_col: str = 'seq_id',
label_col: str = 'ParticipantID',
sorting_cols: List[str] = None,
num_workers: int = 1,
shuffle_windows=False
):
super(ArcheryBowlingDataModule, self).__init__()
self.num_workers = num_workers
self.logger = get_logger(name='A-B-DataModule')
self.szenario = szenario
self.features = features
self.identifier_col = identifier_col if identifier_col is not None else 'seq_id'
self.label_col = label_col if label_col is not None else 'ParticipantID'
self.sorting_cols = sorting_cols
self.normalisation = normalisation
self.window_size = window_size
self.batch_size = batch_size
self.val_ratio = val_ratio
self.separate = test
self.data_root = Path(data_root) # Path is just more convenient
self.transform = transforms.Compose([
transforms.ToTensor(),
])
self.shuffle_windows = shuffle_windows
self.num_features = len(features)
self.dims = (self.window_size, self.num_features)
self.train_dataset, self.val_dataset, self.test_dataset = None, None, None
self.logger.info('__init__ done.')
def setup(self, stage: Optional[str] = None) -> None:
# do I want to load all data at once, and slice afterwards?
# slice all modulo matching repetitions to validation
# keep the remaining as train
# drop unused columns
# initiate DatasetObjects and return them
# return ArcheryBowlingDataset(None, 1, 1), ArcheryBowlingDataset(None, 1, 1), ArcheryBowlingDataset(None, 1, 1)
if stage in (None, 'fit'): # TODO no validation set throws a Nonetype Error on val data loader...
self.logger.info(f'stage:{stage}. creating Dataset...')
# regexing or sorting the file path seems to be a pain. Therefore I'll load all relevant files (normalized + session1)
train_val_files = self.get_file_list(session=1)
train_val_files = list(train_val_files)
self.logger.info(f'found {len(train_val_files)} files.')
train_val_df = ArcheryBowlingDataModule.load_dataframe_from_multiple_files(train_val_files)
# TODO refactor this ifelse structure to a neat structure
if self.val_ratio and self.val_ratio > 0: # not none and > 0
modulo = int(1 / self.val_ratio)
if modulo > 12 or modulo < 2:
self.logger.info(
f'validation split ratio({self.val_ratio}) was set, '
f'but would result in either all or no data being available for training. '
f'Therefore all Data will be used as train-set!')
from src.datamodules.datasets.archery_bowling_dataset import ArcheryBowlingDataset
self.train_dataset = ArcheryBowlingDataset.create_from_dataframe(train_val_df, self.window_size,
self.batch_size, name='TRAIN',
feature_cols=self.features,
identifier_col=self.identifier_col,
label_col=self.label_col,
shuffle_windows=self.shuffle_windows,
sorting_cols=self.sorting_cols
)
else:
val_df = train_val_df[train_val_df['repetition'] % modulo == 0]
from src.datamodules.datasets.archery_bowling_dataset import ArcheryBowlingDataset
self.val_dataset = ArcheryBowlingDataset.create_from_dataframe(val_df, self.window_size,
self.batch_size, name='VAL',
feature_cols=self.features,
identifier_col=self.identifier_col,
label_col=self.label_col,
shuffle_windows=self.shuffle_windows,
sorting_cols=self.sorting_cols
)
del val_df
train_df = train_val_df[train_val_df['repetition'] % modulo != 0]
del train_val_df
self.train_dataset = ArcheryBowlingDataset.create_from_dataframe(train_df, self.window_size,
self.batch_size, name='TRAIN',
feature_cols=self.features,
identifier_col=self.identifier_col,
label_col=self.label_col,
shuffle_windows=self.shuffle_windows,
sorting_cols=self.sorting_cols
)
del train_df
else:
from src.datamodules.datasets.archery_bowling_dataset import ArcheryBowlingDataset
self.train_dataset = ArcheryBowlingDataset.create_from_dataframe(train_val_df, self.window_size,
self.batch_size, name='TRAIN',
feature_cols=self.features,
identifier_col=self.identifier_col,
label_col=self.label_col,
shuffle_windows=self.shuffle_windows,
sorting_cols=self.sorting_cols
)
self.val_dataset = None
self.logger.info('train/val Data initialized!')
if stage in (None, 'test'):
# slice all 'session2' entries for test data
# create a list of paths for test data files (basically everything with session 2
self.logger.info(f'stage:{stage}. creating Dataset...')
test_files = self.get_file_list(session=2)
test_files = (list(test_files))
self.logger.info(f'found {len(test_files)} test-files.')
# create test Dataset
from src.datamodules.datasets.archery_bowling_dataset import ArcheryBowlingDataset
test_df = ArcheryBowlingDataModule.load_dataframe_from_multiple_files(test_files)
computed_batch_size = self.batch_size
rest = len(test_df) % self.batch_size
computed_batch_size -= rest
self.test_dataset = ArcheryBowlingDataset.create_from_dataframe(test_df, self.window_size, computed_batch_size,
name='TEST', feature_cols=self.features,
identifier_col=self.identifier_col,
label_col=self.label_col,
shuffle_windows=False,
sorting_cols=self.sorting_cols
)
self.logger.info('test Data initialized!')
self.logger.info(f'Datasets are setup.')
self.logger.info(self)
def get_file_list(self, session=1):
train_val_files = self.data_root.glob(f'{self.szenario}*{self.normalisation}*session{session}*.csv')
return train_val_files
@staticmethod
def load_dataframe_from_multiple_files(file_list: List[Path]):
df_list = []
for i in file_list:
tmp = pd.read_csv(i)
df_list.append(tmp)
return pd.concat(df_list, ignore_index=True)
def _create_info_dict(self):
return {
'train dataset': None if not self.train_dataset else str(self.train_dataset),
'val dataset': None if not self.val_dataset else str(self.val_dataset),
'test dataset': None if not self.test_dataset else str(self.test_dataset),
'dims': self.dims,
'#batches': len(self.test_dataset),
'window size': self.window_size,
'batch size': self.batch_size,
'normalisation name': self.normalisation
}
def train_dataloader(self) -> Union[DataLoader, List[DataLoader], Dict[str, DataLoader]]:
return DataLoader(self.train_dataset, batch_size=None, num_workers=self.num_workers
)
def val_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(self.val_dataset, batch_size=None, num_workers=self.num_workers
)
def test_dataloader(self) -> Union[DataLoader, List[DataLoader]]:
# TODO handle num_workers...
return DataLoader(self.test_dataset, batch_size=None, num_workers=self.num_workers
)
def __repr__(self):
return f"DataModule(train_dataset={self.train_dataset!r}, " \
f"val_dataset={self.val_dataset!r}, " \
f"test_dataset={self.test_dataset!r}, " \
f"dims={self.dims!r}, " \
f"normalisation_name={self.normalisation!r}), " \
f"Szenario={self.szenario})"
def __rich_repr__(self):
yield "train_dataset", self.train_dataset
yield "val_dataset", self.val_dataset
yield "test_dataset", self.test_dataset
yield "dims", self.dims
yield "normalisation_name", self.normalisation
yield "szenario", self.szenario
| 2.40625 | 2 |
commander.py | MauTrib/gnn-en-folie | 0 | 12795071 | <gh_stars>0
import yaml
import toolbox.utils as utils
import os
from models import get_pipeline, get_pl_model, get_torch_model, get_optim_args, is_dummy
from models.base_model import GNN_Abstract_Base_Class
from data import get_test_dataset, get_train_val_datasets
from metrics import setup_metric
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
import argparse
import toolbox.wandb_helper as wbh
def get_config(filename='default_config.yaml') -> dict:
with open(filename, 'r') as f:
config = yaml.safe_load(f)
return config
def get_observer(config: dict):
path = config['observers']['base_dir']
path = os.path.join(os.getcwd(), path)
utils.check_dir(path)
observer = config['observers']['observer']
if observer=='wandb':
logger = WandbLogger(project=f"{config['project']}_{config['problem']}", log_model="all", save_dir=path)
try:
logger.experiment.config.update(config)
except AttributeError as ae:
return None
else:
raise NotImplementedError(f"Observer {observer} not implemented.")
return logger
def load_model(config: dict, path: str, add_metric=True, **add_metric_kwargs) -> GNN_Abstract_Base_Class:
"""
- config : dict. The configuration dictionary (careful, must correspond to the model trying to be loaded). If set to None, will try to download a model from W&B
- path : str. The local path of the Pytorch Lightning experiment, or the id of the run if need to be fetched on W&B
- add_metric: bool. Adds an external metric to the pytorch lightninh module.
- add_metric_kwargs: Arguments passed to the setup_metric function if activated.
"""
if is_dummy(config['arch']['name']):
pl_model = get_pipeline(config)
else:
print(f'Loading base model from {path}... ', end = "")
try:
PL_Model_Class = get_pl_model(config)
pl_model = PL_Model_Class.load_from_checkpoint(path, model=get_torch_model(config), optim_args=get_optim_args(config))
except (FileNotFoundError) as e:
if config['observers']['observer']=='wandb':
print(f"Failed at finding model locally with error : {e}. Trying to use W&B.")
project = f"{config['project']}_{config['problem']}"
wb_config, path = wbh.download_model(project, path)
PL_Model_Class = get_pl_model(config)
pl_model = PL_Model_Class.load_from_checkpoint(path, model=get_torch_model(wb_config), optim_args=get_optim_args(wb_config))
else:
raise e
print('Done.')
if add_metric:
setup_metric(pl_model, config, **add_metric_kwargs)
return pl_model
def get_trainer_config(config: dict, only_test=False) -> dict:
trainer_config = config['train']
accelerator_config = utils.get_accelerator_dict(config['device'])
trainer_config.update(accelerator_config)
if not only_test:
early_stopping = EarlyStopping('lr', verbose=True, mode='max', patience=1+config['train']['max_epochs'], divergence_threshold=config['train']['optim_args']['lr_stop'])
checkpoint_callback = ModelCheckpoint(monitor="val_loss", save_top_k=1, verbose=True)
trainer_config['callbacks'] = [early_stopping, checkpoint_callback]
clean_config = utils.restrict_dict_to_function(pl.Trainer.__init__, trainer_config)
return clean_config
def setup_trainer(config: dict, model: GNN_Abstract_Base_Class, watch=True, only_test=False) -> pl.Trainer:
trainer_config = get_trainer_config(config, only_test=only_test)
if config['observers']['use']:
logger = get_observer(config)
if logger is None:
print("Logger did not load. Could mean an error or that we are not in the zero_ranked experiment.")
else:
if watch: logger.watch(model)
trainer_config['logger'] = logger
trainer = pl.Trainer(**trainer_config)
return trainer
def train(config: dict)->pl.Trainer:
if is_dummy(config['arch']['name']):
print("Dummy architecture, can't train.")
return None
if config['train']['anew']:
pl_model = get_pipeline(config)
setup_metric(pl_model, config)
else:
pl_model = load_model(config, config['train']['start_model'])
trainer = setup_trainer(config, pl_model)
train_dataset, val_dataset = get_train_val_datasets(config)
trainer.fit(pl_model, train_dataset, val_dataset)
return trainer
def test(config: dict, trainer=None, model=None, dataloaders=None, **kwargs) -> pl.Trainer:
if dataloaders is None: dataloaders = get_test_dataset(config)
arg_dict = {'dataloaders': dataloaders,
'verbose':True
}
if trainer is None:
pl_model = model
if pl_model is None: pl_model = load_model(config, config['train']['start_model'], add_metric=False)
trainer = setup_trainer(config, pl_model, **kwargs)
else:
arg_dict['ckpt_path'] = 'best'
pl_model = trainer.model
setup_metric(pl_model, config, istest=True)
arg_dict['model'] = pl_model
trainer.test(**arg_dict)
return trainer
def main():
parser = argparse.ArgumentParser(description='Main file for creating experiments.')
parser.add_argument('command', metavar='c', choices=['train','test'],
help='Command to execute : train or test')
parser.add_argument('--config', default='default_config.yaml', type=str, help='Name of the configuration file.')
args = parser.parse_args()
if args.command=='train':
training=True
default_test = False
elif args.command=='test':
training=False
default_test=True
config = get_config(args.config)
config = utils.clean_config(config)
trainer=None
if training:
trainer = train(config)
if default_test or config['test_enabled']:
test(config, trainer)
if __name__=="__main__":
pl.seed_everything(3787, workers=True)
main()
| 2.03125 | 2 |
bulk_import_rename/main.py | williamjamir/bulk-import-rename | 0 | 12795072 | import sys
import click
from bulk_import_rename.commands.detect_modifications import track_modifications
from bulk_import_rename.commands.rename_import import run_rename
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(version='0.0.1')
def app():
pass
@app.command()
@click.argument('project_path', type=click.Path(exists=True))
@click.option('--origin_branch', default='master', help='Branch to start the evaluation')
@click.option('--work_branch', default=False, help='Name of the branch that has the modifications')
@click.option('--output_file', default='list_output.py', help='Change the name of the output file')
def track(**kwargs):
track_modifications(**kwargs)
@app.command()
@click.argument('project_path', nargs=-1, type=click.Path(exists=True))
@click.argument('moved_imports_file', type=click.Path(exists=True, resolve_path=True))
def rename(**kwargs):
run_rename(**kwargs)
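# Example invocations (illustrative only; the project path and branch name are made up):
#
#   python main.py track /path/to/project --origin_branch master --work_branch refactor
#   python main.py rename /path/to/project list_output.py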
if __name__ == '__main__':
# The sys.argv[1:] is necessary for debug on python2
# Link: https://goo.gl/vp5hfz
app(sys.argv[1:])
| 2.390625 | 2 |
census_data/core/variables.py | cj-on-campus/dabp-summer-reading | 0 | 12795073 | <filename>census_data/core/variables.py<gh_stars>0
import censusdata
import requests
import pandas as pd
census_key = '' # scrubbed for now, TODO: secrets config
def get_counties(state, year):
counties = []
geo = censusdata.geographies(censusdata.censusgeo([('state', state), ('county', '*')]), 'acs5', year)
for key in list(geo):
value = geo[key].params()
county = value[1][1]
counties.append(county)
return counties
def get_data_by_block_group(variables, state, county_list, api_key, variable_names):
data = []
for county in county_list:
        url = 'https://api.census.gov/data/2019/acs/acs5?get=' + variables + '&for=block%20group:*&in=state:' + state + '%20county:' + county + '&key=' + api_key
response = requests.get(url)
if response.status_code == 200:
batch = response.json()[1:]
data.extend(batch)
else:
print('API connection failed')
headers = variable_names + ["state", "county", "tract", "block_group"]
df = pd.DataFrame(data=data, columns=headers)
return df
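# Illustrative usage sketch (kept as a comment so importing this module has no side
# effects). The ACS variable code B01003_001E (total population) and the state FIPS
# code '42' (Pennsylvania) are example assumptions, not values used elsewhere here.
#
#   county_list = get_counties('42', 2019)
#   df = get_data_by_block_group('B01003_001E', '42', county_list, census_key,
#                                ['total_population'])
#   df.to_csv('block_group_population.csv', index=False)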
def get_data_by_tract(variables, state, api_key, variable_names):
data = []
    url = 'https://api.census.gov/data/2019/acs/acs5?get=' + variables + '&for=tract:*&in=state:' + state + '&key=' + api_key
response = requests.get(url)
if response.status_code == 200:
batch = response.json()[1:]
data.extend(batch)
else:
print('API connection failed')
headers = variable_names + ["state", "county", "tract"]
df = pd.DataFrame(data=data, columns=headers)
return df | 3.0625 | 3 |
server/module_stats.py | codingismycraft/pinta | 0 | 12795074 | <filename>server/module_stats.py
"""Exposes a class that holds change statistics for all modules.
Using the git log we are creating a history of changes for each module
and also append to it the number of target dependencies.
"""
import csv
import change_history
import settings
import targets
# Aliases.
settings = settings.settings
Targets = targets.Targets
class ModuleStats:
"""Holds the statistics for a module.
:ivar str _module_name: The name of the module
:ivar list _affected_targets: The affected targets.
:ivar int dependency_count: The number of dependencies.
"""
def __init__(self, module_name, affected_targets, dependency_count,
change_rate, all_changes_count, latest_changes_count,
lifespan_in_days, filepath):
"""Initializer.
:param str module_name: The module name.
:param set affected_targets: The affected targets.
"""
self._module_name = module_name
self._affected_targets = list(affected_targets)
self._dependency_count = dependency_count
self._change_rate = change_rate
self._all_changes_count = all_changes_count
self._latest_changes_count = latest_changes_count
self._lifespan_in_days = lifespan_in_days
self._filepath = filepath
@property
def module_name(self):
return self._module_name
@property
def affected_targets(self):
return len(self._affected_targets)
@property
def dependency_count(self):
return self._dependency_count
@property
def change_rate(self):
return self._change_rate
@property
def all_changes_count(self):
return self._all_changes_count
@property
def latest_changes_count(self):
return self._latest_changes_count
@property
def lifespan_in_days(self):
return self._lifespan_in_days
@property
def filepath(self):
return self._filepath
def __repr__(self):
"""Make debugging easier!"""
return f'{self._module_name}, ' \
f'Affected targets: {len(self._affected_targets)} ' \
f'Dependencies: {self._dependency_count} ' \
f'Change Rate: {self._change_rate} ' \
f'Total Changes: {self._all_changes_count} ' \
f'Latest Changes: {self._latest_changes_count} ' \
f'Lifespan: {self._lifespan_in_days}'
@classmethod
def load_module_stats(cls):
stats = []
stats_per_module, dependency_counts = _module_stats()
history = change_history.load_change_history()
for module_name, affected_targets in stats_per_module.items():
if module_name not in history:
continue
ch = history.get(module_name)
lifespan_in_days = ch.lifespan_in_days if ch else 'n/a'
change_rate = ch.change_rate if ch else 'n/a'
all_changes_count = ch.changes_count if ch else 'n/a'
latest_changes_count = ch.latest_changes_count if ch else 'n/a'
filepath = ch.filepath if ch else 'n/a'
stats.append(
cls(
module_name=module_name,
affected_targets=affected_targets,
dependency_count=dependency_counts[module_name],
change_rate=change_rate,
all_changes_count=all_changes_count,
latest_changes_count=latest_changes_count,
lifespan_in_days=lifespan_in_days,
filepath=filepath
)
)
return stats
def _module_stats():
graph_from_target, graph_to_target = _create_graph()
stats_per_module = {parent: set() for parent in graph_from_target.keys()}
all_targets = Targets()
for target in all_targets.get_all():
        _update_dependencies(
graph_from_target,
target.module_name,
stats_per_module
)
dependency_counts = _count_dependencies(graph_to_target)
return stats_per_module, dependency_counts
def _create_graph():
"""Creates the reversed dependencies graph.
The adjacent edges as the are recorded in the dependencies file represent
an "out" relationship between the imported and the importing modules.
Here we need the reversed dependency meaning the "in" dependency because
the goal is to discover how each module affects each of the targets. This
why we are constructing the graph in the opposite direction meaning from
the second to the first node as it appears in the dependency file.
:return: The "in" dependency graph for the dependencies file.
:rtype: dict
"""
graph_from_target = {}
graph_to_target = {}
with open(settings.dependencies_filename) as file:
for tokens in csv.reader(file):
n1 = tokens[0].strip()
n2 = tokens[1].strip()
if n1 not in graph_from_target:
graph_from_target[n1] = []
if n2 not in graph_from_target:
graph_from_target[n2] = []
graph_from_target[n2].append(n1)
if n1 not in graph_to_target:
graph_to_target[n1] = []
if n2 not in graph_to_target:
graph_to_target[n2] = []
graph_to_target[n1].append(n2)
return graph_from_target, graph_to_target
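# Illustrative example (an assumption about the dependencies file, shown only to make
# the edge directions above concrete). Given the rows
#   a, b
#   b, c
# the reversed ("in") graph collects the first column under the second:
#   graph_from_target == {'a': [], 'b': ['a'], 'c': ['b']}
# while the forward ("out") graph keeps the recorded direction:
#   graph_to_target == {'a': ['b'], 'b': ['c'], 'c': []}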
def _update_dependencies(graph, target, stats_per_module):
stack = [(target, iter(graph[target]))]
stats_per_module[target].add(target)
visited = set()
visited.add(target)
while stack:
parent, iter_to_children = stack[-1]
stats_per_module[parent].add(target)
try:
child = next(iter_to_children)
if child not in visited:
stack.append((child, iter(graph[child])))
visited.add(child)
except StopIteration:
stack.pop()
def _count_dependencies(graph):
"""Assigns the total number of dependencies to each node."""
dependency_counter = {parent: 0 for parent in graph}
for current_node in graph:
stack = [[current_node, iter(graph[current_node])]]
visited = set()
visited.add(current_node)
while stack:
parent, children_iter = stack[-1]
try:
child = next(children_iter)
if child not in visited:
visited.add(child)
dependency_counter[current_node] += 1
stack.append([child, iter(graph[child])])
except StopIteration:
stack.pop()
return dependency_counter
def export_to_csv(filename):
with open(filename, 'w') as file:
tokens = [
"name",
"targets",
"dependencies",
"change-rate",
"all-changes",
"latest-changes",
"lifespan-in-days"
]
file.write(','.join(tokens))
file.write("\n")
for module in ModuleStats.load_module_stats():
tokens = [
module.module_name,
module.affected_targets,
module.dependency_count,
module.change_rate,
module.all_changes_count,
module.latest_changes_count,
module.lifespan_in_days
]
file.write(','.join(str(t) for t in tokens))
file.write("\n")
if __name__ == '__main__':
export_to_csv("change_history.csv")
| 2.40625 | 2 |
colaboradores/urls.py | SurielRuano/Orientador-Legal | 0 | 12795075 | from django.conf.urls import url, include
from django.conf import settings
from . import views
urlpatterns = [
url(r'^solicitud-colaboracion/$', views.Solicitud_colaboracion.as_view(),name="solicitud"),
] | 1.4375 | 1 |
app/views.py | julesc00/travel-agency | 0 | 12795076 | <reponame>julesc00/travel-agency
from django.shortcuts import render
# Create your views here.
def index(request):
context = {
"moto": "The framework for perfectionist!"
}
return render(request, "app/index.html", context)
| 1.789063 | 2 |
main.py | dark7py/da | 0 | 12795077 | <filename>main.py
import pandas as pd
def non_matches(firs_param, second_param, data):
count = 0
for (f1, f2) in zip(data[firs_param], data[second_param]):
if not is_contains(f1, f2) and not is_contains(f2, f1):
count += 1
return count
def is_contains(first_field, second_field):
for word in first_field.lower().replace('-', ' ').split():
if word in second_field.lower():
return True
return False
def get_top(size, data, field1, field2, word_to_search):
return data[data[field1].str.lower().str.contains(word_to_search[:-2])][field2]\
.str\
.lower()\
.value_counts()\
.head(size)
works = pd.read_csv("works.csv").dropna()
not_matches_count = non_matches("jobTitle", "qualification", works)
managers = get_top(5, works, "jobTitle", "qualification", "менеджер")
engineers = get_top(5, works, "qualification", "jobTitle", "инженер")
output_string = f"{works.shape[0]} не совпадает {not_matches_count}\n\n" \
f"Топ 5 образовний менеджеров\n" \
f"{managers}\n\n" \
f"Топ 5 должностей инженеров\n" \
f"{engineers}"
with open('output.txt', 'w', encoding='utf-8') as file:
file.write(output_string) | 3.484375 | 3 |
PYTHON_LESSON/05-class_fifth.py | sly1314sly/selenium_basic | 1 | 12795078 | <gh_stars>1-10
# Multiple inheritance
# class father1():
#     def have(self):
#         print("things the parent class has")
# class father2():
#     def money(self):
#         print("things the parent class has 2")
# class son(father1,father2):
#     pass
# john = son()  # both parent classes are inherited
# If both parents define the same thing, the one from the first parent is used:
# the first class listed is inherited with priority.
# class father1():
#     def have(self):
#         print("things the parent class has")
# class father2():
#     def have(self):
#         print("things the parent class has 2")
# class son(father1,father2):
#     pass
# john = son()
# john.have()
# Inheritance can be nested level by level.
# class grandfather():
#     def yeye(self):
#         print("things the grandfather has")
# class father1(grandfather):
#     def have(self):
#         print("things the parent class has")
# class father2(grandfather):
#     def yiu(self):
#         print("things the parent class has 2")
# class son(father1,father2):
#     pass
# john = son()
# john.yeye()
# The child level searches the parent level first, and only looks in the grandparent
# level if nothing is found there.
# class grandfather():
#     def yeye(self):
#         print("things the grandfather has")
# class father1(grandfather):
#     def yeye(self):
#         print("things the parent class has")
# class son(father1):
#     pass
# john = son()
# john.yeye()
# If the first parent does not have it, the second parent is searched, and only then
# the grandparent level.
# class grandfather():
#     def yeye(self):
#         print("things the grandfather has")
# class father1(grandfather):
#     pass
# class father2(grandfather):
#     def yeye(self):
#         print("things the parent class has 2")
# class son(father1,father2):
#     pass
# john = son()
# john.yeye()
# Python 3 inheritance order (MRO):
# New-style classes search the closest parent first (the first class in the parentheses),
# then the second parent, and only when none of the parents has the attribute do they
# look at the first parent's own parent class.
# Next: one method in a class can call another method of the same class.
# Class names and method names follow different conventions: class names capitalise the
# first letter of each word (CapWords), while method names join words with underscores.
# class PlusNum:
# def plus_int(self,a,b):
# a = int(a)
# b = int(b)
# return a+b
# def plus_float(self,a,b):
# a = float(a)
# b = float(b)
# return a+b
# def plus_all(self,a,b,c,d):
#         return self.plus_int(a,b)+self.plus_float(c,d)  # parameter names may change, but the number of parameters must not
# num = PlusNum()
# # print(num.plus_int(3,4))
# # print(num.plus_float(2.1,3.4))
# print(num.plus_all(1,2,3.3,4.3))
# For example:
# class Act:
#     @classmethod  # marks this as a class method
#     def use(cls):  # with the decorator above, the first argument becomes cls, short for "class"
#         print("www")
# Act.use()
# A class method can be called not only on the class but also on an instance.
# class Run:
#     @staticmethod  # a static method: it receives neither the instance nor the class, so it belongs to
#                    # neither, yet it can be called through both the class and an instance
#     def have_breakfast():  # the parentheses are empty only with respect to cls/self; it can still take its own parameters such as a, b, etc.
#         print('eat eggs')
# r = Run
# r.have_breakfast()  # called through an object
# Run.have_breakfast()  # called through the class
#————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————-
# Differences and uses of Python instance methods, class methods and static methods
# Python has at least three common kinds of methods: instance methods, class methods and static
# methods. How are they defined, how are they called, and how do they differ? See below.
# First of all, all three are defined inside a class. Here is a short summary of how to define
# and call them. (PS: instance objects have the widest access.)
# Instance method
# Definition: the first parameter must be the instance object, conventionally named "self";
# through it the instance's attributes and methods are passed (class attributes and methods are
# reachable through it as well).
# Call: can only be called on an instance object.
# Class method
# Definition: uses the @classmethod decorator. The first parameter must be the current class
# object, conventionally named "cls"; through it the class's attributes and methods are passed
# (instance attributes and methods cannot be passed).
# Call: can be called on both instances and the class itself.
# Static method
# Definition: uses the @staticmethod decorator. Parameters are arbitrary; there is no "self" or
# "cls", and the method body cannot use any attribute or method of the class or an instance.
# Call: can be called on both instances and the class itself.
# Instance method
# In short, an instance method is a method that instances of the class can use. Nothing more
# needs to be said here.
# Class method
# Uses the @classmethod decorator.
# In principle, a class method operates on the class itself as the object. If a method logically
# makes more sense when called with the class itself as the object, it can be defined as a class
# method; it is also a good choice when inheritance is involved.
# Example scenario:
# Suppose I have a Student class and a school-class class, and I want to:
#     increase the class head-count and query the total number of students;
#     have Student inherit from the class, so every Student instance raises the count;
#     finally, create a few students and read the total count for the class.
# Thinking it through: a class method fits this problem. Why? Because what I instantiate are
# students, and reading the class total from a single student instance is logically odd; at the
# same time, creating an instance of the class just to read the total is unnecessary.
# class ClassTest(object):
# __num = 0
# @classmethod
# def addNum(cls):
# cls.__num += 1
# @classmethod
# def getNum(cls):
# return cls.__num
#     # Here the magic method __new__ is used, mainly so that the counter is incremented whenever an instance is created.
# def __new__(self):
# ClassTest.addNum()
# return super(ClassTest, self).__new__(self)
# class Student(ClassTest):
# def __init__(self):
# self.name = ''
# a = Student()
# b = Student()
# print(ClassTest.getNum())
# Static method
# Uses the @staticmethod decorator.
# A static method is a function that lives inside a class and needs no instance. It mainly holds
# logic that conceptually belongs with the class but has nothing to do with the class itself:
# the method body never touches class or instance attributes or methods. Think of it as an
# independent, plain function that is merely hosted in the class's namespace for convenience
# and maintainability.
# For example, I want a class for time-related operations with a function that returns the
# current time.
# import time
# class TimeTest(object):
# def __init__(self, hour, minute, second):
# self.hour = hour
# self.minute = minute
# self.second = second
# @staticmethod
# def showTime():
# return time.strftime("%H:%M:%S", time.localtime())
# print(TimeTest.showTime())
# t = TimeTest(2, 10, 10)
# nowTime = t.showTime()
# print(nowTime)
# As shown above, a static method (function) is used, yet its body does not use (and cannot use)
# any class or instance attribute or method. To obtain the current time string there is no need
# to instantiate an object; for the static method the enclosing class acts more like a namespace.
# We could of course write the same function outside the class, but that would scatter the
# logical relationships and make the code harder to maintain later on.
# That is a brief summary of the differences and uses of Python instance, class and static methods.
#——————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————————
import re
import requests
import json
# response = requests.get('http://news.baidu.com')  # fetch the Baidu News page this way
# print(response)  # printing this shows the status code
# content = response.text
# print(content)  # printing this shows the whole page content
# with open('duban.json','w',encoding='utf-8') as file:  # write the page content to a file
#     file.write(content)
############################################## Further practice:
# file = open('duban.json','r',encoding='utf-8')  # read that file back
# a = file.read()
# # pattern = re.compile(r'新闻')
# # pattern = re.compile(r'\d{1,10}')  # match every run of digits between 1 and 10 characters long
# # pattern = re.compile(r'\d{1,}')  # omitting the left bound means from 0, omitting the right bound means unlimited; extracts all numbers in the file
# # pattern = re.compile(r'\d+')  # same effect as {1,} above
# # pattern = re.compile(r'\w')  # prints digits, letters, underscores and Chinese characters one by one
# # pattern = re.compile(r'\w+')  # prints digits, letters, underscores and Chinese characters joined together
# # pattern = re.compile(r'\W+')  # everything left after removing digits, letters, underscores and Chinese characters (capital W)
# # pattern = re.compile(r'\D+')  # everything that is not a digit
# pattern = re.compile(r'\w*')  # matches zero or more times, i.e. {0,}
# result = pattern.findall(a)  # find every occurrence of "新闻" (news) in the file
# print(result)
# file.close()  # remember to close an opened file
# file = open('duban.json','r',encoding='utf-8')  # read the file
# a = file.read()
# a2 = '<EMAIL>,song <EMAIL>,<EMAIL>'
# # pattern = re.compile(r'<EMAIL>')  # because of the unescaped '.', everything matches
# # pattern = re.compile(r'.')  # '.' matches anything, i.e. match-all
# pattern = re.compile(r'song\.lu@errc\.com')  # escaping the dot with a backslash gives an exact match
# result = pattern.findall(a2)
# print(result)
# file.close()  # remember to close an opened file
# For example, match every e-mail address.
# file = open('duban.json','r',encoding='utf-8')  # read the file
# a = file.read()
# a2 = '<EMAIL>,<EMAIL>,<EMAIL>'
# # pattern = re.compile(r'.+@\w+\.com')  # matches all the addresses, but they come back as one string; to get them separately use the approach below
# # pattern = re.compile(r'\w+.\w+@\w+\.com')  # normally the trailing \w+@\w+\.com (the usual e-mail regex) would be enough, but here there is a dot in front
# result = pattern.findall(a2)
# print(result)
# file.close()  # remember to close an opened file
# # Exercise: given <EMAIL>, match only the '@hit' part.
# file = open('duban.json','r',encoding='utf-8')  # read the file
# a = file.read()
# a2 = '<EMAIL>.cf.aa.ee.dd'
# # pattern = re.compile(r'@.+\.')  # regex is greedy by default and matches up to the last dot; avoid this by adding a question mark, as below
# pattern = re.compile(r'@.+?\.')  # lazy mode: the question mark switches off greedy matching
# result = pattern.findall(a2)
# print(result)
# file.close()  # remember to close an opened file
# file = open('duban.json','r',encoding='utf-8')  # read the file
# a = file.read()
# a2 = '<HtMl>hello</hTmL>'  # a web page; we want to extract "hello"
# pattern = re.compile(r'<[Hh][Tt][Mm][Ll]>hello</[Hh][Tt][Mm][Ll]>')  # the characters inside the square brackets are the ones allowed to match
# result = pattern.findall(a2)
# print(result)
# file.close()  # remember to close an opened file
# What does .*? mean? ===== it is the catch-all expression.
# \d can also be written with square brackets as [0123456789] or [0-9]; e.g. \d{,2} could be written as [012] in that example
# \w  letters, digits and underscore; can also be written as [a-zA-Z0-9_]
# \W  anything that is not a letter, digit or underscore
# \d  digits
# \D  non-digits
# .   anything
# +   match one or more times, same as {1,}
# {1,4}  match one to four times
# {,4}   match at most four times
# {1,}   match at least once
# ?   switches off greedy matching; placed right after a quantifier
# *   match zero or more times, same as {0,}
file = open('duban.json','r',encoding='utf-8')  # read the file
a = file.read()
a2 = '<EMAIL>,song <EMAIL>,song!<EMAIL>'
pattern = re.compile(r'@(.*?)\.com')  # the catch-all expression needs to be told where to stop
result = pattern.findall(a2)
print(result)
file.close()  # remember to close an opened file
################################################### Further practice:
# import json  # json is only for API data; content fetches every format
# import requests
# response = requests.get('http://news.baidu.com')  # fetch the Baidu News page this way
# # print(str(response.text))  # the type is a string
# # print(str(response.content))  # content yields a byte stream (prefixed with b); add encoding='utf-8' to fix that
# print(response.json)  # the type is a dictionary | 3.96875 | 4
get_address.py | trentlo/btc-utils | 0 | 12795079 | <gh_stars>0
import sys
from getopt import getopt, GetoptError
from crypto.key import PublicKey, PrivateKey
if __name__ == '__main__':
# Always use the compressed form now.
compressed = True
net = "main"
mnemonic = None
num_hash = 3
argv = sys.argv[1:]
    try:
        # -n (net)
        # `-d mnemonic`: deterministic key generation using a mnemonic phrase
        opts, args = getopt(argv, "n:d:h:", ["net=", "deterministic="])
    except GetoptError as err:
        print(err)
        sys.exit(2)
for opt, arg in opts:
if opt in ['-n', "--net"]:
net = arg
elif opt in ['-d', '--deterministic']:
mnemonic = arg
elif opt in ['-h']:
num_hash = int(arg)
if mnemonic == None:
prvk = PrivateKey.gen_random_key()
else:
print('gen secret key using mnemonic: ')
print(mnemonic)
prvk = PrivateKey.from_mnemonic(mnemonic, num_hash)
print('secret key:')
print(hex(prvk.key).upper())
wif_prvk : str = prvk.get_wif(net = net, compressed = compressed)
print('WIF secret key:')
print(wif_prvk)
pubk = PublicKey.from_private_key(prvk.key)
print('public key:')
print('x:', format(pubk.x, '064x'))
print('y:', format(pubk.y, '064x'))
addr = pubk.address(net = net, compressed = compressed)
print('bitcoin address:')
print(addr)
| 2.546875 | 3 |
AttackMethods/AttackMethod.py | Valentijn1995/Kn0ckKn0ck | 0 | 12795080 | import thread
import threading
import abc
from time import sleep
class AttackMethod:
"""
The AttackMethod class represents a DOS attack. The AttackMethod class is an abstract class and needs to be
extended by other classes. An AttackMethod runs in its own thread. The thread loop starts when the
start_attack() function is called and stops when the stop_attack() function is called.
"""
__metaclass__ = abc.ABCMeta
def __init__(self, proxy, target):
"""
Constructor. Creates a new AttackMethod instance.
:type target: Destination
:type proxy: Proxy
"""
self._proxy = proxy
self._attack_is_active = False
self._innerThread = None
self._attack_lock = threading.Lock()
self._attack_target = target
self._loop_delay = 0.050
self.exception = None
def start_attack(self):
"""
Starts the DOS attack.
"""
self._attack_lock.acquire()
if not self._attack_is_active:
self._attack_is_active = True
self._attack_lock.release()
self._innerThread = thread.start_new_thread(self._thread_loop, ())
else:
self._attack_lock.release()
def stop_attack(self):
"""
Stops the attack loop.
"""
self._set_attack_active(False)
def has_exception(self):
if not self.is_active():
return self.exception is not None
else:
return False
def get_exception(self):
return self.exception
def _thread_loop(self):
"""
        The main loop of the attack thread. This function is called by the attack thread and should not be called
        directly.
"""
while self.is_active():
try:
self._attack_loop()
sleep(self._loop_delay)
except Exception as ex:
self.exception = ex
self.stop_attack()
def is_active(self):
"""
        Checks the value of the _attack_is_active attribute in a thread-safe way.
Use this function to get the value of _attack_is_active instead of checking the value directly.
:return: True if the attack is active and False otherwise
"""
self._attack_lock.acquire()
attack_active = self._attack_is_active
self._attack_lock.release()
return attack_active
def _set_attack_active(self, value):
"""
Thread-safe setter for the _attack_is_active value. This function is only for internal use.
:param value: New value of the _attack_is_value value (True or False)
"""
if not isinstance(value, bool):
            raise ValueError('set_attack_active value has to be a boolean and not a ' + str(type(value)))
self._attack_lock.acquire()
self._attack_is_active = value
self._attack_lock.release()
@abc.abstractmethod
def _attack_loop(self):
"""
Part of the _thread_loop. This function has to be implemented by the class which extends from the
AttackMethod class. The function gets called repeatedly until the stop_attack function gets called.
The class which extends from this class has to implement it's attack logic in this function.
"""
return
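    # Illustrative sketch of a concrete subclass (not part of the original code). The body of
    # _attack_loop below is a placeholder; it deliberately avoids assuming anything about the
    # real Proxy API of this project.
    #
    #   class ExampleFlood(AttackMethod):
    #       def _attack_loop(self):
    #           # one unit of attack work per iteration; _thread_loop calls this repeatedly
    #           print("would send one request to %s via %s" % (self._attack_target, self._proxy))
    #
    #   attack = ExampleFlood(proxy, target)
    #   attack.start_attack()
    #   ...
    #   attack.stop_attack()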
| 3.859375 | 4 |
alipay/aop/api/domain/SsdataDataserviceDatapropertyBatchqueryModel.py | antopen/alipay-sdk-python-all | 213 | 12795081 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class SsdataDataserviceDatapropertyBatchqueryModel(object):
def __init__(self):
self._action = None
self._action_param = None
self._base = None
self._data_channel = None
self._visit_ac = None
self._visit_biz_line = None
self._visit_bu = None
@property
def action(self):
return self._action
@action.setter
def action(self, value):
self._action = value
@property
def action_param(self):
return self._action_param
@action_param.setter
def action_param(self, value):
self._action_param = value
@property
def base(self):
return self._base
@base.setter
def base(self, value):
self._base = value
@property
def data_channel(self):
return self._data_channel
@data_channel.setter
def data_channel(self, value):
self._data_channel = value
@property
def visit_ac(self):
return self._visit_ac
@visit_ac.setter
def visit_ac(self, value):
self._visit_ac = value
@property
def visit_biz_line(self):
return self._visit_biz_line
@visit_biz_line.setter
def visit_biz_line(self, value):
self._visit_biz_line = value
@property
def visit_bu(self):
return self._visit_bu
@visit_bu.setter
def visit_bu(self, value):
self._visit_bu = value
def to_alipay_dict(self):
params = dict()
if self.action:
if hasattr(self.action, 'to_alipay_dict'):
params['action'] = self.action.to_alipay_dict()
else:
params['action'] = self.action
if self.action_param:
if hasattr(self.action_param, 'to_alipay_dict'):
params['action_param'] = self.action_param.to_alipay_dict()
else:
params['action_param'] = self.action_param
if self.base:
if hasattr(self.base, 'to_alipay_dict'):
params['base'] = self.base.to_alipay_dict()
else:
params['base'] = self.base
if self.data_channel:
if hasattr(self.data_channel, 'to_alipay_dict'):
params['data_channel'] = self.data_channel.to_alipay_dict()
else:
params['data_channel'] = self.data_channel
if self.visit_ac:
if hasattr(self.visit_ac, 'to_alipay_dict'):
params['visit_ac'] = self.visit_ac.to_alipay_dict()
else:
params['visit_ac'] = self.visit_ac
if self.visit_biz_line:
if hasattr(self.visit_biz_line, 'to_alipay_dict'):
params['visit_biz_line'] = self.visit_biz_line.to_alipay_dict()
else:
params['visit_biz_line'] = self.visit_biz_line
if self.visit_bu:
if hasattr(self.visit_bu, 'to_alipay_dict'):
params['visit_bu'] = self.visit_bu.to_alipay_dict()
else:
params['visit_bu'] = self.visit_bu
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = SsdataDataserviceDatapropertyBatchqueryModel()
if 'action' in d:
o.action = d['action']
if 'action_param' in d:
o.action_param = d['action_param']
if 'base' in d:
o.base = d['base']
if 'data_channel' in d:
o.data_channel = d['data_channel']
if 'visit_ac' in d:
o.visit_ac = d['visit_ac']
if 'visit_biz_line' in d:
o.visit_biz_line = d['visit_biz_line']
if 'visit_bu' in d:
o.visit_bu = d['visit_bu']
return o
| 2.046875 | 2 |
app/gui/cryptotools/__init__.py | vasilypht/Cryptographic-methods | 0 | 12795082 | from typing import Final
from .freqanalysis import FreqAnalysisWidget
from .index_of_coincidence import ICWidget
from .autocorrelation import AutocorrelationWidget
from .kasiski import KasiskiWidget
WIDGETS_CRYPTOTOOLS: Final = (
FreqAnalysisWidget,
ICWidget,
AutocorrelationWidget,
KasiskiWidget
)
| 1.304688 | 1 |
utils/build_test_db.py | eternalconcert/robotframework-sqless | 0 | 12795083 | # This file is needed to initialize the models and migrations
import os
import sys
from nopea.dbobject import DbObject
from nopea import fields
from nopea.migrations import Migration
if 'sqlite' in sys.argv:
from nopea.adaptors.sqlite import SQLiteAdaptor
DbObject.adaptor = SQLiteAdaptor('sqless.db')
elif 'mysql' in sys.argv:
from nopea.adaptors.mysql import MySQLAdaptor
DbObject.adaptor = MySQLAdaptor({
'host': 'localhost',
'user': 'sqless',
'db': 'sqless',
'use_unicode': True,
'charset': 'utf8'
})
elif 'postgres' in sys.argv:
from nopea.adaptors.postgres import PostgreSQLAdaptor
DbObject.adaptor = PostgreSQLAdaptor({
'host': 'localhost',
'user': 'sqless',
'database': 'sqless',
'password': '<PASSWORD>'
})
class User(DbObject):
username = fields.CharField(max_length=20)
email = fields.CharField(max_length=100)
failed_logins = fields.IntegerField(default=0)
class Post(DbObject):
title = fields.CharField(max_length=100)
content = fields.TextField()
class Song(DbObject):
title = fields.CharField(max_length=100)
artist = fields.CharField(max_length=100)
album = fields.CharField(max_length=100)
in_collection = fields.BooleanField(default=False)
Migration.migration_dir = os.path.join(os.getcwd(), 'utils/migrations')
migrations = Migration()
migrations.create_migrations()
migrations.run_migrations()
users = [
{"username": "TestUser1", "email": "<EMAIL>"},
{"username": "TestUser2", "email": "<EMAIL>"},
{"username": "TestUser3", "email": "<EMAIL>", "failed_logins": 12}
]
for user in users:
User.objects.get_or_create(**user)
posts = [
{"title": "TestPosting", "content": "Lorem Ipsum Dolor Sit"},
{"title": "SomeOtherStuff", "content": "hello, world!"},
]
for post in posts:
Post.objects.get_or_create(**post)
songs = [
{
"title": "Love Like Cyanide",
"artist": "Sirenia",
"album": "Arcane Astral Aeons"
},
{
"title": "The Greatest Show On Earth",
"artist": "Nightwish",
"album": "Decades"
},
{
"title": "Ghost Love Score",
"artist": "Nightwish",
"album": "Decades"
},
{
"title": "Devil And The Deep Dark Ocean",
"artist": "Nightwish",
"album": "Decades"
},
{
"title": "One By One",
"artist": "Immortal_",
"album": "Sons Of Northern Darkness"
},
{
"title": "Sons Of Northern Darkness",
"artist": "Immortal_",
"album": "Sons Of Northern Darkness"
}
]
for song in songs:
Song.objects.get_or_create(**song)
| 2.609375 | 3 |
mp/drawing/aoi_analysis.py | Ecotrust/COMPASS | 1 | 12795084 | <reponame>Ecotrust/COMPASS<filename>mp/drawing/aoi_analysis.py<gh_stars>1-10
from django.shortcuts import render
#from madrona.raster_stats.models import RasterDataset, zonal_stats
from settings import *
from general.utils import default_value, sq_meters_to_sq_miles
from drawing.models import *
'''
'''
def display_aoi_analysis(request, aoi, template='aoi/reports/aoi_report.html'):
context = get_wind_analysis(aoi)
return render(request, template, context)
'''
Run the analysis and return the results as a context dictionary so they may be rendered with the template
'''
def get_wind_analysis(aoi):
#compile context
area = sq_meters_to_sq_miles(aoi.geometry_final.area)
context = { 'aoi': aoi, 'default_value': default_value, 'area': area }
return context
| 2.234375 | 2 |
signaling-games/algs/model_agents/__init__.py | vbhatt-cs/inference-based-messaging | 3 | 12795085 | <reponame>vbhatt-cs/inference-based-messaging
from .model_s import ModelS
from .model_r import ModelR
__all__ = ["ModelR", "ModelS"]
| 0.996094 | 1 |
src/kgmk/dsa/misc/online_update_query/set_point_get_range/abstract/segtree/__init__.py | kagemeka/python | 0 | 12795086 | <gh_stars>0
from kgmk.dsa.algebra.abstract.structure.monoid import (
Monoid,
)
from \
kgmk.dsa.tree.misc.segment.normal.one_indexed.topdown \
.non_recursive \
import (
SegmentTree,
)
# TODO cut below
import typing
import typing
T = typing.TypeVar('T')
class SetPointGetRange(typing.Generic[T]):
def __init__(
self,
monoid: Monoid[T],
a: typing.List[T],
) -> typing.NoReturn:
self.__seg = SegmentTree(monoid, a)
self.__monoid = monoid
def set_point(self, i: int, x: T) -> typing.NoReturn:
self.__seg[i] = x
def operate_point(self, i: int, x: T) -> typing.NoReturn:
self.set_point(i, self.__monoid.op(self.get_point(i), x))
def get_point(self, i: int) -> T:
return self.__seg[i]
def get_range(self, l: int, r: int) -> T:
return self.__seg.get_range(l, r) | 2.28125 | 2 |
data/us-tn/co-knox/covid_age/harvester.py | mtna/covid-19 | 10 | 12795087 | import os
import math
import pandas as pd
import datetime
variables = [
'date_stamp',
'age_group',
'cnt_confirmed',
'pct_confirmed'
]
def cleanData(data, fileName):
# source data frame from csv file
source = pd.DataFrame(data)
source.columns = ['v1','v2','v3']
print(source)
# the target data frame
df = pd.DataFrame(columns = variables)
df['age_group'] = source['v1'].map({ '0-10':'00', '11-20': '11', '21-30': '21', '31-40': '31', '41-50': '41', '51-60': '51', '61-70': '61', '71-80': '71', '81-90': '81', '90+': '91', 'Age Unknown': '99' })
df['cnt_confirmed'] = source['v2']
df['pct_confirmed'] = list(map(lambda x: x[:-1], source['v3'].values))
df['date_stamp'] = fileName[0:-4]
# apply data types
df['date_stamp'] = pd.to_datetime(df['date_stamp']).dt.strftime('%Y-%m-%d')
df['cnt_confirmed'] = df['cnt_confirmed'].astype(pd.Int32Dtype())
return df
def deleteFiles(path):
    today = datetime.date.today()
one_week = datetime.timedelta(days=7)
week = today - one_week
week_ago = datetime.datetime.combine(week, datetime.time(0, 0))
for filename in os.listdir(path):
if(filename.endswith('.csv')):
            newFilename = filename.replace('.csv', '')
filedate = datetime.datetime.strptime(newFilename, '%Y-%m-%d')
if(filedate < week_ago):
print('removing files that are more than a week old: ',path,'/',filename)
os.remove(f"{path}/{filename}")
return None
if __name__ == "__main__":
path = os.path
# Loop over the files within the folder
for filename in sorted(os.listdir('./data/us-tn/co-knox/covid_age/raw')):
if filename.endswith('.csv') and path.exists(f"./data/us-tn/co-knox/covid_age/clean/{filename}") == False:
print(filename)
# For each csv file, map the transformed data to its respective file in the harvested folder
data = pd.read_csv(f"./data/us-tn/co-knox/covid_age/raw/{filename}")
df = cleanData(data, filename)
df.to_csv(f"./data/us-tn/co-knox/covid_age/clean/{filename}", index=False)
# if there is no aggregate file create one, otherwise append to it.
if path.exists(f"./data/us-tn/co-knox/covid_age/latest.csv"):
df.to_csv(f"./data/us-tn/co-knox/covid_age/latest.csv", mode='a', header=False, index=False)
else:
df.to_csv(f"./data/us-tn/co-knox/covid_age/latest.csv", index=False)
deleteFiles('./data/us-tn/co-knox/covid_age/raw')
deleteFiles('./data/us-tn/co-knox/covid_age/clean')
| 3.03125 | 3 |
example_algos/models/aes.py | MIC-DKFZ/mood | 42 | 12795088 | <gh_stars>10-100
import numpy as np
import torch
import torch.distributions as dist
from example_algos.models.nets import BasicEncoder, BasicGenerator
class VAE(torch.nn.Module):
def __init__(
self,
input_size,
z_dim=256,
fmap_sizes=(16, 64, 256, 1024),
to_1x1=True,
conv_op=torch.nn.Conv2d,
conv_params=None,
tconv_op=torch.nn.ConvTranspose2d,
tconv_params=None,
normalization_op=None,
normalization_params=None,
activation_op=torch.nn.LeakyReLU,
activation_params=None,
block_op=None,
block_params=None,
*args,
**kwargs
):
"""Basic VAE build up of a symetric BasicEncoder (Encoder) and BasicGenerator (Decoder)
Args:
input_size ((int, int, int): Size of the input in format CxHxW):
            z_dim (int, optional): Dimension of the latent space / input dimension (C channel-dim). Defaults to 256.
fmap_sizes (tuple, optional): [Defines the Upsampling-Levels of the generator, list/ tuple of ints, where each
int defines the number of feature maps in the layer]. Defaults to (16, 64, 256, 1024).
            to_1x1 (bool, optional): [If True, the last conv layer maps the latent dimension to a z_dim x 1 x 1 vector (similar to fully connected);
                                    if False, the spatial resolution is allowed to be larger than 1x1 (z_dim x H x W, using the conv kernel size given in conv_params)].
                                    Defaults to True.
            conv_op ([torch.nn.Module], optional): [Convolution operation used in the encoder to downsample to a new level/ featuremap size]. Defaults to nn.Conv2d.
conv_params ([dict], optional): [Init parameters for the conv operation]. Defaults to dict(kernel_size=3, stride=2, padding=1, bias=False).
tconv_op ([torch.nn.Module], optional): [Upsampling/ Transposed Conv operation used in the decoder to upsample to a new level/ featuremap size]. Defaults to nn.ConvTranspose2d.
tconv_params ([dict], optional): [Init parameters for the conv operation]. Defaults to dict(kernel_size=3, stride=2, padding=1, bias=False).
normalization_op ([torch.nn.Module], optional): [Normalization Operation (e.g. BatchNorm, InstanceNorm,...) -> see ConvModule]. Defaults to nn.BatchNorm2d.
normalization_params ([dict], optional): [Init parameters for the normalization operation]. Defaults to None.
            activation_op ([torch.nn.Module], optional): [Activation operation/ non-linearity (e.g. ReLU, Sigmoid,...) -> see ConvModule]. Defaults to nn.LeakyReLU.
activation_params ([dict], optional): [Init parameters for the activation operation]. Defaults to None.
block_op ([torch.nn.Module], optional): [Block operation used for each feature map size after each upsample op of e.g. ConvBlock/ ResidualBlock]. Defaults to NoOp.
block_params ([dict], optional): [Init parameters for the block operation]. Defaults to None.
"""
super(VAE, self).__init__()
input_size_enc = list(input_size)
input_size_dec = list(input_size)
self.enc = BasicEncoder(
input_size=input_size_enc,
fmap_sizes=fmap_sizes,
z_dim=z_dim * 2,
conv_op=conv_op,
conv_params=conv_params,
normalization_op=normalization_op,
normalization_params=normalization_params,
activation_op=activation_op,
activation_params=activation_params,
block_op=block_op,
block_params=block_params,
to_1x1=to_1x1,
)
self.dec = BasicGenerator(
input_size=input_size_dec,
fmap_sizes=fmap_sizes[::-1],
z_dim=z_dim,
upsample_op=tconv_op,
conv_params=tconv_params,
normalization_op=normalization_op,
normalization_params=normalization_params,
activation_op=activation_op,
activation_params=activation_params,
block_op=block_op,
block_params=block_params,
to_1x1=to_1x1,
)
self.hidden_size = self.enc.output_size
def forward(self, inpt, sample=True, no_dist=False, **kwargs):
y1 = self.enc(inpt, **kwargs)
mu, log_std = torch.chunk(y1, 2, dim=1)
std = torch.exp(log_std)
z_dist = dist.Normal(mu, std)
if sample:
z_sample = z_dist.rsample()
else:
z_sample = mu
x_rec = self.dec(z_sample)
if no_dist:
return x_rec
else:
return x_rec, z_dist
def encode(self, inpt, **kwargs):
"""Encodes a sample and returns the paramters for the approx inference dist. (Normal)
Args:
inpt ([tensor]): The input to encode
Returns:
mu : The mean used to parameterized a Normal distribution
std: The standard deviation used to parameterized a Normal distribution
"""
enc = self.enc(inpt, **kwargs)
mu, log_std = torch.chunk(enc, 2, dim=1)
std = torch.exp(log_std)
return mu, std
def decode(self, inpt, **kwargs):
"""Decodes a latent space sample, used the generative model (decode = mu_{gen}(z) as used in p(x|z) = N(x | mu_{gen}(z), 1) ).
Args:
inpt ([type]): A sample from the latent space to decode
Returns:
[type]: [description]
"""
x_rec = self.dec(inpt, **kwargs)
return x_rec
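# Illustrative usage sketch (commented out so importing this module stays side-effect free).
# The input size, z_dim and batch size below are arbitrary example values.
#
#   vae = VAE(input_size=(1, 64, 64), z_dim=128)
#   x = torch.randn(4, 1, 64, 64)                     # batch of 4 single-channel images
#   x_rec, z_dist = vae(x)                            # reconstruction and approx. posterior
#   kl = dist.kl_divergence(z_dist, dist.Normal(0.0, 1.0)).sum()
#   rec_loss = torch.nn.functional.mse_loss(x_rec, x)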
class AE(torch.nn.Module):
def __init__(
self,
input_size,
z_dim=1024,
fmap_sizes=(16, 64, 256, 1024),
to_1x1=True,
conv_op=torch.nn.Conv2d,
conv_params=None,
tconv_op=torch.nn.ConvTranspose2d,
tconv_params=None,
normalization_op=None,
normalization_params=None,
activation_op=torch.nn.LeakyReLU,
activation_params=None,
block_op=None,
block_params=None,
*args,
**kwargs
):
"""Basic AE build up of a symetric BasicEncoder (Encoder) and BasicGenerator (Decoder)
Args:
input_size ((int, int, int): Size of the input in format CxHxW):
            z_dim (int, optional): Dimension of the latent space / input dimension (C channel-dim). Defaults to 1024.
fmap_sizes (tuple, optional): [Defines the Upsampling-Levels of the generator, list/ tuple of ints, where each
int defines the number of feature maps in the layer]. Defaults to (16, 64, 256, 1024).
            to_1x1 (bool, optional): [If True, the last conv layer maps the latent dimension to a z_dim x 1 x 1 vector (similar to fully connected);
                                    if False, the spatial resolution is allowed to be larger than 1x1 (z_dim x H x W, using the conv kernel size given in conv_params)].
                                    Defaults to True.
            conv_op ([torch.nn.Module], optional): [Convolution operation used in the encoder to downsample to a new level/ featuremap size]. Defaults to nn.Conv2d.
conv_params ([dict], optional): [Init parameters for the conv operation]. Defaults to dict(kernel_size=3, stride=2, padding=1, bias=False).
tconv_op ([torch.nn.Module], optional): [Upsampling/ Transposed Conv operation used in the decoder to upsample to a new level/ featuremap size]. Defaults to nn.ConvTranspose2d.
tconv_params ([dict], optional): [Init parameters for the conv operation]. Defaults to dict(kernel_size=3, stride=2, padding=1, bias=False).
normalization_op ([torch.nn.Module], optional): [Normalization Operation (e.g. BatchNorm, InstanceNorm,...) -> see ConvModule]. Defaults to nn.BatchNorm2d.
normalization_params ([dict], optional): [Init parameters for the normalization operation]. Defaults to None.
            activation_op ([torch.nn.Module], optional): [Activation operation/ non-linearity (e.g. ReLU, Sigmoid,...) -> see ConvModule]. Defaults to nn.LeakyReLU.
activation_params ([dict], optional): [Init parameters for the activation operation]. Defaults to None.
block_op ([torch.nn.Module], optional): [Block operation used for each feature map size after each upsample op of e.g. ConvBlock/ ResidualBlock]. Defaults to NoOp.
block_params ([dict], optional): [Init parameters for the block operation]. Defaults to None.
"""
super(AE, self).__init__()
input_size_enc = list(input_size)
input_size_dec = list(input_size)
self.enc = BasicEncoder(
input_size=input_size_enc,
fmap_sizes=fmap_sizes,
z_dim=z_dim,
conv_op=conv_op,
conv_params=conv_params,
normalization_op=normalization_op,
normalization_params=normalization_params,
activation_op=activation_op,
activation_params=activation_params,
block_op=block_op,
block_params=block_params,
to_1x1=to_1x1,
)
self.dec = BasicGenerator(
input_size=input_size_dec,
fmap_sizes=fmap_sizes[::-1],
z_dim=z_dim,
upsample_op=tconv_op,
conv_params=tconv_params,
normalization_op=normalization_op,
normalization_params=normalization_params,
activation_op=activation_op,
activation_params=activation_params,
block_op=block_op,
block_params=block_params,
to_1x1=to_1x1,
)
self.hidden_size = self.enc.output_size
def forward(self, inpt, **kwargs):
y1 = self.enc(inpt, **kwargs)
x_rec = self.dec(y1)
return x_rec
def encode(self, inpt, **kwargs):
"""Encodes a input sample to a latent space sample
Args:
inpt ([tensor]): Input sample
Returns:
enc: Encoded input sample in the latent space
"""
enc = self.enc(inpt, **kwargs)
return enc
def decode(self, inpt, **kwargs):
"""Decodes a latent space sample back to the input space
Args:
inpt ([tensor]): [Latent space sample]
Returns:
[rec]: [Encoded latent sample back in the input space]
"""
rec = self.dec(inpt, **kwargs)
return rec
| 2.40625 | 2 |
sme/test/test_parameter.py | henryiii/spatial-model-editor | 4 | 12795089 | <reponame>henryiii/spatial-model-editor<filename>sme/test/test_parameter.py
import unittest
import sme
class TestParameter(unittest.TestCase):
def test_parameter(self):
# get an existing parameter
m = sme.open_example_model()
p = m.parameters["param"]
# verify name and properties
self.assertEqual(repr(p), "<sme.Parameter named 'param'>")
self.assertEqual(str(p)[0:33], "<sme.Parameter>\n - name: 'param'")
self.assertEqual(p.name, "param")
self.assertEqual(p.value, "1")
# assign new values
p.name = "New param"
p.value = "0.8765"
self.assertEqual(repr(p), "<sme.Parameter named 'New param'>")
self.assertEqual(str(p)[0:37], "<sme.Parameter>\n - name: 'New param'")
self.assertEqual(p.name, "New param")
self.assertEqual(p.value, "0.8765")
# check change was propagated to model
self.assertRaises(
sme.InvalidArgument,
lambda: m.parameters["param"],
)
p2 = m.parameters["New param"]
self.assertEqual(p2.name, "New param")
self.assertEqual(p2.value, "0.8765")
self.assertEqual(p2, p)
self.assertEqual(p2, m.parameters[0])
self.assertEqual(p2, m.parameters[-1])
| 2.953125 | 3 |
tests/conftest.py | YellowFlash2012/stock-portfolio-io | 0 | 12795090 | import pytest
import requests
from project import create_app, db
from flask import current_app
from project.models import Stock, User
from datetime import datetime
########################
#### Helper Classes ####
########################
class MockSuccessResponse(object):
def __init__(self, url):
self.status_code = 200
self.url = url
self.headers = {'blaa': '1234'}
def json(self):
return {
'Meta Data': {
"2. Symbol": "MSFT",
"3. Last Refreshed": "2022-02-10"
},
'Time Series (Daily)': {
"2022-02-10": {
"4. close": "302.3800",
},
"2022-02-09": {
"4. close": "301.9800",
}
}
}
class MockFailedResponse(object):
def __init__(self, url):
self.status_code = 404
self.url = url
self.headers = {'blaa': '1234'}
def json(self):
return {'error': 'bad'}
class MockSuccessResponseDaily(object):
def __init__(self, url):
self.status_code = 200
self.url = url
def json(self):
return {
'Meta Data': {
"2. Symbol": "AAPL",
"3. Last Refreshed": "2020-03-24"
},
'Time Series (Daily)': {
"2022-02-10": {
"4. close": "148.3400",
},
"2022-02-09": {
"4. close": "135.9800",
}
}
}
class MockApiRateLimitExceededResponse(object):
def __init__(self, url):
self.status_code = 200
self.url = url
def json(self):
return {
'Note': 'Thank you for using Alpha Vantage! Our standard API call frequency is ' +
'5 calls per minute and 500 calls per day.'
}
class MockFailedResponse(object):
def __init__(self, url):
self.status_code = 404
self.url = url
def json(self):
return {'error': 'bad'}
class MockSuccessResponseWeekly(object):
def __init__(self, url):
self.status_code = 200
self.url = url
def json(self):
return {
'Meta Data': {
"2. Symbol": "AAPL",
"3. Last Refreshed": "2020-07-28"
},
'Weekly Adjusted Time Series': {
"2020-07-24": {
"4. close": "379.2400",
},
"2020-07-17": {
"4. close": "362.7600",
},
"2020-06-11": {
"4. close": "354.3400",
},
"2020-02-25": {
"4. close": "432.9800",
}
}
}
@pytest.fixture(scope='module')
def test_client():
flask_app = create_app()
flask_app.config.from_object('config.TestingConfig')
flask_app.extensions['mail'].suppress = True #to avoid sending emails during the tests
# Create a test client using the Flask application configured for testing
with flask_app.test_client() as testing_client:
# establish an app ctx be4 accessing the logger
with flask_app.app_context():
flask_app.logger.info('Creating database tables in test_client fixture...')
yield testing_client #where the test happens
@pytest.fixture(scope='function')
def new_stock():
stock = Stock('AAPL', '16', '406.78', 7, datetime(2022, 2, 12))
return stock
@pytest.fixture(scope='module')
def new_user():
user = User('<EMAIL>', '<PASSWORD>')
return user
# to register a default user
@pytest.fixture(scope='module')
def register_default_user(test_client):
# Register the default user
test_client.post('/users/register',
data={'name':'<NAME>', 'email': '<EMAIL>',
'password': '<PASSWORD>'},
follow_redirects=True)
return
# is default user logged in?
@pytest.fixture(scope='function')
def log_in_default_user(test_client, register_default_user):
# Log in the default user
test_client.post('/users/login',
data={'email': '<EMAIL>',
'password': '<PASSWORD>'},
follow_redirects=True)
yield # this is where the testing happens!
# Log out the default user
test_client.get('/users/logout', follow_redirects=True)
@pytest.fixture(scope='function')
def confirm_email_default_user(test_client, log_in_default_user):
# Mark the user as having their email address confirmed
user = User.query.filter_by(email='<EMAIL>').first()
user.email_confirmed = True
user.email_confirmed_on = datetime(2020, 7, 8)
db.session.add(user)
db.session.commit()
yield user # this is where the testing happens!
# Mark the user as not having their email address confirmed (clean up)
user = User.query.filter_by(email='<EMAIL>').first()
user.email_confirmed = False
user.email_confirmed_on = None
db.session.add(user)
db.session.commit()
@pytest.fixture(scope='function')
def afterwards_reset_default_user_password():
yield # this is where the testing happens!
# Since a test using this fixture could change the password for the default user,
# reset the password back to the default password
user = User.query.filter_by(email='<EMAIL>').first()
user.set_password('<PASSWORD>')
db.session.add(user)
db.session.commit()
@pytest.fixture(scope='function')
def add_stocks_for_default_user(test_client, log_in_default_user):
# Add three stocks for the default user
test_client.post('/add_stock', data={'stock_symbol': 'SAM',
'number_of_shares': '27',
'purchase_price': '301.23',
'purchase_date': '2020-07-01'})
test_client.post('/add_stock', data={'stock_symbol': 'COST',
'number_of_shares': '76',
'purchase_price': '14.67',
'purchase_date': '2019-05-26'})
test_client.post('/add_stock', data={'stock_symbol': 'TWTR',
'number_of_shares': '146',
'purchase_price': '34.56',
'purchase_date': '2020-02-03'})
return
# ***fixtures for moking requests.get()***
@pytest.fixture(scope='function')
def mock_requests_get_success_daily(monkeypatch):
# Create a mock for the requests.get() call to prevent making the actual API call
def mock_get(url):
return MockSuccessResponseDaily(url)
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=MSFT&apikey=demo'
monkeypatch.setattr(requests, 'get', mock_get)
@pytest.fixture(scope='function')
def mock_requests_get_api_rate_limit_exceeded(monkeypatch):
def mock_get(url):
return MockApiRateLimitExceededResponse(url)
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=MSFT&apikey=demo'
monkeypatch.setattr(requests, 'get', mock_get)
@pytest.fixture(scope='function')
def mock_requests_get_failure(monkeypatch):
def mock_get(url):
return MockFailedResponse(url)
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=MSFT&apikey=demo'
monkeypatch.setattr(requests, 'get', mock_get)
@pytest.fixture(scope='function')
def mock_requests_get_success_weekly(monkeypatch):
# Create a mock for the requests.get() call to prevent making the actual API call
def mock_get(url):
return MockSuccessResponseWeekly(url)
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_WEEKLY_ADJUSTED&symbol=MSFT&apikey=demo'
monkeypatch.setattr(requests, 'get', mock_get)
# ***register-login-logout 2nd user***
@pytest.fixture(scope='module')
def register_second_user(test_client):
"""Registers the second user using the '/users/register' route."""
test_client.post('/users/register',
data={'name':'<NAME>', 'email': '<EMAIL>',
'password': '<PASSWORD>'})
@pytest.fixture(scope='function')
def log_in_second_user(test_client, register_second_user):
# Log in the user
test_client.post('/users/login',
data={'email': '<EMAIL>',
'password': '<PASSWORD>'})
yield # this is where the testing happens!
# Log out the user
test_client.get('/users/logout', follow_redirects=True) | 2.453125 | 2 |
extractenronlib/extractenron.py | NitiBaghel/Enron-Dataset-Field-Extraction-Library | 0 | 12795091 | import email
import pandas as pd
def extract(data, structured_fields=[], extract_payload=True):
r"""This function extracts data for the given header list from the Enron email dataset.
It provides flexibilty to choose which fields needs to be extracted.
The header list provided by the user are the tags in the email of Enron dataset, eg. Date, Subject etc.
By default, if no header is provided, this function returns only the email text body of the Enron dataset.
Arguments:
1) data: Dataframe It is the Enron dataset with column headings. This argument can not be kept empty.
2) structured_fields: List It is a of tags for which data needs to be extracted. Example: ['Date', 'Subject', 'X-To']. This argument can be droppped if not required.
3) extract_pyload: Boolean True if email text body is required. False in case only structured_fields needs to be extracted. This field can alo be dropped while calling the function. In case nothing is specified, default boolean value True is used.
return: Dataframe A dataframe with specified fields along with the original columns passsed as the data argument.
This function is created to take off the burden of extracting desired fields from the Enron dataset. However, this does not clean the data, eg. it does not remove the empty rows or columns. Neither it does the pre-processing of data like lowercase and removal of unwanted characters.
In order to make it more powerful, above functions can be added.
"""
headers=data.columns
emails = data.rename(columns={headers[0]:'email_path', headers[1]:'email'})
#getting structured text
def create_dict(dictionary, key, value):
if key in dictionary:
values = dictionary.get(key)
values.append(value)
dictionary[key] = values
else:
dictionary[key] = [value]
return dictionary
def get_structured_data(df, fields):
structured_data = {}
messages = df["email"]
for message in messages:
e = email.message_from_string(message)
for header in fields:
header_data = e.get(header)
create_dict(dictionary = structured_data, key = header, value = header_data)
return pd.DataFrame(structured_data)
#getting unstructured text
def get_unstructured_email(df):
messages = []
for item in df["email"]:
e = email.message_from_string(item)
message_body = e.get_payload()
#message_body = message_body.lower()
messages.append(message_body)
return messages
if extract_payload == True:
email_body = get_unstructured_email(emails)
emails["Message-Body"] = email_body
structured_data = get_structured_data(emails, structured_fields)
emails = pd.concat([emails, structured_data], axis=1)
else:
structured_data = get_structured_data(emails, structured_fields)
emails = pd.concat([emails, structured_data], axis=1)
return emails | 3.875 | 4 |
python/adjacency_to_edgelist.py | conorfalvey/Python-MultilayerExtraction | 1 | 12795092 | <filename>python/adjacency_to_edgelist.py
# adjacency_to_edgelist
#
# Function that converts a list of adjacency matrices to an edgelist
# @param adjacency: a list whose ith entry is an adjacency matrix representing the ith layer of a multilayer network
# @future_param mode: directed or undirected
# @future_param weighted: currently not functioning. Coming in later version.
#
# @keywords community detection, multilayer networks, configuration model, random graph models
# @return edgelist: a matrix with three columns representing edge connections: node1, node2, layer
#
# Basis: Wilson, <NAME>., Palowitch, <NAME>, Shankar, and Nobel, <NAME>. (2017) "Significance based
# extraction in multilayer networks with heterogeneous community structure." Journal of Machine Learning Research
# Original R Code: <NAME>
# Revised Python Code: <NAME>
import networkx as nx
import numpy as np
import pandas as pd
def adjacency_to_edgelist(adjacency):
# Instantiate labeled dataframe to make sure appends later on merge correctly
edgelist = pd.DataFrame({'node1': [0], 'node2': [0], 'layer': [0]})
m = len(adjacency)
    for i in range(m):  # iterate over every layer; range(0, m + 1) would index past the end of the list
# Convert each matrix to a NetworkX Graph
temp_graph = nx.from_numpy_matrix(np.asarray(adjacency[i]), False)
# Convert NetworkX Graph to an edgelist and preprocess the resulting DataFrame
edges = nx.convert_matrix.to_pandas_edgelist(temp_graph)
edges = edges.drop(['weight'], axis=1) # Drop unnecessary weight column (Unweighted graph)
# Rename source and target columns to node1 and node2 (Undirected graph)
edges = edges.rename(columns={"source": "node1", "target": "node2"})
edges['layer'] = i # Set a third column to the layer number
edgelist = edgelist.append(edges) # Now with correct data structures, we can append to the edgelist
# The indices will be jumbled from many appends, so we can reset the index
edgelist = edgelist.reset_index(drop=True)
# Since we cannot selectively drop self looping edges in a NetworkX Graph structure,
# we can do so here by creating a boolean series of entries who's nodes are equal,
# and drop them from the DataFrame to preserve community structure later on.
edgelist = edgelist.drop(edgelist[edgelist['node1'] == edgelist['node2']].index)
# Since we dropped items in the line above, we must once again reset the indices
edgelist = edgelist.reset_index(drop=True)
return edgelist
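# Illustrative usage (commented out to keep the module import side-effect free);
# the two 3-node layers below are made-up toy data:
#
#   layer0 = [[0, 1, 0], [1, 0, 1], [0, 1, 0]]
#   layer1 = [[0, 1, 1], [1, 0, 0], [1, 0, 0]]
#   edges = adjacency_to_edgelist([layer0, layer1])
#   # -> DataFrame with columns node1, node2, layer (self-loops and the seed row dropped)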
| 3.546875 | 4 |
lab_3/lab_3_2.py | rybkinaliza/Python | 0 | 12795093 | #!/usr/bin/env python
# coding: utf-8
# In[13]:
import json
# In[18]:
def pluz(arg1, arg2):
try:
s = arg1 + arg2
except TypeError:
s = int(arg1) + int(arg2)
return s
# In[ ]:
| 3.1875 | 3 |
domonic/events.py | Jordan-Cottle/domonic | 1 | 12795094 | """
domonic.events
====================================
dom events
"""
# from typing import *
import time
# TODO - bring EventTarget here and get rid of this one?
class EventDispatcher(object):
""" EventDispatcher is a class you can extend to give your obj event dispatching abilities """
def __init__(self, *args, **kwargs):
self.listeners = {}
def hasEventListener(self, _type):
return _type in self.listeners
# TODO - event: str, function, useCapture: bool
# def addEventListener(self, event: str, function, useCapture: bool) -> None:
def addEventListener(self, _type, callback, *args, **kwargs):
if _type not in self.listeners:
self.listeners[_type] = []
self.listeners[_type].append(callback)
def removeEventListener(self, _type, callback):
if _type not in self.listeners:
return
stack = self.listeners[_type]
for thing in stack:
if thing == callback:
stack.remove(thing)
return
def dispatchEvent(self, event):
if event.type not in self.listeners:
return True # huh?. surely false?
stack = self.listeners[event.type]
# .slice()
event.target = self # TODO/NOTE - is this correct? - cant think where else would set it
for thing in stack:
try:
thing(event)
# type(thing, (Event,), self)
except Exception as e:
print(e)
thing() # try calling without params, user may not create param
return not event.defaultPrevented
class Event(object):
""" event """
EMPTIED = "emptied" #:
ABORT = "abort" #:
AFTERPRINT = "afterprint" #:
BEFOREPRINT = "beforeprint" #:
BEFOREUNLOAD = "beforeunload" #:
CANPLAY = "canplay" #:
CANPLAYTHROUGH = "canplaythrough" #:
CHANGE = "change" #:
DURATIONCHANGE = "durationchange" #:
ENDED = "ended" #:
ERROR = "error" #:
FULLSCREENCHANGE = "fullscreenchange" #:
FULLSCREENERROR = "fullscreenerror" #:
INPUT = "input" #:
INVALID = "invalid" #:
LOAD = "load" #:
LOADEDDATA = "loadeddata" #:
LOADEDMETADATA = "loadedmetadata" #:
MESSAGE = "message" #:
OFFLINE = "offline" #:
ONLINE = "online" #:
OPEN = "open" #:
PAUSE = "pause" #:
PLAY = "play" #:
PLAYING = "playing" #:
PROGRESS = "progress" #:
RATECHANGE = "ratechange" #:
RESIZE = "resize" #:
RESET = "reset" #:
SCROLL = "scroll" #:
SEARCH = "search" #:
SEEKED = "seeked" #:
SEEKING = "seeking" #:
SELECT = "select" #:
SHOW = "show" #:
STALLED = "stalled" #:
SUBMIT = "submit" #:
SUSPEND = "suspend" #:
TOGGLE = "toggle" #:
UNLOAD = "unload" #:
VOLUMECHANGE = "volumechange" #:
WAITING = "waiting" #:
# Event("look", {"bubbles":true, "cancelable":false});
def __init__(self, _type=None, *args, **kwargs):
# print('type', _type)
self.type = _type
self.bubbles = None
self.cancelable = None
self.cancelBubble = None
self.composed = None
self.currentTarget = None
self.defaultPrevented = False
self.eventPhase = None
self.explicitOriginalTarget = None
self.isTrusted = None
self.originalTarget = None
self.returnValue = None
self.srcElement = None
self.target = None
# ms = time.time_ns() // 1000000 3.7 up
self.timeStamp = int(round(time.time() * 1000))
def composedPath(self):
return self.type + ":" + str(self.timeStamp)
def initEvent(self, _type=None, *args, **kwargs):
self.__init__(_type, args, kwargs)
def stopPropagation(self):
"""[prevents further propagation of the current event in the capturing and bubbling phases]"""
# self.defaultPrevented = True
# self.returnValue = None
# self.originalTarget = None
# self.explicitOriginalTarget = None
# self.target = None
# self.srcElement = None
# self.bubbles = None
# self.cancelable = None
# self.cancelBubble = None
# self.composed = None
# self.currentTarget = None
# self.eventPhase = None
# self.isTrusted = None
# self.returnValue = None
# self.timeStamp = int(round(time.time() * 1000))
# self.type = None
pass
def msConvertURL(self):
pass
def preventDefault(self):
pass
def stopImmediatePropagation(self):
pass
class MouseEvent(Event):
""" mouse events """
CLICK = "click" #:
CONTEXTMENU = "contextmenu" #:
DBLCLICK = "dblclick" #:
MOUSEDOWN = "mousedown" #:
MOUSEENTER = "mouseenter" #:
MOUSELEAVE = "mouseleave" #:
MOUSEMOVE = "mousemove" #:
MOUSEOVER = "mouseover" #:
MOUSEOUT = "mouseout" #:
MOUSEUP = "mouseup" #:
def __init__(self, _type, *args, **kwargs):
# self.args = args
# self.kwargs = kwargs
self.x = 0
self.y = 0
self._clientX = 0
self._clientX = 0
self._altKey = False
self._ctrlKey = False
self._shiftKey = False
self._metaKey = False
self._button = None
self._buttons = []
super().__init__(_type, *args, **kwargs)
def initMouseEvent(self, _type=None, canBubble=True, cancelable=True, view=None,
detail=None, screenX=0, screenY=0, clientX=0, clientY=0,
ctrlKey=False, altKey=False, shiftKey=False, metaKey=False,
button=None, relatedTarget=None, from_json={}, *args, **kwargs):
# print('initMouseEvent')
self._type = _type
self.canBubble = canBubble
self.cancelable = cancelable
self.view = view
self.detail = detail
self.screenX = screenX
self.screenY = screenY
self._clientX = clientX
self._clientY = clientY
self._ctrlKey = ctrlKey
self._altKey = altKey
self._shiftKey = shiftKey
self._metaKey = metaKey
self._button = button
self.relatedTarget = relatedTarget
# TODO - parse from_json - so can relay
@property
def clientX(self):
return self.x
@property
def clientY(self):
return self.y
@property
def altKey(self):
return self._altKey
@property
def ctrlKey(self):
return self._ctrlKey
@property
def shiftKey(self):
return self._shiftKey
@property
def metaKey(self):
return self._metaKey
@property
def button(self):
return self._button
@property
def buttons(self):
return self._buttons
@property
def which(self):
return self._button
# MOUSE_EVENT
# getModifierState() Returns an array containing target ranges that will be affected by the insertion/deletion MouseEvent
# MovementX Returns the horizontal coordinate of the mouse pointer relative to the position of the last mousemove event MouseEvent
# MovementY Returns the vertical coordinate of the mouse pointer relative to the position of the last mousemove event MouseEvent
# offsetX Returns the horizontal coordinate of the mouse pointer relative to the position of the edge of the target element MouseEvent
# offsetY Returns the vertical coordinate of the mouse pointer relative to the position of the edge of the target element MouseEvent
# pageX Returns the horizontal coordinate of the mouse pointer, relative to the document, when the mouse event was triggered MouseEvent
# pageY Returns the vertical coordinate of the mouse pointer, relative to the document, when the mouse event was triggered MouseEvent
# region MouseEvent
# relatedTarget Returns the element related to the element that triggered the mouse event MouseEvent, FocusEvent
class KeyboardEvent(Event):
""" keyboard events """
KEYDOWN = "keydown" #:
KEYPRESS = "keypress" #:
KEYUP = "keyup" #:
def __init__(self, _type, *args, **kwargs):
# self.args = args
# self.kwargs = kwargs
self._altKey = False
self._ctrlKey = False
self._shiftKey = False
self._metaKey = False
self.charCode = None
self.code = None
self.key = None
self.keyCode = None
super().__init__(_type, *args, **kwargs)
def initKeyboardEvent(self, typeArg, canBubbleArg, cancelableArg, viewArg, charArg, keyArg,
locationArg, modifiersListArg, repeat):
self._type = typeArg
self.canBubbleArg = canBubbleArg
self.cancelableArg = cancelableArg
self.viewArg = viewArg
self.charArg = charArg
self.keyArg = keyArg
self.locationArg = locationArg
self.modifiersListArg = modifiersListArg
self.repeat = repeat
@property
def altKey(self):
return self._altKey
@property
def ctrlKey(self):
return self._ctrlKey
@property
def shiftKey(self):
return self._shiftKey
@property
def metaKey(self):
return self._metaKey
@property
def unicode(self):
return self.key
# @property
# def keyCode(self):
# return self.keyCode
# @property
# def charCode(self):
# return self.charCode
# @property
# def code(self):
# return self.code
# @property
# def key(self):
# return self.key
# def isComposing(self, *args, **kwargs):
# pass
# KeyboardEvent
# isComposing Returns whether the state of the event is composing or not InputEvent, KeyboardEvent
# repeat Returns whether a key is being hold down repeatedly, or not KeyboardEvent
# location Returns the location of a key on the keyboard or device KeyboardEvent
class UIEvent(Event):
""" UIEvent """
def __init__(self, _type, *args, **kwargs):
self.detail = None
self.view = None
super().__init__(_type, *args, **kwargs)
class CompositionEvent(UIEvent):
""" CompositionEvent """
START = "compositionstart"
END = "compositionend"
UPDATE = "compositionupdate"
def __init__(self, _type, *args, **kwargs):
self.data = None #: Returns the characters generated by the input method that raised the event
self.locale = None
super().__init__(_type, *args, **kwargs)
class FocusEvent(Event):
""" FocusEvent """
BLUR = "blur" #:
FOCUS = "focus" #:
FOCUSIN = "focusin" #:
FOCUSOUT = "focusout" #:
def __init__(self, _type, *args, **kwargs):
self.relatedTarget = None
super().__init__(_type, *args, **kwargs)
class TouchEvent(Event):
""" TouchEvent """
TOUCHCANCEL = "touchcancel" #:
TOUCHEND = "touchend" #:
TOUCHMOVE = "touchmove" #:
TOUCHSTART = "touchstart" #:
def __init__(self, _type, *args, **kwargs):
self.altKey = None
self.changedTouches = None
self.ctrlKey = None
self.metaKey = None
self.shiftKey = None
self.targetTouches = None
self.touches = None
super().__init__(_type, *args, **kwargs)
class WheelEvent(Event):
""" WheelEvent """
MOUSEWHEEL = "mousewheel" # DEPRECATED - USE WHEEL #:
WHEEL = "wheel" #:
def __init__(self, _type, *args, **kwargs):
self.deltaX = None
self.deltaY = None
self.deltaZ = None
self.deltaMode = None
super().__init__(_type, *args, **kwargs)
class AnimationEvent(Event):
""" AnimationEvent """
ANIMATIONEND = "animationend" #:
ANIMATIONITERATION = "animationiteration" #:
ANIMATIONSTART = "animationstart" #:
def __init__(self, _type, *args, **kwargs):
self.animationName = None
""" Returns the name of the animation """
self.elapsedTime = None
""" Returns the number of seconds an animation has been running """
self.pseudoElement = None
""" Returns the name of the pseudo-element of the animation """
super().__init__(_type, *args, **kwargs)
class ClipboardEvent(Event):
""" ClipboardEvent """
COPY = "copy" #:
CUT = "cut" #:
PASTE = "paste" #:
def __init__(self, _type, *args, **kwargs):
self.clipboardData = None
""" Returns an object containing the data affected by the clipboard operation """
super().__init__(_type, *args, **kwargs)
class ErrorEvent(Event):
""" ErrorEvent """
ERROR = "error" #:
def __init__(self, _type, *args, **kwargs):
self.message = None
# self.filename=None
# self.lineno=0
# self.colno=0
# self.error={}
super().__init__(_type, *args, **kwargs)
class SubmitEvent(Event):
""" SubmitEvent """
SUBMIT = "submit" #:
def __init__(self, _type, *args, **kwargs):
super().__init__(_type, *args, **kwargs)
class PointerEvent(Event):
""" PointerEvent """
POINTER = "pointer" #:
def __init__(self, _type, *args, **kwargs):
self.pointerId = None
self.width = None
self.height = None
self.pressure = None
self.tangentialPressure = None
self.tiltX = None
self.tiltY = None
self.twist = None
self.pointerType = None
self.isPrimary = None
super().__init__(_type, *args, **kwargs)
class BeforeUnloadEvent(Event):
BEFOREUNLOAD = "beforeunload" #:
""" BeforeUnloadEvent """
def __init__(self, _type, *args, **kwargs):
super().__init__(_type, *args, **kwargs)
class SVGEvent(Event):
""" SVGEvent """
def __init__(self, _type, *args, **kwargs):
super().__init__(_type, *args, **kwargs)
class TimerEvent(Event):
TIMER = "timer" #:
""" TimerEvent """
def __init__(self, _type, *args, **kwargs):
super().__init__(_type, *args, **kwargs)
class DragEvent(Event):
""" DragEvent """
DRAG = "drag" #:
END = "dragend" #:
ENTER = "dragenter" #:
EXIT = "dragexit" #:
LEAVE = "dragleave" #:
OVER = "dragover" #:
START = "dragstart" #:
DROP = "drop" #:
def __init__(self, _type, *args, **kwargs):
self.dataTransfer = None
""" Returns the data that is dragged/dropped """
super().__init__(_type, *args, **kwargs)
class HashChangeEvent(Event):
""" HashChangeEvent """
CHANGE = "hashchange" #:
def __init__(self, _type, *args, **kwargs):
self.newURL = None
self.oldURL = None
super().__init__(_type, *args, **kwargs)
class InputEvent(Event):
""" InputEvent """
def __init__(self, _type, *args, **kwargs):
self.data = None
""" Returns the inserted characters """
        self.dataTransfer = None
        """ Returns an object containing information about the inserted/deleted data """
        self.getTargetRanges = None
        """ Returns an array containing target ranges that will be affected by the insertion/deletion """
        self.inputType = None
        """ Returns the type of the change (i.e "inserting" or "deleting") """
        self.isComposing = None
        """ Returns whether the state of the event is composing or not """
super().__init__(_type, *args, **kwargs)
class PageTransitionEvent(Event):
""" PageTransitionEvent """
PAGEHIDE = "pagehide" #:
PAGESHOW = "pageshow" #:
def __init__(self, _type, *args, **kwargs):
self.persisted = None
""" Returns whether the webpage was cached by the browser """
super().__init__(_type, *args, **kwargs)
class PopStateEvent(Event):
""" PopStateEvent """
def __init__(self, _type, *args, **kwargs):
self.state = None
""" Returns an object containing a copy of the history entries """
super().__init__(_type, *args, **kwargs)
class StorageEvent(Event):
""" StorageEvent """
def __init__(self, _type, *args, **kwargs):
self.key = None
""" Returns the key of the changed storage item """
self.newValue = None
""" Returns the new value of the changed storage item """
self.oldValue = None
""" Returns the old value of the changed storage item """
self.storageArea = None
""" Returns an object representing the affected storage object """
self.url = None
""" Returns the URL of the changed item's document """
super().__init__(_type, *args, **kwargs)
class TransitionEvent(Event):
""" TransitionEvent """
TRANSITIONEND = "transitionend" #:
def __init__(self, _type, *args, **kwargs):
self.propertyName = None
""" Returns the name of the transition"""
self.elapsedTime = None
""" Returns the number of seconds a transition has been running """
self.pseudoElement = None
""" Returns the name of the pseudo-element of the transition """
super().__init__(_type, *args, **kwargs)
class ProgressEvent(Event):
""" ProgressEvent """
LOADSTART = "loadstart" #:
def __init__(self, _type, *args, **kwargs):
super().__init__(_type, *args, **kwargs)
class CustomEvent(Event):
""" CustomEvent """
def __init__(self, _type, *args, **kwargs):
self.detail = None
super().__init__(_type, *args, **kwargs)
def initCustomEvent(self):
pass
class GamePadEvent(Event):
""" GamePadEvent """
START = "gamepadconnected" #:
STOP = "gamepaddisconnected" #:
def __init__(self, _type, *args, **kwargs):
self.gamepad = None
super().__init__(_type, *args, **kwargs)
class TweenEvent(Event):
""" TweenEvent """
START = "onStart" #:
STOP = "onStop" #:
RESET = "onReset" #:
PAUSE = "onPause" #:
UNPAUSE = "onUnPause" #:
UPDATE_START = "onUpdateStart" #:
UPDATE_END = "onUpdateEnd" #:
COMPLETE = "onComplete" #:
TIMER = "onTimer" #:
_source = None
@property
def source(self):
return self._source
@source.setter
def source(self, source):
self._source = source
def __init__(self, _type, source=None, bubbles=False, cancelable=False):
# super.__init__(self, type, bubbles, cancelable)
super().__init__(_type) # TODO -
self.source = source
class GlobalEventHandler: # (EventDispatcher):
# def __init__(self):
# super().__init__(self)
# self.addEventListener(KeyboardEvent.KEYDOWN, self.onkeydown)
# self.addEventListener(KeyboardEvent.KEYUP, self.onkeyup)
# self.addEventListener(MouseEvent.MOUSEMOVE, self.onmousemove)
# self.addEventListener(MouseEvent.MOUSEDOWN, self.onmousedown)
# self.addEventListener(MouseEvent.MOUSEUP, self.onmouseup)
# self.addEventListener(DragEvent.DRAG, self.ondrag)
# self.addEventListener(DragEvent.END, self.ondragend)
# self.addEventListener(DragEvent.ENTER, self.ondragenter)
# self.addEventListener(DragEvent.EXIT, self.ondragexit)
# self.addEventListener(DragEvent.LEAVE, self.ondragleave)
# self.addEventListener(DragEvent.OVER, self.ondragover)
# self.addEventListener(DragEvent.START, self.ondragstart)
# self.addEventListener(DragEvent.DROP, self.ondrop)
# self.addEventListener(ClipboardEvent.CUT, self.oncut)
# self.addEventListener(ClipboardEvent.COPY, self.oncopy)
# self.addEventListener(ClipboardEvent.PASTE, self.onpaste)
def onabort(self, event):
print(event)
raise NotImplementedError
def onblur(self, event):
print(event)
raise NotImplementedError
def oncancel(self, event):
print(event)
raise NotImplementedError
def oncanplay(self, event):
print(event)
raise NotImplementedError
def oncanplaythrough(self, event):
print(event)
raise NotImplementedError
def onchange(self, event):
print(event)
raise NotImplementedError
def onclick(self, event):
print(event)
raise NotImplementedError
def onclose(self, event):
print(event)
raise NotImplementedError
def oncontextmenu(self, event):
print(event)
raise NotImplementedError
def oncuechange(self, event):
print(event)
raise NotImplementedError
def ondblclick(self, event):
print(event)
raise NotImplementedError
def ondrag(self, event):
print(event)
raise NotImplementedError
def ondragend(self, event):
print(event)
raise NotImplementedError
def ondragenter(self, event):
print(event)
raise NotImplementedError
def ondragexit(self, event):
print(event)
raise NotImplementedError
def ondragleave(self, event):
print(event)
raise NotImplementedError
def ondragover(self, event):
print(event)
raise NotImplementedError
def ondragstart(self, event):
print(event)
raise NotImplementedError
def ondrop(self, event):
print(event)
raise NotImplementedError
def ondurationchange(self, event):
print(event)
raise NotImplementedError
def onemptied(self, event):
print(event)
raise NotImplementedError
def onended(self, event):
print(event)
raise NotImplementedError
def onerror(self, event):
print(event)
raise NotImplementedError
def onfocus(self, event):
print(event)
raise NotImplementedError
def ongotpointercapture(self, event):
print(event)
raise NotImplementedError
def oninput(self, event):
print(event)
raise NotImplementedError
def oninvalid(self, event):
print(event)
raise NotImplementedError
def onkeydown(self, event):
print(event)
raise NotImplementedError
def onkeypress(self, event):
print(event)
raise NotImplementedError
def onkeyup(self, event):
print(event)
raise NotImplementedError
def onload(self, event):
print(event)
raise NotImplementedError
def onloadeddata(self, event):
print(event)
raise NotImplementedError
def onloadedmetadata(self, event):
print(event)
raise NotImplementedError
def onloadend(self, event):
print(event)
raise NotImplementedError
def onloadstart(self, event):
print(event)
raise NotImplementedError
def onlostpointercapture(self, event):
print(event)
raise NotImplementedError
def onmouseenter(self, event):
print(event)
raise NotImplementedError
def onmouseleave(self, event):
print(event)
raise NotImplementedError
def onmousemove(self, event):
print(event)
raise NotImplementedError
def onmouseout(self, event):
print(event)
raise NotImplementedError
def onmouseover(self, event):
print(event)
raise NotImplementedError
def onmouseup(self, event):
print(event)
raise NotImplementedError
def onpause(self, event):
print(event)
raise NotImplementedError
def onplay(self, event):
print(event)
raise NotImplementedError
def onplaying(self, event):
print(event)
raise NotImplementedError
def onpointercancel(self, event):
print(event)
raise NotImplementedError
def onpointerdown(self, event):
print(event)
raise NotImplementedError
def onpointerenter(self, event):
print(event)
raise NotImplementedError
def onpointerleave(self, event):
print(event)
raise NotImplementedError
def onpointermove(self, event):
print(event)
raise NotImplementedError
def onpointerout(self, event):
print(event)
raise NotImplementedError
def onpointerover(self, event):
print(event)
raise NotImplementedError
def onpointerup(self, event):
print(event)
raise NotImplementedError
def onprogress(self, event):
print(event)
raise NotImplementedError
def onratechange(self, event):
print(event)
raise NotImplementedError
def onreset(self, event):
print(event)
raise NotImplementedError
def onresize(self, event):
print(event)
raise NotImplementedError
def onscroll(self, event):
print(event)
raise NotImplementedError
def onseeked(self, event):
print(event)
raise NotImplementedError
def onseeking(self, event):
print(event)
raise NotImplementedError
def onselect(self, event):
print(event)
raise NotImplementedError
def onselectionchange(self, event):
print(event)
raise NotImplementedError
def onselectstart(self, event):
print(event)
raise NotImplementedError
def onshow(self, event):
print(event)
raise NotImplementedError
def onstalled(self, event):
print(event)
raise NotImplementedError
def onsubmit(self, event):
print(event)
raise NotImplementedError
def onsuspend(self, event):
print(event)
raise NotImplementedError
def ontimeupdate(self, event):
print(event)
raise NotImplementedError
def onvolumechange(self, event):
print(event)
raise NotImplementedError
def onwaiting(self, event):
print(event)
raise NotImplementedError
def onwheel(self, event):
print(event)
raise NotImplementedError
def onanimationcancel(self, event):
print(event)
raise NotImplementedError
def onanimationend(self, event):
print(event)
raise NotImplementedError
def onanimationiteration(self, event):
print(event)
raise NotImplementedError
def onauxclick(self, event):
print(event)
raise NotImplementedError
def onformdata(self, event):
print(event)
raise NotImplementedError
def onmousedown(self, event):
print(event)
raise NotImplementedError
def ontouchcancel(self, event):
print(event)
raise NotImplementedError
def ontouchstart(self, event):
print(event)
raise NotImplementedError
def ontransitioncancel(self, event):
print(event)
raise NotImplementedError
def ontransitionend(self, event):
print(event)
raise NotImplementedError
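# Illustrative usage sketch (commented; added for clarity, not part of the original module):
# dispatcher = EventDispatcher()
# dispatcher.addEventListener(MouseEvent.CLICK, lambda e: print('clicked at', e.clientX, e.clientY))
# dispatcher.dispatchEvent(MouseEvent(MouseEvent.CLICK))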
| 2.6875 | 3 |
load_dataset.py | AmanPriyanshu/Mapping-Temperature-using-DBNs | 0 | 12795095 | import pandas as pd
import numpy as np
import torch
def min_max_x(x):
for index, col in enumerate(x.T):
min_col = np.min(col)
max_col = np.max(col)
if min_col != max_col:
x.T[index] = (x.T[index] - min_col)/(max_col - min_col)
else:
x.T[index] = x.T[index] - min_col
return x
def load_dataset(path='./processed_dataset/data.csv', split=0.8, shuffle=True, seed=0):
np.random.seed(seed)
df = pd.read_csv(path)
df = df.values
if shuffle:
np.random.shuffle(df)
train = df[:int(df.shape[0]*split)]
validation = df[int(df.shape[0]*split):]
train_x, train_y = train.T[:12].T, train.T[12:].T
validation_x, validation_y = validation.T[:12].T, validation.T[12:].T
train_x, validation_x = min_max_x(train_x), min_max_x(validation_x)
train_x, train_y, validation_x, validation_y = train_x.astype(np.float32), train_y.astype(np.float32), validation_x.astype(np.float32), validation_y.astype(np.float32)
train_x, train_y, validation_x, validation_y = torch.from_numpy(train_x), torch.from_numpy(train_y), torch.from_numpy(validation_x), torch.from_numpy(validation_y)
return train_x, train_y, validation_x, validation_y
if __name__ == '__main__':
train_x, train_y, validation_x, validation_y = load_dataset()
print(train_x.shape, train_y.shape, validation_x.shape, validation_y.shape) | 2.59375 | 3 |
musictools/custom_exceptions.py | zfazli/zfmusicc | 49 | 12795096 | class SongNotFound(Exception):
def __init__(self, message, dErrorArg):
Exception.__init__(self, message, dErrorArg)
| 2.046875 | 2 |
tree_only.py | andrewli77/DISC | 7 | 12795097 | """
This code returns a DFA that is equivalent to the Tree constructed by compressing all the traces into one tree.
"""
import read_traces, DFA_utils_tree_only, time, tree_utils
def solve_tree_only(g_pos, G, Sigma, T, timeout, info, be_quiet=False):
assert g_pos in G, f"Error, g_pos not in G"
# creating the auxiliary tree structure
tree = tree_utils.create_tree(g_pos, G, Sigma, T, prune=False)
nodes = tree_utils.get_reachable_nodes(tree)
# creating an equivalent DFA
q_0 = 0
q_pos = 1
q_neg = 2
# assigning ids to each node
n_current = 3
for n in nodes:
if n.is_root():
n.assign_id(q_0)
elif n.is_positive_node():
n.assign_id(q_pos)
elif n.is_negative_node():
n.assign_id(q_neg)
else:
n.assign_id(n_current)
n_current += 1
# creating the dfa
dfa = {}
for ni in nodes:
if ni.is_terminal():
continue
ni_id = ni.get_id()
for nj in ni.get_children():
nj_id = nj.get_id()
ni_sigma = nj.get_psigma()
dfa[(ni_id,ni_sigma)] = nj_id
DFA_utils_tree_only.clean_dfa(q_0, dfa, T)
# Adding the probabilities
pos_prob = DFA_utils_tree_only.add_probabilities(q_0, dfa, T, g_pos)
return q_0, dfa, pos_prob
| 2.734375 | 3 |
reference/read_csv.py | SeanSyue/TensorflowReferences | 0 | 12795098 | # Source:
# https://www.tensorflow.org/api_guides/python/reading_data
import tensorflow as tf
# creates a FIFO queue for holding the filenames until the reader needs them.
# The following line is equivalent to :
# filename_queue = tf.train.string_input_producer(["file0.csv", "file1.csv"])
filename_queue = tf.train.string_input_producer([("file%d" % i) for i in range(2)])
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)
# Default values, in case of empty columns. Also specifies the type of the decoded result.
# Try a simpler expression:
# col1, col2, col3, col4, col5 = tf.decode_csv(value, record_defaults=[[1]]*5)
record_defaults = [[1], [1], [1], [1], [1]]
col1, col2, col3, col4, col5 = tf.decode_csv(
value, record_defaults=record_defaults)
features = tf.stack([col1, col2, col3, col4])
with tf.Session() as sess:
# Start populating the filename queue.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(1200):
# Retrieve a single instance:
example, label = sess.run([features, col5])
coord.request_stop()
coord.join(threads)
| 3.421875 | 3 |
mlapp/managers/pipeline_manager.py | kerenleibovich/mlapp | 33 | 12795099 | <reponame>kerenleibovich/mlapp<filename>mlapp/managers/pipeline_manager.py
import time
import datetime as dt
import importlib.util
import sys
import os
from mlapp.config import settings
from mlapp.utils.exceptions.base_exceptions import PipelineManagerException, FrameworkException
from mlapp.managers.io_manager import IOManager
AVAILABLE_STAGES = {}
BASE_CLASS_NAME = ''
TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
MANAGER_TYPES = {
'data_manager': 'DataManager',
'model_manager': 'ModelManager'
}
class PipelineManager(object):
def __init__(self, run_id, pipeline_input, _input: IOManager, _output: IOManager, config, *args, **kwargs):
"""
:param pipeline_input: the pipeline name string or list of strings
        :param _input: IOManager instance with input to the pipeline
        :param _output: IOManager instance to store the outputs of the pipelines to be saved externally
:param config: config string of the pipeline
:param args:
:param kwargs:
"""
for asset_name in AVAILABLE_STAGES:
if asset_name != BASE_CLASS_NAME:
AVAILABLE_STAGES[asset_name] = {}
self.pipeline_name = ''
# pipeline can be either list of stages or string of a default pipeline
if isinstance(pipeline_input, list):
self.stages = pipeline_input
if isinstance(pipeline_input, str):
self.pipeline_name = " '" + pipeline_input + "'"
self.stages = settings.get('pipelines', {}).get(pipeline_input, [])
self.config = config
self.run_id = run_id
self.input_manager = _input
self.output_manager = _output
self.asset_name = self.config.get('job_settings', {}).get('asset_name', '')
self.data_manager_instance = self.create_manager_instance('data')
self.model_manager_instance = self.create_manager_instance('model')
# first inputs
self.state = dict.fromkeys(self.stages, {})
def create_manager_instance(self, manager_type):
"""
        Creates a manager instance whose class is defined in the asset. For example: model_manager or data_manager
:param manager_type: the type : e.g. "data", "model"
:return: instance of the manager
"""
manager_file_name = self.asset_name + '_' + manager_type + '_manager'
manager_module = 'assets.' + self.asset_name + '.' + manager_file_name
manager_module_path = os.path.join('assets', self.asset_name, f'{manager_file_name }.py')
manager_class_name = ''.join(x.capitalize() or '_' for x in manager_file_name.split('_')) # CamelCase
try:
spec = importlib.util.spec_from_file_location(manager_module, manager_module_path)
module = importlib.util.module_from_spec(spec)
sys.modules[spec.name] = module
spec.loader.exec_module(module)
manager_class = getattr(module, manager_class_name)
return manager_class(self.config.copy(), self.input_manager, self.output_manager, self.run_id)
except Exception as e:
print("Couldn't import class of " + manager_type + " manager for model: " + self.asset_name)
print(">>> Please verify your " + manager_type +
" manager file/directory/class names are in the following convention:")
print(">>> Directory: {{asset_name}}")
print(">>> File: {{asset_name}}_" + manager_type + "_manager.py")
print(">>> Class: {{asset_name_capitalized}}" + manager_type.capitalize() + "Manager")
print(">>> Expected Directory name: " + self.asset_name)
print(">>> Expected Manager file name: " + manager_file_name)
print(">>> Expected Manager class name: " + manager_class_name)
raise FrameworkException(str(e))
def extract_stage(self, stage_name):
"""
        Gets the pipeline stage dictionary containing the function and the manager class it resides in.
:param stage_name: the name of the stage (e.g. "load_data")
:return: pipeline stage dictionary
"""
asset_name = ''.join(x.capitalize() or '_' for x in self.asset_name.split('_')) # CamelCase
if asset_name not in AVAILABLE_STAGES:
raise PipelineManagerException(
"Missing decoration for your pipeline functions! Add '@pipeline' decorator above functions"
" you want to use in your asset '{}'s Data Manager and Model Manager.".format(asset_name))
if stage_name not in AVAILABLE_STAGES[asset_name]:
# exists in one if the base classes
if stage_name in AVAILABLE_STAGES[BASE_CLASS_NAME]:
return AVAILABLE_STAGES[BASE_CLASS_NAME][stage_name]
raise PipelineManagerException(
"Function '{}' was not found in your asset! Add '@pipeline' decorator above your '{}' "
"function if you want to use it in your pipeline.".format(stage_name, stage_name))
return AVAILABLE_STAGES[asset_name][stage_name]
def extract_manager_instance(self, manager_type):
"""
Gets the instance of the manager - model_manager instance or data_manager instance
:param manager_type: string "data_manager" or "model_manager"
:return: model_manager instance or data_manager instance
"""
if manager_type == 'data_manager':
return self.data_manager_instance
else:
return self.model_manager_instance
def run(self, *arguments):
"""
Runs through the pipeline stages and passes relevant values between them
:param arguments: input for the first stage in the pipeline, will be passed with *args
        :return: IOManager of all the outputs to be stored
"""
print(">>>>>> Running pipeline" + self.pipeline_name + "...")
prev_stage_name = ''
for stage_name in self.stages:
start_time = time.strftime(TIME_FORMAT)
print(">>>>>> Running stage: {}...".format(stage_name))
stage = self.extract_stage(stage_name)
if prev_stage_name:
args = self.state[prev_stage_name]
else:
args = arguments
# execute stage
self.state[stage_name] = stage['function'](self.extract_manager_instance(stage['manager']), *args)
if not isinstance(self.state[stage_name], tuple):
self.state[stage_name] = (self.state[stage_name],)
prev_stage_name = stage_name
end_time = dt.datetime.strptime(
time.strftime(TIME_FORMAT), TIME_FORMAT) - dt.datetime.strptime(start_time, TIME_FORMAT)
print(">>>>>> It took me, {}.".format(end_time))
print(">>>>>> Finished running pipeline.")
return self.output_manager
class pipeline:
def __init__(self, fn):
self.fn = fn
def __set_name__(self, owner, name):
asset_name = owner.__name__
manager_type = None
for manager_type_key in MANAGER_TYPES:
if MANAGER_TYPES[manager_type_key] in asset_name:
manager_type = manager_type_key
if manager_type is None:
raise Exception("Wrong class name or placement of decorator! ('{}')".format(asset_name))
asset_name = asset_name.replace('DataManager', '').replace('ModelManager', '')
if asset_name not in AVAILABLE_STAGES:
AVAILABLE_STAGES[asset_name] = {}
if name in AVAILABLE_STAGES[asset_name]:
raise Exception("Duplicate stage name '{}' for pipelines found in asset '{}'"
.format(asset_name, name))
AVAILABLE_STAGES[asset_name][name] = {
'function': self.fn,
'manager': manager_type
}
return self.fn
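# Illustrative sketch of how the decorator registers a stage (the asset/class names below are
# hypothetical; real managers subclass the mlapp DataManager/ModelManager base classes):
# class ExampleAssetDataManager(DataManager):
#     @pipeline
#     def load_data(self):
#         return {"raw_data": []}
# # After class creation, AVAILABLE_STAGES['ExampleAsset']['load_data'] holds the undecorated
# # function together with the manager type 'data_manager'.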
| 2.3125 | 2 |
ajax_datatable/templatetags/ajax_datatable_tags.py | ivi3/django-ajax-datatable | 101 | 12795100 | <gh_stars>100-1000
from django import template
register = template.Library()
################################################################################
# Support for generic editing in the front-end
@register.filter
def model_verbose_name(model):
"""
Sample usage:
        {{model|model_verbose_name}}
"""
return model._meta.verbose_name
@register.filter
def model_verbose_name_plural(model):
"""
Sample usage:
        {{model|model_verbose_name_plural}}
"""
return model._meta.verbose_name_plural
@register.filter
def model_name(model):
"""
Sample usage:
{{model|model_name}}
"""
return model._meta.model_name
@register.filter
def app_label(model):
"""
Sample usage:
{{model|app_label}}
"""
return model._meta.app_label
@register.simple_tag(takes_context=True)
def testhasperm(context, model, action):
"""
    Returns True iff the user has the specified permission over the model.
For 'model', we accept either a Model class, or a string formatted as "app_label.model_name".
Sample usage:
{% testhasperm model 'view' as can_view_objects %}
{% if not can_view_objects %}
<h2>Sorry, you have no permission to view these objects</h2>
{% endif %}
"""
user = context['request'].user
if isinstance(model, str):
app_label, model_name = model.split('.')
else:
app_label = model._meta.app_label
model_name = model._meta.model_name
required_permission = '%s.%s_%s' % (app_label, action, model_name)
return user.is_authenticated and user.has_perm(required_permission)
@register.tag
def ifhasperm(parser, token):
"""
Check user permission over specified model.
(You can specify either a model or an object).
Sample usage:
{% ifhasperm model 'add' %}
<div style="color: #090">User can add objects</div>
{% else %}
<div style="color: #900">User cannot add objects</div>
{% endifhasperm %}
"""
# Separating the tag name from the parameters
try:
tag, model, action = token.contents.split()
except (ValueError, TypeError):
raise template.TemplateSyntaxError(
"'%s' tag takes three parameters" % tag)
default_states = ['ifhasperm', 'else']
end_tag = 'endifhasperm'
# Place to store the states and their values
states = {}
# Let's iterate over our context and find our tokens
while token.contents != end_tag:
current = token.contents
states[current.split()[0]] = parser.parse(default_states + [end_tag])
token = parser.next_token()
model_var = parser.compile_filter(model)
action_var = parser.compile_filter(action)
return CheckPermNode(states, model_var, action_var)
class CheckPermNode(template.Node):
def __init__(self, states, model_var, action_var):
self.states = states
self.model_var = model_var
self.action_var = action_var
def render(self, context):
# Resolving variables passed by the user
model = self.model_var.resolve(context)
action = self.action_var.resolve(context)
# Check user permission
if testhasperm(context, model, action):
html = self.states['ifhasperm'].render(context)
else:
html = self.states['else'].render(context) if 'else' in self.states else ''
return html
| 2.328125 | 2 |
vang/pio/replace_in_zip.py | mattiasl/scripts | 6 | 12795101 | <filename>vang/pio/replace_in_zip.py<gh_stars>1-10
#!/usr/bin/env python3
from argparse import ArgumentParser
from difflib import ndiff
from sys import argv
from zipfile import ZipFile, ZIP_DEFLATED
def diff(name, content, updated_content):
diffs = list(ndiff(content.splitlines(1), updated_content.splitlines(1)))
if diffs:
print(f"{name}\n{''.join(diffs)}")
def replace(content, replacements):
updated_content = content
for old, new in replacements.items():
updated_content = updated_content.replace(old, new)
return updated_content
def zip_read(zip_file, encoding='utf-8'):
with ZipFile(zip_file, 'r') as z:
for name in z.namelist():
yield (name, z.read(name).decode(encoding))
def update_zip(original_zip, updated_zip, replacements, encoding='utf-8', verbose=False):
with ZipFile(updated_zip, 'w', compression=ZIP_DEFLATED) as z:
for name, content in zip_read(original_zip, encoding=encoding):
updated_content = replace(content, replacements)
if verbose:
diff(name, content, updated_content)
z.writestr(name, updated_content)
def main(original_zip, updated_zip, replacements, encoding='utf-8', verbose=False):
# make a dict of replacements list
d = dict(zip(replacements[::2], replacements[1::2]))
update_zip(original_zip, updated_zip, d, encoding, verbose)
def parse_args(args):
parser = ArgumentParser(description='Replace in zip (e.g. zip, jar or war')
parser.add_argument('original_zip', help='original zip path')
parser.add_argument('updated_zip', help='updated zip path')
parser.add_argument(
'replacements',
nargs='+',
help='A list of replacements to be done in the zip. Index 0 will be replaced with index 1 and so on.'
)
parser.add_argument(
'-e',
'--encoding',
default='utf-8',
        help='text encoding used when reading and writing the zip contents (default: utf-8)')
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='Verbose output')
return parser.parse_args(args)
if __name__ == '__main__':
"""
Example:
./replace_in_zip.py original.zip updated.zip old1 new1 old2 new2 oldN newN
"""
main(**parse_args(argv[1:]).__dict__)
| 3.078125 | 3 |
graphene/dijkstra.py | sushanttripathy/graphene | 0 | 12795102 | <filename>graphene/dijkstra.py
__author__ = 'Sushant'
from graph import Graph
import threading
from prioritydict import PriorityDict
class Dijkstra(Graph):
def __init__(self, num_threads=0, use_priority_queue=1):
super(Dijkstra, self).__init__()
self.shortest_paths = {}
self.shortest_paths_guide = {}
self.num_threads = num_threads
if num_threads:
self.lock = threading.Lock()
self.use_priority_queue = use_priority_queue
return
def get_node_with_minimum_distance(self, q, distance, indices):
min_dist = float("inf")
min_ind = None
min_node_id = None
for ind, x in enumerate(q):
if distance[x] < min_dist:
min_ind = ind
min_node_id = x
min_dist = distance[x]
indices.append(min_ind)
return min_node_id
def calculate_shortest_paths_from(self, source_id):
distance = {}
previous = {}
q = None
if self.use_priority_queue:
q = PriorityDict()
else:
q = []
ind = []
distance[source_id] = 0
for x in self.nodes:
if x is not source_id:
distance[x] = float("inf")
previous[x] = None
if self.use_priority_queue:
q[x] = distance[x]
else:
q.append(x)
while len(q):
u = None
if self.use_priority_queue:
u = q.pop_smallest()
else:
u = self.get_node_with_minimum_distance(q, distance, ind)
index = ind.pop()
if type(index) is int:
del q[index]
else:
break
if isinstance(self.edges[u], dict):
for v in self.edges[u]:
if v in q:
alt = distance[u] + self.edges[u][v].strength
if alt < distance[v]:
distance[v] = alt
previous[v] = u
if self.use_priority_queue:
q[v] = distance[v]
if not self.num_threads:
self.shortest_paths[source_id] = distance
self.shortest_paths_guide[source_id] = previous
else:
self.lock.acquire()
self.shortest_paths[source_id] = distance
self.shortest_paths_guide[source_id] = previous
self.lock.release()
return
def calculate_all_shortest_paths(self):
if not self.num_threads:
for source_id in self.nodes:
self.calculate_shortest_paths_from(source_id)
else:
th = []
for source_id in self.nodes:
t = threading.Thread(target=self.calculate_shortest_paths_from, args=[source_id])
t.start()
th.append(t)
if len(th) >= self.num_threads:
while len(th):
_t = th.pop()
_t.join()
if len(th):
while len(th):
_t = th.pop()
_t.join()
return
def get_shortest_path_length(self, source_id, target_id):
if source_id in self.shortest_paths and isinstance(self.shortest_paths[source_id], dict):
if target_id in self.shortest_paths[source_id]:
return self.shortest_paths[source_id][target_id]
return None
def get_shortest_route(self, source_id, target_id, append_target=1):
if source_id in self.shortest_paths_guide and isinstance(self.shortest_paths_guide[source_id], dict):
if target_id in self.shortest_paths_guide[source_id]:
r = []
if self.shortest_paths_guide[source_id][target_id] is not source_id:
r = r + self.get_shortest_route(source_id, self.shortest_paths_guide[source_id][target_id], 0)
r.append(self.shortest_paths_guide[source_id][target_id])
if append_target:
r.append(target_id)
return r
return []
"""
G = Dijkstra(10, 1)
G.add_edge(1, 2, 0.1)
G.add_edge(1, 3, 0.3)
G.add_edge(2, 3, 0.1)
G.add_edge(3, 4, 0.1)
G.add_edge(2, 4, 0.5)
print "Calculating all possible shortest paths"
G.calculate_all_shortest_paths()
print "Shortest paths calculation finished"
print G.get_shortest_path_length(1, 4)
print G.get_shortest_route(1, 4)
"""
| 3.125 | 3 |
i18nparse/__init__.py | nueh/i18nparse | 5 | 12795103 | <reponame>nueh/i18nparse<gh_stars>1-10
from .version import __version__
from .i18nparse import activate, deactivate
| 0.804688 | 1 |
Machine Learning/TensorflowExamples/simple_gradient_descent.py | sarojjethva/Learning-Resources | 639 | 12795104 | """
Author: <NAME>
Github: github.com/yashbmewada
Program for demonstrating simple line fitting using Tensorflow and Gradient Descent Algorithm
This program trains the model to fit two values, slope(m) and x-intercept(b) in the equation
of line y=mx+b. Here we would provide very small dataset of randomly generated pointset xs and ys
and train the tensorflow model to adjust the values of m and b in order to fit a straight line.
This straight line can further be used to predict any unknown value Y for a given unknown X based on the
learned value of m and b.
"""
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # called in order to minimize the warnings about SSE4.1 instructions.
import tensorflow as tf
"""
Random points of X and Y form the training data, aka the dataset (training only; no validation or test split).
"""
xs = [0.00,2.00,4.00,6.00,8.00,10.00,12.00,14.00] #features
ys = [-0.82,-0.90,-0.12,0.26,0.31,0.64,1.02,1.00] #labels (actual outputs)
"""
Initial values for m and b. These values will be adjusted to fit the dataset points above.
"""
m_initial = -0.50
b_initial = 1.00
"""
tf.Variable : allows us to create variables whose values can be adjusted in order to learn at each pass on the dataset.
"""
m = tf.Variable(m_initial)
b = tf.Variable(b_initial)
"""
In order to fit the line, we minimize the "error" between each predicted y (computed from the current m and b)
and the actual y from "ys". Driving this squared distance down adjusts m and b toward the best-fitting line.
"""
error = 0.0
"""
We write an operation that accumulates the error while iterating over the X and Y values from the dataset [xs,ys].
Running this around 1000 times minimizes the error enough to give a respectable fit for the line.
"""
for x,y in zip(xs,ys):
predicted_y = m*x + b
    error += (y-predicted_y)**2 # squared difference between actual and predicted y, accumulated into the total error 'cost' we minimize.
"""
Now, to train on the operations defined above, we use TensorFlow's GradientDescentOptimizer and pass the "error"
to its minimize() function as a parameter.
When initializing the optimizer we set learning_rate = 0.001.
The learning rate defines the magnitude, or "how big" a jump we take, while minimizing the "cost" / "error".
Remember: too small a learning rate makes training very slow, while too big a learning rate may keep training from
ever finding an optimum solution. The best learning rate is found by trying different values; 0.001 is used here
because it usually works in most cases.
"""
optimizer_op = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(error)
"""
Tensorflow uses a "session" to run the above mentioned training steps.
So before starting the session it is always advisable to initialize variables randomly.
"""
init_op = tf.global_variables_initializer()
"""
All the calculations would now be done in a Session
"""
with tf.Session() as session:
session.run(init_op)
_ITERATIONS = 1000 #number of passes on the dataset
for iteration in range(_ITERATIONS):
session.run(optimizer_op) #calling our optimization operator to minimize error
slope, intercept = session.run((m,b)) #calling our adjusted values
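    # Illustrative prediction step (a sketch added for clarity, not in the original script):
    # with the learned slope and intercept, any unseen x maps to an estimated y via y = m*x + b.
    # x_unseen = 16.0                              # hypothetical new input
    # y_estimate = slope * x_unseen + intercept
    # print('estimated y for x=16.0:', y_estimate)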
print('slope: ', slope , 'Intercept: ', intercept) | 4.09375 | 4 |
blog/views/post_view.py | ShahadatShuvo/blood_lagbe | 3 | 12795105 | <gh_stars>1-10
from django.shortcuts import render
def postView(request, id):
return render(request, 'blog/blog.html', context={}) | 1.5625 | 2 |
sandbox/static/robots.txt.py | vituocgia/wshop-core | 0 | 12795106 | <filename>sandbox/static/robots.txt.py
XXXXXXXXXXX X
XXXXXXXXX
| 1.039063 | 1 |
Python/AI-ToolBox/preprocess ToolBox/convert_tool/convert_txt2excel.py | Lornatang/DayHR | 7 | 12795107 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 17:20:22 2019
convert txt to excel
@author: zyb_as
"""
import os
import argparse, textwrap
import xlwt
# set options
parser = argparse.ArgumentParser(description = 'convert txt to excel',
usage = textwrap.dedent('''\
command example:
python %(prog)s --file_name='test.txt' --splitter='\\t' '''),
formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument('--file_name', type = str, default = 'test.txt',
help = 'the path of the txt file')
parser.add_argument('--splitter', type = str, default = '\t',
help = 'the splitter for each line in the txt file.')
#parser.add_argument('--fields_num', type = int, default = 1,
# help = 'the fields number each line.')
parser.add_argument('--max_lines', type = int, default = 50000,
help = 'max lines number in one excel')
def download_from_txt():
# get options
args = parser.parse_args()
file_name = args.file_name
splitter = args.splitter
#fields_num = args.fields_num
max_lines = args.max_lines
if not os.path.exists(file_name):
print("ERROR! the file need to be convert does't exists")
excel_file = ''
if file_name[-4:] == '.txt':
excel_file = file_name[:-4] + '.xls'
else:
excel_file = file_name + '.xls'
if splitter == '\\t':
splitter = '\t'
cnt = 0
xls_index = 0
cur_excel_file = excel_file[:-4] + '_' + str(xls_index) + '.xls'
    # create the output workbook and sheet
workbook = xlwt.Workbook(encoding = 'utf-8')
worksheet = workbook.add_sheet('temp', cell_overwrite_ok = True)
worksheet.write(0, 0, label = 'Row 0, Column 0 Value')
for line in open(file_name, 'r').readlines():
if cnt == max_lines:
workbook.save(cur_excel_file)
xls_index += 1
cur_excel_file = excel_file[:-4] + '_' + str(xls_index) + '.xls'
workbook = xlwt.Workbook(encoding = 'utf-8')
worksheet = workbook.add_sheet('temp')
cnt = 0
item = line.split(splitter)
print(cnt)
for idx, it in enumerate(item):
worksheet.write(cnt, idx, it.decode('utf-8', 'ignore'))
cnt += 1
if cnt <= max_lines:
workbook.save(cur_excel_file)
if __name__ == "__main__":
download_from_txt() | 3.484375 | 3 |
backend/schema.py | ReynaldoCC/arango-backend | 0 | 12795108 | <gh_stars>0
from abc import ABC
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor, ABC):
pass
| 1.484375 | 1 |
ex17.py | Eieiphyu/python_exercises | 0 | 12795109 | <gh_stars>0
from sys import argv
from os.path import exists
script, from_file, to_file = argv
print(f"Copy from {from_file} to {to_file}")
# we could do these two on one line - how?
in_file = open(from_file)
indata = in_file.read()
print(f"Input file is {len(indata)} bytes long")
print(f"Doesoutput file exist? {exists(to_file)}")
print(f"Ready, hit RETURN to continute CTRL-C to abort.")
out_file = open(to_file, 'w')
out_file.write(indata)
print("Alright all sone")
out_file.close()
in_file.close()
| 3.015625 | 3 |
src/x2df/fileIOhandlers/fileIOhandler_parquet.py | mb-89/x2df | 0 | 12795110 | <gh_stars>0
from x2df.fileIOhandlers.__fileIOhandler__ import FileIOhandler
# we want to do the imports as late as possible to
# keep it snappy once we have more and more fileIOhandlers
class Handler(FileIOhandler):
def dump(self, df, dst, **kwargs):
# we import pyarrow here to make sure that is found by pyreqs.
# if it is not found, we get an error and need to install it.
if not dst:
return
import pyarrow # noqa: F401
df.to_parquet(dst)
def parse(self, path, postprocess=True, **kwargs):
import pyarrow # noqa: F401
import pandas as pd
dfraw = pd.read_parquet(path)
if postprocess:
return self.processRawDF(dfraw)
else:
return [dfraw]
def claim(self, path):
import pyarrow # noqa: F401
try:
pyarrow.parquet.read_schema(path)
return [path]
except: # noqa: E722 #this is fine. Reject any exception but never crash.
return []
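# Illustrative round-trip sketch (commented; it assumes Handler() needs no constructor
# arguments and that postprocess=False skips the base-class post-processing step):
# handler = Handler()
# handler.dump(some_dataframe, 'out.parquet')
# frames = handler.parse('out.parquet', postprocess=False)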
| 2.390625 | 2 |
build.py | Dachaz/scenery | 9 | 12795111 | from pybuilder.core import init, use_plugin, Author
use_plugin('python.core')
use_plugin('python.flake8')
use_plugin('python.unittest')
use_plugin('python.coverage')
use_plugin('python.distutils')
use_plugin("python.install_dependencies")
authors = [Author('Dachaz', '<EMAIL>')]
license = 'MIT'
name = 'scenery'
summary = 'A pattern-based scene release renamer'
description = """A command-line tool that automates renaming of so-called "Scene Release"
files by fetching episode names (from TVMaze) and which uses pattern-based generic building
blocks (show name, season number, episode number, episode title) to format the output.
"""
url = 'https://github.com/dachaz/scenery'
version = '1.0.1'
requires_python = ">=2.7"
default_task = ["install_dependencies", "analyze", "publish"]
@init
def initialize(project):
project.build_depends_on('mockito')
project.set_property('dir_source_main_python', 'src')
project.set_property('dir_source_unittest_python', 'test')
project.set_property('flake8_break_build', True)
project.set_property('flake8_include_test_sources', True)
project.set_property('flake8_include_scripts', True)
# relevant tests are in Scenery_tests.py
project.get_property('coverage_exceptions').append('scenery.__main__')
project.get_property('coverage_exceptions').append('scenery')
project.set_property('distutils_console_scripts', ['scenery = scenery:main'])
project.set_property('distutils_classifiers', [
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Topic :: Communications :: File Sharing',
'Topic :: Multimedia',
'Topic :: Multimedia :: Video',
'Topic :: Utilities'
])
| 2.125 | 2 |
pygsti/objects/matrixforwardsim.py | drewrisinger/pyGSTi | 1 | 12795112 | <filename>pygsti/objects/matrixforwardsim.py<gh_stars>1-10
""" Defines the MatrixForwardSimulator calculator class"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import warnings as _warnings
import numpy as _np
import numpy.linalg as _nla
import time as _time
import itertools as _itertools
import collections as _collections
from ..tools import mpitools as _mpit
from ..tools import slicetools as _slct
from ..tools.matrixtools import _fas
from .profiler import DummyProfiler as _DummyProfiler
from .label import Label as _Label
from .matrixevaltree import MatrixEvalTree as _MatrixEvalTree
from .forwardsim import ForwardSimulator
_dummy_profiler = _DummyProfiler()
# Smallness tolerances, used internally for conditional scaling required
# to control bulk products, their gradients, and their Hessians.
PSMALL = 1e-100
DSMALL = 1e-100
HSMALL = 1e-100
class MatrixForwardSimulator(ForwardSimulator):
"""
Encapsulates a calculation tool used by model objects to perform product
and derivatives-of-product calculations.
This is contained in a class separate from Model to allow for additional
model classes (e.g. ones which use entirely different -- non-gate-local
-- parameterizations of operation matrices and SPAM vectors) access to these
fundamental operations.
"""
def __init__(self, dim, simplified_op_server, paramvec):
"""
Construct a new MatrixForwardSimulator object.
Parameters
----------
dim : int
The gate-dimension. All operation matrices should be dim x dim, and all
SPAM vectors should be dim x 1.
        simplified_op_server : object
            Provides the layer operations (gates) and SPAM objects used in the
            computations, in a well-defined ordering used when taking
            derivatives.

        paramvec : ndarray
            The parameter vector of the Model.
"""
super(MatrixForwardSimulator, self).__init__(
dim, simplified_op_server, paramvec)
if self.evotype not in ("statevec", "densitymx"):
raise ValueError(("Evolution type %s is incompatbile with "
"matrix-based calculations" % self.evotype))
def copy(self):
""" Return a shallow copy of this MatrixForwardSimulator """
return MatrixForwardSimulator(self.dim, self.sos, self.paramvec)
def product(self, circuit, bScale=False):
"""
Compute the product of a specified sequence of operation labels.
Note: LinearOperator matrices are multiplied in the reversed order of the tuple. That is,
the first element of circuit can be thought of as the first gate operation
performed, which is on the far right of the product of matrices.
Parameters
----------
circuit : Circuit or tuple of operation labels
The sequence of operation labels.
bScale : bool, optional
When True, return a scaling factor (see below).
Returns
-------
product : numpy array
The product or scaled product of the operation matrices.
scale : float
Only returned when bScale == True, in which case the
actual product == product * scale. The purpose of this
is to allow a trace or other linear operation to be done
prior to the scaling.
"""
if bScale:
scaledGatesAndExps = {}
scale_exp = 0
G = _np.identity(self.dim)
for lOp in circuit:
if lOp not in scaledGatesAndExps:
opmx = self.sos.get_operation(lOp).todense()
ng = max(_nla.norm(opmx), 1.0)
scaledGatesAndExps[lOp] = (opmx / ng, _np.log(ng))
gate, ex = scaledGatesAndExps[lOp]
H = _np.dot(gate, G) # product of gates, starting with identity
scale_exp += ex # scale and keep track of exponent
if H.max() < PSMALL and H.min() > -PSMALL:
nG = max(_nla.norm(G), _np.exp(-scale_exp))
G = _np.dot(gate, G / nG); scale_exp += _np.log(nG) # LEXICOGRAPHICAL VS MATRIX ORDER
else: G = H
old_err = _np.seterr(over='ignore')
scale = _np.exp(scale_exp)
_np.seterr(**old_err)
return G, scale
else:
G = _np.identity(self.dim)
for lOp in circuit:
G = _np.dot(self.sos.get_operation(lOp).todense(), G) # LEXICOGRAPHICAL VS MATRIX ORDER
return G
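    # Illustrative note (added for clarity): with bScale=True a caller recovers the unscaled
    # product as, e.g.,
    #   G, s = fwdsim.product(('Gx', 'Gy'), bScale=True)   # hypothetical operation labels
    #   actual_product = G * s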
def _process_wrtFilter(self, wrtFilter, obj):
""" Helper function for doperation and hoperation below: pulls out pieces of
a wrtFilter argument relevant for a single object (gate or spam vec) """
#Create per-gate with-respect-to parameter filters, used to
# select a subset of all the derivative columns, essentially taking
# a derivative of only a *subset* of all the gate's parameters
if isinstance(wrtFilter, slice):
wrtFilter = _slct.indices(wrtFilter)
if wrtFilter is not None:
obj_wrtFilter = [] # values = object-local param indices
relevant_gpindices = [] # indices into original wrtFilter'd indices
gpindices = obj.gpindices_as_array()
for ii, i in enumerate(wrtFilter):
if i in gpindices:
relevant_gpindices.append(ii)
obj_wrtFilter.append(list(gpindices).index(i))
relevant_gpindices = _np.array(relevant_gpindices, _np.int64)
if len(relevant_gpindices) == 1:
#Don't return a length-1 list, as this doesn't index numpy arrays
# like length>1 lists do... ugh.
relevant_gpindices = slice(relevant_gpindices[0],
relevant_gpindices[0] + 1)
elif len(relevant_gpindices) == 0:
#Don't return a length-0 list, as this doesn't index numpy arrays
# like length>1 lists do... ugh.
relevant_gpindices = slice(0, 0) # slice that results in a zero dimension
else:
obj_wrtFilter = None
relevant_gpindices = obj.gpindices
return obj_wrtFilter, relevant_gpindices
#Vectorizing Identities. (Vectorization)
# Note when vectorizing op uses numpy.flatten rows are kept contiguous, so the first identity below is valid.
# Below we use E(i,j) to denote the elementary matrix where all entries are zero except the (i,j) entry == 1
# if vec(.) concatenates rows (which numpy.flatten does)
# vec( A * E(0,1) * B ) = vec( mx w/ row_i = A[i,0] * B[row1] ) = A tensor B^T * vec( E(0,1) )
# In general: vec( A * X * B ) = A tensor B^T * vec( X )
# if vec(.) stacks columns
# vec( A * E(0,1) * B ) = vec( mx w/ col_i = A[col0] * B[0,1] ) = B^T tensor A * vec( E(0,1) )
# In general: vec( A * X * B ) = B^T tensor A * vec( X )
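    # Quick numeric check of the first identity (illustrative sketch, not executed here):
    #   A, X, B = _np.random.rand(3, 4, 4)            # three random 4x4 matrices
    #   lhs = _np.dot(A, _np.dot(X, B)).flatten()      # vec() as row-major flatten
    #   rhs = _np.dot(_np.kron(A, B.T), X.flatten())
    #   assert _np.allclose(lhs, rhs)                  # vec(A*X*B) == (A tensor B^T) vec(X)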
def doperation(self, opLabel, flat=False, wrtFilter=None):
""" Return the derivative of a length-1 (single-gate) sequence """
dim = self.dim
gate = self.sos.get_operation(opLabel)
op_wrtFilter, gpindices = self._process_wrtFilter(wrtFilter, gate)
# Allocate memory for the final result
num_deriv_cols = self.Np if (wrtFilter is None) else len(wrtFilter)
flattened_dprod = _np.zeros((dim**2, num_deriv_cols), 'd')
_fas(flattened_dprod, [None, gpindices],
gate.deriv_wrt_params(op_wrtFilter)) # (dim**2, nParams[opLabel])
if _slct.length(gpindices) > 0: # works for arrays too
# Compute the derivative of the entire operation sequence with respect to the
# gate's parameters and fill appropriate columns of flattened_dprod.
#gate = self.sos.get_operation[opLabel] UNNEEDED (I think)
_fas(flattened_dprod, [None, gpindices],
gate.deriv_wrt_params(op_wrtFilter)) # (dim**2, nParams in wrtFilter for opLabel)
if flat:
return flattened_dprod
else:
# axes = (gate_ij, prod_row, prod_col)
return _np.swapaxes(flattened_dprod, 0, 1).reshape((num_deriv_cols, dim, dim))
def hoperation(self, opLabel, flat=False, wrtFilter1=None, wrtFilter2=None):
""" Return the hessian of a length-1 (single-gate) sequence """
dim = self.dim
gate = self.sos.get_operation(opLabel)
op_wrtFilter1, gpindices1 = self._process_wrtFilter(wrtFilter1, gate)
op_wrtFilter2, gpindices2 = self._process_wrtFilter(wrtFilter2, gate)
# Allocate memory for the final result
num_deriv_cols1 = self.Np if (wrtFilter1 is None) else len(wrtFilter1)
num_deriv_cols2 = self.Np if (wrtFilter2 is None) else len(wrtFilter2)
flattened_hprod = _np.zeros((dim**2, num_deriv_cols1, num_deriv_cols2), 'd')
if _slct.length(gpindices1) > 0 and _slct.length(gpindices2) > 0: # works for arrays too
# Compute the derivative of the entire operation sequence with respect to the
# gate's parameters and fill appropriate columns of flattened_dprod.
_fas(flattened_hprod, [None, gpindices1, gpindices2],
gate.hessian_wrt_params(op_wrtFilter1, op_wrtFilter2))
if flat:
return flattened_hprod
else:
return _np.transpose(flattened_hprod, (1, 2, 0)).reshape(
(num_deriv_cols1, num_deriv_cols2, dim, dim)) # axes = (gate_ij1, gateij2, prod_row, prod_col)
def dproduct(self, circuit, flat=False, wrtFilter=None):
"""
Compute the derivative of a specified sequence of operation labels.
Parameters
----------
circuit : Circuit or tuple of operation labels
The sequence of operation labels.
flat : bool, optional
Affects the shape of the returned derivative array (see below).
wrtFilter : list of ints, optional
If not None, a list of integers specifying which gate parameters
to include in the derivative. Each element is an index into an
array of gate parameters ordered by concatenating each gate's
parameters (in the order specified by the model). This argument
is used internally for distributing derivative calculations across
multiple processors.
Returns
-------
deriv : numpy array
* if flat == False, a M x G x G array, where:
- M == length of the vectorized model (number of model parameters)
- G == the linear dimension of a operation matrix (G x G operation matrices).
and deriv[i,j,k] holds the derivative of the (j,k)-th entry of the product
with respect to the i-th model parameter.
* if flat == True, a N x M array, where:
- N == the number of entries in a single flattened gate (ordering as numpy.flatten)
- M == length of the vectorized model (number of model parameters)
and deriv[i,j] holds the derivative of the i-th entry of the flattened
product with respect to the j-th model parameter.
"""
# LEXICOGRAPHICAL VS MATRIX ORDER
# we do matrix multiplication in this order (easier to think about)
revOpLabelList = tuple(reversed(tuple(circuit)))
N = len(revOpLabelList) # length of operation sequence
# prod = G1 * G2 * .... * GN , a matrix # noqa
# dprod/d(opLabel)_ij = sum_{L s.t. G(L) == oplabel} [ G1 ... G(L-1) dG(L)/dij G(L+1) ... GN ] , a matrix for each given (i,j) # noqa
# vec( dprod/d(opLabel)_ij ) = sum_{L s.t. G(L) == oplabel} [ (G1 ... G(L-1)) tensor (G(L+1) ... GN)^T vec( dG(L)/dij ) ] # noqa
# = [ sum_{L s.t. G(L) == oplabel} [ (G1 ... G(L-1)) tensor (G(L+1) ... GN)^T ]] * vec( dG(L)/dij) ) # noqa
# if dG(L)/dij = E(i,j) # noqa
# = vec(i,j)-col of [ sum_{L s.t. G(L) == oplabel} [ (G1 ... G(L-1)) tensor (G(L+1) ... GN)^T ]] # noqa
#
# So for each opLabel the matrix [ sum_{L s.t. GL == oplabel} [ (G1 ... G(L-1)) tensor (G(L+1) ... GN)^T ]] has
# columns which correspond to the vectorized derivatives of each of the product components (i.e. prod_kl) with
# respect to a given gateLabel_ij. This function returns a concatenated form of the above matrices, so that
# each column corresponds to a (opLabel,i,j) tuple and each row corresponds to an element of the product (els of
# prod.flatten()).
#
# Note: if gate G(L) is just a matrix of parameters, then dG(L)/dij = E(i,j), an elementary matrix
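        # Concrete sketch (hypothetical labels Ga, Gb): for circuit == (Ga, Gb) the product is
        # matrixOf(Gb) * matrixOf(Ga), so if Ga is a fully parameterized (dense) gate, the column
        # of flattened_dprod for parameter (Ga)_ij is vec( matrixOf(Gb) * E(i,j) * I ), i.e.
        # column i*dim + j of kron(matrixOf(Gb), identity(dim)).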
dim = self.dim
#Cache partial products (relatively little mem required)
leftProds = []
G = _np.identity(dim); leftProds.append(G)
for opLabel in revOpLabelList:
G = _np.dot(G, self.sos.get_operation(opLabel).todense())
leftProds.append(G)
rightProdsT = []
G = _np.identity(dim); rightProdsT.append(_np.transpose(G))
for opLabel in reversed(revOpLabelList):
G = _np.dot(self.sos.get_operation(opLabel).todense(), G)
rightProdsT.append(_np.transpose(G))
# Allocate memory for the final result
num_deriv_cols = self.Np if (wrtFilter is None) else len(wrtFilter)
flattened_dprod = _np.zeros((dim**2, num_deriv_cols), 'd')
# For each operation label, compute the derivative of the entire operation sequence
# with respect to only that gate's parameters and fill the appropriate
# columns of flattened_dprod.
uniqueOpLabels = sorted(list(set(revOpLabelList)))
for opLabel in uniqueOpLabels:
gate = self.sos.get_operation(opLabel)
op_wrtFilter, gpindices = self._process_wrtFilter(wrtFilter, gate)
dop_dopLabel = gate.deriv_wrt_params(op_wrtFilter)
for (i, gl) in enumerate(revOpLabelList):
if gl != opLabel: continue # loop over locations of opLabel
LRproduct = _np.kron(leftProds[i], rightProdsT[N - 1 - i]) # (dim**2, dim**2)
_fas(flattened_dprod, [None, gpindices],
_np.dot(LRproduct, dop_dopLabel), add=True) # (dim**2, nParams[opLabel])
if flat:
return flattened_dprod
else:
# axes = (gate_ij, prod_row, prod_col)
return _np.swapaxes(flattened_dprod, 0, 1).reshape((num_deriv_cols, dim, dim))
def hproduct(self, circuit, flat=False, wrtFilter1=None, wrtFilter2=None):
"""
Compute the hessian of a specified sequence of operation labels.
Parameters
----------
circuit : Circuit or tuple of operation labels
The sequence of operation labels.
flat : bool, optional
Affects the shape of the returned derivative array (see below).
wrtFilter1, wrtFilter2 : list of ints, optional
If not None, a list of integers specifying which gate parameters
to differentiate with respect to in the first (row) and second (col)
derivative operations, respectively. Each element is an index into an
array of gate parameters ordered by concatenating each gate's
parameters (in the order specified by the model). This argument
is used internally for distributing derivative calculations across
multiple processors.
Returns
-------
hessian : numpy array
* if flat == False, a M x M x G x G numpy array, where:
- M == length of the vectorized model (number of model parameters)
- G == the linear dimension of a operation matrix (G x G operation matrices).
and hessian[i,j,k,l] holds the derivative of the (k,l)-th entry of the product
with respect to the j-th then i-th model parameters.
* if flat == True, a N x M x M numpy array, where:
- N == the number of entries in a single flattened gate (ordered as numpy.flatten)
- M == length of the vectorized model (number of model parameters)
and hessian[i,j,k] holds the derivative of the i-th entry of the flattened
              product with respect to the k-th then j-th model parameters.
"""
# LEXICOGRAPHICAL VS MATRIX ORDER
# we do matrix multiplication in this order (easier to think about)
revOpLabelList = tuple(reversed(tuple(circuit)))
# prod = G1 * G2 * .... * GN , a matrix # noqa
# dprod/d(opLabel)_ij = sum_{L s.t. GL == oplabel} [ G1 ... G(L-1) dG(L)/dij G(L+1) ... GN ] , a matrix for each given (i,j) # noqa
# d2prod/d(opLabel1)_kl*d(opLabel2)_ij = sum_{M s.t. GM == gatelabel1} sum_{L s.t. GL == gatelabel2, M < L} # noqa
# [ G1 ... G(M-1) dG(M)/dkl G(M+1) ... G(L-1) dG(L)/dij G(L+1) ... GN ] + {similar with L < M} # noqa
# + sum{M==L} [ G1 ... G(M-1) d2G(M)/(dkl*dij) G(M+1) ... GN ] # noqa
# a matrix for each given (i,j,k,l) # noqa
# vec( d2prod/d(opLabel1)_kl*d(opLabel2)_ij ) = sum{...} [ G1 ... G(M-1) dG(M)/dkl G(M+1) ... G(L-1) tensor (G(L+1) ... GN)^T vec( dG(L)/dij ) ] # noqa
# = sum{...} [ unvec( G1 ... G(M-1) tensor (G(M+1) ... G(L-1))^T vec( dG(M)/dkl ) ) # noqa
# tensor (G(L+1) ... GN)^T vec( dG(L)/dij ) ] # noqa
# + sum{ L < M} [ G1 ... G(L-1) tensor # noqa
# ( unvec( G(L+1) ... G(M-1) tensor (G(M+1) ... GN)^T vec( dG(M)/dkl ) ) )^T vec( dG(L)/dij ) ] # noqa
# + sum{ L == M} [ G1 ... G(M-1) tensor (G(M+1) ... GN)^T vec( d2G(M)/dkl*dji ) # noqa
#
# Note: ignoring L == M terms assumes that d^2 G/(dij)^2 == 0, which is true IF each operation matrix element
# is at most *linear* in each of the gate parameters. If this is not the case, need LinearOperator objects to
# have a 2nd-deriv method in addition of deriv_wrt_params
#
# Note: unvec( X ) can be done efficiently by actually computing X^T ( note (A tensor B)^T = A^T tensor B^T )
# and using numpy's reshape
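        # unvec sketch (illustrative): with row-major vec (numpy.flatten),
        #   unvec(v) == v.reshape(dim, dim)
        # which is the inexpensive reshape referred to in the note above.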
dim = self.dim
uniqueOpLabels = sorted(list(set(revOpLabelList)))
used_operations = _collections.OrderedDict()
#Cache processed parameter filters for multiple uses below
gpindices1 = {}; gate_wrtFilters1 = {}
gpindices2 = {}; gate_wrtFilters2 = {}
for l in uniqueOpLabels:
used_operations[l] = self.sos.get_operation(l)
gate_wrtFilters1[l], gpindices1[l] = self._process_wrtFilter(wrtFilter1, used_operations[l])
gate_wrtFilters2[l], gpindices2[l] = self._process_wrtFilter(wrtFilter2, used_operations[l])
#Cache partial products (relatively little mem required)
prods = {}
ident = _np.identity(dim)
for (i, opLabel1) in enumerate(revOpLabelList): # loop over "starting" gate
prods[(i, i - 1)] = ident # product of no gates
G = ident
for (j, opLabel2) in enumerate(revOpLabelList[i:], start=i): # loop over "ending" gate (>= starting gate)
G = _np.dot(G, self.sos.get_operation(opLabel2).todense())
prods[(i, j)] = G
prods[(len(revOpLabelList), len(revOpLabelList) - 1)] = ident # product of no gates
#Also Cache gate jacobians (still relatively little mem required)
dop_dopLabel1 = {
opLabel: gate.deriv_wrt_params(gate_wrtFilters1[opLabel])
for opLabel, gate in used_operations.items()}
if wrtFilter1 == wrtFilter2:
dop_dopLabel2 = dop_dopLabel1
else:
dop_dopLabel2 = {
opLabel: gate.deriv_wrt_params(gate_wrtFilters2[opLabel])
for opLabel, gate in used_operations.items()}
#Finally, cache any nonzero gate hessians (memory?)
hop_dopLabels = {}
for opLabel, gate in used_operations.items():
if gate.has_nonzero_hessian():
hop_dopLabels[opLabel] = gate.hessian_wrt_params(
gate_wrtFilters1[opLabel], gate_wrtFilters2[opLabel])
# Allocate memory for the final result
num_deriv_cols1 = self.Np if (wrtFilter1 is None) else len(wrtFilter1)
num_deriv_cols2 = self.Np if (wrtFilter2 is None) else len(wrtFilter2)
flattened_d2prod = _np.zeros((dim**2, num_deriv_cols1, num_deriv_cols2), 'd')
# For each pair of gates in the string, compute the hessian of the entire
# operation sequence with respect to only those two gates' parameters and fill
# add the result to the appropriate block of flattened_d2prod.
#NOTE: if we needed to perform a hessian calculation (i.e. for l==m) then
# it could make sense to iterate through the self.operations.keys() as in
# dproduct(...) and find the labels in the string which match the current
# gate (so we only need to compute this gate hessian once). But since we're
# assuming that the gates are at most linear in their parameters, this
# isn't currently needed.
N = len(revOpLabelList)
for m, opLabel1 in enumerate(revOpLabelList):
inds1 = gpindices1[opLabel1]
nDerivCols1 = dop_dopLabel1[opLabel1].shape[1]
if nDerivCols1 == 0: continue
for l, opLabel2 in enumerate(revOpLabelList):
                inds2 = gpindices2[opLabel2]  # indices into the second (column) derivative dimension
#nDerivCols2 = dop_dopLabel2[opLabel2].shape[1]
# FUTURE: we could add logic that accounts for the symmetry of the Hessian, so that
# if gl1 and gl2 are both in opsToVectorize1 and opsToVectorize2 we only compute d2(prod)/d(gl1)d(gl2)
# and not d2(prod)/d(gl2)d(gl1) ...
if m < l:
x0 = _np.kron(_np.transpose(prods[(0, m - 1)]), prods[(m + 1, l - 1)]) # (dim**2, dim**2)
x = _np.dot(_np.transpose(dop_dopLabel1[opLabel1]), x0); xv = x.view() # (nDerivCols1,dim**2)
xv.shape = (nDerivCols1, dim, dim) # (reshape without copying - throws error if copy is needed)
y = _np.dot(_np.kron(xv, _np.transpose(prods[(l + 1, N - 1)])), dop_dopLabel2[opLabel2])
# above: (nDerivCols1,dim**2,dim**2) * (dim**2,nDerivCols2) = (nDerivCols1,dim**2,nDerivCols2)
flattened_d2prod[:, inds1, inds2] += _np.swapaxes(y, 0, 1)
# above: dim = (dim2, nDerivCols1, nDerivCols2);
# swapaxes takes (kl,vec_prod_indx,ij) => (vec_prod_indx,kl,ij)
elif l < m:
x0 = _np.kron(_np.transpose(prods[(l + 1, m - 1)]), prods[(m + 1, N - 1)]) # (dim**2, dim**2)
x = _np.dot(_np.transpose(dop_dopLabel1[opLabel1]), x0); xv = x.view() # (nDerivCols1,dim**2)
xv.shape = (nDerivCols1, dim, dim) # (reshape without copying - throws error if copy is needed)
# transposes each of the now un-vectorized dim x dim mxs corresponding to a single kl
xv = _np.swapaxes(xv, 1, 2)
y = _np.dot(_np.kron(prods[(0, l - 1)], xv), dop_dopLabel2[opLabel2])
# above: (nDerivCols1,dim**2,dim**2) * (dim**2,nDerivCols2) = (nDerivCols1,dim**2,nDerivCols2)
flattened_d2prod[:, inds1, inds2] += _np.swapaxes(y, 0, 1)
# above: dim = (dim2, nDerivCols1, nDerivCols2);
# swapaxes takes (kl,vec_prod_indx,ij) => (vec_prod_indx,kl,ij)
else:
# l==m, which we *used* to assume gave no contribution since we assume all gate elements are at most
# linear in the parameters
assert(opLabel1 == opLabel2)
if opLabel1 in hop_dopLabels: # indicates a non-zero hessian
x0 = _np.kron(_np.transpose(prods[(0, m - 1)]), prods[(m + 1, N - 1)]) # (dim**2, dim**2)
# (nDerivCols1,nDerivCols2,dim**2)
x = _np.dot(_np.transpose(hop_dopLabels[opLabel1], axes=(1, 2, 0)), x0); xv = x.view()
xv = _np.transpose(xv, axes=(2, 0, 1)) # (dim2, nDerivCols1, nDerivCols2)
flattened_d2prod[:, inds1, inds2] += xv
if flat:
return flattened_d2prod # axes = (vectorized_op_el_index, model_parameter1, model_parameter2)
else:
vec_kl_size, vec_ij_size = flattened_d2prod.shape[1:3] # == num_deriv_cols1, num_deriv_cols2
return _np.rollaxis(flattened_d2prod, 0, 3).reshape((vec_kl_size, vec_ij_size, dim, dim))
# axes = (model_parameter1, model_parameter2, model_element_row, model_element_col)
def prs(self, rholabel, elabels, circuit, clipTo, bUseScaling=False, time=None):
"""
        Compute the probabilities of multiple "outcomes" (spam-tuples) for a single
operation sequence. The spam tuples may only vary in their effect-label (their
prep labels must be the same)
Parameters
----------
rholabel : Label
The state preparation label.
elabels : list
A list of :class:`Label` objects giving the *simplified* effect labels.
circuit : Circuit or tuple
A tuple-like object of *simplified* gates (e.g. may include
instrument elements like 'Imyinst_0')
clipTo : 2-tuple
(min,max) to clip returned probability to if not None.
Only relevant when prMxToFill is not None.
bUseScaling : bool, optional
Whether to use a post-scaled product internally. If False, this
routine will run slightly faster, but with a chance that the
product will overflow and the subsequent trace operation will
yield nan as the returned probability.
time : float, optional
The *start* time at which `circuit` is evaluated.
Returns
-------
numpy.ndarray
An array of floating-point probabilities, corresponding to
the elements of `elabels`.
"""
assert(time is None), "MatrixForwardSimulator cannot be used to simulate time-dependent circuits"
rho, Es = self._rhoEs_from_spamTuples(rholabel, elabels)
#shapes: rho = (N,1), Es = (len(elabels),N)
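        # For reference: the values computed below are ps[k] = Re( Es[k,:] . G . rho ) for the
        # "densitymx" evotype, or |Es[k,:] . G . rho|^2 for "statevec", where G is the product
        # of the circuit's operation matrices (including any internal scale factor).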
if bUseScaling:
old_err = _np.seterr(over='ignore')
G, scale = self.product(circuit, True)
if self.evotype == "statevec":
ps = _np.real(_np.abs(_np.dot(Es, _np.dot(G, rho)) * scale)**2)
else: # evotype == "densitymx"
# probability, with scaling applied (may generate overflow, but OK)
ps = _np.real(_np.dot(Es, _np.dot(G, rho)) * scale)
_np.seterr(**old_err)
else: # no scaling -- faster but susceptible to overflow
G = self.product(circuit, False)
if self.evotype == "statevec":
ps = _np.real(_np.abs(_np.dot(Es, _np.dot(G, rho)))**2)
else: # evotype == "densitymx"
ps = _np.real(_np.dot(Es, _np.dot(G, rho)))
ps = ps.flatten()
if _np.any(_np.isnan(ps)):
if len(circuit) < 10:
strToPrint = str(circuit)
else:
strToPrint = str(circuit[0:10]) + " ... (len %d)" % len(circuit)
_warnings.warn("pr(%s) == nan" % strToPrint)
#DEBUG: print "backtrace" of product leading up to nan
#G = _np.identity( self.dim ); total_exp = 0.0
#for i,lOp in enumerate(gateLabelList):
# G = _np.dot(G,self[lOp]) # product of gates, starting with G0
# nG = norm(G); G /= nG; total_exp += log(nG) # scale and keep track of exponent
#
# p = _mt.trace( _np.dot(self.SPAMs[spamLabel],G) ) * exp(total_exp) # probability
# print "%d: p = %g, norm %g, exp %g\n%s" % (i,p,norm(G),total_exp,str(G))
# if _np.isnan(p): raise ValueError("STOP")
if clipTo is not None:
ret = _np.clip(ps, clipTo[0], clipTo[1])
else:
ret = ps
#DEBUG CHECK
#check_ps = _np.array( [ self.pr( (rholabel,elabel), circuit, clipTo, bScale) for elabel in elabels ])
#assert(_np.linalg.norm(ps-check_ps) < 1e-8)
return ret
def dpr(self, spamTuple, circuit, returnPr, clipTo):
"""
Compute the derivative of a probability generated by a operation sequence and
spam tuple as a 1 x M numpy array, where M is the number of model
parameters.
Parameters
----------
spamTuple : (rho_label, simplified_effect_label)
Specifies the prep and POVM effect used to compute the probability.
circuit : Circuit or tuple
A tuple-like object of *simplified* gates (e.g. may include
instrument elements like 'Imyinst_0')
returnPr : bool
when set to True, additionally return the probability itself.
clipTo : 2-tuple
(min,max) to clip returned probability to if not None.
Only relevant when prMxToFill is not None.
Returns
-------
derivative : numpy array
a 1 x M numpy array of derivatives of the probability w.r.t.
each model parameter (M is the length of the vectorized model).
probability : float
only returned if returnPr == True.
"""
if self.evotype == "statevec": raise NotImplementedError("Unitary evolution not fully supported yet!")
# To support unitary evolution we need to:
# - alter product, dproduct, etc. to allow for *complex* derivatives, since matrices can be complex
# - update probability-derivative computations: dpr/dx -> d|pr|^2/dx = d(pr*pr.C)/dx = dpr/dx*pr.C + pr*dpr/dx.C
# = 2 Re(dpr/dx*pr.C) , where dpr/dx is the usual density-matrix-mode probability
# (TODO in FUTURE)
# pr = Tr( |rho><E| * prod ) = sum E_k prod_kl rho_l
# dpr/d(opLabel)_ij = sum E_k [dprod/d(opLabel)_ij]_kl rho_l
# dpr/d(rho)_i = sum E_k prod_ki
# dpr/d(E)_i = sum prod_il rho_l
rholabel, elabel = spamTuple # can't deal w/"custom" spam label...
rho, E = self._rhoE_from_spamTuple(spamTuple)
rhoVec = self.sos.get_prep(rholabel) # distinct from rho,E b/c rho,E are
EVec = self.sos.get_effect(elabel) # arrays, these are SPAMVecs
#Derivs wrt Gates
old_err = _np.seterr(over='ignore')
prod, scale = self.product(circuit, True)
dprod_dOps = self.dproduct(circuit)
dpr_dOps = _np.empty((1, self.Np))
for i in range(self.Np):
dpr_dOps[0, i] = float(_np.dot(E, _np.dot(dprod_dOps[i], rho)))
if returnPr:
p = _np.dot(E, _np.dot(prod, rho)) * scale # may generate overflow, but OK
if clipTo is not None: p = _np.clip(p, clipTo[0], clipTo[1])
#Derivs wrt SPAM
derivWrtAnyRhovec = scale * _np.dot(E, prod)
dpr_drhos = _np.zeros((1, self.Np))
_fas(dpr_drhos, [0, self.sos.get_prep(rholabel).gpindices],
_np.dot(derivWrtAnyRhovec, rhoVec.deriv_wrt_params())) # may overflow, but OK
dpr_dEs = _np.zeros((1, self.Np))
derivWrtAnyEvec = scale * _np.transpose(_np.dot(prod, rho)) # may overflow, but OK
# (** doesn't depend on eIndex **) -- TODO: should also conjugate() here if complex?
_fas(dpr_dEs, [0, EVec.gpindices],
_np.dot(derivWrtAnyEvec, EVec.deriv_wrt_params()))
_np.seterr(**old_err)
if returnPr:
return dpr_drhos + dpr_dEs + dpr_dOps, p
else: return dpr_drhos + dpr_dEs + dpr_dOps
def hpr(self, spamTuple, circuit, returnPr, returnDeriv, clipTo):
"""
Compute the Hessian of a probability generated by a operation sequence and
spam tuple as a 1 x M x M array, where M is the number of model
parameters.
Parameters
----------
spamTuple : (rho_label, simplified_effect_label)
Specifies the prep and POVM effect used to compute the probability.
circuit : Circuit or tuple
A tuple-like object of *simplified* gates (e.g. may include
instrument elements like 'Imyinst_0')
returnPr : bool
when set to True, additionally return the probability itself.
returnDeriv : bool
when set to True, additionally return the derivative of the
probability.
clipTo : 2-tuple
(min,max) to clip returned probability to if not None.
Only relevant when prMxToFill is not None.
Returns
-------
hessian : numpy array
a 1 x M x M array, where M is the number of model parameters.
hessian[0,j,k] is the derivative of the probability w.r.t. the
k-th then the j-th model parameter.
derivative : numpy array
only returned if returnDeriv == True. A 1 x M numpy array of
derivatives of the probability w.r.t. each model parameter.
probability : float
only returned if returnPr == True.
"""
if self.evotype == "statevec": raise NotImplementedError("Unitary evolution not fully supported yet!")
# pr = Tr( |rho><E| * prod ) = sum E_k prod_kl rho_l
# d2pr/d(opLabel1)_mn d(opLabel2)_ij = sum E_k [dprod/d(opLabel1)_mn d(opLabel2)_ij]_kl rho_l
# d2pr/d(rho)_i d(opLabel)_mn = sum E_k [dprod/d(opLabel)_mn]_ki (and same for other diff order)
# d2pr/d(E)_i d(opLabel)_mn = sum [dprod/d(opLabel)_mn]_il rho_l (and same for other diff order)
# d2pr/d(E)_i d(rho)_j = prod_ij (and same for other diff order)
# d2pr/d(E)_i d(E)_j = 0
# d2pr/d(rho)_i d(rho)_j = 0
rholabel, elabel = spamTuple
rho, E = self._rhoE_from_spamTuple(spamTuple)
rhoVec = self.sos.get_prep(rholabel) # distinct from rho,E b/c rho,E are
EVec = self.sos.get_effect(elabel) # arrays, these are SPAMVecs
d2prod_dGates = self.hproduct(circuit)
assert(d2prod_dGates.shape[0] == d2prod_dGates.shape[1])
d2pr_dOps2 = _np.empty((1, self.Np, self.Np))
for i in range(self.Np):
for j in range(self.Np):
d2pr_dOps2[0, i, j] = float(_np.dot(E, _np.dot(d2prod_dGates[i, j], rho)))
old_err = _np.seterr(over='ignore')
prod, scale = self.product(circuit, True)
if returnPr:
p = _np.dot(E, _np.dot(prod, rho)) * scale # may generate overflow, but OK
if clipTo is not None: p = _np.clip(p, clipTo[0], clipTo[1])
dprod_dOps = self.dproduct(circuit)
assert(dprod_dOps.shape[0] == self.Np)
if returnDeriv: # same as in dpr(...)
dpr_dOps = _np.empty((1, self.Np))
for i in range(self.Np):
dpr_dOps[0, i] = float(_np.dot(E, _np.dot(dprod_dOps[i], rho)))
#Derivs wrt SPAM
if returnDeriv: # same as in dpr(...)
dpr_drhos = _np.zeros((1, self.Np))
derivWrtAnyRhovec = scale * _np.dot(E, prod)
_fas(dpr_drhos, [0, self.sos.get_prep(rholabel).gpindices],
_np.dot(derivWrtAnyRhovec, rhoVec.deriv_wrt_params())) # may overflow, but OK
dpr_dEs = _np.zeros((1, self.Np))
derivWrtAnyEvec = scale * _np.transpose(_np.dot(prod, rho)) # may overflow, but OK
_fas(dpr_dEs, [0, EVec.gpindices],
_np.dot(derivWrtAnyEvec, EVec.deriv_wrt_params()))
dpr = dpr_drhos + dpr_dEs + dpr_dOps
d2pr_drhos = _np.zeros((1, self.Np, self.Np))
_fas(d2pr_drhos, [0, None, self.sos.get_prep(rholabel).gpindices],
_np.dot(_np.dot(E, dprod_dOps), rhoVec.deriv_wrt_params())[0]) # (= [0,:,:])
d2pr_dEs = _np.zeros((1, self.Np, self.Np))
derivWrtAnyEvec = _np.squeeze(_np.dot(dprod_dOps, rho), axis=(2,))
_fas(d2pr_dEs, [0, None, EVec.gpindices],
_np.dot(derivWrtAnyEvec, EVec.deriv_wrt_params()))
d2pr_dErhos = _np.zeros((1, self.Np, self.Np))
derivWrtAnyEvec = scale * _np.dot(prod, rhoVec.deriv_wrt_params()) # may generate overflow, but OK
_fas(d2pr_dErhos, [0, EVec.gpindices, self.sos.get_prep(rholabel).gpindices],
_np.dot(_np.transpose(EVec.deriv_wrt_params()), derivWrtAnyEvec))
#Note: these 2nd derivatives are non-zero when the spam vectors have
# a more than linear dependence on their parameters.
if self.sos.get_prep(rholabel).has_nonzero_hessian():
derivWrtAnyRhovec = scale * _np.dot(E, prod) # may overflow, but OK
d2pr_d2rhos = _np.zeros((1, self.Np, self.Np))
_fas(d2pr_d2rhos, [0, self.sos.get_prep(rholabel).gpindices, self.sos.get_prep(rholabel).gpindices],
_np.tensordot(derivWrtAnyRhovec, self.sos.get_prep(rholabel).hessian_wrt_params(), (1, 0)))
# _np.einsum('ij,jkl->ikl', derivWrtAnyRhovec, self.sos.get_prep(rholabel).hessian_wrt_params())
else:
d2pr_d2rhos = 0
if self.sos.get_effect(elabel).has_nonzero_hessian():
derivWrtAnyEvec = scale * _np.transpose(_np.dot(prod, rho)) # may overflow, but OK
d2pr_d2Es = _np.zeros((1, self.Np, self.Np))
_fas(d2pr_d2Es, [0, self.sos.get_effect(elabel).gpindices, self.sos.get_effect(elabel).gpindices],
_np.tensordot(derivWrtAnyEvec, self.sos.get_effect(elabel).hessian_wrt_params(), (1, 0)))
# _np.einsum('ij,jkl->ikl',derivWrtAnyEvec,self.sos.get_effect(elabel).hessian_wrt_params())
else:
d2pr_d2Es = 0
ret = d2pr_dErhos + _np.transpose(d2pr_dErhos, (0, 2, 1)) + \
d2pr_drhos + _np.transpose(d2pr_drhos, (0, 2, 1)) + \
d2pr_dEs + _np.transpose(d2pr_dEs, (0, 2, 1)) + \
d2pr_d2rhos + d2pr_d2Es + d2pr_dOps2
# Note: add transposes b/c spam terms only compute one triangle of hessian
        # Note: d2pr_d2rhos and d2pr_d2Es are zero unless the corresponding SPAM vector has a nonzero Hessian (handled above)
_np.seterr(**old_err)
if returnDeriv:
if returnPr: return ret, dpr, p
else: return ret, dpr
else:
if returnPr: return ret, p
else: return ret
## BEGIN CACHE FUNCTIONS
def _compute_product_cache(self, evalTree, comm=None):
"""
Computes a tree of products in a linear cache space. Will *not*
parallelize computation, even if given a split tree (since there's
no good way to reconstruct the parent tree's *non-final* elements from
those of the sub-trees). Note also that there would be no memory savings
from using a split tree. In short, parallelization should be done at a
higher level.
"""
dim = self.dim
#Note: previously, we tried to allow for parallelization of
        # _compute_product_cache when the tree was split, but this was
# incorrect (and luckily never used) - so it's been removed.
if comm is not None: # ignoring comm since can't do anything with it!
#_warnings.warn("More processors than can be used for product computation")
pass # this is a fairly common occurrence, and doesn't merit a warning
# ------------------------------------------------------------------
if evalTree.is_split():
_warnings.warn("Ignoring tree splitting in product cache calc.")
cacheSize = len(evalTree)
prodCache = _np.zeros((cacheSize, dim, dim))
scaleCache = _np.zeros(cacheSize, 'd')
        #First elements of the cache are given by evalTree's initial single- or zero-operation labels
for i, opLabel in zip(evalTree.get_init_indices(), evalTree.get_init_labels()):
if opLabel == "": # special case of empty label == no gate
prodCache[i] = _np.identity(dim)
# Note: scaleCache[i] = 0.0 from initialization
else:
gate = self.sos.get_operation(opLabel).todense()
nG = max(_nla.norm(gate), 1.0)
prodCache[i] = gate / nG
scaleCache[i] = _np.log(nG)
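        # Convention note: the unscaled product for cache entry i is prodCache[i] * exp(scaleCache[i]);
        # the per-entry rescaling keeps stored matrices near unit norm so long circuits don't underflow.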
#evaluate operation sequences using tree (skip over the zero and single-gate-strings)
#cnt = 0
for i in evalTree.get_evaluation_order():
# combine iLeft + iRight => i
# LEXICOGRAPHICAL VS MATRIX ORDER Note: we reverse iLeft <=> iRight from evalTree because
# (iRight,iLeft,iFinal) = tup implies circuit[i] = circuit[iLeft] + circuit[iRight], but we want:
# since then matrixOf(circuit[i]) = matrixOf(circuit[iLeft]) * matrixOf(circuit[iRight])
(iRight, iLeft) = evalTree[i]
L, R = prodCache[iLeft], prodCache[iRight]
prodCache[i] = _np.dot(L, R)
scaleCache[i] = scaleCache[iLeft] + scaleCache[iRight]
if prodCache[i].max() < PSMALL and prodCache[i].min() > -PSMALL:
nL, nR = max(_nla.norm(L), _np.exp(-scaleCache[iLeft]),
1e-300), max(_nla.norm(R), _np.exp(-scaleCache[iRight]), 1e-300)
sL, sR = L / nL, R / nR
prodCache[i] = _np.dot(sL, sR); scaleCache[i] += _np.log(nL) + _np.log(nR)
#print "bulk_product DEBUG: %d rescalings out of %d products" % (cnt, len(evalTree))
nanOrInfCacheIndices = (~_np.isfinite(prodCache)).nonzero()[0] # may be duplicates (a list, not a set)
# since all scaled gates start with norm <= 1, products should all have norm <= 1
assert(len(nanOrInfCacheIndices) == 0)
return prodCache, scaleCache
def _compute_dproduct_cache(self, evalTree, prodCache, scaleCache,
comm=None, wrtSlice=None, profiler=None):
"""
Computes a tree of product derivatives in a linear cache space. Will
use derivative columns and then (and only when needed) a split tree
to parallelize computation, since there are no memory savings
from using a split tree.
"""
if profiler is None: profiler = _dummy_profiler
dim = self.dim
nDerivCols = self.Np if (wrtSlice is None) \
else _slct.length(wrtSlice)
deriv_shape = (nDerivCols, dim, dim)
cacheSize = len(evalTree)
# ------------------------------------------------------------------
#print("MPI: _compute_dproduct_cache begin: %d deriv cols" % nDerivCols)
if comm is not None and comm.Get_size() > 1:
#print("MPI: _compute_dproduct_cache called w/comm size %d" % comm.Get_size())
# parallelize of deriv cols, then sub-trees (if available and necessary)
if comm.Get_size() > nDerivCols:
#If there are more processors than deriv cols, give a
# warning -- note that we *cannot* make use of a tree being
# split because there's no good way to reconstruct the
# *non-final* parent-tree elements from those of the sub-trees.
_warnings.warn("Increased speed could be obtained"
" by giving dproduct cache computation"
" *fewer* processors and *smaller* (sub-)tree"
" (e.g. by splitting tree beforehand), as there"
" are more cpus than derivative columns.")
# Use comm to distribute columns
allDerivColSlice = slice(0, nDerivCols) if (wrtSlice is None) else wrtSlice
_, myDerivColSlice, _, mySubComm = \
_mpit.distribute_slice(allDerivColSlice, comm)
#print("MPI: _compute_dproduct_cache over %d cols (%s) (rank %d computing %s)" \
# % (nDerivCols, str(allDerivColIndices), comm.Get_rank(), str(myDerivColIndices)))
if mySubComm is not None and mySubComm.Get_size() > 1:
_warnings.warn("Too many processors to make use of in "
" _compute_dproduct_cache.")
if mySubComm.Get_rank() > 0: myDerivColSlice = slice(0, 0)
#don't compute anything on "extra", i.e. rank != 0, cpus
my_results = self._compute_dproduct_cache(
evalTree, prodCache, scaleCache, None, myDerivColSlice, profiler)
# pass None as comm, *not* mySubComm, since we can't do any
# further parallelization
tm = _time.time()
all_results = comm.allgather(my_results)
profiler.add_time("MPI IPC", tm)
return _np.concatenate(all_results, axis=1) # TODO: remove this concat w/better gather?
# ------------------------------------------------------------------
tSerialStart = _time.time()
if evalTree.is_split():
_warnings.warn("Ignoring tree splitting in dproduct cache calc.")
dProdCache = _np.zeros((cacheSize,) + deriv_shape)
# This iteration **must** match that in bulk_evaltree
# in order to associate the right single-gate-strings w/indices
wrtIndices = _slct.indices(wrtSlice) if (wrtSlice is not None) else None
for i, opLabel in zip(evalTree.get_init_indices(), evalTree.get_init_labels()):
if opLabel == "": # special case of empty label == no gate
dProdCache[i] = _np.zeros(deriv_shape)
else:
#doperation = self.dproduct( (opLabel,) , wrtFilter=wrtIndices)
doperation = self.doperation(opLabel, wrtFilter=wrtIndices)
dProdCache[i] = doperation / _np.exp(scaleCache[i])
#profiler.print_mem("DEBUGMEM: POINT1"); profiler.comm.barrier()
#evaluate operation sequences using tree (skip over the zero and single-gate-strings)
for i in evalTree.get_evaluation_order():
tm = _time.time()
# combine iLeft + iRight => i
# LEXICOGRAPHICAL VS MATRIX ORDER Note: we reverse iLeft <=> iRight from evalTree because
# (iRight,iLeft,iFinal) = tup implies circuit[i] = circuit[iLeft] + circuit[iRight], but we want:
# since then matrixOf(circuit[i]) = matrixOf(circuit[iLeft]) * matrixOf(circuit[iRight])
(iRight, iLeft) = evalTree[i]
L, R = prodCache[iLeft], prodCache[iRight]
dL, dR = dProdCache[iLeft], dProdCache[iRight]
dProdCache[i] = _np.dot(dL, R) + \
_np.swapaxes(_np.dot(L, dR), 0, 1) # dot(dS, T) + dot(S, dT)
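            # Axis bookkeeping: _np.dot(L, dR) has shape (dim, nDerivCols, dim), so the swapaxes
            # above moves the derivative index back to axis 0 to match dProdCache's layout.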
profiler.add_time("compute_dproduct_cache: dots", tm)
profiler.add_count("compute_dproduct_cache: dots")
scale = scaleCache[i] - (scaleCache[iLeft] + scaleCache[iRight])
if abs(scale) > 1e-8: # _np.isclose(scale,0) is SLOW!
dProdCache[i] /= _np.exp(scale)
if dProdCache[i].max() < DSMALL and dProdCache[i].min() > -DSMALL:
_warnings.warn("Scaled dProd small in order to keep prod managable.")
elif _np.count_nonzero(dProdCache[i]) and dProdCache[i].max() < DSMALL and dProdCache[i].min() > -DSMALL:
_warnings.warn("Would have scaled dProd but now will not alter scaleCache.")
#profiler.print_mem("DEBUGMEM: POINT2"); profiler.comm.barrier()
profiler.add_time("compute_dproduct_cache: serial", tSerialStart)
profiler.add_count("compute_dproduct_cache: num columns", nDerivCols)
return dProdCache
def _compute_hproduct_cache(self, evalTree, prodCache, dProdCache1,
dProdCache2, scaleCache, comm=None,
wrtSlice1=None, wrtSlice2=None):
"""
Computes a tree of product 2nd derivatives in a linear cache space. Will
use derivative rows and columns and then (as needed) a split tree
to parallelize computation, since there are no memory savings
from using a split tree.
"""
dim = self.dim
# Note: dProdCache?.shape = (#circuits,#params_to_diff_wrt,dim,dim)
nDerivCols1 = dProdCache1.shape[1]
nDerivCols2 = dProdCache2.shape[1]
assert(wrtSlice1 is None or _slct.length(wrtSlice1) == nDerivCols1)
assert(wrtSlice2 is None or _slct.length(wrtSlice2) == nDerivCols2)
hessn_shape = (nDerivCols1, nDerivCols2, dim, dim)
cacheSize = len(evalTree)
# ------------------------------------------------------------------
if comm is not None and comm.Get_size() > 1:
# parallelize of deriv cols, then sub-trees (if available and necessary)
if comm.Get_size() > nDerivCols1 * nDerivCols2:
#If there are more processors than deriv cells, give a
# warning -- note that we *cannot* make use of a tree being
# split because there's no good way to reconstruct the
# *non-final* parent-tree elements from those of the sub-trees.
_warnings.warn("Increased speed could be obtained"
" by giving hproduct cache computation"
" *fewer* processors and *smaller* (sub-)tree"
" (e.g. by splitting tree beforehand), as there"
" are more cpus than hessian elements.") # pragma: no cover
# allocate final result memory
hProdCache = _np.zeros((cacheSize,) + hessn_shape)
# Use comm to distribute columns
allDeriv1ColSlice = slice(0, nDerivCols1)
allDeriv2ColSlice = slice(0, nDerivCols2)
deriv1Slices, myDeriv1ColSlice, deriv1Owners, mySubComm = \
_mpit.distribute_slice(allDeriv1ColSlice, comm)
# Get slice into entire range of model params so that
# per-gate hessians can be computed properly
if wrtSlice1 is not None and wrtSlice1.start is not None:
myHessianSlice1 = _slct.shift(myDeriv1ColSlice, wrtSlice1.start)
else: myHessianSlice1 = myDeriv1ColSlice
#print("MPI: _compute_hproduct_cache over %d cols (rank %d computing %s)" \
# % (nDerivCols2, comm.Get_rank(), str(myDerivColSlice)))
if mySubComm is not None and mySubComm.Get_size() > 1:
deriv2Slices, myDeriv2ColSlice, deriv2Owners, mySubSubComm = \
_mpit.distribute_slice(allDeriv2ColSlice, mySubComm)
# Get slice into entire range of model params (see above)
if wrtSlice2 is not None and wrtSlice2.start is not None:
myHessianSlice2 = _slct.shift(myDeriv2ColSlice, wrtSlice2.start)
else: myHessianSlice2 = myDeriv2ColSlice
if mySubSubComm is not None and mySubSubComm.Get_size() > 1:
_warnings.warn("Too many processors to make use of in "
" _compute_hproduct_cache.")
#TODO: remove: not needed now that we track owners
#if mySubSubComm.Get_rank() > 0: myDeriv2ColSlice = slice(0,0)
# #don't compute anything on "extra", i.e. rank != 0, cpus
hProdCache[:, myDeriv1ColSlice, myDeriv2ColSlice] = self._compute_hproduct_cache(
evalTree, prodCache, dProdCache1[:, myDeriv1ColSlice], dProdCache2[:, myDeriv2ColSlice],
scaleCache, None, myHessianSlice1, myHessianSlice2)
# pass None as comm, *not* mySubSubComm, since we can't do any further parallelization
_mpit.gather_slices(deriv2Slices, deriv2Owners, hProdCache, [None, myDeriv1ColSlice],
2, mySubComm) # , gatherMemLimit) #gather over col-distribution (Deriv2)
#note: gathering axis 2 of hProdCache[:,myDeriv1ColSlice],
# dim=(cacheSize,nDerivCols1,nDerivCols2,dim,dim)
else:
#compute "Deriv1" row-derivatives distribution only; don't use column distribution
hProdCache[:, myDeriv1ColSlice] = self._compute_hproduct_cache(
evalTree, prodCache, dProdCache1[:, myDeriv1ColSlice], dProdCache2,
scaleCache, None, myHessianSlice1, wrtSlice2)
# pass None as comm, *not* mySubComm (this is ok, see "if" condition above)
_mpit.gather_slices(deriv1Slices, deriv1Owners, hProdCache, [], 1, comm)
#, gatherMemLimit) #gather over row-distribution (Deriv1)
#note: gathering axis 1 of hProdCache,
# dim=(cacheSize,nDerivCols1,nDerivCols2,dim,dim)
return hProdCache
# ------------------------------------------------------------------
if evalTree.is_split():
_warnings.warn("Ignoring tree splitting in hproduct cache calc.")
hProdCache = _np.zeros((cacheSize,) + hessn_shape)
        #First elements of the cache are given by evalTree's initial single- or zero-operation labels
wrtIndices1 = _slct.indices(wrtSlice1) if (wrtSlice1 is not None) else None
wrtIndices2 = _slct.indices(wrtSlice2) if (wrtSlice2 is not None) else None
for i, opLabel in zip(evalTree.get_init_indices(), evalTree.get_init_labels()):
if opLabel == "": # special case of empty label == no gate
hProdCache[i] = _np.zeros(hessn_shape)
elif not self.sos.get_operation(opLabel).has_nonzero_hessian():
#all gate elements are at most linear in params, so
# all hessians for single- or zero-operation sequences are zero.
hProdCache[i] = _np.zeros(hessn_shape)
else:
hoperation = self.hoperation(opLabel,
wrtFilter1=wrtIndices1,
wrtFilter2=wrtIndices2)
hProdCache[i] = hoperation / _np.exp(scaleCache[i])
#evaluate operation sequences using tree (skip over the zero and single-gate-strings)
for i in evalTree.get_evaluation_order():
# combine iLeft + iRight => i
# LEXICOGRAPHICAL VS MATRIX ORDER Note: we reverse iLeft <=> iRight from evalTree because
# (iRight,iLeft,iFinal) = tup implies circuit[i] = circuit[iLeft] + circuit[iRight], but we want:
# since then matrixOf(circuit[i]) = matrixOf(circuit[iLeft]) * matrixOf(circuit[iRight])
(iRight, iLeft) = evalTree[i]
L, R = prodCache[iLeft], prodCache[iRight]
dL1, dR1 = dProdCache1[iLeft], dProdCache1[iRight]
dL2, dR2 = dProdCache2[iLeft], dProdCache2[iRight]
hL, hR = hProdCache[iLeft], hProdCache[iRight]
# Note: L, R = GxG ; dL,dR = vgs x GxG ; hL,hR = vgs x vgs x GxG
dLdRa = _np.swapaxes(_np.dot(dL1, dR2), 1, 2)
dLdRb = _np.swapaxes(_np.dot(dL2, dR1), 1, 2)
dLdR_sym = dLdRa + _np.swapaxes(dLdRb, 0, 1)
hProdCache[i] = _np.dot(hL, R) + dLdR_sym + _np.transpose(_np.dot(L, hR), (1, 2, 0, 3))
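            # Product rule: hess(L.R) = hess(L).R + symmetrized dL.dR cross terms (the swapaxes
            # above exchange the two derivative axes) + L.hess(R), where the final transpose
            # reorders _np.dot(L, hR) from (row, deriv1, deriv2, col) to (deriv1, deriv2, row, col).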
scale = scaleCache[i] - (scaleCache[iLeft] + scaleCache[iRight])
if abs(scale) > 1e-8: # _np.isclose(scale,0) is SLOW!
hProdCache[i] /= _np.exp(scale)
if hProdCache[i].max() < HSMALL and hProdCache[i].min() > -HSMALL:
_warnings.warn("Scaled hProd small in order to keep prod managable.")
elif _np.count_nonzero(hProdCache[i]) and hProdCache[i].max() < HSMALL and hProdCache[i].min() > -HSMALL:
_warnings.warn("hProd is small (oh well!).")
return hProdCache
## END CACHE FUNCTIONS
def default_distribute_method(self):
"""
Return the preferred MPI distribution mode for this calculator.
"""
return "deriv"
def estimate_cache_size(self, nCircuits):
"""
Return an estimate of the ideal/desired cache size given a number of
operation sequences.
Returns
-------
int
"""
return int(1.3 * nCircuits)
def construct_evaltree(self, simplified_circuits, numSubtreeComms):
"""
Constructs an EvalTree object appropriate for this calculator.
Parameters
----------
simplified_circuits : list
A list of Circuits or tuples of operation labels which specify
the operation sequences to create an evaluation tree out of
            (most likely because you want to compute their probabilities).
            These are "simplified" circuits in that they should only contain
"deterministic" elements (no POVM or Instrument labels).
numSubtreeComms : int
The number of processor groups that will be assigned to
subtrees of the created tree. This aids in the tree construction
by giving the tree information it needs to distribute itself
among the available processors.
Returns
-------
MatrixEvalTree
"""
evTree = _MatrixEvalTree()
evTree.initialize(simplified_circuits, numSubtreeComms)
return evTree
def estimate_mem_usage(self, subcalls, cache_size, num_subtrees,
num_subtree_proc_groups, num_param1_groups,
num_param2_groups, num_final_strs):
"""
Estimate the memory required by a given set of subcalls to computation functions.
Parameters
----------
subcalls : list of strs
A list of the names of the subcalls to estimate memory usage for.
cache_size : int
The size of the evaluation tree that will be passed to the
functions named by `subcalls`.
num_subtrees : int
The number of subtrees to split the full evaluation tree into.
num_subtree_proc_groups : int
The number of processor groups used to (in parallel) iterate through
the subtrees. It can often be useful to have fewer processor groups
            than subtrees (even == 1) in order to perform the parallelization
over the parameter groups.
num_param1_groups : int
The number of groups to divide the first-derivative parameters into.
Computation will be automatically parallelized over these groups.
num_param2_groups : int
The number of groups to divide the second-derivative parameters into.
Computation will be automatically parallelized over these groups.
num_final_strs : int
The number of final strings (may be less than or greater than
            `cache_size`) the tree will hold.
Returns
-------
int
The memory estimate in bytes.
"""
#Note: num_final_strs is irrelevant here b/c cachesize is always >= num_final_strs
# and this dictates how large all the storage arrays are.
np1, np2 = num_param1_groups, num_param2_groups
FLOATSIZE = 8 # in bytes: TODO: a better way
dim = self.dim
nspam = int(round(_np.sqrt(self.dim))) # an estimate - could compute?
wrtLen1 = (self.Np + np1 - 1) // np1 # ceiling(num_params / np1)
wrtLen2 = (self.Np + np2 - 1) // np2 # ceiling(num_params / np2)
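        # e.g. (sketch): with self.Np == 100 parameters and np1 == 8 groups,
        # wrtLen1 == (100 + 8 - 1) // 8 == 13 derivative columns per group.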
mem = 0
for fnName in subcalls:
if fnName == "bulk_fill_probs":
mem += cache_size * dim * dim # product cache
mem += cache_size # scale cache (exps)
mem += cache_size # scale vals
elif fnName == "bulk_fill_dprobs":
mem += cache_size * wrtLen1 * dim * dim # dproduct cache
mem += cache_size * dim * dim # product cache
mem += cache_size # scale cache
mem += cache_size # scale vals
elif fnName == "bulk_fill_hprobs":
mem += cache_size * wrtLen1 * wrtLen2 * dim * dim # hproduct cache
mem += cache_size * (wrtLen1 + wrtLen2) * dim * dim # dproduct cache
mem += cache_size * dim * dim # product cache
mem += cache_size # scale cache
mem += cache_size # scale vals
elif fnName == "bulk_hprobs_by_block":
#Note: includes "results" memory since this is allocated within
# the generator and yielded, *not* allocated by the user.
mem += 2 * cache_size * nspam * wrtLen1 * wrtLen2 # hprobs & dprobs12 results
mem += cache_size * nspam * (wrtLen1 + wrtLen2) # dprobs1 & dprobs2
mem += cache_size * wrtLen1 * wrtLen2 * dim * dim # hproduct cache
mem += cache_size * (wrtLen1 + wrtLen2) * dim * dim # dproduct cache
mem += cache_size * dim * dim # product cache
mem += cache_size # scale cache
mem += cache_size # scale vals
## It doesn't make sense to include these since their required memory is fixed
## (and dominated) by the output array size. Could throw more informative error?
#elif fnName == "bulk_product":
# mem += cache_size * dim * dim # product cache
# mem += cache_size # scale cache
# mem += cache_size # scale vals
#
#elif fnName == "bulk_dproduct":
# mem += cache_size * num_params * dim * dim # dproduct cache
# mem += cache_size * dim * dim # product cache
# mem += cache_size # scale cache
# mem += cache_size # scale vals
#
#elif fnName == "bulk_hproduct":
# mem += cache_size * num_params**2 * dim * dim # hproduct cache
# mem += cache_size * num_params * dim * dim # dproduct cache
# mem += cache_size * dim * dim # product cache
# mem += cache_size # scale cache
# mem += cache_size # scale vals
else:
raise ValueError("Unknown subcall name: %s" % fnName)
return mem * FLOATSIZE
def bulk_product(self, evalTree, bScale=False, comm=None):
"""
Compute the products of many operation sequences at once.
Parameters
----------
evalTree : EvalTree
given by a prior call to bulk_evaltree. Specifies the operation sequences
to compute the bulk operation on.
bScale : bool, optional
When True, return a scaling factor (see below).
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors. This is done over operation sequences when a
*split* evalTree is given, otherwise no parallelization is performed.
Returns
-------
prods : numpy array
Array of shape S x G x G, where:
- S == the number of operation sequences
- G == the linear dimension of a operation matrix (G x G operation matrices).
scaleValues : numpy array
Only returned when bScale == True. A length-S array specifying
the scaling that needs to be applied to the resulting products
(final_product[i] = scaleValues[i] * prods[i]).
"""
prodCache, scaleCache = self._compute_product_cache(evalTree, comm)
#use cached data to construct return values
Gs = evalTree.final_view(prodCache, axis=0)
#shape == ( len(circuit_list), dim, dim ), Gs[i] is product for i-th operation sequence
scaleExps = evalTree.final_view(scaleCache)
old_err = _np.seterr(over='ignore')
scaleVals = _np.exp(scaleExps) # may overflow, but OK if infs occur here
_np.seterr(**old_err)
if bScale:
return Gs, scaleVals
else:
old_err = _np.seterr(over='ignore')
Gs = _np.swapaxes(_np.swapaxes(Gs, 0, 2) * scaleVals, 0, 2) # may overflow, but ok
_np.seterr(**old_err)
return Gs
def bulk_dproduct(self, evalTree, flat=False, bReturnProds=False,
bScale=False, comm=None, wrtFilter=None):
"""
        Compute the derivatives of many operation sequences at once.
Parameters
----------
evalTree : EvalTree
given by a prior call to bulk_evaltree. Specifies the operation sequences
to compute the bulk operation on.
flat : bool, optional
Affects the shape of the returned derivative array (see below).
bReturnProds : bool, optional
when set to True, additionally return the probabilities.
bScale : bool, optional
When True, return a scaling factor (see below).
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors. Distribution is first done over the
set of parameters being differentiated with respect to. If there are
more processors than model parameters, distribution over a split
evalTree (if given) is possible.
wrtFilter : list of ints, optional
If not None, a list of integers specifying which gate parameters
to include in the derivative. Each element is an index into an
array of gate parameters ordered by concatenating each gate's
parameters (in the order specified by the model). This argument
is used internally for distributing derivative calculations across
multiple processors.
Returns
-------
derivs : numpy array
* if flat == False, an array of shape S x M x G x G, where:
- S == len(circuit_list)
- M == the length of the vectorized model
- G == the linear dimension of a operation matrix (G x G operation matrices)
and derivs[i,j,k,l] holds the derivative of the (k,l)-th entry
of the i-th operation sequence product with respect to the j-th model
parameter.
* if flat == True, an array of shape S*N x M where:
- N == the number of entries in a single flattened gate (ordering same as numpy.flatten),
- S,M == as above,
and deriv[i,j] holds the derivative of the (i % G^2)-th entry of
the (i / G^2)-th flattened operation sequence product with respect to
the j-th model parameter.
products : numpy array
Only returned when bReturnProds == True. An array of shape
S x G x G; products[i] is the i-th operation sequence product.
scaleVals : numpy array
Only returned when bScale == True. An array of shape S such that
scaleVals[i] contains the multiplicative scaling needed for
the derivatives and/or products for the i-th operation sequence.
"""
nCircuits = evalTree.num_final_strings()
nDerivCols = self.Np if (wrtFilter is None) else _slct.length(wrtFilter)
dim = self.dim
wrtSlice = _slct.list_to_slice(wrtFilter) if (wrtFilter is not None) else None
#TODO: just allow slices as argument: wrtFilter -> wrtSlice?
prodCache, scaleCache = self._compute_product_cache(evalTree, comm)
dProdCache = self._compute_dproduct_cache(evalTree, prodCache, scaleCache,
comm, wrtSlice)
#use cached data to construct return values
old_err = _np.seterr(over='ignore')
scaleExps = evalTree.final_view(scaleCache)
scaleVals = _np.exp(scaleExps) # may overflow, but OK if infs occur here
_np.seterr(**old_err)
if bReturnProds:
Gs = evalTree.final_view(prodCache, axis=0)
#shape == ( len(circuit_list), dim, dim ),
# Gs[i] is product for i-th operation sequence
dGs = evalTree.final_view(dProdCache, axis=0)
#shape == ( len(circuit_list), nDerivCols, dim, dim ),
# dGs[i] is dprod_dOps for ith string
if not bScale:
old_err = _np.seterr(over='ignore', invalid='ignore')
Gs = _np.swapaxes(_np.swapaxes(Gs, 0, 2) * scaleVals, 0, 2) # may overflow, but ok
# may overflow or get nans (invalid), but ok
dGs = _np.swapaxes(_np.swapaxes(dGs, 0, 3) * scaleVals, 0, 3)
# convert nans to zero, as these occur b/c an inf scaleVal is mult by a zero deriv value (see below)
dGs[_np.isnan(dGs)] = 0
_np.seterr(**old_err)
if flat:
dGs = _np.swapaxes(_np.swapaxes(dGs, 0, 1).reshape(
(nDerivCols, nCircuits * dim**2)), 0, 1) # cols = deriv cols, rows = flattened everything else
return (dGs, Gs, scaleVals) if bScale else (dGs, Gs)
else:
dGs = evalTree.final_view(dProdCache, axis=0)
#shape == ( len(circuit_list), nDerivCols, dim, dim ),
# dGs[i] is dprod_dOps for ith string
if not bScale:
old_err = _np.seterr(over='ignore', invalid='ignore')
# may overflow or get nans (invalid), but ok
dGs = _np.swapaxes(_np.swapaxes(dGs, 0, 3) * scaleVals, 0, 3)
# convert nans to zero, as these occur b/c an inf scaleVal is mult by a zero deriv value, and we
dGs[_np.isnan(dGs)] = 0
# assume the zero deriv value trumps since we've renormed to keep all the products within decent bounds
#assert( len( (_np.isnan(dGs)).nonzero()[0] ) == 0 )
#assert( len( (_np.isinf(dGs)).nonzero()[0] ) == 0 )
#dGs = clip(dGs,-1e300,1e300)
_np.seterr(**old_err)
if flat:
dGs = _np.swapaxes(_np.swapaxes(dGs, 0, 1).reshape(
(nDerivCols, nCircuits * dim**2)), 0, 1) # cols = deriv cols, rows = flattened everything else
return (dGs, scaleVals) if bScale else dGs
def bulk_hproduct(self, evalTree, flat=False, bReturnDProdsAndProds=False,
bScale=False, comm=None, wrtFilter1=None, wrtFilter2=None):
"""
Return the Hessian of many operation sequence products at once.
Parameters
----------
evalTree : EvalTree
given by a prior call to bulk_evaltree. Specifies the operation sequences
to compute the bulk operation on.
flat : bool, optional
Affects the shape of the returned derivative array (see below).
bReturnDProdsAndProds : bool, optional
when set to True, additionally return the probabilities and
their derivatives (see below).
bScale : bool, optional
When True, return a scaling factor (see below).
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors. Distribution is first done over the
set of parameters being differentiated with respect to when the
*second* derivative is taken. If there are more processors than
model parameters, distribution over a split evalTree (if given)
is possible.
wrtFilter1, wrtFilter2 : list of ints, optional
If not None, a list of integers specifying which gate parameters
to differentiate with respect to in the first (row) and second (col)
derivative operations, respectively. Each element is an index into an
array of gate parameters ordered by concatenating each gate's
parameters (in the order specified by the model). This argument
is used internally for distributing derivative calculations across
multiple processors.
Returns
-------
hessians : numpy array
* if flat == False, an array of shape S x M x M x G x G, where
- S == len(circuit_list)
- M == the length of the vectorized model
- G == the linear dimension of a operation matrix (G x G operation matrices)
and hessians[i,j,k,l,m] holds the derivative of the (l,m)-th entry
of the i-th operation sequence product with respect to the k-th then j-th
model parameters.
* if flat == True, an array of shape S*N x M x M where
- N == the number of entries in a single flattened gate (ordering as numpy.flatten),
- S,M == as above,
and hessians[i,j,k] holds the derivative of the (i % G^2)-th entry
of the (i / G^2)-th flattened operation sequence product with respect to
the k-th then j-th model parameters.
derivs1, derivs2 : numpy array
Only returned if bReturnDProdsAndProds == True.
* if flat == False, two arrays of shape S x M x G x G, where
- S == len(circuit_list)
- M == the number of model params or wrtFilter1 or 2, respectively
- G == the linear dimension of a operation matrix (G x G operation matrices)
and derivs[i,j,k,l] holds the derivative of the (k,l)-th entry
of the i-th operation sequence product with respect to the j-th model
parameter.
* if flat == True, an array of shape S*N x M where
- N == the number of entries in a single flattened gate (ordering is
the same as that used by numpy.flatten),
- S,M == as above,
and deriv[i,j] holds the derivative of the (i % G^2)-th entry of
the (i / G^2)-th flattened operation sequence product with respect to
the j-th model parameter.
products : numpy array
Only returned when bReturnDProdsAndProds == True. An array of shape
S x G x G; products[i] is the i-th operation sequence product.
scaleVals : numpy array
Only returned when bScale == True. An array of shape S such that
scaleVals[i] contains the multiplicative scaling needed for
the hessians, derivatives, and/or products for the i-th operation sequence.
"""
dim = self.dim
nDerivCols1 = self.Np if (wrtFilter1 is None) else _slct.length(wrtFilter1)
nDerivCols2 = self.Np if (wrtFilter2 is None) else _slct.length(wrtFilter2)
nCircuits = evalTree.num_final_strings() # len(circuit_list)
wrtSlice1 = _slct.list_to_slice(wrtFilter1) if (wrtFilter1 is not None) else None
wrtSlice2 = _slct.list_to_slice(wrtFilter2) if (wrtFilter2 is not None) else None
#TODO: just allow slices as argument: wrtFilter -> wrtSlice?
prodCache, scaleCache = self._compute_product_cache(evalTree, comm)
dProdCache1 = self._compute_dproduct_cache(evalTree, prodCache, scaleCache,
comm, wrtSlice1)
dProdCache2 = dProdCache1 if (wrtSlice1 == wrtSlice2) else \
self._compute_dproduct_cache(evalTree, prodCache, scaleCache,
comm, wrtSlice2)
hProdCache = self._compute_hproduct_cache(evalTree, prodCache, dProdCache1, dProdCache2,
scaleCache, comm, wrtSlice1, wrtSlice2)
#use cached data to construct return values
old_err = _np.seterr(over='ignore')
scaleExps = evalTree.final_view(scaleCache)
scaleVals = _np.exp(scaleExps) # may overflow, but OK if infs occur here
_np.seterr(**old_err)
if bReturnDProdsAndProds:
Gs = evalTree.final_view(prodCache, axis=0)
#shape == ( len(circuit_list), dim, dim ),
# Gs[i] is product for i-th operation sequence
dGs1 = evalTree.final_view(dProdCache1, axis=0)
dGs2 = evalTree.final_view(dProdCache2, axis=0)
#shape == ( len(circuit_list), nDerivColsX, dim, dim ),
# dGs[i] is dprod_dOps for ith string
hGs = evalTree.final_view(hProdCache, axis=0)
#shape == ( len(circuit_list), nDerivCols1, nDerivCols2, dim, dim ),
# hGs[i] is hprod_dGates for ith string
if not bScale:
old_err = _np.seterr(over='ignore', invalid='ignore')
Gs = _np.swapaxes(_np.swapaxes(Gs, 0, 2) * scaleVals, 0, 2) # may overflow, but ok
# may overflow or get nans (invalid), but ok
dGs1 = _np.swapaxes(_np.swapaxes(dGs1, 0, 3) * scaleVals, 0, 3)
# may overflow or get nans (invalid), but ok
dGs2 = _np.swapaxes(_np.swapaxes(dGs2, 0, 3) * scaleVals, 0, 3)
# may overflow or get nans (invalid), but ok
hGs = _np.swapaxes(_np.swapaxes(hGs, 0, 4) * scaleVals, 0, 4)
# convert nans to zero, as these occur b/c an inf scaleVal is mult by a zero deriv value (see below)
dGs1[_np.isnan(dGs1)] = 0
# convert nans to zero, as these occur b/c an inf scaleVal is mult by a zero deriv value (see below)
dGs2[_np.isnan(dGs2)] = 0
# convert nans to zero, as these occur b/c an inf scaleVal is mult by a zero hessian value (see below)
hGs[_np.isnan(hGs)] = 0
_np.seterr(**old_err)
if flat:
# cols = deriv cols, rows = flattened all else
dGs1 = _np.swapaxes(_np.swapaxes(dGs1, 0, 1).reshape((nDerivCols1, nCircuits * dim**2)), 0, 1)
# cols = deriv cols, rows = flattened all else
dGs2 = _np.swapaxes(_np.swapaxes(dGs2, 0, 1).reshape((nDerivCols2, nCircuits * dim**2)), 0, 1)
hGs = _np.rollaxis(_np.rollaxis(hGs, 0, 3).reshape(
(nDerivCols1, nDerivCols2, nCircuits * dim**2)), 2) # cols = deriv cols, rows = all else
return (hGs, dGs1, dGs2, Gs, scaleVals) if bScale else (hGs, dGs1, dGs2, Gs)
else:
hGs = evalTree.final_view(hProdCache, axis=0)
#shape == ( len(circuit_list), nDerivCols, nDerivCols, dim, dim )
if not bScale:
old_err = _np.seterr(over='ignore', invalid='ignore')
# may overflow or get nans (invalid), but ok
hGs = _np.swapaxes(_np.swapaxes(hGs, 0, 4) * scaleVals, 0, 4)
# convert nans to zero, as these occur b/c an inf scaleVal is mult by a zero hessian value, and we
hGs[_np.isnan(hGs)] = 0
# assume the zero hessian value trumps since we've renormed to keep all the products within decent
# bounds
#assert( len( (_np.isnan(hGs)).nonzero()[0] ) == 0 )
#assert( len( (_np.isinf(hGs)).nonzero()[0] ) == 0 )
#hGs = clip(hGs,-1e300,1e300)
_np.seterr(**old_err)
if flat: hGs = _np.rollaxis(_np.rollaxis(hGs, 0, 3).reshape(
(nDerivCols1, nDerivCols2, nCircuits * dim**2)), 2) # as above
return (hGs, scaleVals) if bScale else hGs
def _scaleExp(self, scaleExps):
old_err = _np.seterr(over='ignore')
scaleVals = _np.exp(scaleExps) # may overflow, but OK if infs occur here
_np.seterr(**old_err)
return scaleVals
def _rhoE_from_spamTuple(self, spamTuple):
assert(len(spamTuple) == 2)
if isinstance(spamTuple[0], _Label):
rholabel, elabel = spamTuple
# This calculator uses the convention that rho has shape (N,1)
rho = self.sos.get_prep(rholabel).todense()[:, None]
E = _np.conjugate(_np.transpose(self.sos.get_effect(elabel).todense()
[:, None])) # convention: E has shape (1,N)
else:
# a "custom" spamLabel consisting of a pair of SPAMVec (or array)
# objects: (prepVec, effectVec)
rho, Eraw = spamTuple
E = _np.conjugate(_np.transpose(Eraw))
return rho, E
def _rhoEs_from_spamTuples(self, rholabel, elabels):
#Note: no support for "custom" spamlabels...
# This calculator uses the convention that rho has shape (N,1)
rho = self.sos.get_prep(rholabel).todense()[:, None]
Es = [self.sos.get_effect(elabel).todense()[:, None] for elabel in elabels]
Es = _np.conjugate(_np.transpose(_np.concatenate(Es, axis=1))) # convention: Es has shape (len(elabels),N)
return rho, Es
def _probs_from_rhoE(self, rho, E, Gs, scaleVals):
if self.evotype == "statevec": raise NotImplementedError("Unitary evolution not fully supported yet!")
#Compute probability and save in return array
# want vp[iFinal] = float(dot(E, dot(G, rho)))
# vp[i] = sum_k,l E[0,k] Gs[i,k,l] rho[l,0] * scaleVals[i]
# vp[i] = sum_k E[0,k] dot(Gs, rho)[i,k,0] * scaleVals[i]
# vp[i] = dot( E, dot(Gs, rho))[0,i,0] * scaleVals[i]
# vp = squeeze( dot( E, dot(Gs, rho)), axis=(0,2) ) * scaleVals
return _np.squeeze(_np.dot(E, _np.dot(Gs, rho)), axis=(0, 2)) * scaleVals
# shape == (len(circuit_list),) ; may overflow but OK
def _dprobs_from_rhoE(self, spamTuple, rho, E, Gs, dGs, scaleVals, wrtSlice=None):
if self.evotype == "statevec": raise NotImplementedError("Unitary evolution not fully supported yet!")
rholabel, elabel = spamTuple
rhoVec = self.sos.get_prep(rholabel) # distinct from rho,E b/c rho,E are
EVec = self.sos.get_effect(elabel) # arrays, these are SPAMVecs
nCircuits = Gs.shape[0]
rho_wrtFilter, rho_gpindices = self._process_wrtFilter(wrtSlice, self.sos.get_prep(rholabel))
E_wrtFilter, E_gpindices = self._process_wrtFilter(wrtSlice, self.sos.get_effect(elabel))
nDerivCols = self.Np if wrtSlice is None else _slct.length(wrtSlice)
# GATE DERIVS (assume dGs is already sized/filtered) -------------------
assert(dGs.shape[1] == nDerivCols), "dGs must be pre-filtered!"
#Compute d(probability)/dOps and save in return list (now have G,dG => product, dprod_dOps)
# prod, dprod_dOps = G,dG
# dp_dOps[i,j] = sum_k,l E[0,k] dGs[i,j,k,l] rho[l,0]
# dp_dOps[i,j] = sum_k E[0,k] dot( dGs, rho )[i,j,k,0]
# dp_dOps[i,j] = dot( E, dot( dGs, rho ) )[0,i,j,0]
# dp_dOps = squeeze( dot( E, dot( dGs, rho ) ), axis=(0,3))
old_err2 = _np.seterr(invalid='ignore', over='ignore')
dp_dOps = _np.squeeze(_np.dot(E, _np.dot(dGs, rho)), axis=(0, 3)) * scaleVals[:, None]
_np.seterr(**old_err2)
# may overflow, but OK ; shape == (len(circuit_list), nDerivCols)
# may also give invalid value due to scaleVals being inf and dot-prod being 0. In
# this case set to zero since we can't tell whether it's + or - inf anyway...
dp_dOps[_np.isnan(dp_dOps)] = 0
#SPAM -------------
# Get: dp_drhos[i, rho_gpindices] = dot(E,Gs[i],drho/drhoP)
# dp_drhos[i,J0+J] = sum_kl E[0,k] Gs[i,k,l] drhoP[l,J]
# dp_drhos[i,J0+J] = dot(E, Gs, drhoP)[0,i,J]
# dp_drhos[:,J0+J] = squeeze(dot(E, Gs, drhoP),axis=(0,))[:,J]
dp_drhos = _np.zeros((nCircuits, nDerivCols))
_fas(dp_drhos, [None, rho_gpindices],
_np.squeeze(_np.dot(_np.dot(E, Gs),
rhoVec.deriv_wrt_params(rho_wrtFilter)),
axis=(0,)) * scaleVals[:, None]) # may overflow, but OK
# Get: dp_dEs[i, E_gpindices] = dot(transpose(dE/dEP),Gs[i],rho))
# dp_dEs[i,J0+J] = sum_lj dEPT[J,j] Gs[i,j,l] rho[l,0]
# dp_dEs[i,J0+J] = sum_j dEP[j,J] dot(Gs, rho)[i,j]
# dp_dEs[i,J0+J] = sum_j dot(Gs, rho)[i,j,0] dEP[j,J]
# dp_dEs[i,J0+J] = dot(squeeze(dot(Gs, rho),2), dEP)[i,J]
# dp_dEs[:,J0+J] = dot(squeeze(dot(Gs, rho),axis=(2,)), dEP)[:,J]
dp_dEs = _np.zeros((nCircuits, nDerivCols))
# may overflow, but OK (deriv w.r.t any of self.effects - independent of which)
dp_dAnyE = _np.squeeze(_np.dot(Gs, rho), axis=(2,)) * scaleVals[:, None]
_fas(dp_dEs, [None, E_gpindices],
_np.dot(dp_dAnyE, EVec.deriv_wrt_params(E_wrtFilter)))
sub_vdp = dp_drhos + dp_dEs + dp_dOps
return sub_vdp
#def _get_filter_info(self, wrtSlices):
# """
# Returns a "filter" object containing info about the mapping
# of prep and effect parameters onto a final "filtered" set.
# """
# PrepEffectFilter = _collections.namedtuple(
# 'PrepEffectFilter', 'rho_local_slices rho_global_slices ' +
# 'e_local_slices e_global_slices num_rho_params num_e_params')
#
# if wrtSlices is not None:
# loc_rho_slices = [
# _slct.shift(_slct.intersect(
# wrtSlices['preps'],
# slice(self.rho_offset[i],self.rho_offset[i+1])),
# -self.rho_offset[i]) for i in range(len(self.preps))]
# tmp_num_params = [_slct.length(s) for s in loc_rho_slices]
# tmp_offsets = [ sum(tmp_num_params[0:i]) for i in range(len(self.preps)+1) ]
# global_rho_slices = [ slice(tmp_offsets[i],tmp_offsets[i+1])
# for i in range(len(self.preps)) ]
#
# loc_e_slices = [
# _slct.shift(_slct.intersect(
# wrtSlices['effects'],
# slice(self.e_offset[i],self.e_offset[i+1])),
# -self.e_offset[i]) for i in range(len(self.effects))]
# tmp_num_params = [_slct.length(s) for s in loc_e_slices]
# tmp_offsets = [ sum(tmp_num_params[0:i]) for i in range(len(self.effects)+1) ]
# global_e_slices = [ slice(tmp_offsets[i],tmp_offsets[i+1])
# for i in range(len(self.effects)) ]
#
# return PrepEffectFilter(rho_local_slices=loc_rho_slices,
# rho_global_slices=global_rho_slices,
# e_local_slices=loc_e_slices,
# e_global_slices=global_e_slices,
# num_rho_params=_slct.length(wrtSlices['preps']),
# num_e_params=_slct.length(wrtSlices['effects']))
# else:
# loc_rho_slices = [slice(None,None)]*len(self.preps)
# loc_e_slices = [slice(None,None)]*len(self.effects)
# global_rho_slices = [slice(self.rho_offset[i],self.rho_offset[i+1]) for i in range(len(self.preps)) ]
# global_e_slices = [slice(self.e_offset[i],self.e_offset[i+1]) for i in range(len(self.effects)) ]
# return PrepEffectFilter(rho_local_slices=loc_rho_slices,
# rho_global_slices=global_rho_slices,
# e_local_slices=loc_e_slices,
# e_global_slices=global_e_slices,
# num_rho_params=self.tot_rho_params,
# num_e_params=self.tot_e_params)
def _hprobs_from_rhoE(self, spamTuple, rho, E, Gs, dGs1, dGs2, hGs, scaleVals,
wrtSlice1=None, wrtSlice2=None):
if self.evotype == "statevec": raise NotImplementedError("Unitary evolution not fully supported yet!")
rholabel, elabel = spamTuple
rhoVec = self.sos.get_prep(rholabel) # distinct from rho,E b/c rho,E are
EVec = self.sos.get_effect(elabel) # arrays, these are SPAMVecs
nCircuits = Gs.shape[0]
rho_wrtFilter1, rho_gpindices1 = self._process_wrtFilter(wrtSlice1, self.sos.get_prep(rholabel))
rho_wrtFilter2, rho_gpindices2 = self._process_wrtFilter(wrtSlice2, self.sos.get_prep(rholabel))
E_wrtFilter1, E_gpindices1 = self._process_wrtFilter(wrtSlice1, self.sos.get_effect(elabel))
E_wrtFilter2, E_gpindices2 = self._process_wrtFilter(wrtSlice2, self.sos.get_effect(elabel))
nDerivCols1 = self.Np if wrtSlice1 is None else _slct.length(wrtSlice1)
nDerivCols2 = self.Np if wrtSlice2 is None else _slct.length(wrtSlice2)
#flt1 = self._get_filter_info(wrtSlices1)
#flt2 = self._get_filter_info(wrtSlices2)
# GATE DERIVS (assume hGs is already sized/filtered) -------------------
assert(hGs.shape[1] == nDerivCols1), "hGs must be pre-filtered!"
assert(hGs.shape[2] == nDerivCols2), "hGs must be pre-filtered!"
#Compute d2(probability)/dGates2 and save in return list
# d2pr_dOps2[i,j,k] = sum_l,m E[0,l] hGs[i,j,k,l,m] rho[m,0]
# d2pr_dOps2[i,j,k] = sum_l E[0,l] dot( dGs, rho )[i,j,k,l,0]
# d2pr_dOps2[i,j,k] = dot( E, dot( dGs, rho ) )[0,i,j,k,0]
# d2pr_dOps2 = squeeze( dot( E, dot( dGs, rho ) ), axis=(0,4))
old_err2 = _np.seterr(invalid='ignore', over='ignore')
d2pr_dOps2 = _np.squeeze(_np.dot(E, _np.dot(hGs, rho)), axis=(0, 4)) * scaleVals[:, None, None]
_np.seterr(**old_err2)
# may overflow, but OK ; shape == (len(circuit_list), nDerivCols, nDerivCols)
# may also give invalid value due to scaleVals being inf and dot-prod being 0. In
# this case set to zero since we can't tell whether it's + or - inf anyway...
d2pr_dOps2[_np.isnan(d2pr_dOps2)] = 0
# SPAM DERIVS (assume dGs1 and dGs2 are already sized/filtered) --------
assert(dGs1.shape[1] == nDerivCols1), "dGs1 must be pre-filtered!"
assert(dGs2.shape[1] == nDerivCols2), "dGs2 must be pre-filtered!"
# Get: d2pr_drhos[i, j, rho_gpindices] = dot(E,dGs[i,j],drho/drhoP))
# d2pr_drhos[i,j,J0+J] = sum_kl E[0,k] dGs[i,j,k,l] drhoP[l,J]
# d2pr_drhos[i,j,J0+J] = dot(E, dGs, drhoP)[0,i,j,J]
# d2pr_drhos[:,:,J0+J] = squeeze(dot(E, dGs, drhoP),axis=(0,))[:,:,J]
drho = rhoVec.deriv_wrt_params(rho_wrtFilter2)
d2pr_drhos1 = _np.zeros((nCircuits, nDerivCols1, nDerivCols2))
_fas(d2pr_drhos1, [None, None, rho_gpindices2],
_np.squeeze(_np.dot(_np.dot(E, dGs1), drho), axis=(0,))
* scaleVals[:, None, None]) # overflow OK
# get d2pr_drhos where gate derivatives are wrt the 2nd set of gate parameters
if dGs1 is dGs2 and wrtSlice1 == wrtSlice2: # TODO: better check for equivalence: maybe let dGs2 be None?
assert(nDerivCols1 == nDerivCols2)
d2pr_drhos2 = _np.transpose(d2pr_drhos1, (0, 2, 1))
else:
drho = rhoVec.deriv_wrt_params(rho_wrtFilter1)
d2pr_drhos2 = _np.zeros((nCircuits, nDerivCols2, nDerivCols1))
_fas(d2pr_drhos2, [None, None, rho_gpindices1],
_np.squeeze(_np.dot(_np.dot(E, dGs2), drho), axis=(0,))
* scaleVals[:, None, None]) # overflow OK
d2pr_drhos2 = _np.transpose(d2pr_drhos2, (0, 2, 1))
# Get: d2pr_dEs[i, j, E_gpindices] = dot(transpose(dE/dEP),dGs[i,j],rho)
# d2pr_dEs[i,j,J0+J] = sum_kl dEPT[J,k] dGs[i,j,k,l] rho[l,0]
# d2pr_dEs[i,j,J0+J] = sum_k dEP[k,J] dot(dGs, rho)[i,j,k,0]
# d2pr_dEs[i,j,J0+J] = dot( squeeze(dot(dGs, rho),axis=(3,)), dEP)[i,j,J]
# d2pr_dEs[:,:,J0+J] = dot( squeeze(dot(dGs, rho),axis=(3,)), dEP)[:,:,J]
d2pr_dEs1 = _np.zeros((nCircuits, nDerivCols1, nDerivCols2))
dp_dAnyE = _np.squeeze(_np.dot(dGs1, rho), axis=(3,)) * scaleVals[:, None, None] # overflow OK
devec = EVec.deriv_wrt_params(E_wrtFilter2)
_fas(d2pr_dEs1, [None, None, E_gpindices2],
_np.dot(dp_dAnyE, devec))
# get d2pr_dEs where gate derivatives are wrt the 2nd set of gate parameters
if dGs1 is dGs2 and wrtSlice1 == wrtSlice2: # TODO: better check for equivalence: maybe let dGs2 be None?
assert(nDerivCols1 == nDerivCols2)
d2pr_dEs2 = _np.transpose(d2pr_dEs1, (0, 2, 1))
else:
d2pr_dEs2 = _np.zeros((nCircuits, nDerivCols2, nDerivCols1))
dp_dAnyE = _np.squeeze(_np.dot(dGs2, rho), axis=(3,)) * scaleVals[:, None, None] # overflow OK
devec = EVec.deriv_wrt_params(E_wrtFilter1)
_fas(d2pr_dEs2, [None, None, E_gpindices1], _np.dot(dp_dAnyE, devec))
d2pr_dEs2 = _np.transpose(d2pr_dEs2, (0, 2, 1))
# Get: d2pr_dErhos[i, e_offset[eIndex]:e_offset[eIndex+1], e_offset[rhoIndex]:e_offset[rhoIndex+1]] =
# dEP^T * prod[i,:,:] * drhoP
# d2pr_dErhos[i,J0+J,K0+K] = sum jk dEPT[J,j] prod[i,j,k] drhoP[k,K]
# d2pr_dErhos[i,J0+J,K0+K] = sum j dEPT[J,j] dot(prod,drhoP)[i,j,K]
# d2pr_dErhos[i,J0+J,K0+K] = dot(dEPT,prod,drhoP)[J,i,K]
# d2pr_dErhos[i,J0+J,K0+K] = swapaxes(dot(dEPT,prod,drhoP),0,1)[i,J,K]
# d2pr_dErhos[:,J0+J,K0+K] = swapaxes(dot(dEPT,prod,drhoP),0,1)[:,J,K]
d2pr_dErhos1 = _np.zeros((nCircuits, nDerivCols1, nDerivCols2))
drho = rhoVec.deriv_wrt_params(rho_wrtFilter2)
dp_dAnyE = _np.dot(Gs, drho) * scaleVals[:, None, None] # overflow OK
devec = EVec.deriv_wrt_params(E_wrtFilter1)
_fas(d2pr_dErhos1, (None, E_gpindices1, rho_gpindices2),
_np.swapaxes(_np.dot(_np.transpose(devec), dp_dAnyE), 0, 1))
# get d2pr_dEs where E derivatives are wrt the 2nd set of gate parameters
if wrtSlice1 == wrtSlice2: # Note: this doesn't involve gate derivatives
d2pr_dErhos2 = _np.transpose(d2pr_dErhos1, (0, 2, 1))
else:
d2pr_dErhos2 = _np.zeros((nCircuits, nDerivCols2, nDerivCols1))
drho = rhoVec.deriv_wrt_params(rho_wrtFilter1)
dp_dAnyE = _np.dot(Gs, drho) * scaleVals[:, None, None] # overflow OK
devec = EVec.deriv_wrt_params(E_wrtFilter2)
_fas(d2pr_dErhos2, [None, E_gpindices2, rho_gpindices1],
_np.swapaxes(_np.dot(_np.transpose(devec), dp_dAnyE), 0, 1))
d2pr_dErhos2 = _np.transpose(d2pr_dErhos2, (0, 2, 1))
#Note: these 2nd derivatives are non-zero when the spam vectors have
# a more than linear dependence on their parameters.
if self.sos.get_prep(rholabel).has_nonzero_hessian():
dp_dAnyRho = _np.dot(E, Gs).squeeze(0) * scaleVals[:, None] # overflow OK
d2pr_d2rhos = _np.zeros((nCircuits, nDerivCols1, nDerivCols2))
_fas(d2pr_d2rhos, [None, rho_gpindices1, rho_gpindices2],
_np.tensordot(dp_dAnyRho, self.sos.get_prep(rholabel).hessian_wrt_params(
rho_wrtFilter1, rho_wrtFilter2), (1, 0)))
# _np.einsum('ij,jkl->ikl', dp_dAnyRho, self.sos.get_prep(rholabel).hessian_wrt_params(
# rho_wrtFilter1, rho_wrtFilter2))
else:
d2pr_d2rhos = 0
if self.sos.get_effect(elabel).has_nonzero_hessian():
dp_dAnyE = _np.dot(Gs, rho).squeeze(2) * scaleVals[:, None] # overflow OK
d2pr_d2Es = _np.zeros((nCircuits, nDerivCols1, nDerivCols2))
_fas(d2pr_d2Es, [None, E_gpindices1, E_gpindices2],
_np.tensordot(dp_dAnyE, self.sos.get_effect(elabel).hessian_wrt_params(
E_wrtFilter1, E_wrtFilter2), (1, 0)))
# _np.einsum('ij,jkl->ikl', dp_dAnyE, self.sos.get_effect(elabel).hessian_wrt_params(
# E_wrtFilter1, E_wrtFilter2))
else:
d2pr_d2Es = 0
# END SPAM DERIVS -----------------------
ret = d2pr_d2rhos + d2pr_dErhos2 + d2pr_drhos2 # wrt rho
ret += d2pr_dErhos1 + d2pr_d2Es + d2pr_dEs2 # wrt E
ret += d2pr_drhos1 + d2pr_dEs1 + d2pr_dOps2 # wrt gates
return ret
def _check(self, evalTree, prMxToFill=None, dprMxToFill=None, hprMxToFill=None, clipTo=None):
# compare with older slower version that should do the same thing (for debugging)
master_circuit_list = evalTree.generate_circuit_list(permute=False) # raw operation sequences
for spamTuple, (fInds, gInds) in evalTree.spamtuple_indices.items():
circuit_list = master_circuit_list[gInds]
if prMxToFill is not None:
check_vp = _np.array([self.prs(spamTuple[0], [spamTuple[1]], circuit, clipTo, False)[0]
for circuit in circuit_list])
if _nla.norm(prMxToFill[fInds] - check_vp) > 1e-6:
_warnings.warn("norm(vp-check_vp) = %g - %g = %g" %
(_nla.norm(prMxToFill[fInds]),
_nla.norm(check_vp),
_nla.norm(prMxToFill[fInds] - check_vp))) # pragma: no cover
if dprMxToFill is not None:
check_vdp = _np.concatenate(
[self.dpr(spamTuple, circuit, False, clipTo)
for circuit in circuit_list], axis=0)
if _nla.norm(dprMxToFill[fInds] - check_vdp) > 1e-6:
_warnings.warn("norm(vdp-check_vdp) = %g - %g = %g" %
(_nla.norm(dprMxToFill[fInds]),
_nla.norm(check_vdp),
_nla.norm(dprMxToFill[fInds] - check_vdp))) # pragma: no cover
if hprMxToFill is not None:
check_vhp = _np.concatenate(
[self.hpr(spamTuple, circuit, False, False, clipTo)
for circuit in circuit_list], axis=0)
if _nla.norm(hprMxToFill[fInds][0] - check_vhp[0]) > 1e-6:
_warnings.warn("norm(vhp-check_vhp) = %g - %g = %g" %
(_nla.norm(hprMxToFill[fInds]),
_nla.norm(check_vhp),
_nla.norm(hprMxToFill[fInds] - check_vhp))) # pragma: no cover
def bulk_fill_probs(self, mxToFill, evalTree,
clipTo=None, check=False, comm=None):
"""
Compute the outcome probabilities for an entire tree of operation sequences.
This routine fills a 1D array, `mxToFill` with the probabilities
corresponding to the *simplified* operation sequences found in an evaluation
tree, `evalTree`. An initial list of (general) :class:`Circuit`
objects is *simplified* into a list of gate-only sequences along with
a mapping of final elements (i.e. probabilities) to gate-only sequence
and prep/effect pairs. The evaluation tree organizes how to efficiently
compute the gate-only sequences. This routine fills in `mxToFill`, which
must have length equal to the number of final elements (this can be
obtained by `evalTree.num_final_elements()`). To interpret which elements
correspond to which strings and outcomes, you'll need the mappings
generated when the original list of `Circuits` was simplified.
Parameters
----------
mxToFill : numpy ndarray
an already-allocated 1D numpy array of length equal to the
total number of computed elements (i.e. evalTree.num_final_elements())
evalTree : EvalTree
given by a prior call to bulk_evaltree. Specifies the *simplified* gate
strings to compute the bulk operation on.
clipTo : 2-tuple, optional
(min,max) to clip return value if not None.
check : boolean, optional
If True, perform extra checks within code to verify correctness,
generating warnings when checks fail. Used for testing, and runs
much slower when True.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors. Distribution is performed over
subtrees of evalTree (if it is split).
Returns
-------
None
"""
#get distribution across subtrees (groups if needed)
subtrees = evalTree.get_sub_trees()
mySubTreeIndices, subTreeOwners, mySubComm = evalTree.distribute(comm)
#eval on each local subtree
for iSubTree in mySubTreeIndices:
evalSubTree = subtrees[iSubTree]
#Free memory from previous subtree iteration before computing caches
scaleVals = Gs = prodCache = scaleCache = None
#Fill cache info
prodCache, scaleCache = self._compute_product_cache(evalSubTree, mySubComm)
#use cached data to final values
scaleVals = self._scaleExp(evalSubTree.final_view(scaleCache))
Gs = evalSubTree.final_view(prodCache, axis=0)
# ( nCircuits, dim, dim )
def calc_and_fill(spamTuple, fInds, gInds, pslc1, pslc2, sumInto):
""" Compute and fill result quantities for given arguments """
old_err = _np.seterr(over='ignore')
rho, E = self._rhoE_from_spamTuple(spamTuple)
_fas(mxToFill, [fInds], self._probs_from_rhoE(rho, E, Gs[gInds], scaleVals[gInds]), add=sumInto)
_np.seterr(**old_err)
self._fill_result_tuple((mxToFill,), evalSubTree,
slice(None), slice(None), calc_and_fill)
#collect/gather results
subtreeElementIndices = [t.final_element_indices(evalTree) for t in subtrees]
_mpit.gather_indices(subtreeElementIndices, subTreeOwners,
mxToFill, [], 0, comm)
#note: pass mxToFill, dim=(KS), so gather mxToFill[felslc] (axis=0)
if clipTo is not None:
_np.clip(mxToFill, clipTo[0], clipTo[1], out=mxToFill) # in-place clip
if check:
self._check(evalTree, mxToFill, clipTo=clipTo)
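# Hedged usage sketch (added; not part of the original source -- `fwdsim`,
# `model` and `circuits` are assumed names). Per the docstring above, the
# EvalTree comes from a prior bulk_evaltree call and mxToFill is preallocated:
# evalTree = ...  # obtained from model.bulk_evaltree(circuits)
# probs = _np.empty(evalTree.num_final_elements(), 'd')
# fwdsim.bulk_fill_probs(probs, evalTree, clipTo=(0.0, 1.0))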
def bulk_fill_dprobs(self, mxToFill, evalTree,
prMxToFill=None, clipTo=None, check=False,
comm=None, wrtFilter=None, wrtBlockSize=None,
profiler=None, gatherMemLimit=None):
"""
Compute the outcome probability-derivatives for an entire tree of gate
strings.
Similar to `bulk_fill_probs(...)`, but fills a 2D array with
probability-derivatives for each "final element" of `evalTree`.
Parameters
----------
mxToFill : numpy ndarray
an already-allocated ExM numpy array where E is the total number of
computed elements (i.e. evalTree.num_final_elements()) and M is the
number of model parameters.
evalTree : EvalTree
given by a prior call to bulk_evaltree. Specifies the *simplified* gate
strings to compute the bulk operation on.
prMxToFill : numpy array, optional
when not None, an already-allocated length-E numpy array that is filled
with probabilities, just like in bulk_fill_probs(...).
clipTo : 2-tuple, optional
(min,max) to clip return value if not None.
check : boolean, optional
If True, perform extra checks within code to verify correctness,
generating warnings when checks fail. Used for testing, and runs
much slower when True.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors. Distribution is first performed over
subtrees of evalTree (if it is split), and then over blocks (subsets)
of the parameters being differentiated with respect to (see
wrtBlockSize).
wrtFilter : list of ints, optional
If not None, a list of integers specifying which parameters
to include in the derivative dimension. This argument is used
internally for distributing calculations across multiple
processors and to control memory usage. Cannot be specified
in conjunction with wrtBlockSize.
wrtBlockSize : int or float, optional
The maximum number of derivative columns to compute *products*
for simultaneously. None means compute all requested columns
at once. The minimum of wrtBlockSize and the size that makes
maximal use of available processors is used as the final block size.
This argument must be None if wrtFilter is not None. Set this to
non-None to reduce the amount of intermediate memory required.
profiler : Profiler, optional
A profiler object used to track timing and memory usage.
gatherMemLimit : int, optional
A memory limit in bytes to impose upon the "gather" operations
performed as a part of MPI processor synchronization.
Returns
-------
None
"""
tStart = _time.time()
if profiler is None: profiler = _dummy_profiler
if wrtFilter is not None:
assert(wrtBlockSize is None) # Cannot specify both wrtFilter and wrtBlockSize
wrtSlice = _slct.list_to_slice(wrtFilter)
else:
wrtSlice = None
profiler.mem_check("bulk_fill_dprobs: begin (expect ~ %.2fGB)"
% (mxToFill.nbytes / (1024.0**3)))
## memory profiling of python objects (never seemed very useful
## since numpy does all the major allocation/deallocation).
#if comm is None or comm.Get_rank() == 0:
# import objgraph
# objgraph.show_growth(limit=50)
#get distribution across subtrees (groups if needed)
subtrees = evalTree.get_sub_trees()
mySubTreeIndices, subTreeOwners, mySubComm = evalTree.distribute(comm)
#if comm is not None:
# print("MPI DEBUG: Rank%d subtee sizes = %s" %
# (comm.Get_rank(),",".join([str(len(subtrees[i]))
# for i in mySubTreeIndices])))
#eval on each local subtree
#my_results = []
for iSubTree in mySubTreeIndices:
evalSubTree = subtrees[iSubTree]
felInds = evalSubTree.final_element_indices(evalTree)
#Free memory from previous subtree iteration before computing caches
scaleVals = Gs = dGs = None
prodCache = scaleCache = dProdCache = None
#Fill cache info (not requiring column distribution)
tm = _time.time()
prodCache, scaleCache = self._compute_product_cache(evalSubTree, mySubComm)
profiler.add_time("bulk_fill_dprobs: compute_product_cache", tm)
#use cached data to final values
scaleVals = self._scaleExp(evalSubTree.final_view(scaleCache))
Gs = evalSubTree.final_view(prodCache, axis=0)
#( nCircuits, dim, dim )
profiler.mem_check("bulk_fill_dprobs: post compute product")
def calc_and_fill(spamTuple, fInds, gInds, pslc1, pslc2, sumInto):
""" Compute and fill result quantities for given arguments """
tm = _time.time()
old_err = _np.seterr(over='ignore')
rho, E = self._rhoE_from_spamTuple(spamTuple)
if prMxToFill is not None:
_fas(prMxToFill, [fInds], self._probs_from_rhoE(
rho, E, Gs[gInds], scaleVals[gInds]), add=sumInto)
_fas(mxToFill, [fInds, pslc1], self._dprobs_from_rhoE(
spamTuple, rho, E, Gs[gInds], dGs[gInds], scaleVals[gInds], wrtSlice),
add=sumInto)
_np.seterr(**old_err)
profiler.add_time("bulk_fill_dprobs: calc_and_fill", tm)
#Set wrtBlockSize to use available processors if it isn't specified
if wrtFilter is None:
blkSize = wrtBlockSize # could be None
if (mySubComm is not None) and (mySubComm.Get_size() > 1):
comm_blkSize = self.Np / mySubComm.Get_size()
blkSize = comm_blkSize if (blkSize is None) \
else min(comm_blkSize, blkSize) # override with smaller comm_blkSize
else:
blkSize = None # wrtFilter dictates block
if blkSize is None:
#Fill derivative cache info
tm = _time.time()
dProdCache = self._compute_dproduct_cache(evalSubTree, prodCache, scaleCache,
mySubComm, wrtSlice, profiler)
dGs = evalSubTree.final_view(dProdCache, axis=0)
#( nCircuits, nDerivCols, dim, dim )
profiler.add_time("bulk_fill_dprobs: compute_dproduct_cache", tm)
profiler.mem_check("bulk_fill_dprobs: post compute dproduct")
#Compute all requested derivative columns at once
self._fill_result_tuple((prMxToFill, mxToFill), evalSubTree,
slice(None), slice(None), calc_and_fill)
profiler.mem_check("bulk_fill_dprobs: post fill")
dProdCache = dGs = None # free mem
else: # Divide columns into blocks of at most blkSize
assert(wrtFilter is None) # cannot specify both wrtFilter and blkSize
nBlks = int(_np.ceil(self.Np / blkSize))
# num blocks required to achieve desired average size == blkSize
blocks = _mpit.slice_up_range(self.Np, nBlks, start=0)
# Create placeholder dGs for *no* gate params to compute
# derivatives wrt all spam parameters
dGs = _np.empty((Gs.shape[0], 0, self.dim, self.dim), 'd')
def calc_and_fill_p(spamTuple, fInds, gInds, pslc1, pslc2, sumInto):
""" Compute and fill result quantities for given arguments """
tm = _time.time()
old_err = _np.seterr(over='ignore')
rho, E = self._rhoE_from_spamTuple(spamTuple)
_fas(prMxToFill, [fInds],
self._probs_from_rhoE(rho, E, Gs[gInds], scaleVals[gInds]), add=sumInto)
_np.seterr(**old_err)
profiler.add_time("bulk_fill_dprobs: calc_and_fill_p", tm)
# Compute all probabilities all at once so they're not repeatedly
# computed for each block of derivative columns
if prMxToFill is not None:
self._fill_result_tuple((prMxToFill,), evalSubTree,
slice(None), slice(None), calc_and_fill_p)
profiler.mem_check("bulk_fill_dprobs: post fill probs")
#distribute derivative computation across blocks
myBlkIndices, blkOwners, blkComm = \
_mpit.distribute_indices(list(range(nBlks)), mySubComm)
if blkComm is not None:
_warnings.warn("Note: more CPUs(%d)" % mySubComm.Get_size()
+ " than derivative columns(%d)!" % self.Np
+ " [blkSize = %.1f, nBlks=%d]" % (blkSize, nBlks)) # pragma: no cover
def calc_and_fill_blk(spamTuple, fInds, gInds, pslc1, pslc2, sumInto):
""" Compute and fill result quantities blocks for given arguments """
tm = _time.time()
old_err = _np.seterr(over='ignore')
rho, E = self._rhoE_from_spamTuple(spamTuple)
block_wrtSlice = pslc1
_fas(mxToFill, [fInds, pslc1], self._dprobs_from_rhoE(
spamTuple, rho, E, Gs[gInds], dGs[gInds], scaleVals[gInds], block_wrtSlice),
add=sumInto)
_np.seterr(**old_err)
profiler.add_time("bulk_fill_dprobs: calc_and_fill_blk", tm)
for iBlk in myBlkIndices:
tm = _time.time()
block_wrtSlice = blocks[iBlk]
dProdCache = self._compute_dproduct_cache(evalSubTree, prodCache, scaleCache,
blkComm, block_wrtSlice, profiler)
profiler.add_time("bulk_fill_dprobs: compute_dproduct_cache", tm)
profiler.mem_check(
"bulk_fill_dprobs: post compute dproduct blk (expect "
" +%.2fGB, shape=%s)" % (dProdCache.nbytes / (1024.0**3),
str(dProdCache.shape)))
dGs = evalSubTree.final_view(dProdCache, axis=0)
#( nCircuits, nDerivCols, dim, dim )
self._fill_result_tuple(
(mxToFill,), evalSubTree,
blocks[iBlk], slice(None), calc_and_fill_blk)
profiler.mem_check("bulk_fill_dprobs: post fill blk")
dProdCache = dGs = None # free mem
#gather results
tm = _time.time()
_mpit.gather_slices(blocks, blkOwners, mxToFill, [felInds],
1, mySubComm, gatherMemLimit)
#note: gathering axis 1 of mxToFill[felInds], dim=(ks,M)
profiler.add_time("MPI IPC", tm)
profiler.mem_check("bulk_fill_dprobs: post gather blocks")
#collect/gather results
tm = _time.time()
subtreeElementIndices = [t.final_element_indices(evalTree) for t in subtrees]
_mpit.gather_indices(subtreeElementIndices, subTreeOwners,
mxToFill, [], 0, comm, gatherMemLimit)
#note: pass mxToFill, dim=(KS,M), so gather mxToFill[felInds] (axis=0)
if prMxToFill is not None:
_mpit.gather_indices(subtreeElementIndices, subTreeOwners,
prMxToFill, [], 0, comm)
#note: pass prMxToFill, dim=(KS,), so gather prMxToFill[felInds] (axis=0)
profiler.add_time("MPI IPC", tm)
profiler.mem_check("bulk_fill_dprobs: post gather subtrees")
if clipTo is not None and prMxToFill is not None:
_np.clip(prMxToFill, clipTo[0], clipTo[1], out=prMxToFill) # in-place clip
if check:
self._check(evalTree, prMxToFill, mxToFill, clipTo=clipTo)
profiler.add_time("bulk_fill_dprobs: total", tStart)
profiler.add_count("bulk_fill_dprobs count")
profiler.mem_check("bulk_fill_dprobs: end")
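# Hedged usage sketch (added; `fwdsim` and `evalTree` are assumed names).
# The derivative array has one row per final element and one column per
# model parameter; wrtBlockSize trades memory for extra passes:
# dprobs = _np.empty((evalTree.num_final_elements(), fwdsim.Np), 'd')
# probs = _np.empty(evalTree.num_final_elements(), 'd')
# fwdsim.bulk_fill_dprobs(dprobs, evalTree, prMxToFill=probs, wrtBlockSize=64)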
def bulk_fill_hprobs(self, mxToFill, evalTree,
prMxToFill=None, deriv1MxToFill=None, deriv2MxToFill=None,
clipTo=None, check=False, comm=None, wrtFilter1=None, wrtFilter2=None,
wrtBlockSize1=None, wrtBlockSize2=None, gatherMemLimit=None):
"""
Compute the outcome probability-Hessians for an entire tree of gate
strings.
Similar to `bulk_fill_probs(...)`, but fills a 3D array with
probability-Hessians for each "final element" of `evalTree`.
Parameters
----------
mxToFill : numpy ndarray
an already-allocated ExM1xM2 numpy array where E is the total number of
computed elements (i.e. evalTree.num_final_elements()) and M1 & M2 are
the number of selected gate-set parameters (by wrtFilter1 and wrtFilter2).
evalTree : EvalTree
given by a prior call to bulk_evaltree. Specifies the *simplified* gate
strings to compute the bulk operation on.
prMxToFill : numpy array, optional
when not None, an already-allocated length-E numpy array that is filled
with probabilities, just like in bulk_fill_probs(...).
deriv1MxToFill, deriv2MxToFill : numpy array, optional
when not None, an already-allocated ExM numpy array that is filled
with probability derivatives, similar to bulk_fill_dprobs(...), but
where M is the number of model parameters selected for the 1st and 2nd
differentiation, respectively (i.e. by wrtFilter1 and wrtFilter2).
clipTo : 2-tuple, optional
(min,max) to clip return value if not None.
check : boolean, optional
If True, perform extra checks within code to verify correctness,
generating warnings when checks fail. Used for testing, and runs
much slower when True.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors. Distribution is first performed over
subtrees of evalTree (if it is split), and then over blocks (subsets)
of the parameters being differentiated with respect to (see
wrtBlockSize).
wrtFilter1, wrtFilter2 : list of ints, optional
If not None, a list of integers specifying which model parameters
to differentiate with respect to in the first (row) and second (col)
derivative operations, respectively.
wrtBlockSize1, wrtBlockSize2 : int or float, optional
The maximum number of 1st (row) and 2nd (col) derivatives to compute
*products* for simultaneously. None means compute all requested
rows or columns at once. The minimum of wrtBlockSize and the size
that makes maximal use of available processors is used as the final
block size. These arguments must be None if the corresponding
wrtFilter is not None. Set this to non-None to reduce the amount of
intermediate memory required.
gatherMemLimit : int, optional
A memory limit in bytes to impose upon the "gather" operations
performed as a part of MPI processor synchronization.
Returns
-------
None
"""
if wrtFilter1 is not None:
assert(wrtBlockSize1 is None and wrtBlockSize2 is None) # Cannot specify both wrtFilter and wrtBlockSize
wrtSlice1 = _slct.list_to_slice(wrtFilter1)
else:
wrtSlice1 = None
if wrtFilter2 is not None:
assert(wrtBlockSize1 is None and wrtBlockSize2 is None) # Cannot specify both wrtFilter and wrtBlockSize
wrtSlice2 = _slct.list_to_slice(wrtFilter2)
else:
wrtSlice2 = None
#get distribution across subtrees (groups if needed)
subtrees = evalTree.get_sub_trees()
mySubTreeIndices, subTreeOwners, mySubComm = evalTree.distribute(comm)
#eval on each local subtree
for iSubTree in mySubTreeIndices:
evalSubTree = subtrees[iSubTree]
felInds = evalSubTree.final_element_indices(evalTree)
#Free memory from previous subtree iteration before computing caches
scaleVals = Gs = dGs1 = dGs2 = hGs = None
prodCache = scaleCache = None
#Fill product cache info (not requiring row or column distribution)
prodCache, scaleCache = self._compute_product_cache(evalSubTree, mySubComm)
scaleVals = self._scaleExp(evalSubTree.final_view(scaleCache))
Gs = evalSubTree.final_view(prodCache, axis=0)
#( nCircuits, dim, dim )
def calc_and_fill(spamTuple, fInds, gInds, pslc1, pslc2, sumInto):
""" Compute and fill result quantities for given arguments """
old_err = _np.seterr(over='ignore')
rho, E = self._rhoE_from_spamTuple(spamTuple)
if prMxToFill is not None:
_fas(prMxToFill, [fInds], self._probs_from_rhoE(rho, E, Gs[gInds], scaleVals[gInds]), add=sumInto)
if deriv1MxToFill is not None:
_fas(deriv1MxToFill, [fInds, pslc1], self._dprobs_from_rhoE(
spamTuple, rho, E, Gs[gInds], dGs1[gInds], scaleVals[gInds], wrtSlice1), add=sumInto)
if deriv2MxToFill is not None:
_fas(deriv2MxToFill, [fInds, pslc2], self._dprobs_from_rhoE(
spamTuple, rho, E, Gs[gInds], dGs2[gInds], scaleVals[gInds], wrtSlice2), add=sumInto)
_fas(mxToFill, [fInds, pslc1, pslc2], self._hprobs_from_rhoE(
spamTuple, rho, E, Gs[gInds], dGs1[gInds], dGs2[gInds],
hGs[gInds], scaleVals[gInds], wrtSlice1, wrtSlice2), add=sumInto)
_np.seterr(**old_err)
#Set wrtBlockSize to use available processors if it isn't specified
if wrtFilter1 is None and wrtFilter2 is None:
blkSize1 = wrtBlockSize1 # could be None
blkSize2 = wrtBlockSize2 # could be None
if (mySubComm is not None) and (mySubComm.Get_size() > 1):
comm_blkSize = self.Np / mySubComm.Get_size()
blkSize1 = comm_blkSize if (blkSize1 is None) \
else min(comm_blkSize, blkSize1) # override with smaller comm_blkSize
blkSize2 = comm_blkSize if (blkSize2 is None) \
else min(comm_blkSize, blkSize2) # override with smaller comm_blkSize
else:
blkSize1 = blkSize2 = None # wrtFilter1 & wrtFilter2 dictates block
if blkSize1 is None and blkSize2 is None:
#Fill hessian cache info
dProdCache1 = self._compute_dproduct_cache(
evalSubTree, prodCache, scaleCache, mySubComm, wrtSlice1)
dProdCache2 = dProdCache1 if (wrtSlice1 == wrtSlice2) else \
self._compute_dproduct_cache(evalSubTree, prodCache,
scaleCache, mySubComm, wrtSlice2)
dGs1 = evalSubTree.final_view(dProdCache1, axis=0)
dGs2 = evalSubTree.final_view(dProdCache2, axis=0)
#( nCircuits, nDerivColsX, dim, dim )
hProdCache = self._compute_hproduct_cache(evalSubTree, prodCache, dProdCache1,
dProdCache2, scaleCache, mySubComm,
wrtSlice1, wrtSlice2)
hGs = evalSubTree.final_view(hProdCache, axis=0)
#( nCircuits, len(wrtFilter1), len(wrtFilter2), dim, dim )
#Compute all requested derivative columns at once
self._fill_result_tuple((prMxToFill, deriv1MxToFill, deriv2MxToFill, mxToFill),
evalSubTree, slice(None), slice(None), calc_and_fill)
else: # Divide columns into blocks of at most blkSize
assert(wrtFilter1 is None and wrtFilter2 is None) # cannot specify both wrtFilter and blkSize
nBlks1 = int(_np.ceil(self.Np / blkSize1))
nBlks2 = int(_np.ceil(self.Np / blkSize2))
# num blocks required to achieve desired average size == blkSize1 or blkSize2
blocks1 = _mpit.slice_up_range(self.Np, nBlks1)
blocks2 = _mpit.slice_up_range(self.Np, nBlks2)
#distribute derivative computation across blocks
myBlk1Indices, blk1Owners, blk1Comm = \
_mpit.distribute_indices(list(range(nBlks1)), mySubComm)
myBlk2Indices, blk2Owners, blk2Comm = \
_mpit.distribute_indices(list(range(nBlks2)), blk1Comm)
if blk2Comm is not None:
_warnings.warn("Note: more CPUs(%d)" % mySubComm.Get_size()
+ " than hessian elements(%d)!" % (self.Np**2)
+ " [blkSize = {%.1f,%.1f}, nBlks={%d,%d}]" % (blkSize1, blkSize2, nBlks1, nBlks2)) # pragma: no cover # noqa
for iBlk1 in myBlk1Indices:
blk_wrtSlice1 = blocks1[iBlk1]
dProdCache1 = self._compute_dproduct_cache(
evalSubTree, prodCache, scaleCache, blk1Comm, blk_wrtSlice1)
dGs1 = evalSubTree.final_view(dProdCache1, axis=0)
for iBlk2 in myBlk2Indices:
blk_wrtSlice2 = blocks2[iBlk2]
if blk_wrtSlice1 == blk_wrtSlice2:
dProdCache2 = dProdCache1; dGs2 = dGs1
else:
dProdCache2 = self._compute_dproduct_cache(
evalSubTree, prodCache, scaleCache, blk2Comm, blk_wrtSlice2)
dGs2 = evalSubTree.final_view(dProdCache2, axis=0)
hProdCache = self._compute_hproduct_cache(
evalSubTree, prodCache, dProdCache1, dProdCache2,
scaleCache, blk2Comm, blk_wrtSlice1, blk_wrtSlice2)
hGs = evalSubTree.final_view(hProdCache, axis=0)
#Set filtering for calc_and_fill
wrtSlice1 = blocks1[iBlk1]
wrtSlice2 = blocks2[iBlk2]
self._fill_result_tuple((prMxToFill, deriv1MxToFill, deriv2MxToFill, mxToFill),
evalSubTree, blocks1[iBlk1], blocks2[iBlk2], calc_and_fill)
hProdCache = hGs = dProdCache2 = dGs2 = None # free mem
dProdCache1 = dGs1 = None # free mem
#gather column results: gather axis 2 of mxToFill[felInds,blocks1[iBlk1]], dim=(ks,blk1,M)
_mpit.gather_slices(blocks2, blk2Owners, mxToFill, [felInds, blocks1[iBlk1]],
2, blk1Comm, gatherMemLimit)
#gather row results; gather axis 1 of mxToFill[felInds], dim=(ks,M,M)
_mpit.gather_slices(blocks1, blk1Owners, mxToFill, [felInds],
1, mySubComm, gatherMemLimit)
if deriv1MxToFill is not None:
_mpit.gather_slices(blocks1, blk1Owners, deriv1MxToFill, [felInds],
1, mySubComm, gatherMemLimit)
if deriv2MxToFill is not None:
_mpit.gather_slices(blocks2, blk2Owners, deriv2MxToFill, [felInds],
1, blk1Comm, gatherMemLimit)
#Note: deriv2MxToFill gets computed on every inner loop completion
# (to save mem) but isn't gathered until now (but using blk1Comm).
# (just as prMxToFill is computed fully on each inner loop *iteration*!)
#collect/gather results
subtreeElementIndices = [t.final_element_indices(evalTree) for t in subtrees]
_mpit.gather_indices(subtreeElementIndices, subTreeOwners,
mxToFill, [], 0, comm, gatherMemLimit)
if deriv1MxToFill is not None:
_mpit.gather_indices(subtreeElementIndices, subTreeOwners,
deriv1MxToFill, [], 0, comm, gatherMemLimit)
if deriv2MxToFill is not None:
_mpit.gather_indices(subtreeElementIndices, subTreeOwners,
deriv2MxToFill, [], 0, comm, gatherMemLimit)
if prMxToFill is not None:
_mpit.gather_indices(subtreeElementIndices, subTreeOwners,
prMxToFill, [], 0, comm)
if clipTo is not None and prMxToFill is not None:
_np.clip(prMxToFill, clipTo[0], clipTo[1], out=prMxToFill) # in-place clip
if check:
self._check(evalTree, prMxToFill, deriv1MxToFill, mxToFill, clipTo)
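# Hedged usage sketch (added; names assumed). The full Hessian array is
# E x Np x Np, which is why the blocked generator below exists for big models:
# hprobs = _np.empty((evalTree.num_final_elements(), fwdsim.Np, fwdsim.Np), 'd')
# fwdsim.bulk_fill_hprobs(hprobs, evalTree, wrtBlockSize1=32, wrtBlockSize2=32)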
def bulk_hprobs_by_block(self, evalTree, wrtSlicesList,
bReturnDProbs12=False, comm=None):
"""
Constructs a generator that computes the 2nd derivatives of the
probabilities generated by each gate sequence given by evalTree,
column-by-column.
This routine can be useful when memory constraints make constructing
the entire Hessian at once impractical, and one is able to compute and
reduce results from a single block of the Hessian at a time. For
example, the Hessian of a function of many operation-sequence probabilities
can often be computed block-by-block from the corresponding blocks of
the per-operation-sequence Hessians.
Parameters
----------
evalTree : EvalTree
given by a prior call to bulk_evaltree. Specifies the operation sequences
to compute the bulk operation on. This tree *cannot* be split.
wrtSlicesList : list
A list of `(rowSlice,colSlice)` 2-tuples, each of which specify
a "block" of the Hessian to compute. Iterating over the output
of this function iterates over these computed blocks, in the order
given by `wrtSlicesList`. `rowSlice` and `colSlice` must by Python
`slice` objects.
bReturnDProbs12 : boolean, optional
If true, the generator computes a 2-tuple: (hessian_col, d12_col),
where d12_col is a column of the matrix d12 defined by:
d12[iSpamLabel,iOpStr,p1,p2] = dP/d(p1)*dP/d(p2) where P is
the probability generated by the sequence and spam label indexed
by iOpStr and iSpamLabel. d12 has the same dimensions as the
Hessian, and turns out to be useful when computing the Hessian
of functions of the probabilities.
comm : mpi4py.MPI.Comm, optional
When not None, an MPI communicator for distributing the computation
across multiple processors. Distribution is performed as in
bulk_product, bulk_dproduct, and bulk_hproduct.
Returns
-------
block_generator
A generator which, when iterated, yields the 3-tuple
`(rowSlice, colSlice, hprobs)` or `(rowSlice, colSlice, dprobs12)`
(the latter if `bReturnDProbs12 == True`). `rowSlice` and `colSlice`
are slices directly from `wrtSlicesList`. `hprobs` and `dprobs12` are
arrays of shape E x B x B', where:
- E is the total number of computed elements (i.e. evalTree.num_final_elements()),
- B is the number of parameter rows (the length of rowSlice)
- B' is the number of parameter columns (the length of colSlice)
If `mx`, `dp1`, and `dp2` are the outputs of :func:`bulk_fill_hprobs`
(i.e. args `mxToFill`, `deriv1MxToFill`, and `deriv2MxToFill`), then:
- `hprobs == mx[:,rowSlice,colSlice]`
- `dprobs12 == dp1[:,rowSlice,None] * dp2[:,None,colSlice]`
"""
assert(not evalTree.is_split()), "`evalTree` cannot be split"
nElements = evalTree.num_final_elements()
#Fill product cache info (not distributed)
prodCache, scaleCache = self._compute_product_cache(evalTree, comm)
scaleVals = self._scaleExp(evalTree.final_view(scaleCache))
Gs = evalTree.final_view(prodCache, axis=0)
#( nCircuits, dim, dim )
#Same as in bulk_fill_hprobs (TODO consolidate?)
#NOTE: filtering is done via the yet-to-be-defined local variables
# wrtSlice1 and wrtSlice2, of the parent-function scope. This use of
# closures seems confusing and we should do something else LATER.
def calc_and_fill(spamTuple, fInds, gInds, pslc1, pslc2, sumInto):
""" Compute and fill result quantities for given arguments """
old_err = _np.seterr(over='ignore')
rho, E = self._rhoE_from_spamTuple(spamTuple)
#if prMxToFill is not None:
# _fas(prMxToFill, [fInds],
# self._probs_from_rhoE(rho, E, Gs[gInds], scaleVals[gInds]), add=sumInto)
if deriv1MxToFill is not None:
_fas(deriv1MxToFill, [fInds, pslc1], self._dprobs_from_rhoE(
spamTuple, rho, E, Gs[gInds], dGs1[gInds], scaleVals[gInds], wrtSlice1), add=sumInto)
if deriv2MxToFill is not None:
_fas(deriv2MxToFill, [fInds, pslc2], self._dprobs_from_rhoE(
spamTuple, rho, E, Gs[gInds], dGs2[gInds], scaleVals[gInds], wrtSlice2), add=sumInto)
_fas(mxToFill, [fInds, pslc1, pslc2], self._hprobs_from_rhoE(
spamTuple, rho, E, Gs[gInds], dGs1[gInds], dGs2[gInds],
hGs[gInds], scaleVals[gInds], wrtSlice1, wrtSlice2), add=sumInto)
_np.seterr(**old_err)
#NOTE: don't distribute wrtSlicesList across comm procs,
# as we assume the user has already done any such distribution
# and has given each processor a list appropriate for it.
# Use comm only for speeding up the calcs of the given
# wrtSlicesList
last_wrtSlice1 = None # keep last dProdCache1
for wrtSlice1, wrtSlice2 in wrtSlicesList:
if wrtSlice1 != last_wrtSlice1:
dProdCache1 = dGs1 = None # free Mem
dProdCache1 = self._compute_dproduct_cache(
evalTree, prodCache, scaleCache, comm, wrtSlice1)
dGs1 = evalTree.final_view(dProdCache1, axis=0)
last_wrtSlice1 = wrtSlice1
if (wrtSlice1 == wrtSlice2):
dProdCache2 = dProdCache1; dGs2 = dGs1
else:
dProdCache2 = self._compute_dproduct_cache(
evalTree, prodCache, scaleCache, comm, wrtSlice2)
dGs2 = evalTree.final_view(dProdCache2, axis=0)
hProdCache = self._compute_hproduct_cache(
evalTree, prodCache, dProdCache1, dProdCache2,
scaleCache, comm, wrtSlice1, wrtSlice2)
hGs = evalTree.final_view(hProdCache, axis=0)
if bReturnDProbs12:
dprobs1 = _np.zeros((nElements, _slct.length(wrtSlice1)), 'd')
dprobs2 = _np.zeros((nElements, _slct.length(wrtSlice2)), 'd')
else:
dprobs1 = dprobs2 = None
hprobs = _np.zeros((nElements, _slct.length(wrtSlice1),
_slct.length(wrtSlice2)), 'd')
#prMxToFill = None
deriv1MxToFill = dprobs1
deriv2MxToFill = dprobs2
mxToFill = hprobs
#Fill arrays
self._fill_result_tuple((None, dprobs1, dprobs2, hprobs), evalTree,
slice(None), slice(None), calc_and_fill)
hProdCache = hGs = dProdCache2 = dGs2 = None # free mem
if bReturnDProbs12:
dprobs12 = dprobs1[:, :, None] * dprobs2[:, None, :] # (KM,N,1) * (KM,1,N') = (KM,N,N')
yield wrtSlice1, wrtSlice2, hprobs, dprobs12
else:
yield wrtSlice1, wrtSlice2, hprobs
dProdCache1 = dGs1 = None # free mem
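# Hedged usage sketch (added; names assumed). Iterating blocks keeps only one
# (rowSlice, colSlice) Hessian block in memory at a time, per the docstring:
# blocks = [(slice(0, 10), slice(0, 10)), (slice(0, 10), slice(10, 20))]
# for rowSlice, colSlice, hprobs_blk, dprobs12_blk in \
#         fwdsim.bulk_hprobs_by_block(evalTree, blocks, bReturnDProbs12=True):
#     pass  # accumulate each block into a Hessian of a function of the probs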
def _fill_result_tuple(self, result_tup, evalTree,
param_slice1, param_slice2, calc_and_fill_fn):
"""
This function takes a "calc-and-fill" function, which computes
and *fills* (i.e. doesn't return to save copying) some arrays. The
arrays that are filled internally to `calc_and_fill_fn` must be the
same as the elements of `result_tup`. The fill function computes
values for only a single spam label (specified to it by the first
two arguments), and in general only a specified slice of the values
for this spam label (given by the subsequent arguments, except for
the last). The final argument is a boolean specifying whether
the filling should overwrite or add to the existing array values,
which is a functionality needed to correctly handle the remainder
spam label.
"""
pslc1 = param_slice1
pslc2 = param_slice2
for spamTuple, (fInds, gInds) in evalTree.spamtuple_indices.items():
# fInds = "final indices" = the "element" indices in the final
# filled quantity combining both spam and gate-sequence indices
# gInds = "gate sequence indices" = indices into the (tree-) list of
# all of the raw operation sequences which need to be computed
# for the current spamTuple (this list has the SAME length as fInds).
calc_and_fill_fn(spamTuple, fInds, gInds, pslc1, pslc2, False) # TODO: remove SumInto == True cases
return
| 1.640625 | 2 |
tests/testing/test_check_data.py | Lambda-School-Labs/cryptolytic-ds | 13 | 12795113 | <filename>tests/testing/test_check_data.py
import cryptolytic.data.sql as sql
import cryptolytic.util as util
import cryptolytic.data.historical as h
def test_check_tables():
util.timeout(h.live_update, 10)
for api, exchange_id, trading_pair in h.yield_unique_pair():
df = (sql.get_some_candles
({'api': api, 'exchange_id': exchange_id, 'trading_pair': trading_pair,
'period' : 300},
n=100000,
verbose=True))
assert df.shape[0] > 100 # check to see that every trading pair has candles for it
| 2.28125 | 2 |
src/activity_notifications.py | yxtay/code-ex | 0 | 12795114 | <reponame>yxtay/code-ex
from collections import Counter
def activity_notifications(expenditure, d):
count = 0
counter = Counter(expenditure[:d])
for i in range(len(expenditure) - d):
median = compute_median(counter)
if expenditure[i + d] >= 2 * median:
count += 1
counter[expenditure[i]] -= 1
counter[expenditure[i + d]] += 1
return count
def compute_median(counter):
d = sum(counter.values())
mid = d // 2
count = 0
sorted_keys = sorted(counter.keys())
for i, k in enumerate(sorted_keys):
count += counter[k]
if count > mid:
if d % 2 == 1:
return k
else:
# index mid holds the value k; the median also needs index mid - 1, which
# is only k when this key's run of values starts at or before that index
if count - counter[k] <= mid - 1:
return k
else:
return (k + sorted_keys[i - 1]) / 2
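# Hedged usage sketch (added): the well-known HackerRank sample for this
# problem is expenditure=[2, 3, 4, 2, 3, 6, 8, 4, 5] with d=5, for which the
# expected number of notifications is 2.
# activity_notifications([2, 3, 4, 2, 3, 6, 8, 4, 5], 5)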
| 2.71875 | 3 |
movies/utils2.py | Saifur43/Movie-Success-Prediction | 7 | 12795115 | <filename>movies/utils2.py
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from .models import Star, Director
def stars_update():
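# Added descriptive note: scrape each Star's IMDb page for its acting credit
# count, poster image and bio, then find the star's Instagram account via a
# DuckDuckGo search and convert follower strings such as '3.4k' / '1.2m' into
# integers. A local geckodriver binary is assumed (path hard-coded below).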
stars = Star.objects.all()
for star in stars:
if star.insta_followers == 0:
print(star.s_name)
url = star.star_link
# page = requests.get(url)
options = Options()
options.headless = True
driver = webdriver.Firefox(options=options, executable_path=r'E:\geckodriver.exe')  # raw string avoids an invalid escape warning
driver.get(url)
try:
x = driver.find_element_by_xpath("//span[contains(text(),'Actor')]").text
except:
x = driver.find_element_by_xpath("//span[contains(text(),'Actress')]").text
if x == "Actor":
try:
credit = driver.find_element_by_xpath("//div[@id='filmo-head-actor']").text
image = driver.find_element_by_xpath("//img[@id='name-poster']").get_attribute('src')
about = driver.find_element_by_xpath("//div[@class='inline']").text
credit = float(credit[13:16])
star.weight = credit
star.about = about
star.star_img = image
except:
continue
elif x == "Actress":
try:
credit = driver.find_element_by_xpath("//div[@id='filmo-head-actress']").text
image = driver.find_element_by_xpath("//img[@id='name-poster']").get_attribute('src')
about = driver.find_element_by_xpath("//div[@class='inline']").text
credit = float(credit[15:18])
star.weight = credit
star.about = about
star.star_img = image
except:
continue
title = str(star.s_name) + " instagram"
driver.get("https://duckduckgo.com/")
driver.find_element_by_xpath("//input[@name='q']").send_keys(str(title))
driver.find_element_by_id("search_button_homepage").click()
data = driver.find_element_by_xpath("//div[@id='r1-0']//a[contains(@class,'result__check')]")
star.star_insta = data.get_attribute('href')
try:
followers_data = driver.find_element_by_xpath(
"//div[contains(@class,'results js-results')]//div[1]//div[1]//div[2]").text
followers_insta = followers_data.split(" ")
followers = followers_insta[0]
if followers[-1] == 'k':
followers = followers[:-1]
followers = float(followers) * 1000
star.insta_followers = int(followers)
elif followers[-1] == 'm':
followers = followers[:-1]
followers = float(followers) * 1000000
star.insta_followers = int(followers)
else:
followers = float(followers)
star.insta_followers = int(followers)
except:
continue
driver.quit()
try:
star.save()
except:
continue
else:
continue
def director_update():
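# Added descriptive note: scrape each Director's IMDb page for its directing
# credit count, poster image and bio; a director without a database id is
# given a placeholder poster image before being saved.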
directors = Director.objects.all()
for director in directors:
url = director.director_link
options = Options()
options.headless = True
driver = webdriver.Firefox(options=options, executable_path=r'E:\geckodriver.exe')  # raw string avoids an invalid escape warning
driver.get(url)
try:
credit = driver.find_element_by_xpath("//div[@id='filmo-head-director']").text
image = driver.find_element_by_xpath("//img[@id='name-poster']").get_attribute('src')
about = driver.find_element_by_xpath("//div[@class='inline']").text
credit = float(credit[16:18])
director.weight = credit
director.about = about
if director.id is None:
director.d_img = "https://images-na.ssl-images-amazon.com/images/I/818%2BI9cEsEL._SY606_.jpg"
director.save()
except:
continue
driver.quit()
| 3 | 3 |
epm/epm_logical_if.py | btc-ag/revengtools | 2 | 12795116 | <gh_stars>1-10
# -*- coding: UTF-8 -*-
'''
Created on 29.09.2010
@author: SIGIESEC
'''
from commons.core_if import EnumerationItem, Enumeration
class _LogicalEntityType(EnumerationItem):
pass
class LogicalEntityTypes(Enumeration):
Component = _LogicalEntityType
Interface = _LogicalEntityType
Configurator = _LogicalEntityType
class LogicalElement(object):
def __init__(self, element_type, element_name):
self.__type = element_type
self.__name = element_name
def get_type(self):
return self.__type
def get_name(self):
return self.__name
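# Minimal usage sketch (added; names assumed):
# element = LogicalElement(LogicalEntityTypes.Component, "SomeComponent")
# element.get_type()  # -> the type passed in (LogicalEntityTypes.Component)
# element.get_name()  # -> "SomeComponent"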
| 2.390625 | 2 |
tests/test_structures.py | UMDBPP/PacketRaven | 4 | 12795117 | from packetraven.packets.structures import DoublyLinkedList
def test_index():
list_1 = DoublyLinkedList([0, 5, 4, 'foo', 5, 6])
assert list_1[0] == 0
assert list_1[0] is list_1.head.value
assert list_1[3] == 'foo'
assert list_1[-2] == 5
assert list_1[-1] == 6
assert list_1[-1] is list_1.tail.value
assert list_1[:2] == [0, 5]
assert list_1[[1, 3, 0]] == [5, 'foo', 0]
def test_length():
list_1 = DoublyLinkedList()
assert len(list_1) == 0
list_2 = DoublyLinkedList([0, 'foo'])
assert len(list_2) == 2
def test_extend():
list_1 = DoublyLinkedList([0])
list_1.extend(['foo', 5])
assert list_1 == [0, 'foo', 5]
assert list_1.head is not list_1.tail
def test_append():
list_1 = DoublyLinkedList()
list_1.append(0)
assert list_1[0] == 0
assert list_1[-1] == 0
assert list_1.head is list_1.tail
def test_insert():
list_1 = DoublyLinkedList([0, 'foo'])
list_1.insert('bar', 0)
assert list_1 == ['bar', 0, 'foo']
def test_equality():
list_1 = DoublyLinkedList([5, 4, 'foo'])
assert list_1 == [5, 4, 'foo']
assert list_1 == (5, 4, 'foo')
assert list_1 != [5, 4, 'foo', 6, 2]
def test_remove():
list_1 = DoublyLinkedList(['a', 'a'])
list_1.remove('a')
assert len(list_1) == 0
assert list_1.head is None
assert list_1.tail is None
list_2 = DoublyLinkedList(['a', 'b', 'c'])
del list_2[0]
del list_2[-1]
assert len(list_2) == 1
assert list_2[0] == 'b'
assert list_2[-1] == 'b'
list_3 = DoublyLinkedList([0, 5, 4, 'foo', 0, 0])
list_3.remove(0)
assert list_3 == [5, 4, 'foo']
assert list_3[0] == 5
assert list_3[-1] == 'foo'
| 2.796875 | 3 |
tests/test_functions.py | rohankumardubey/griffe | 0 | 12795118 | """Test functions loading."""
import inspect
from griffe.loader import GriffeLoader
from tests import FIXTURES_DIR
loader = GriffeLoader()
def test_loading_functions_arguments(): # noqa: WPS218
"""Test functions arguments loading."""
module = loader.load_module(FIXTURES_DIR / "functions" / "arguments.py")
assert module.members
assert len(module.members) == 11 # noqa: WPS432
function = module["f_posonly"]
assert len(function.arguments) == 1
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default is None
function = module["f_posonly_default"]
assert len(function.arguments) == 1
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default == "0"
function = module["f_posonly_poskw"]
assert len(function.arguments) == 2
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default is None
arg = function.arguments[1]
assert arg is function.arguments["poskw"]
assert arg.name == "poskw"
assert arg.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert arg.default is None
function = module["f_posonly_poskw_default"]
assert len(function.arguments) == 2
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default is None
arg = function.arguments[1]
assert arg is function.arguments["poskw"]
assert arg.name == "poskw"
assert arg.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert arg.default == "0"
function = module["f_posonly_default_poskw_default"]
assert len(function.arguments) == 2
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default == "0"
arg = function.arguments[1]
assert arg is function.arguments["poskw"]
assert arg.name == "poskw"
assert arg.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert arg.default == "1"
function = module["f_posonly_poskw_kwonly"]
assert len(function.arguments) == 3
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default is None
arg = function.arguments[1]
assert arg is function.arguments["poskw"]
assert arg.name == "poskw"
assert arg.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert arg.default is None
arg = function.arguments[2]
assert arg is function.arguments["kwonly"]
assert arg.name == "kwonly"
assert arg.kind is inspect.Parameter.KEYWORD_ONLY
assert arg.default is None
function = module["f_posonly_poskw_kwonly_default"]
assert len(function.arguments) == 3
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default is None
arg = function.arguments[1]
assert arg is function.arguments["poskw"]
assert arg.name == "poskw"
assert arg.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert arg.default is None
arg = function.arguments[2]
assert arg is function.arguments["kwonly"]
assert arg.name == "kwonly"
assert arg.kind is inspect.Parameter.KEYWORD_ONLY
assert arg.default == "0"
function = module["f_posonly_poskw_default_kwonly_default"]
assert len(function.arguments) == 3
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default is None
arg = function.arguments[1]
assert arg is function.arguments["poskw"]
assert arg.name == "poskw"
assert arg.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert arg.default == "0"
arg = function.arguments[2]
assert arg is function.arguments["kwonly"]
assert arg.name == "kwonly"
assert arg.kind is inspect.Parameter.KEYWORD_ONLY
assert arg.default == "1"
function = module["f_posonly_default_poskw_default_kwonly_default"]
arg = function.arguments[0]
assert arg is function.arguments["posonly"]
assert arg.name == "posonly"
assert arg.kind is inspect.Parameter.POSITIONAL_ONLY
assert arg.default == "0"
arg = function.arguments[1]
assert arg is function.arguments["poskw"]
assert arg.name == "poskw"
assert arg.kind is inspect.Parameter.POSITIONAL_OR_KEYWORD
assert arg.default == "1"
arg = function.arguments[2]
assert arg is function.arguments["kwonly"]
assert arg.name == "kwonly"
assert arg.kind is inspect.Parameter.KEYWORD_ONLY
assert arg.default == "2"
function = module["f_var"]
assert len(function.arguments) == 3
arg = function.arguments[0]
assert arg.name == "*args"
assert arg.annotation == "str"
arg = function.arguments[1]
assert arg.annotation is None
arg = function.arguments[2]
assert arg.name == "**kwargs"
assert arg.annotation == "int"
function = module["f_annorations"]
assert len(function.arguments) == 4
arg = function.arguments[0]
assert arg.annotation == "str"
arg = function.arguments[1]
assert arg.annotation == "Any"
arg = function.arguments[2]
assert arg.annotation == "typing.Optional[typing.List[int]]"
arg = function.arguments[3]
assert arg.annotation == "float | None"
| 2.796875 | 3 |
EXAMPLES/DATA TYPES/String/password_generator.py | nkpydev/Python-Learning | 0 | 12795119 | <gh_stars>0
#--- Generic Imports ---#
import string
import random
from random import randint
if __name__ == '__main__':
characters = string.ascii_lowercase + string.ascii_uppercase + string.digits
password = ''.join(random.choice(characters) for x in range(randint(8,16)))
print('Password:\t',password) | 3.234375 | 3 |
python/specex_compute_sky.py | marcelo-alvarez/specex | 0 | 12795120 | #!/usr/bin/env python
import pyfits,sys,json,pylab,string,numpy,os,scipy,scipy.sparse,scipy.linalg
from scipy.sparse.linalg import spsolve
from math import *
from specex_cholesky import *
if len(sys.argv)<4 :
print sys.argv[0],"inspec.fits plPlugMapM.par outspec.fits (sky.fit)"
sys.exit(12);
infilename=sys.argv[1]
plplgmap=sys.argv[2]
outfilename=sys.argv[3]
skyfilename=""
if(len(sys.argv)>4) :
skyfilename=sys.argv[4]
# get spectrograph id, hardcoded for now, will be read in fits
camera=pyfits.open(infilename)[0].header["CAMERAS"]
specid=string.atoi(camera[1])
# find sky fibers
skyfibers=[]
file=open(plplgmap)
for line in file.readlines() :
if line.find("PLUGMAPOBJ") != 0 :
continue
vals=string.split(line," ")
holetype=vals[8]
if holetype != "OBJECT" :
continue
objType=vals[21]
if objType != "SKY" :
continue
spectrographId=string.atoi(vals[24])
if spectrographId != specid :
continue
fiberId=string.atoi(vals[25])
#print line
#print objType,spectrographId,fiberId
myfiberid=fiberId-1
if specid==2 :
myfiberid-=500
skyfibers.append(myfiberid)
file.close()
print "skyfibers (now starting at 0)=",skyfibers
hdulist=pyfits.open(infilename)
spectra=hdulist[0].data
invar=hdulist[1].data
wave=hdulist[2].data
Rdata=hdulist[3].data
mask=hdulist["FMASK"].data
skyfibers=numpy.intersect1d(skyfibers,numpy.where(mask==0)[0])
print "skyfibers (after masking)=",skyfibers
skyspectra=spectra[skyfibers,:]
skyinvar=invar[skyfibers,:]
nskyfibers=len(skyfibers)
nfibers=Rdata.shape[0]
d=Rdata.shape[1]/2
nwave=Rdata.shape[2]
offsets = range(d,-d-1,-1)
print "solving for the mean deconvolved sky"
print "filling A and B"
A=numpy.matrix(numpy.zeros((nwave,nwave))) # dense because additions of band matrices not implemented
B=numpy.zeros((1,nwave))
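# The loop below accumulates the normal equations of the weighted least-squares
# fit for the deconvolved sky s:
#   A = sum_f R_f^T N_f^-1 R_f      and      B = sum_f R_f^T N_f^-1 d_f
# where, for each sky fiber f, R_f is its resolution (band) matrix, N_f^-1 its
# inverse variance and d_f its measured spectrum; the mean deconvolved sky is
# then the solution of A s = B (solved below via Cholesky factorization).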
for fiber in skyfibers:
R=scipy.sparse.dia_matrix((Rdata[fiber],offsets),(nwave,nwave))
Ninv=scipy.sparse.dia_matrix((invar[fiber,:],[0]),(nwave,nwave))
tmp=invar[fiber,:]*spectra[fiber,:]
tmp2=R.transpose()*Ninv*R
A+=tmp2.todense()
B+=R.transpose().dot(tmp)
print "done"
print "solving"
deconvolvedsky,dskycovmat=cholesky_solve_and_invert(A,B[0])
print "done"
# Compute the sky variance only once: it is expensive and in any case approximate, since we only keep the diagonal.
# The most conservative choice is to evaluate it at the highest resolution (most variance).
# Also, the *mean* sky statistical uncertainty is negligible compared to the Poisson noise of the subtracted sky of each
# fiber, which is already included in the invar of each spectrum.
# Finally, the sky error is certainly dominated by sky variations across the field of view, which we have neglected.
R=scipy.sparse.dia_matrix((Rdata[nfibers/2],offsets),(nwave,nwave))
Rt=R.transpose()
sky=numpy.dot(R.toarray(),deconvolvedsky)
print "computing covmat"
skycovmat=Rt.dot(Rt.dot(dskycovmat).transpose())
skyvar=numpy.diag(skycovmat)
print "done"
if skyfilename != "" :
print "writing skymodel to",skyfilename
skyinvar=1/numpy.diag(skycovmat)
sky_array=numpy.zeros((1,sky.shape[0]))
sky_array[0]=sky
skyinvar_array=numpy.zeros((1,skyinvar.shape[0]))
skyinvar_array[0]=skyinvar
pyfits.HDUList([pyfits.PrimaryHDU(sky_array),pyfits.ImageHDU(skyinvar_array),pyfits.ImageHDU(wave)]).writeto(skyfilename,clobber=True)
#pyfits.HDUList([pyfits.PrimaryHDU(skycovmat)]).writeto("skycovmat.fits",clobber=True)
print "subtracting sky from all fibers"
valid_fibers=numpy.where(mask==0)[0]
for fiber in valid_fibers :
R=scipy.sparse.dia_matrix((Rdata[fiber],offsets),(nwave,nwave))
Rt=R.transpose()
sky=numpy.dot(R.toarray(),deconvolvedsky) # it is a numpy.matrix that has to be converted to a numpy.array
spectra[fiber] -= sky
invar[fiber] = 1/( 1/invar[fiber] + skyvar )
print "done"
print "writing result to",outfilename
hdulist.writeto(outfilename,clobber=True)
sys.exit(0)
| 2.25 | 2 |
configmanager/base.py | haizaar/configmanager | 13 | 12795121 | <reponame>haizaar/configmanager<filename>configmanager/base.py
from .utils import not_set
class BaseItem(object):
is_item = True
is_section = False
is_config = False
def is_config_item(obj):
return isinstance(obj, BaseItem)
class BaseSection(object):
"""
A base class to allow detection of section classes and instances.
No other functionality to be added here.
"""
is_item = False
is_section = True
is_config = False
def is_config_section(obj):
return isinstance(obj, BaseSection)
class ItemAttribute(object):
"""
Class used in :class:`.Item` class to declare attributes of config items.
"""
def __init__(self, name, default=not_set, value=not_set, allow_dynamic_override=False):
self.name = name
self.default = default
self.value = value
self.attr_name = '_{}'.format(self.name)
# If set to True, this becomes an expensive attribute because now when
# its value is requested we will check for a registered
# dynamic attribute with the same name and if available use the dynamic value instead of set value.
# Attributes that are used in majority of value calculation should avoid this.
# For example, envvar, which is consulted on every value request, is designed
# to be a cheap attribute. Users can, however, override envvar_name which is
# used only if envvar is set to True.
self.allow_dynamic_override = allow_dynamic_override
def __set__(self, instance, value):
setattr(instance, self.attr_name, value)
def __get__(self, instance, owner):
if self.allow_dynamic_override:
if instance.section:
try:
return instance.section.get_item_attribute(instance, self.name)
except AttributeError:
pass
return getattr(instance, self.attr_name, self.default)
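
# Illustrative sketch (not part of configmanager's code; the Item class below is
# hypothetical): ItemAttribute is a descriptor, so item classes are expected to
# declare their attributes like this:
#
#   class Item(BaseItem):
#       name = ItemAttribute('name')
#       required = ItemAttribute('required', default=False)
#
# Reading item.required returns the stored value if one was set, otherwise the
# default; with allow_dynamic_override=True the owning section is consulted
# first via get_item_attribute(), as implemented in __get__ above.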
| 2.765625 | 3 |
forms.py | EricSekyere/Find-Mate | 0 | 12795122 | from flask_wtf import FlaskForm, RecaptchaField
from wtforms import StringField , PasswordField, SubmitField
from wtforms.validators import DataRequired, Email, Length
class SignupForm(FlaskForm):
firstname = StringField("First name", validators=[DataRequired("Enter your name")])
lastname = StringField("Last name", validators=[DataRequired("Enter your last name")])
email = StringField("Email", validators=[DataRequired("Provide your email"), Email("Please enter a valid email")])
    password = PasswordField("Password", validators=[DataRequired("Enter a valid password"), Length(min=8, message="Password must be a minimum of 8 characters")])
submit = SubmitField("Submit", validators=[DataRequired()])
#recaptcha = RecaptchaField({'hl': 'zh', 'render': 'explicit'})
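
# Illustrative usage sketch (hypothetical Flask view, not part of this module):
#
#   @app.route('/signup', methods=['GET', 'POST'])
#   def signup():
#       form = SignupForm()
#       if form.validate_on_submit():
#           ...  # e.g. create the account from form.firstname.data, form.email.data
#       return render_template('signup.html', form=form)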
| 3.109375 | 3 |
before-2021/src/main/python/loadenv.py | wangyaomail/zzti-spark-learn | 1 | 12795123 | import sys
import os
from os.path import *
def pl():
return dirname(dirname(dirname(dirname(os.path.abspath(__file__)))))
class SparkEnv:
def __init__(self, name):
os.environ['HADOOP_HOME'] = dirname(dirname(dirname(dirname(os.path.abspath(__file__))))) + r'/hadoopdir'
os.environ['SPARK_HOME'] = r"D:\assistlibs\hadoop\spark-2.2.3-bin-hadoop2.6"
sys.path.append(r"D:\assistlibs\hadoop\spark-2.2.3-bin-hadoop2.6\python")
from pyspark import SparkContext
self.sc = SparkContext("local", name)
self.sc.setLogLevel("WARN")
from pyspark.sql import SparkSession
self.ss = SparkSession.builder.appName(name).getOrCreate()
def postInit(self):
return (self, self.sc, self.ss)
def projLoc(self):
return dirname(dirname(dirname(dirname(os.path.abspath(__file__)))))
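
# Illustrative usage (assumes the hard-coded local Hadoop/Spark paths above exist):
#
#   env, sc, ss = SparkEnv("demo-app").postInit()
#   print(sc.parallelize(range(10)).sum())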
| 2.5625 | 3 |
HackerRank/Python3/set_union.py | santoshgawande/DS-Algorithms | 0 | 12795124 | am = int(input())
a = set(map(int,input().split()))
bm = int(input())
b =set(map(int,input().split()))
print(len(a.union(b))) | 2.96875 | 3 |
EulerProject/0xx/00x/006/main.py | mathtimes/math-lab | 1 | 12795125 | <reponame>mathtimes/math-lab
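# Project Euler problem 6: difference between the square of the sum and the
# sum of the squares of the first 100 natural numbers.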
i1 = i2 = 0
for i in range(1,101):
i1 += i
i2 += i**2
print(i1**2-i2) | 3.203125 | 3 |
app/core/module/chat_main.py | xiaomi2019/lolita_son | 0 | 12795126 | <gh_stars>0
# coding: utf-8
#
import app.base.game_module_mgr
from app.core.game_event_def import *
from app.protocol.ProtocolDesc import *
import app.protocol.netutil as netutil
from twisted.python import log
import app.util.helper as helper
import app.util.lang_config as lang_config
import app.chat.memmode as memmode
from firefly.server.globalobject import GlobalObject
import app.core.game_module_def as game_module_def
class chat_main(app.base.game_module_mgr.game_module):
def __init__(self):
super(chat_main,self).__init__();
self.character_map = {};
return
def start(self):
super(chat_main,self).start();
self.register_event(EVENT_LOGIN,self.on_login);
self.register_event(EVENT_LOGOUT,self.on_logout);
self.register_event(EVENT_RELOGIN,self.on_relogin);
self.register_net_event(C2S_CHAT,self.on_chat);
self.register_event(EVENT_SEND2CLIENT,self._send2client);
self.register_event(EVENT_SEND2CLIENTBYCID,self._send2clientbycid)
return
def _getdidbycid(self,cId):
if self.character_map.has_key(cId):
return self.character_map[cId];
return
def _send2clientbycid(self,ud):
cmd = ud[0]
cId = ud[1];
dId = self._getdidbycid(cId);
if dId == None:
log.err("_send2clientbycid err:%s %s"%(cId,ud));
return
data = ud[2];
buf = netutil.s2c_data2bufbycmd(cmd,data);
GlobalObject().remote['gate'].callRemote("pushObject",cmd,buf, [dId])
return
def _send2client(self,ud):
cmd = ud[0]
dId = ud[1];
data = ud[2];
buf = netutil.s2c_data2bufbycmd(cmd,data);
GlobalObject().remote['gate'].callRemote("pushObject",cmd,buf, [dId])
return
def _float_msg(self,cId,msg):
c_data = {};
c_data['msg'] = msg;
self.fire_event(EVENT_SEND2CLIENTBYCID,[S2C_NOTIFY_FLOAT,cId,c_data]);
return;
    def _is_cId_valid(self,cId):  # in effect, a check for whether the character is online
return self.character_map.has_key(cId);
def on_relogin(self,ud):
dId = ud["dId"];
cId = ud["cId"];
if self.character_map.has_key(cId):
self.character_map[cId] = dId;
return
def on_login(self,ud):
dId = ud["dId"];
cId = ud["cId"];
self.character_map[cId] = dId;
c_data = memmode.tb_character_admin.getObj(cId);
if not c_data:
log.msg('chat_main on_login fatal err %d'%(cId));
return
c_info = c_data.get('data');
return
def on_logout(self,ud):
dId = ud["dId"];
cId = ud["cId"];
if self.character_map.has_key(cId):
del self.character_map[cId];
return
def on_chat(self,ud):
dId = ud["dId"];
cId = ud["cId"];
data = ud["data"];
ch = data["ch"];
msg = data["msg"];
#todo
print "on_chat %d %s"%(cId,msg);
c_data = memmode.tb_character_admin.getObj(cId);
if not c_data:
log.msg('chat_main on_chat fatal err %d'%(cId));
return
c_info = c_data.get('data');
data = {};
data['ch'] = ch;
data['srvid'] = 0;
data['pid'] = cId;
data['shape'] = c_info["figure"];
data['vip'] = 0;
data['name'] = c_info["nickname"];
data['msg'] = msg;
cmd = S2C_CHAT;
buf = netutil.s2c_data2bufbycmd(cmd,data);
exclude_list = [];
GlobalObject().remote['gate'].callRemote("pushObjectOthers",cmd,buf,exclude_list);
return
def dispose(self):
super(chat_main,self).dispose();
return | 1.859375 | 2 |
predict.py | binh234/capu | 0 | 12795127 | <reponame>binh234/capu<filename>predict.py
import argparse
from utils.helpers import read_lines
from gector.gec_model import GecBERTModel
from tqdm import tqdm
import re
def predict_for_file(
input_file,
output_file,
model,
batch_size=32,
split_chunk=False,
chunk_size=32,
overlap_size=8,
min_words_cut=4
):
test_data = read_lines(input_file)
predictions = []
cnt_corrections = 0
batch = []
for sent in tqdm(test_data):
batch.append(sent.split())
if len(batch) == batch_size:
if split_chunk:
batch, batch_indices = split_chunks(batch, chunk_size, overlap_size)
preds, cnt = model.handle_batch(batch)
preds = merge_chunk([" ".join(x) for x in preds], batch_indices, overlap_size, min_words_cut)
else:
preds, cnt = model.handle_batch(batch)
preds = [" ".join(x) for x in preds]
predictions.extend(preds)
cnt_corrections += cnt
batch = []
if batch:
if split_chunk:
batch, batch_indices = split_chunks(batch, chunk_size, overlap_size)
preds, cnt = model.handle_batch(batch)
preds = merge_chunk([" ".join(x) for x in preds], batch_indices, overlap_size, min_words_cut)
else:
preds, cnt = model.handle_batch(batch)
preds = [" ".join(x) for x in preds]
predictions.extend(preds)
cnt_corrections += cnt
with open(output_file, 'w') as f:
f.write("\n".join(predictions) + '\n')
return cnt_corrections
def split_chunks(batch, chunk_size=32, overlap_size=8):
# return batch pairs of indices
stride = chunk_size - overlap_size
result = []
indices = []
for tokens in batch:
start = len(result)
num_token = len(tokens)
if num_token <= overlap_size:
result.append(tokens)
for i in range(0, num_token - overlap_size, stride):
result.append(tokens[i: i + chunk_size])
indices.append((start, len(result)))
return result, indices
def merge_chunk(batch, indices, overlap_size=8, min_words_cut=4):
head = overlap_size - min_words_cut
tail = min_words_cut
result = []
for (start, end) in indices:
tokens = []
for i in range(start, end):
try:
sub_text = batch[i].strip()
sub_text = re.sub(r'([\.\,\?\:]\s+)+', r'\1', sub_text)
sub_text = re.sub(r'\s+([\.\,\?\:])', r'\1', sub_text)
sub_tokens = sub_text.split()
if i == start:
if i == end - 1:
tokens = sub_tokens
else:
tokens.extend(sub_tokens[:-tail])
elif i == end - 1:
tokens.extend(sub_tokens[head:])
else:
tokens.extend(sub_tokens[head:-tail])
except Exception as e:
print(e)
text = " ".join(tokens)
text = re.sub(r'([\,\.\?\:])', r' \1', text)
result.append(text)
return result
def main(args):
# get all paths
model = GecBERTModel(vocab_path=args.vocab_path,
model_paths=args.model_path,
max_len=args.max_len, min_len=args.min_len,
iterations=args.iteration_count,
min_error_probability=args.min_error_probability,
lowercase_tokens=args.lowercase_tokens,
model_name=args.transformer_model,
special_tokens_fix=args.special_tokens_fix,
log=False,
confidence=args.additional_confidence,
is_ensemble=args.is_ensemble,
weigths=args.weights)
cnt_corrections = predict_for_file(args.input_file, args.output_file, model,
batch_size=args.batch_size, split_chunk=args.split_chunk,
chunk_size=args.chunk_size, overlap_size=args.overlap_size,
min_words_cut=args.min_words_cut)
# evaluate with m2 or ERRANT
print(f"Produced overall corrections: {cnt_corrections}")
if __name__ == '__main__':
# read parameters
parser = argparse.ArgumentParser()
parser.add_argument('--model_path',
help='Path to the model file.', nargs='+',
required=True)
parser.add_argument('--vocab_path',
help='Path to the model file.',
default='data/output_vocabulary' # to use pretrained models
)
parser.add_argument('--input_file',
help='Path to the evalset file',
required=True)
parser.add_argument('--output_file',
help='Path to the output file',
required=True)
parser.add_argument('--max_len',
type=int,
                        help='The max sentence length '
                             '(all longer will be truncated)',
default=64)
parser.add_argument('--min_len',
type=int,
                        help='The minimum sentence length '
                             '(all shorter will be returned w/o changes)',
default=3)
parser.add_argument('--batch_size',
type=int,
                        help='The number of sentences processed per batch.',
default=128)
parser.add_argument('--lowercase_tokens',
action='store_true',
help='Whether to lowercase tokens.',)
parser.add_argument('--transformer_model',
                        choices=['bert', 'gpt2', 'transformerxl', 'xlnet', 'distilbert', 'roberta', 'albert',
'bert-large', 'roberta-large', 'xlnet-large', 'vinai/phobert-base',
'vinai/phobert-large', 'xlm-roberta-base'],
help='Name of the transformer model.',
default='roberta')
parser.add_argument('--iteration_count',
type=int,
help='The number of iterations of the model.',
default=5)
parser.add_argument('--additional_confidence',
type=float,
help='How many probability to add to $KEEP token.',
default=0)
parser.add_argument('--min_error_probability',
type=float,
help='Minimum probability for each action to apply. '
'Also, minimum error probability, as described in the paper.',
default=0.0)
parser.add_argument('--special_tokens_fix',
type=int,
help='Whether to fix problem with [CLS], [SEP] tokens tokenization. '
'For reproducing reported results it should be 0 for BERT/XLNet and 1 for RoBERTa.',
default=1)
parser.add_argument('--is_ensemble',
action='store_true',
help='Whether to do ensembling.',)
parser.add_argument('--weights',
help='Used to calculate weighted average', nargs='+',
default=None)
parser.add_argument('--split_chunk',
action='store_true',
help='Whether to use chunk merging or not')
parser.add_argument('--chunk_size',
type=int,
help='Chunk size for chunk merging',
default=32)
parser.add_argument('--overlap_size',
type=int,
help='Overlapped words between two continuous chunks',
default=8)
parser.add_argument('--min_words_cut',
type=int,
                        help='number of words at the end of the first chunk to be removed during merge',
default=4)
args = parser.parse_args()
main(args)
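
# Example invocation (illustrative only; all file paths are placeholders):
#   python predict.py --model_path MODEL.th --vocab_path data/output_vocabulary \
#       --input_file input.txt --output_file output.txt \
#       --transformer_model roberta --split_chunk --chunk_size 32 --overlap_size 8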
| 2.34375 | 2 |
admin/client.py | stackriot/flocker | 2,690 | 12795128 | # Copyright 2015 ClusterHQ Inc. See LICENSE file for details.
"""
Run the client installation tests.
"""
import os
import shutil
import sys
import tempfile
from characteristic import attributes
import docker
from effect import TypeDispatcher, sync_performer, perform
from twisted.python.usage import Options, UsageError
from flocker.provision import PackageSource
from flocker.provision._effect import Sequence, perform_sequence
from flocker.provision._install import (
ensure_minimal_setup,
task_cli_pkg_install,
task_cli_pip_prereqs,
task_cli_pip_install,
cli_pip_test,
)
from flocker.provision._ssh import (
Run, Sudo, Put, Comment, perform_sudo, perform_put)
@attributes(['image', 'package_manager'])
class DockerImage(object):
"""Holder for Docker image information."""
DOCKER_IMAGES = {
'centos-7': DockerImage(image='centos:7', package_manager='yum'),
'debian-8': DockerImage(image='debian:8', package_manager='apt'),
'fedora-22': DockerImage(image='fedora:22', package_manager='dnf'),
'ubuntu-14.04': DockerImage(image='ubuntu:14.04', package_manager='apt'),
'ubuntu-16.04': DockerImage(image='ubuntu:16.04', package_manager='apt'),
}
# No distribution is officially supported using pip, but the code can
# test the pip instructions using any of the images.
PIP_DISTRIBUTIONS = DOCKER_IMAGES.keys()
# Some distributions have packages created for them.
# Although CentOS 7 is not a supported client distribution, the client
# packages get built, and can be tested.
PACKAGED_CLIENT_DISTRIBUTIONS = (
'centos-7',
'ubuntu-14.04',
'ubuntu-16.04',
)
class ScriptBuilder(TypeDispatcher):
"""
Convert an Effect sequence to a shell script.
The effects are those defined in flocker.provision._effect and
flocker.provision._ssh._model.
"""
def __init__(self, effects):
self.lines = [
'#!/bin/bash',
'set -ex'
]
TypeDispatcher.__init__(self, {
Run: self.perform_run,
Sudo: perform_sudo,
Put: perform_put,
Comment: self.perform_comment,
Sequence: perform_sequence
})
perform(self, effects)
# Add blank line to terminate script with a newline
self.lines.append('')
self._script = '\n'.join(self.lines)
@sync_performer
def perform_run(self, dispatcher, intent):
"""
For Run effects, add the command line.
"""
self.lines.append(intent.command)
@sync_performer
def perform_comment(self, dispatcher, intent):
"""
For Comment effects, prefix the comment with #
"""
self.lines.append('# ' + intent.comment)
def script(self):
"""
Return the generated shell script.
"""
return self._script
def make_script_file(directory, effects):
"""
Create a shell script file from a sequence of effects.
:param bytes directory: The directory in which to create the script.
:param Effect effects: An effect which contains the commands,
typically a Sequence containing multiple commands.
:return: The base filename of the script.
"""
builder = ScriptBuilder(effects)
fd, filename = tempfile.mkstemp(dir=directory, text=True)
os.write(fd, builder.script())
os.close(fd)
os.chmod(filename, 0555)
return os.path.basename(filename)
class DockerContainer:
"""
Run commands in a Docker container.
"""
def __init__(self, image):
# Getting Docker to work correctly on any client platform can
# be tricky. See
# http://doc-dev.clusterhq.com/gettinginvolved/client-testing.html
# for details.
params = docker.utils.kwargs_from_env(assert_hostname=False)
self.docker = docker.Client(version='1.16', **params)
self.image = image
@classmethod
def from_distribution(cls, distribution):
"""
Create a DockerContainer with a given distribution name.
"""
return cls(DOCKER_IMAGES[distribution].image)
def start(self):
"""
Start the Docker container.
"""
# On OS X, shared volumes must be in /Users, so use the home directory.
# See 'Mount a host directory as a data volume' at
# https://docs.docker.com/userguide/dockervolumes/
self.tmpdir = tempfile.mkdtemp(dir=os.path.expanduser('~'))
try:
self.docker.pull(self.image)
container = self.docker.create_container(
image=self.image, command='/bin/bash', tty=True,
volumes=['/mnt/script'],
)
self.container_id = container[u'Id']
self.docker.start(
self.container_id,
binds={
self.tmpdir: {'bind': '/mnt/script', 'ro': True},
}
)
except:
os.rmdir(self.tmpdir)
raise
def stop(self):
"""
Stop the Docker container.
"""
self.docker.stop(self.container_id)
self.docker.remove_container(self.container_id)
shutil.rmtree(self.tmpdir)
def execute(self, commands, out=sys.stdout):
"""
Execute a set of commands in the Docker container.
The set of commands provided to one call of ``execute`` will be
executed in a single session. This means commands will see the
environment created by previous commands.
The output of the commands is sent to the ``out`` file object,
which must have a ``write`` method.
:param Effect commands: An Effect containing the commands to run,
probably a Sequence of Effects, one for each command to run.
:param out: Where to send command output. Any object with a
``write`` method.
:return int: The exit status of the commands. If all commands
succeed, this will be zero. If any command fails, this will
be non-zero.
"""
script_file = make_script_file(self.tmpdir, commands)
script = '/mnt/script/{}'.format(script_file)
session = self.docker.exec_create(self.container_id, script)
session_id = session[u'Id']
for output in self.docker.exec_start(session, stream=True):
out.write(output)
return self.docker.exec_inspect(session_id)[u'ExitCode']
class RunOptions(Options):
description = "Run the client tests."
optParameters = [
['distribution', None, None,
'The target distribution. '
'One of {}. With --pip, one of {}'.format(
', '.join(PACKAGED_CLIENT_DISTRIBUTIONS),
', '.join(PIP_DISTRIBUTIONS))],
['branch', None, None, 'Branch to grab packages from'],
['flocker-version', None, None, 'Flocker version to install'],
['build-server', None, 'http://build.clusterhq.com/',
'Base URL of build server for package downloads'],
]
optFlags = [
['pip', None, 'Install using pip rather than packages.'],
]
synopsis = ('Usage: run-client-tests --distribution <distribution> '
'[--branch <branch>] [--flocker-version <version>] '
'[--build-server <url>] [--pip]')
def __init__(self, top_level):
"""
:param FilePath top_level: The top-level of the flocker repository.
"""
Options.__init__(self)
self.top_level = top_level
def postOptions(self):
if self['distribution'] is None:
raise UsageError("Distribution required.")
self['package_source'] = PackageSource(
version=self['flocker-version'],
branch=self['branch'],
build_server=self['build-server'],
)
def get_steps_pip(distribution, package_source=PackageSource()):
"""
Get commands to run for testing client pip installation.
:param bytes distribution: The distribution the node is running.
:param PackageSource package_source: The source from which to install the
package.
:return: An ``Effect`` to pass to a ``Dispatcher`` that supports
``Sequence``, ``Run``, ``Sudo``, ``Comment``, and ``Put``.
"""
if distribution not in PIP_DISTRIBUTIONS:
raise UsageError(
"Distribution %r not supported. Available distributions: %s"
% (distribution, ', '.join(PIP_DISTRIBUTIONS)))
package_manager = DOCKER_IMAGES[distribution].package_manager
virtualenv = 'flocker-client'
steps = [
ensure_minimal_setup(package_manager),
task_cli_pip_prereqs(package_manager),
task_cli_pip_install(virtualenv, package_source),
cli_pip_test(virtualenv, package_source),
]
return steps
def get_steps_pkg(distribution, package_source=PackageSource()):
"""
Get commands to run for testing client package installation.
:param bytes distribution: The distribution the node is running.
:param PackageSource package_source: The source from which to install the
package.
:return: An ``Effect`` to pass to a ``Dispatcher`` that supports
``Sequence``, ``Run``, ``Sudo``, ``Comment``, and ``Put``.
"""
if distribution not in PACKAGED_CLIENT_DISTRIBUTIONS:
raise UsageError(
"Distribution %r not supported. Available distributions: %s"
% (distribution, ', '.join(PACKAGED_CLIENT_DISTRIBUTIONS)))
package_manager = DOCKER_IMAGES[distribution].package_manager
steps = [
ensure_minimal_setup(package_manager),
task_cli_pkg_install(distribution, package_source),
]
return steps
def run_steps(container, steps, out=sys.stdout):
"""
Run a sequence of commands in a container.
:param DockerContainer container: Container in which to run the test.
    :param Effect steps: Steps to run the test.
:param file out: Stream to write output.
:return int: Exit status of steps.
"""
container.start()
try:
for commands in steps:
status = container.execute(commands, out)
if status != 0:
return status
finally:
container.stop()
return 0
def main(args, base_path, top_level):
"""
:param list args: The arguments passed to the script.
:param FilePath base_path: The executable being run.
:param FilePath top_level: The top-level of the Flocker repository.
"""
options = RunOptions(top_level=top_level)
try:
options.parseOptions(args)
except UsageError as e:
sys.exit("%s: %s\n" % (base_path.basename(), e))
distribution = options['distribution']
package_source = options['package_source']
if options['pip']:
get_steps = get_steps_pip
else:
get_steps = get_steps_pkg
steps = get_steps(distribution, package_source)
container = DockerContainer.from_distribution(distribution)
status = run_steps(container, steps)
sys.exit(status)
| 1.929688 | 2 |
util/unit_test/bc_test/bc_normal_testing.py | ajupatatero/neurasim | 0 | 12795129 | <gh_stars>0
import numpy as np
from engines.phi.torch.flow import *
from numpy.core import shape_base
from scipy.signal.filter_design import _vratio
from util.plot.plot_tools import *
from analysis.mesure import *
from neurasim import *
from util.operations.field_operate import *
out_dir='./'
Nx=10
Ny=10
Ly=10
Lx=10
dx=Lx/Nx
dy=Ly/Ny
xD=5
D=4
DOMAIN = Domain(x=Nx, y=Ny, boundaries=[OPEN, STICKY], bounds=Box[0:Lx, 0:Ly])
velocity = ((DOMAIN.staggered_grid(Noise(batch=1)) * 0 )+1) *(1,1)
BOX_MASK = HardGeometryMask(Box[xD-D:xD+D, Ly/2-D:Ly/2+D]) >> DOMAIN.scalar_grid()
FORCES_MASK = HardGeometryMask(Sphere([xD, Ly/2], radius=D/2 )) >> DOMAIN.scalar_grid()
zoom_pos=[xD + D -1, xD + D +1,
Ly/2 + D -1, Ly/2 + D +1 ]
vl = -1
vr = 2
vb = -1
vt = 2
vl = 0
vr = 0
vb = 0
vt = 0
#FUNCTION
[ [edge_hl_x, edge_hl_y], [edge_hr_x, edge_hr_y], [edge_vb_x, edge_vb_y], [edge_vt_x, edge_vt_y] ] = get_exterior_edges(FORCES_MASK)
[ [edge_hl_x, edge_hl_y], [edge_hr_x, edge_hr_y], [edge_vb_x, edge_vb_y], [edge_vt_x, edge_vt_y] ] = exterior_edge_to_interior_edge(edge_hl_x=edge_hl_x,
edge_hl_y=edge_hl_y, edge_hr_x=edge_hr_x, edge_hr_y=edge_hr_y, edge_vb_x=edge_vb_x, edge_vb_y=edge_vb_y, edge_vt_x=edge_vt_x, edge_vt_y=edge_vt_y)
velocity = to_numpy(velocity)
u = velocity[0]
v = velocity[1]
plot_field(FORCES_MASK,
plot_type=['surface'],
options=[ ['limits', [0, 1]],
#['full_zoom', True],
['zoom_position', zoom_pos],
['aux_contourn', False],
['grid', True],
['edges', [ [edge_hl_x, edge_hl_y], [edge_hr_x, edge_hr_y], [edge_vb_x, edge_vb_y], [edge_vt_x, edge_vt_y] ]],
['velocity', velocity],
],
Lx=Lx, Ly=Ly, dx=dx, dy=dy,
lx='x', ly='y',lbar='mask',
save=True, filename=f'{out_dir}normal_test_in.png')
#Set normal velocities
u[edge_hl_x, edge_hl_y] = vl
u[edge_hr_x +1, edge_hr_y] = vr
v[edge_vb_x, edge_vb_y] = vb
v[edge_vt_x, edge_vt_y + 1] = vt
#Pass to phiflow
velocity = to_staggered([u,v], Lx, Ly)
# vel= torch.zeros((1, 2, Nx+1, Ny+1))
# vel[0,0,:,:] = torch.from_numpy(u)
# vel[0,1,:,:] = torch.from_numpy(v)
# velocity_init = DOMAIN.staggered_grid(1)
# tensor_U = math.wrap(vel.cuda(), 'batch,vector,x,y')
# lower = math.wrap(velocity_init.box.lower)
# upper = math.wrap(velocity_init.box.upper)
# extrapolation = math.extrapolation.ZERO
# tensor_U_unstack = unstack_staggered_tensor(tensor_U)
# velocity = StaggeredGrid(tensor_U_unstack, geom.Box(lower, upper), extrapolation)
# END FUNCTION
####################################################
######################################################
#FINAL FUNCTION
##################################
out_dir='./'
Nx=10
Ny=10
Ly=10
Lx=10
dx=Lx/Nx
dy=Ly/Ny
xD=5
D=4
DOMAIN = Domain(x=Nx, y=Ny, boundaries=[OPEN, STICKY], bounds=Box[0:Lx, 0:Ly])
velocity = ((DOMAIN.staggered_grid(Noise(batch=1)) * 0 )+1) *(1,1)
BOX_MASK = HardGeometryMask(Box[xD-D:xD+D, Ly/2-D:Ly/2+D]) >> DOMAIN.scalar_grid()
FORCES_MASK = HardGeometryMask(Sphere([xD, Ly/2], radius=D/2 )) >> DOMAIN.scalar_grid()
zoom_pos=[xD + D -1, xD + D +1,
Ly/2 + D -1, Ly/2 + D +1 ]
velocity = set_normal_bc(FORCES_MASK, velocity = velocity, velocity_BC = [0,0,0,0])
plot_field(FORCES_MASK,
plot_type=['surface'],
options=[ ['limits', [0, 1]],
#['full_zoom', True],
['zoom_position', zoom_pos],
['aux_contourn', False],
['grid', True],
['edges', [ [edge_hl_x, edge_hl_y], [edge_hr_x, edge_hr_y], [edge_vb_x, edge_vb_y], [edge_vt_x, edge_vt_y] ]],
['velocity', velocity],
],
Lx=Lx, Ly=Ly, dx=dx, dy=dy,
lx='x', ly='y',lbar='mask',
save=True, filename=f'{out_dir}normal_test_out.png')
| 1.601563 | 2 |
shooting.py | hcbh96/SC_Coursework_1 | 2 | 12795130 | from scipy.optimize import root
from scipy.integrate import solve_ivp
import numpy as np
def phase_cond(u, dudt):
res= np.array(dudt(0,u))
return res
def periodicity_cond(u, dudt, T):
# integrate the ode for time t from starting position U
res = np.array(u - solve_ivp(dudt, (0, T), u).y[:,-1])
return res
def g(state_vec, dudt):
T = state_vec[-1]
u=state_vec[0:-1]
res = np.concatenate((
periodicity_cond(u, dudt, T),
phase_cond(u, dudt),
))
return res
def shooting(state_vec, dudt):
"""
A function that returns an estimation of the starting condition of a BVP
subject to the first order differential equations
USAGE: shooting(state_vec, dudt)
INPUTS:
state_vec : ndarray
the state_vector to solve, [u0...uN,T] the final argument should be
the expected period of the limit cycle or the period of the limit
cycle.
dudt : ndarray
containing the first order differtial equations to be solved
----------
OUTPUT : an ndarray containing the corrected initial values for the limit cycle.
NOTE: This function is currently having issues when used with npc however it
is also currently passing all of its tests
"""
sol = root(g, state_vec, args=(dudt,), method="lm")
if sol["success"] == True:
print("Root finder found the solution u={} after {} function calls"
.format(sol["x"], sol["nfev"]))
return sol["x"]
else:
print("Root finder failed with error message: {}".format(sol["message"]))
return None
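
# Minimal usage sketch (illustrative; the ODE below is an assumption, not part
# of this module). The Hopf normal form with beta = 1 has a limit cycle of
# radius 1 and period 2*pi, which gives a concrete system to exercise
# shooting() on.
if __name__ == "__main__":
    def hopf(t, u, beta=1.0):
        u1, u2 = u
        du1dt = beta * u1 - u2 - u1 * (u1 ** 2 + u2 ** 2)
        du2dt = u1 + beta * u2 - u2 * (u1 ** 2 + u2 ** 2)
        return [du1dt, du2dt]

    # Initial guess: point (1.2, 0.0) near the orbit and period estimate 6.0
    print(shooting(np.array([1.2, 0.0, 6.0]), hopf))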
| 2.78125 | 3 |
utils/deployment/link_cover.py | jstavr/SDN_Project | 0 | 12795131 | <reponame>jstavr/SDN_Project
#!/usr/bin/python
import networkx as nx
import time, random, copy
topology_file = open("data/topology.data")
G=nx.read_edgelist(topology_file)
topology_file.close()
#Only numbers are end terminals
end_terminals = [unicode(str(x)) for x in range(1, 100)]
links = G.edges()
shortest_paths = nx.shortest_path(G)
rule_lists = []
for source in end_terminals:
for destination in end_terminals:
try:
if destination != source:
rule_lists.append(shortest_paths[source][destination])
except:
pass
start_packets = len(rule_lists)
result_rule_lists = []
st = time.time()
# Remove end links
new_links = copy.deepcopy(links)
for link in links:
(source, destination) = link
if source in end_terminals or destination in end_terminals:
new_links.remove(link)
links = new_links
# Min-Set-Cover
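# Greedy randomized set cover: repeatedly pick a random shortest path, keep it
# (append it to result_rule_lists) only if it covers at least one
# switch-to-switch link that is not covered yet, and remove every link it
# traverses from the remaining set, until all internal links are covered.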
while len(links) > 0:
lucky_index = random.randint(0, len(rule_lists)-1)
lucky_path = rule_lists[lucky_index]
# Break the path into links, excluding the end terminals
for index in xrange(1, len(lucky_path)-2):
new = False
if (lucky_path[index],lucky_path[index+1]) in links:
new = True
result_rule_lists.append(lucky_path)
# Rules that have been hit already
if new:
for index2 in xrange(1, len(lucky_path)-2):
                if (lucky_path[index2],lucky_path[index2+1]) in links:
                    links.remove((lucky_path[index2],lucky_path[index2+1]))
break
end_packets = len(result_rule_lists)
en = time.time()
print "Compression: Start=%d, End=%d, Ratio=%f, Time=%f" % (start_packets, end_packets, float(end_packets)/start_packets, en-st)
# Now we start to filter CSV file to select ones that we're interested in.
# Step 1: Build "DNS"
hosts_file = open('data/hosts.txt')
ips_file = open('data/ips.txt')
ip_to_host_dict = {}
for ip in ips_file:
host = hosts_file.readline().split('.')[0]
ip_to_host_dict[ip.strip()] = host
hosts_file.close()
ips_file.close()
# Step 2: Build test pairs
test_pairs = []
for rule_list in result_rule_lists:
test_pairs.append(("swan-ap%s"%rule_list[0], "swan-ap%s"%rule_list[-1]))
# Step 3: Filter
results_file = open('data/output.csv')
new_results_file = open('data/output_filtered.csv', 'w')
for line in results_file:
components = line.split(',')
source = components[1].strip('\"')
try:
destination = ip_to_host_dict[components[2].strip('\"')]
except:
continue
if (source, destination) in test_pairs or (destination, source) in test_pairs:
new_results_file.write(line)
results_file.close()
new_results_file.close()
| 2.5 | 2 |
agrirouter/onboarding/exceptions.py | DKE-Data/agrirouter-sdk-python | 0 | 12795132 | class AgriRouuterBaseException(Exception):
_message = ...
def __init__(self, message=None):
if not message:
message = self._message
self.message = message
class WrongCertificationType(AgriRouuterBaseException):
_message = "Wrong Certification type. Use onboarding.enums.CertificationTypes values instead."
class WrongGateWay(AgriRouuterBaseException):
_message = "Wrong Gate Way Id. Use onboarding.enums.GateWays values instead."
class RequestNotSigned(AgriRouuterBaseException):
_message = """
Request does not contain signature header. Please sign the request with request.sign() method.\n
Details on: https://docs.my-agrirouter.com/agrirouter-interface-documentation/latest/
integration/onboarding.html#signing-requests
"""
class BadMessagingResult(AgriRouuterBaseException):
_message = "Messaging Request failed"
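
# Illustrative usage (calling code is hypothetical; shows the default message
# versus an explicit one passed to the base __init__):
#
#   raise WrongGateWay()                                    # uses the class _message
#   raise BadMessagingResult("Messaging request failed with HTTP status 400")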
| 2.6875 | 3 |
unnoise/medianV1.py | Krown0s/TraitementsImages | 0 | 12795133 | <reponame>Krown0s/TraitementsImages<filename>unnoise/medianV1.py
# -*- encoding: utf-8 -*-
from copy import deepcopy
from numpy import *
"""
Denoising by median filtering
<image> the image to denoise
returns the denoised image
"""
def median(image):
nombrePixel = 0
newimg = deepcopy(image)
    # Iterate over the rows of pixels
for x in range(newimg.shape[0]):
        # Iterate over the columns of pixels
for y in range(newimg.shape[1]):
newimg[x][y] = getPixels(newimg, x, y)
if(image[x][y] == newimg[x][y]):
nombrePixel = nombrePixel + 1
    #print("Number of unmodified pixels : " + str(nombrePixel))
    #print("Percentage of unmodified pixels : " + str(image.shape[0]*image.shape[1]/nombrePixel) + str(" %"))
return newimg
# Take the 8 pixels surrounding the pixel we are working on,
# sort these pixel values in increasing order,
# and then take the median of this ordered list.
"""
image : the image we are working on
x : x position of the pixel
y : y position of the pixel
return : the median of the 8 pixel values surrounding the pixel we are working on
"""
def getPixels(image, x, y):
liste = zeros(8, float)
    if image.shape[0] - 1 >= x - 1 >= 0 and image.shape[1] - 1 >= y - 1 >= 0:
liste[0] = image[x - 1][y - 1]
if image.shape[0] - 1 >= x - 1 >= 0:
liste[1] = image[x - 1][y]
if image.shape[0] - 1 >= x - 1 >= 0 and image.shape[1] - 1 >= y + 1 >= 0:
liste[2] = image[x - 1][y + 1]
if image.shape[1] - 1 >= y - 1 >= 0:
liste[3] = image[x][y - 1]
if image.shape[1] - 1 >= y + 1 >= 0:
liste[4] = image[x][y + 1]
if image.shape[0] - 1 >= x + 1 >= 0 and image.shape[1] - 1 >= y - 1 >= 0:
liste[5] = image[x + 1][y - 1]
if image.shape[0] - 1 >= x + 1 >= 0:
liste[6] = image[x + 1][y]
if image.shape[0] - 1 >= x + 1 >= 0 and image.shape[1] - 1 >= y + 1 >= 0:
liste[7] = image[x + 1][y + 1]
    # Sort the list
liste = sorted(liste)
mediane = (liste[3] + liste[4]) / 2.0
    # Return the median (8 pixels, so the average of elements 3 and 4)
return mediane | 2.984375 | 3 |
src/zope/pluggableauth/plugins/httpplugins.py | zopefoundation/zope.pluggableauth | 2 | 12795134 | <gh_stars>1-10
##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""PAS plugins related to HTTP
"""
__docformat__ = "reStructuredText"
import base64
from zope.interface import implementer, Interface
from zope.publisher.interfaces.http import IHTTPRequest
from zope.schema import TextLine
from zope.pluggableauth import interfaces
try:
unicode
except NameError:
# Py3: define unicode
unicode = str
class IHTTPBasicAuthRealm(Interface):
"""HTTP Basic Auth Realm
Represents the realm string that is used during basic HTTP authentication
"""
realm = TextLine(title=u'Realm',
description=u'HTTP Basic Authentication Realm',
required=True,
default=u'Zope')
@implementer(interfaces.ICredentialsPlugin, IHTTPBasicAuthRealm)
class HTTPBasicAuthCredentialsPlugin(object):
realm = 'Zope'
protocol = 'http auth'
def extractCredentials(self, request):
"""Extracts HTTP basic auth credentials from a request.
First we need to create a request that contains some credentials.
>>> from zope.publisher.browser import TestRequest
>>> request = TestRequest(
... environ={'HTTP_AUTHORIZATION': u'Basic bWdyOm1ncnB3'})
Now create the plugin and get the credentials.
>>> plugin = HTTPBasicAuthCredentialsPlugin()
>>> from pprint import pprint
>>> pprint(plugin.extractCredentials(request))
{'login': u'mgr', 'password': u'<PASSWORD>'}
Make sure we return `None`, if no authentication header has been
specified.
>>> print(plugin.extractCredentials(TestRequest()))
None
Also, this plugin can *only* handle basic authentication.
>>> request = TestRequest(environ={'HTTP_AUTHORIZATION': 'foo bar'})
>>> print(plugin.extractCredentials(TestRequest()))
None
This plugin only works with HTTP requests.
>>> from zope.publisher.base import TestRequest
>>> print(plugin.extractCredentials(TestRequest('/')))
None
According to RFC 2617, password can contain one or more colons;
user ID can't contain any colon.
>>> from zope.publisher.browser import TestRequest as BrowserRequest
>>> request = BrowserRequest('/',
... environ={'HTTP_AUTHORIZATION': u'Basic bWdyOm1ncnB3OndpdGg6Y29sb24='})
>>> pprint(plugin.extractCredentials(request))
{'login': u'mgr', 'password': u'<PASSWORD>'}
"""
if not IHTTPRequest.providedBy(request):
return None
if request._auth:
if request._auth.lower().startswith(u'basic '):
credentials = request._auth.split()[-1]
if isinstance(credentials, unicode):
# No encoding needed, should be base64 string anyways.
credentials = credentials.encode()
login, password = base64.b64decode(credentials).split(b':', 1)
return {'login': login.decode('utf-8'),
'password': password.decode('utf-8')}
return None
def challenge(self, request):
"""Issues an HTTP basic auth challenge for credentials.
The challenge is issued by setting the appropriate response headers.
To illustrate, we'll create a plugin:
>>> plugin = HTTPBasicAuthCredentialsPlugin()
The plugin adds its challenge to the HTTP response.
>>> from zope.publisher.browser import TestRequest
>>> request = TestRequest()
>>> response = request.response
>>> plugin.challenge(request)
True
>>> response._status
401
>>> response.getHeader('WWW-Authenticate', literal=True)
'basic realm="Zope"'
Notice that the realm is quoted, as per RFC 2617.
The plugin only works with HTTP requests.
>>> from zope.publisher.base import TestRequest
>>> request = TestRequest('/')
>>> response = request.response
>>> print(plugin.challenge(request))
False
"""
if not IHTTPRequest.providedBy(request):
return False
request.response.setHeader("WWW-Authenticate",
'basic realm="%s"' % self.realm,
literal=True)
request.response.setStatus(401)
return True
def logout(self, request):
"""Always returns False as logout is not supported by basic auth.
>>> plugin = HTTPBasicAuthCredentialsPlugin()
>>> from zope.publisher.browser import TestRequest
>>> plugin.logout(TestRequest())
False
"""
return False
| 2.0625 | 2 |
cmscalibration/workflows/gridkacalibration.py | mxsg/CMS-Model-Calibration | 0 | 12795135 | import logging
import os
from datetime import datetime
import pandas as pd
from analysis import calibrationreport, resource_usage, cpuefficiency, sampling
from analysis import jobreportanalysis
from analysis import jobreportcleaning
from analysis import nodeanalysis
from analysis.demandextraction import FilteredJobClassifier, JobDemandExtractor
from data.dataset import Metric
from exporters.datasetexport import ReferenceWalltimeExporter
from importers.dataset_import import DatasetImporter
from importers.gridkadata import GridKaNodeDataImporter, ColumnCoreUsageImporter, \
CPUEfficiencyReferenceImporter
from importers.jmimport import JMImporter
from importers.jobcounts import JobCountImporter
from importers.wmaimport import SummarizedWMAImporter
from interfaces.workflow import CalibrationWorkflow
from merge import job_node
from merge.merge_datasets import UnionDatasetMerge
from merge.reportmatching import JobReportMatcher
from utils import config, visualization
from utils import report as rp
from utils.report import ReportBuilder
from workflows.workflowutils import export_job_counts, export_parameters
# Todo Split this up into smaller methods
class GridKaCalibration(CalibrationWorkflow):
def __init__(self):
self.report = ReportBuilder(base_path=config.outputDirectory, filename='calibration-report.md')
def run(self):
self.report.append('# GridKa Calibration Run')
time_now = datetime.now().strftime('%Y-%m-%d, %H:%M:%S')
self.report.append('at {}'.format(time_now))
logging.info("Model Calibration run at {}".format(time_now))
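        # Pipeline overview: import the JobMonitoring and WMArchive job reports,
        # match and merge them, clean the merged reports, attach node/performance
        # data, compare jobslot usage and CPU efficiency against the GridKa
        # reference monitoring, extract per-job-type resource demands and export
        # the calibrated model parameters.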
start_date = pd.to_datetime(config.startDate)
end_date = pd.to_datetime(config.endDate)
day_count = (end_date - start_date).days
self.report.append()
self.report.append("Start date: {} \nEnd date: {}".format(start_date, end_date))
# Import data sets
##################
# Timezone correction correct for errors in timestamps of JobMonitoring data
dataset_importer = DatasetImporter(
JMImporter(timezone_correction='Europe/Berlin', hostname_suffix='.gridka.de', with_files=False))
jm_dataset = dataset_importer.import_dataset(config.inputPaths['jm'], start_date, end_date)
wm_dataset = DatasetImporter(SummarizedWMAImporter(with_files=False)) \
.import_dataset(config.inputPaths['wma'], start_date, end_date)
cached_matches = None
use_caching = config.cacheDir is not None
if use_caching:
match_cache_file = os.path.join(config.cacheDir, 'jm-wma-matches.csv')
if os.path.isfile(match_cache_file):
try:
cached_matches = pd.read_csv(match_cache_file,
usecols=[jm_dataset.df.index.name, wm_dataset.df.index.name])
logging.info(
"Loaded {} matches from match cache {}!".format(cached_matches.shape[0], match_cache_file))
except Exception:
logging.warning("No match cache found at {}!".format(match_cache_file))
# Match Jobmonitoring and WMArchive job reports
matcher = JobReportMatcher(timestamp_tolerance=10, time_grouping_freq='D')
matches = matcher.match_reports(jm_dataset, wm_dataset, use_files=False, previous_matches=cached_matches)
if use_caching:
match_cache_file = os.path.join(config.cacheDir, 'jm-wma-matches.csv')
logging.info("Writing {} matches to file {}".format(matches.shape[0], match_cache_file))
matches.to_csv(match_cache_file)
jobs_dataset = UnionDatasetMerge().merge_datasets(matches, jm_dataset, wm_dataset, left_index='UniqueID',
right_index='wmaid', left_suffix='jm', right_suffix='wma')
jobs_dataset.df = jobreportcleaning.clean_job_reports(jobs_dataset.df)
# Import node information
nodes = GridKaNodeDataImporter().import_file(config.inputPaths['nodeInfo'])
nodes = nodeanalysis.add_performance_data(nodes, simulated_cores=config.workflowOptions['coreSimulationMethod'],
thread_rate_method=config.workflowOptions['threadPerformanceMethod'])
# Match jobs to nodes
matched_jobs = job_node.match_jobs_to_node(jobs_dataset.df, nodes)
matched_jobs = jobreportanalysis.add_missing_node_info(matched_jobs, nodes)
jm_dataset.df = jobreportanalysis.add_performance_data(matched_jobs)
job_data = jm_dataset.df
# Import additional information for usage of GridKa site
core_importer = ColumnCoreUsageImporter()
core_df = core_importer.import_file(config.inputPaths['coreUsage'], start_date, end_date)
cms_avg_cores = core_df['cms'].mean()
avg_jobslots_reports = self.draw_jobslot_usage(jm_dataset, core_df)
# Visualize number of jobs in calibration report
job_counts_reference_summary = self.add_jobs_over_time(start_date, end_date)
# CPU Efficiencies
self.add_cpu_efficiency(job_data, start_date, end_date)
# Compute calibration parameters
node_types = nodeanalysis.extract_node_types(nodes)
# Scale the resource environment with both information from the job reports and the Pilot jobs
scaled_nodes_pilots = nodeanalysis.scale_site_by_jobslots(node_types, cms_avg_cores)
scaled_nodes_reports = nodeanalysis.scale_site_by_jobslots(node_types, avg_jobslots_reports)
type_split_cols = config.workflowOptions['typeSplitCols']
split_types = None
if 'splitTypes' in config.workflowOptions:
split_types = list(map(tuple, config.workflowOptions['splitTypes']))
job_classifier = FilteredJobClassifier(type_split_cols, split_types=split_types)
job_groups = job_classifier.split(job_data)
job_demand_extractor = JobDemandExtractor(self.report, equal_width=False, bin_count=60,
cutoff_quantile=0.95,
overflow_agg=config.workflowOptions['overflowAggregationMethod'],
additional_job_options=config.workflowOptions['additionalJobOptions'],
drop_overflow=config.workflowOptions.get('dropOverflow', False))
demands, partitions = job_demand_extractor.extract_job_demands(job_groups)
export_parameters('parameters_slots_from_pilots', scaled_nodes_pilots, demands)
export_parameters('parameters_slots_from_reports', scaled_nodes_reports, demands)
# Sample half of the reports, fix random state for reproducibility
reports_train, reports_test = sampling.split_samples(job_data, frac=0.5, random_state=38728)
sampling_report = ReportBuilder(base_path=config.outputDirectory, filename='calibration-report-sampled.md',
resource_dir='figures-sampling')
job_groups_train = job_classifier.split(reports_train)
job_demand_extractor.report = sampling_report
sample_demands, sample_partitions = job_demand_extractor.extract_job_demands(job_groups_train)
sampling_report.write()
export_parameters('parameters_slots_from_pilots_sampled0.5', scaled_nodes_pilots, sample_demands)
export_parameters('parameters_slots_from_reports_sampled0.5', scaled_nodes_reports, sample_demands)
# Export job throughputs from analyzed jobs
jobs_from_reports = job_data.copy()
jobs_from_reports[Metric.JOB_TYPE.value] = jobs_from_reports[Metric.JOB_TYPE.value].fillna('unknown')
job_counts_reports = jobs_from_reports.groupby(Metric.JOB_TYPE.value).size().reset_index()
job_counts_reports.columns = ['type', 'count']
job_counts_reports['throughput_day'] = job_counts_reports['count'].divide(day_count)
export_job_counts(job_counts_reports, 'parameters_slots_from_pilots',
config.outputPaths['jobCountReports'])
# Export walltimes
walltime_path = os.path.join(config.outputDirectory, 'parameters_slots_from_pilots',
config.outputPaths['walltimeReference'])
ReferenceWalltimeExporter().export_to_json_file(partitions, walltime_path)
# Write jobs to report
calibrationreport.add_jobs_report_section(jm_dataset, self.report)
# Write report out to disk
self.report.write()
def draw_jobslot_usage(self, jm_dataset, core_reference):
jobslot_timeseries = resource_usage.calculate_jobslot_usage(jm_dataset.df, jm_dataset.start, jm_dataset.end,
start_ts_col=Metric.START_TIME.value,
end_ts_col=Metric.STOP_TIME.value,
slot_col=Metric.USED_CORES.value)
jobslots_from_reports = jobslot_timeseries['totalSlots'].resample('s').pad().resample('H').mean()
avg_jobslots_reports = jobslots_from_reports.mean()
fig, axes = calibrationreport.multiple_jobslot_usage(
{'Extracted from job reports': jobslots_from_reports,
'Allocated to GridKa CMS pilots': core_reference['cms']})
self.report.add_figure(fig, axes, 'jobslot_usage_reference')
return avg_jobslots_reports
def add_jobs_over_time(self, start_date, end_date):
self.report.append("## Number of jobs completed over time")
job_counts = JobCountImporter().import_file(config.inputPaths['jobCountsReference'], start_date, end_date)
fig, axes = calibrationreport.jobtypes_over_time_df(job_counts, 'date', 'type')
self.report.add_figure(fig, axes, 'job_counts_reference', tight_layout=False)
job_counts_reference_summary = job_counts.groupby('type')['count'].sum().reset_index()
job_counts_reference_summary.columns = ['type', 'count']
job_counts_reference_summary['share'] = job_counts_reference_summary['count'] / job_counts_reference_summary[
'count'].sum()
job_counts_reference_summary['throughput_day'] = job_counts_reference_summary['count'].divide(
(end_date - start_date).days)
self.report.append("Job throughput from CMS Dashboard:")
self.report.append()
self.report.append_paragraph(rp.CodeBlock().append(job_counts_reference_summary.to_string()))
return job_counts_reference_summary
def add_cpu_efficiency(self, job_data, start_date, end_date):
efficiency_reference = CPUEfficiencyReferenceImporter(col='cms', output_column='value').import_file(
config.inputPaths['CPUEfficiencyReference'], start_date, end_date)
efficiency_timeseries, reports_average = cpuefficiency.calculate_efficiencies(job_data, freq='12h')
reference = efficiency_reference['value'].resample('12h').mean().rename('reference')
reference_mean = efficiency_reference['value'].mean()
from_reports = efficiency_timeseries.rename('measured')
# cpu_eff = pd.concat([reference, from_reports], axis=1)
fig, axes = visualization.draw_efficiency_timeseries(
{'extracted from job reports': from_reports, 'reference from GridKa monitoring': reference})
axes.set_ylabel("CPU efficiency (CPU time / wall time)")
axes.legend(['Extracted from job reports (average {:.2f}%)'.format(reports_average * 100),
'Reference from GridKa monitoring (average {:.2f}%)'.format(reference_mean * 100)])
axes.set_title("CPU efficiencies ({}, {} days)".format(config.runName, (end_date - start_date).days))
axes.set_xlim(left=start_date, right=(end_date - pd.Timedelta('1 days')))
fig.set_size_inches(8, 4.5)
self.report.add_figure(fig, axes, 'cpu_efficiencies_reference')
self.report.append("Efficiency from job reports: {} ".format(reports_average))
self.report.append("Efficiency from GridKa: {}".format(reference_mean))
| 2 | 2 |
src/hello_world.py | DreamMazeTeam/BootSrc | 0 | 12795136 | <gh_stars>0
import module1
module1.hello() | 1.007813 | 1 |
django/wx/apps.py | yanhuaijun/learngit | 2 | 12795137 | from django.apps import AppConfig
class WxConfig(AppConfig):
name = 'wx'
| 1.210938 | 1 |
02_P12.py | wiphoo/computer_programing_101 | 0 | 12795138 | #!/usr/bin/env python3
inputList = input()
abcList = sorted( [ int( x ) for x in inputList.split() ] )
print( 'abcList = {}'.format( abcList ) )
print( 'YES' if abcList[0] + abcList[1] > abcList[2] else 'NO' )
| 3.875 | 4 |
scripts/AQP_byPath/collect_results.py | TranslatorIIPrototypes/robo-commons | 1 | 12795139 | <reponame>TranslatorIIPrototypes/robo-commons<filename>scripts/AQP_byPath/collect_results.py
from neo4j.v1 import GraphDatabase
import redis
import json
import os
import time
def get_hits(b_id,atype,edge_name,neo4j):
cypher = f'MATCH (a:{atype})-[:{edge_name}]-(b {{id:"{b_id}"}}) RETURN distinct a.id'
rlist = run_query(cypher,neo4j)
return [ r['a.id'] for r in rlist ]
def run_query(cypherquery,driver):
start = time.time()
with driver.session() as session:
results = session.run(cypherquery)
end = time.time()
lr = list(results)
print (f' {end-start}, {len(lr)}')
return lr
def get_redis():
redis_host = '127.0.0.1'
redis_port = 6767
redis_db = 4
redis_driver = redis.StrictRedis(host=redis_host, port=int(redis_port), db=int(redis_db))
return redis_driver
def create_neo4j():
url = 'bolt://127.0.0.1:7687'
driver = GraphDatabase.driver(url, auth=("neo4j", os.environ['NEO4J_PASSWORD']))
return driver
def get_topologies(b_id,max_graphs,atype,predicting_edge,red):
key=f'MatchingTopologies({b_id},{max_graphs})'
print(key)
all_topologies = json.loads(red.get(key))
return [tuple(x) for x in all_topologies]
#return all_topologies
def assess_topology(topology,b_id,max_graphs,atype,predicting_edge,hits,redis):
rkey = f'MatchResults({b_id},{max_graphs},{topology})'
value = redis.get(rkey)
if value is None:
print(rkey)
exit()
all_results = json.loads(value)
retres = []
for one_result in all_results:
a_s = set(one_result['results'])
nhits = len( a_s.intersection(hits) )
recall = nhits / len(hits)
precision = nhits / len(a_s)
retres.append( (one_result['nodes'],one_result['edges'],len(a_s),nhits,recall,precision) )
return retres
def go(b_id, atype, predicting_edge, max_graphs):
#Currently, I have not added atype or predicting edge to the redis keys, but I should
red = get_redis()
neo = create_neo4j()
hits = get_hits(b_id,atype,predicting_edge,neo)
#for the given b, graphsize, what topologies do I need to check on?
topologies = get_topologies(b_id,max_graphs,atype,predicting_edge,red)
with open(f'results_{b_id}_{max_graphs}','w') as rfile, open(f'defs_{b_id}_{max_graphs}','w') as gfile:
rfile.write('query_id\tNumberResults\tNumberTruePostitives\tRecall\tPrecision\n')
gfile.write('query_id\ttopology\tnodes\tedges\n')
query_id = 0
for topology in topologies:
results = assess_topology(topology,b_id,max_graphs,atype,predicting_edge,hits,red)
for res in results:
#yuck yuck clean up
gfile.write(f'{query_id}\t{topology}\t{res[0]}\t{res[1]}\n')
rfile.write(f'{query_id}\t{res[2]}\t{res[3]}\t{res[4]}\t{res[5]}\n')
query_id += 1
if __name__ == '__main__':
go('MONDO:0005136','chemical_substance','treats',100000)
| 2.15625 | 2 |
cli/prog/cluster.py | rushk014/manager | 19 | 12795140 | <gh_stars>10-100
import click
from prog.cli import cli
from prog.cli import create
from prog.cli import delete
from prog.cli import request
from prog.cli import set
from prog.cli import unset
from prog.cli import show
from prog import client
from prog import output
from prog import utils
@show.group("cluster", invoke_without_command=True)
@click.pass_obj
@click.pass_context
def show_cluster(ctx, data):
"""Show clusters."""
if ctx.invoked_subcommand is not None:
return
clusters = data.client.list("experimental/cluster", "cluster")
if clusters is None:
return
columns = ("name", "api_server", "api_port", "username")
output.list(columns, clusters)
@show_cluster.command()
@click.argument("id_or_name")
@click.pass_obj
def detail(data, id_or_name):
"""Show cluster detail."""
cluster = data.client.show("experimental/cluster", "cluster", id_or_name)
if not cluster:
return
columns = ("name", "api_server", "api_port", "username")
output.show(columns, cluster)
@create.command("cluster")
@click.argument('name')
@click.argument('server')
@click.argument('port', type=int)
@click.argument('username')
@click.pass_obj
def create_cluster(data, name, server, port, username):
"""Create cluster."""
pass1 = click.prompt("User Password", hide_input=True)
pass2 = click.prompt("Confirm User Password", hide_input=True)
if pass1 != pass2:
click.echo("Passwords do not match")
return
cfg = {"name": name, "api_server": server, "api_port": port,
"username": username, "password": <PASSWORD>}
data.client.create("experimental/cluster", {"config": cfg})
@set.command("cluster")
@click.argument('name')
@click.option('--server', help="Set API server.")
@click.option('--port', type=int, help="Set API server port.")
@click.option('-u', '--username', help="Set username.")
@click.option("-p", "--password", is_flag=True, help="Set password.")
@click.pass_obj
def set_cluster(data, name, server, port, username, password):
"""Set cluster configuration."""
cfg = {"name": name}
doit = False
if server is not None:
doit = True
cfg["api_server"] = server
if port is not None:
doit = True
cfg["api_port"] = port
if username is not None:
doit = True
cfg["username"] = username
if password:
pass1 = click.prompt("Password", hide_input=True)
pass2 = click.prompt("Confirm Password", hide_input=True)
if pass1 != pass2:
click.echo("Passwords do not match")
return
cfg["password"] = <PASSWORD>
doit = True
if doit:
data.client.config("experimental/cluster", name, {"config": cfg})
else:
click.echo("Please specify configurations to be set.")
@delete.command("cluster")
@click.argument('name')
@click.pass_obj
def delete_cluster(data, name):
"""Delete cluster."""
data.client.delete("experimental/cluster", name)
@request.group('cluster')
@click.pass_obj
def request_cluster(data):
"""Request cluster"""
@request_cluster.command("test")
@click.argument('name')
@click.argument('server')
@click.argument('port', type=int)
@click.argument('username')
@click.pass_obj
def test_cluster(data, name, server, port, username):
"""test cluster."""
pass1 = click.prompt("User Password", hide_input=True)
cfg = {"name": name, "api_server": server, "api_port": port,
"username": username, "password": <PASSWORD>}
data.client.request("experimental/debug", "cluster", "test", {"test": cfg})
| 2.421875 | 2 |
scripts/searchbibs.py | acumb/SearchRefs | 0 | 12795141 | <reponame>acumb/SearchRefs<gh_stars>0
#!/usr/env python
"""Search bibliography database.
The search string takes the format of field:[field name] followed by a list of
terms to search for in that field. The implied boolean operator between these
terms is "and". To use other boolean operators with the same field, the
field:[field name] must be repeated after the operator. More generally, the
boolean operators "and", "or", and "not" can be placed before the field
specifier. The "and" operator is again the default between field specifiers.
The same field names are used to specify the desired output.
Example:
searchbibs.py -s field:keywords anillin and not field:keywords review -t title year author keywords
"""
import argparse
import bibtools.bib as btl
def main():
args = parse_args()
bibliography = btl.Bibliography(btl.BIB_DIRECTORY)
search_string = btl.SearchString(args.search_string)
bibliography.match_and_print_fields(search_string, args.terms)
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'-s',
type=str,
nargs='+',
dest='search_string',
help='Search string')
parser.add_argument(
'-t',
type=str,
nargs='+',
default=['title', 'year', 'author', 'annote'],
dest='terms',
help='Terms to print')
return parser.parse_args()
if __name__ == '__main__':
main()
| 3.546875 | 4 |
integration/tests/integration/tests/smoke/instance.py | sapcc/trove | 1 | 12795142 |
from proboscis.asserts import assert_equal
from proboscis import test
from proboscis import before_class
from trove.common.utils import poll_until
from trove.tests.util import create_client
class InstanceGenerator(object):
def __init__(self, client, status=None, name=None, flavor=None,
account_id=None, created_at=None, databases=None, users=None,
volume_size=None):
self.client = client
self.status = status
self.name = name
self.flavor = flavor
self.account_id = account_id
self.databases = databases
self.users = users
self.volume_size = volume_size
self.id = None
def create_instance(self):
#make the call to create the instance
instance = self.client.instances.create(self.name, self.flavor,
self.volume_size, self.databases, self.users)
self.client.assert_http_code(200)
#verify we are in a build state
assert_equal(instance.status, "BUILD")
#pull out the ID
self.id = instance.id
return instance
def wait_for_build_to_finish(self):
        poll_until(lambda: self.client.instances.get(self.id),
lambda instance: instance.status != "BUILD",
time_out=600)
def get_active_instance(self):
        instance = self.client.instances.get(self.id)
self.client.assert_http_code(200)
#check the container name
assert_equal(instance.name, self.name)
#pull out volume info and verify
assert_equal(str(instance.volume_size), str(self.volume_size))
#pull out the flavor and verify
assert_equal(str(instance.flavor), str(self.flavor))
return instance
@test(groups=['smoke', 'positive'])
class CreateInstance(object):
@before_class
def set_up(self):
client = create_client(is_admin=False)
name = 'test_createInstance_container'
flavor = 1
volume_size = 1
db_name = 'test_db'
databases = [
{
"name": db_name
}
]
users = [
{
"name": "lite",
"password": "<PASSWORD>",
"databases": [{"name": db_name}]
}
]
#create the Instance
        instance = InstanceGenerator(client, name=name,
                flavor=flavor,
                volume_size=volume_size,
                databases=databases, users=users)
instance.create_instance()
#wait for the instance
instance.wait_for_build_to_finish()
#get the active instance
inst = instance.get_active_instance()
#list out the databases for our instance and verify the db name
dbs = client.databases.list(inst.id)
client.assert_http_code(200)
assert_equal(len(dbs), 1)
        assert_equal(dbs[0].name, db_name)
        client.instances.delete(inst.id)
client.assert_http_code(202)
| 2.53125 | 3 |
LC218.py | XinnWang/PythonLab | 0 | 12795143 | <gh_stars>0
buildings = [ [2,9,10], [3,7,15], [5,12,12], [15,20,10], [19,24,8] ]
#buildings = [[0,2,3],[2,5,3]]
# Build sweep events: [x, +height] for a left edge, [x, -height] for a right edge.
s = []
for e in buildings:
    s.append([e[0],e[2]])
    s.append([e[1],-e[2]])
# Sort by x; at the same x, larger signed heights (left edges, tallest first) come first.
s = sorted(s,key=lambda x:(x[0],-x[1]))
print(s)
res = []
h = [0]      # currently active heights (0 = ground level)
max_h = 0
for e in s:
    if e[1] < 0:
        h.remove(-e[1])    # right edge: its height is no longer active
    else:
        h.append(e[1])     # left edge: its height becomes active
    if max_h != max(h):    # a key point appears only when the running maximum changes
        max_h = max(h)
        res.append([e[0],max_h])
print(res)
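# For the buildings defined above, the final print(res) gives the expected skyline:
# [[2, 10], [3, 15], [7, 12], [12, 0], [15, 10], [20, 8], [24, 0]]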
| 2.875 | 3 |
Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/EXAMPLES/EDABIT/EARLIER/23_sales_season.py | okara83/Becoming-a-Data-Scientist | 0 | 12795144 | """
Sales Season
A retailer is having a store-wide "buy 3, get 1 free" sale.
For legal reasons, they can't charge their customers $0 for an article so a discount is applied to all products instead.
For example, if a customer gets three products a, b and c:
Product A Product B Product C
$15.99 $23.50 $10.75
She gets the cheapest one for free, so she ends up paying $15.99 + $23.50 = $39.49, but what her receipt says is:
Product A: $15.99 − Special Discount = $12.57
Product B: $23.50 − Special Discount = $18.47
Product C: $10.75 − Special Discount = $8.45
Total: $39.49
Create a function that takes in a list of prices for a customer's shopping cart and returns the prices
with the discount applied. Round all prices to the cent.
Examples
discount([2.99, 5.75, 3.35, 4.99]) ➞ [2.47, 4.74, 2.76, 4.12]
# First product for free.
discount([10.75, 11.68]) ➞ [10.75, 11.68]
# No discounts applied.
discount([68.74, 17.85, 55.99]) ➞ [60.13, 15.62, 48.98]
# Second product for free.
Notes
The discount is calculated in percentual terms.
The deal applies to sets of three products: if a customer gets 9 products, she will get the three cheapest ones for free, but if she gets 10 or 11 products, she will still get three for free. Buying a 12th product would get her a fourth free product.
No cart splitting allowed.
"""
def discount(lst):
    if len(lst) < 3:
        return lst
    e = []
    a = len(lst) // 3                    # one free item per full set of three
    b = sorted(lst, reverse=True)
    c = sum(b)                           # full price of the cart
    d = sum(b[:-a])                      # amount actually paid (cheapest `a` items free)
    for i in lst:
        e.append(round(i * (d / c), 2))  # spread the discount proportionally over all items
    return e
#discount([10.75, 11.68]) #, [10.75, 11.68])
#discount([68.74, 17.85, 55.99]) #, [60.13, 15.62, 48.98])
discount([5.75, 14.99, 36.83, 12.15, 25.30, 5.75, 5.75, 5.75]) #, [5.16, 13.45, 33.06, 10.91, 22.71, 5.16, 5.16, 5.16])
#discount([14.15, 9.45, 3.72, 5.99, 8.13, 8.85]) #, [11.42, 7.63, 3.0, 4.83, 6.56, 7.14])
#discount([2.98, 0.25, 1.25]) #, [2.81, 0.24, 1.18])
#discount([9.20]) #, [9.20]) | 4.15625 | 4 |
tests/test_load.py | yaniv-aknin/fafalytics | 1 | 12795145 | <filename>tests/test_load.py
import testutils
from fafalytics import loader
from fafalytics.pyutils import Query as Q
def test_load():
dump = testutils.testdata / 'dump.json'
with open(dump) as handle:
resolver = loader.GameJsonResolver.from_handle(handle)
games = {game['id']: game for game in resolver}
game = games['14395861']
assert Q('playerStats/0/id')(game) == '28030229'
| 2.53125 | 3 |
manager.py | contrailnfx/Controller | 0 | 12795146 | <reponame>contrailnfx/Controller<filename>manager.py
import sys
import os
from orchestrator.app import app
from flask_sqlalchemy import SQLAlchemy
from flask.ext.migrate import Migrate, MigrateCommand
from flask.ext.script import Manager, Server
host = app.config['HOST']
db_url = app.config['SQLALCHEMY_DATABASE_URI']
db = SQLAlchemy(app)
# default to dev config
env = os.environ.get('WEBAPP_ENV', 'dev')
app = app()
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command("server", Server())
manager.add_command('db', MigrateCommand)
@manager.shell
def make_shell_context():
return dict(
app=app,
db=db,
)
if __name__ == "__main__":
manager.run()
| 2.28125 | 2 |
src/graph_transpiler/webdnn/frontend/chainer/functions/noise.py | steerapi/webdnn | 1 | 12795147 | <gh_stars>1-10
import chainer
from webdnn.frontend.chainer.converter import ChainerConverter
from webdnn.util import console
@ChainerConverter.register_handler("Dropout")
def _convert_dropout(converter: ChainerConverter, c_op: "chainer.functions.Dropout"):
console.warning("[ChainerConverter] Dropout is ignored")
x = converter.get_variable(c_op.inputs[0])
converter.set_variable(c_op.outputs[0](), x)
# noinspection PyUnusedLocal
@ChainerConverter.register_handler("Gaussian")
def _convert_gaussian(converter: ChainerConverter, c_op: "chainer.functions.Gaussian"):
# TODO
raise NotImplementedError("[ChainerConverter] Gaussian is not supported")
# noinspection PyUnusedLocal
@ChainerConverter.register_handler("SimplifiedDropconnect")
def _convert_simplified_dropconnect(converter: ChainerConverter, c_op: "chainer.functions.SimplifiedDropconnect"):
# TODO
raise NotImplementedError("[ChainerConverter] SimplifiedDropconnect is not supported")
# noinspection PyUnusedLocal
@ChainerConverter.register_handler("Zoneout")
def _convert_zoneout(converter: ChainerConverter, c_op: "chainer.functions.Zoneout"):
# TODO
raise NotImplementedError("[ChainerConverter] Zoneout is not supported")
| 2.5 | 2 |
mini-projects/aic-15-image-search-engine/utils/utils.py | elbertsoftware/SpringboardAIC | 3 | 12795148 | <gh_stars>1-10
import numpy as np
from scipy.spatial.distance import hamming, cosine, euclidean
def compare_color(color_vectors,
uploaded_image_colors,
ids):
'''
Comparing color vectors of closest images from the training set with a color vector of a uploaded image (query image).
:param color_vectors: color features vectors of closest training set images to the uploaded image
:param uploaded_image_colors: color vector of the uploaded image
:param ids: indices of training images being closest to the uploaded image (output from a distance function)
'''
color_distances = []
for i in range(len(color_vectors)):
color_distances.append(euclidean(color_vectors[i], uploaded_image_colors))
    # The 15 is just an arbitrary cutoff I chose; return as many results as you need/want.
return ids[np.argsort(color_distances)[:15]]
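# Illustrative end-to-end use of these helpers (variable names below are assumptions,
# not part of this module): shortlist candidates by feature distance first, then
# re-rank the shortlist by color similarity.
#
#   ids = cosine_distance(train_vectors, query_vector, top_n=50)
#   best = compare_color(train_colors[ids], query_colors, ids)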
def cosine_distance(training_set_vectors, query_vector, top_n=50):
'''
Calculates cosine distances between query image (vector) and all training set images (vectors).
:param training_set_vectors: numpy Matrix, vectors for all images in the training set
:param query_vector: numpy vector, query image (new image) vector
:param top_n: integer, number of closest images to return
'''
distances = []
for i in range(len(training_set_vectors)): #For Cifar 10 -> 50k images
distances.append(cosine(training_set_vectors[i], query_vector[0]))
return np.argsort(distances)[:top_n]
def hamming_distance(training_set_vectors, query_vector, top_n=50):
'''
Calculates hamming distances between query image (vector) and all training set images (vectors).
:param training_set_vectors: numpy Matrix, vectors for all images in the training set
:param query_vector: numpy vector, query image (new image) vector
:param top_n: Integer, number of closest images to return
'''
distances = []
for i in range(len(training_set_vectors)): #For Cifar 10 -> 50k images
distances.append(hamming(training_set_vectors[i], query_vector[0]))
return np.argsort(distances)[:top_n]
def sparse_accuracy(true_labels, predicted_labels):
'''
Calculates accuracy of a model based on softmax outputs.
:param true_labels: numpy array, real labels of each sample. Example: [1, 2, 1, 0, 0]
:param predicted_labels: numpy matrix, softmax probabilities. Example [[0.2, 0.1, 0.7], [0.9, 0.05, 0.05]]
'''
assert len(true_labels) == len(predicted_labels)
correct = 0
for i in range(len(true_labels)):
if np.argmax(predicted_labels[i]) == true_labels[i]:
correct += 1
return correct / len(true_labels) | 2.90625 | 3 |
shell/lab0.py | utep-cs-systems-courses/os-shell-janguiano4 | 0 | 12795149 | #<NAME>
#CS4375: OS
#3 methods
from os import read #from os library import read method
next = 0
limit = 0
#This method calls read to fill a buffer, and gets one char at at time
def my_getChar(): #define = creating method : use method, loops, tryCatch
global next, limit #initializing 2 variables
if next == limit:
next = 0
limit = read(0,1000) #
if limit == 0:
return "EOF"
    if next < len(limit) -1: #Check to make sure limit[next] won't go out of bounds.
c = chr(limit[next])#converting from ascii to char
next += 1
return c
else:
return "EOF"
def my_getLine():
global next
global limit
line = ""
char = my_getChar()
while (char != '' and char != "EOF"):
line += char
char = my_getChar()
next = 0
limit = 0
return line
def my_readLines():
numLines = 0
inLine = my_getLine()
while len(inLine):
numLines += 1
print(f"### Line {numLines}: <{str(inLine)}> ###\n")
inLine = my_getLine()
print(f"EOF after {numLines}\n")
| 3.6875 | 4 |
app/__init__.py | sebbesiren/game-api | 0 | 12795150 | <reponame>sebbesiren/game-api
from eve import Eve
from app.domain import DOMAIN
from app.services import Services
from config import Config
def create_app():
app = Eve()
Services.init_services(app=app, domain=DOMAIN)
return app
| 1.609375 | 2 |